Merge remote-tracking branch 'lsk/v3.10/topic/juno' into linux-linaro-lsk
[firefly-linux-kernel-4.4.55.git] / drivers / net / ethernet / broadcom / bnx2x / bnx2x_sp.c
1 /* bnx2x_sp.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2011-2013 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Vladislav Zolotarov
17  *
18  */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
27 #include "bnx2x.h"
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_sp.h"
30
31 #define BNX2X_MAX_EMUL_MULTI            16
32
33 /**** Exe Queue interfaces ****/
34
/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @bp:         driver handle
 * @o:          pointer to the object
 * @exe_len:    length of an execution chunk (in command-length units)
 * @owner:      pointer to the owner
 * @validate:   validate function pointer
 * @remove:     remove function pointer
 * @optimize:   optimize function pointer
 * @exec:       execute function pointer
 * @get:        get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->owner         = owner;

	/* Owner specific callbacks */
	o->validate      = validate;
	o->remove        = remove;
	o->optimize      = optimize;
	o->execute       = exec;
	o->get           = get;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
	   exe_len);
}
76
/* Release a single execution queue element. */
static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}
83
84 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
85 {
86         struct bnx2x_exeq_elem *elem;
87         int cnt = 0;
88
89         spin_lock_bh(&o->lock);
90
91         list_for_each_entry(elem, &o->exe_queue, link)
92                 cnt++;
93
94         spin_unlock_bh(&o->lock);
95
96         return cnt;
97 }
98
99 /**
100  * bnx2x_exe_queue_add - add a new element to the execution queue
101  *
102  * @bp:         driver handle
103  * @o:          queue
104  * @cmd:        new command to add
105  * @restore:    true - do not optimize the command
106  *
107  * If the element is optimized or is illegal, frees it.
108  */
109 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
110                                       struct bnx2x_exe_queue_obj *o,
111                                       struct bnx2x_exeq_elem *elem,
112                                       bool restore)
113 {
114         int rc;
115
116         spin_lock_bh(&o->lock);
117
118         if (!restore) {
119                 /* Try to cancel this element queue */
120                 rc = o->optimize(bp, o->owner, elem);
121                 if (rc)
122                         goto free_and_exit;
123
124                 /* Check if this request is ok */
125                 rc = o->validate(bp, o->owner, elem);
126                 if (rc) {
127                         DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
128                         goto free_and_exit;
129                 }
130         }
131
132         /* If so, add it to the execution queue */
133         list_add_tail(&elem->link, &o->exe_queue);
134
135         spin_unlock_bh(&o->lock);
136
137         return 0;
138
139 free_and_exit:
140         bnx2x_exe_queue_free_elem(bp, elem);
141
142         spin_unlock_bh(&o->lock);
143
144         return rc;
145
146 }
147
148 static inline void __bnx2x_exe_queue_reset_pending(
149         struct bnx2x *bp,
150         struct bnx2x_exe_queue_obj *o)
151 {
152         struct bnx2x_exeq_elem *elem;
153
154         while (!list_empty(&o->pending_comp)) {
155                 elem = list_first_entry(&o->pending_comp,
156                                         struct bnx2x_exeq_elem, link);
157
158                 list_del(&elem->link);
159                 bnx2x_exe_queue_free_elem(bp, elem);
160         }
161 }
162
163 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
164                                                  struct bnx2x_exe_queue_obj *o)
165 {
166
167         spin_lock_bh(&o->lock);
168
169         __bnx2x_exe_queue_reset_pending(bp, o);
170
171         spin_unlock_bh(&o->lock);
172
173 }
174
/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:                 driver handle
 * @o:                  queue
 * @ramrod_flags:       flags
 *
 * Moves up to exe_chunk_len worth of commands from exe_queue to the
 * pending_comp list and hands them to the owner's execute() callback.
 * Returns 1 if a previous chunk is still pending, execute()'s return
 * value otherwise (negative on error, 0 when done with no outstanding
 * completion). (Atomicity is ensured using the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/*
	 * Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/*
	 * Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/*
			 * Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_move_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check: nothing fit into the chunk - nothing to execute */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/*
		 *  In case of an error return the commands back to the queue
		 *  and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/*
		 * If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}
259
/* Lockless check that both the execution and pending lists are empty.
 * The mb() keeps the two list_empty() reads ordered against the
 * spacer-based move in bnx2x_exe_queue_step(), which guarantees the two
 * lists are never simultaneously empty mid-move.
 */
static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}
269
/* Allocate a zeroed execution queue element. GFP_ATOMIC because callers
 * may run in contexts where sleeping is not allowed. Returns NULL on
 * allocation failure.
 */
static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}
276
277 /************************ raw_obj functions ***********************************/
278 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
279 {
280         return !!test_bit(o->state, o->pstate);
281 }
282
/* Clear the object's pending-state bit, with barriers on both sides so
 * other CPUs observe the state transition in order.
 */
static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
289
/* Mark the object's state bit pending, with barriers on both sides.
 * NOTE(review): the smp_mb__*_clear_bit() helpers are used here around a
 * set_bit() - on this kernel they double as generic atomic-bitop barriers;
 * confirm against the target kernel version before changing.
 */
static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
296
/**
 * bnx2x_state_wait - wait until the given bit(state) is cleared
 *
 * @bp:         device handle
 * @state:      state which is to be cleared
 * @pstate:     state buffer (bitmap holding the state bit)
 *
 * Polls roughly every 1-2ms; gives up after ~5000 iterations (20x more
 * on emulation). Returns 0 on success, -EIO if the chip panicked while
 * waiting, -EBUSY on timeout.
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;


	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 2000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
340
/* Wait for the raw object's pending-state bit to clear. */
static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}
345
346 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
347 /* credit handling callbacks */
348 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
349 {
350         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
351
352         WARN_ON(!mp);
353
354         return mp->get_entry(mp, offset);
355 }
356
357 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
358 {
359         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
360
361         WARN_ON(!mp);
362
363         return mp->get(mp, 1);
364 }
365
366 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
367 {
368         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
369
370         WARN_ON(!vp);
371
372         return vp->get_entry(vp, offset);
373 }
374
375 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
376 {
377         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
378
379         WARN_ON(!vp);
380
381         return vp->get(vp, 1);
382 }
383
384 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
385 {
386         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
387         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
388
389         if (!mp->get(mp, 1))
390                 return false;
391
392         if (!vp->get(vp, 1)) {
393                 mp->put(mp, 1);
394                 return false;
395         }
396
397         return true;
398 }
399
400 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
401 {
402         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
403
404         return mp->put_entry(mp, offset);
405 }
406
407 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
408 {
409         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
410
411         return mp->put(mp, 1);
412 }
413
414 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
415 {
416         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
417
418         return vp->put_entry(vp, offset);
419 }
420
421 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
422 {
423         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
424
425         return vp->put(vp, 1);
426 }
427
428 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
429 {
430         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
431         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
432
433         if (!mp->put(mp, 1))
434                 return false;
435
436         if (!vp->put(vp, 1)) {
437                 mp->get(mp, 1);
438                 return false;
439         }
440
441         return true;
442 }
443
/**
 * bnx2x_get_n_elements - copy registry entries into a caller buffer
 *
 * @bp:     device handle
 * @o:      vlan_mac object whose registry (o->head) is traversed
 * @n:      maximum number of elements to copy
 * @base:   destination buffer
 * @stride: extra bytes skipped after each copied element
 * @size:   number of bytes copied per element
 *
 * The destination pointer advances by @stride + @size per element, i.e.
 * @stride is padding on top of the element itself.
 *
 * NOTE(review): the return value is counter * ETH_ALEN regardless of
 * @size - apparently tuned for MAC lists; confirm before reusing with
 * other element sizes.
 */
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
				int n, u8 *base, u8 stride, u8 size)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	u8 *next = base;
	int counter = 0;

	/* traverse list */
	list_for_each_entry(pos, &o->head, link) {
		if (counter < n) {
			memcpy(next, &pos->u, size);
			counter++;
			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
			   counter, next);
			next += stride + size;

		}
	}
	return counter * ETH_ALEN;
}
464
465 /* check_add() callbacks */
466 static int bnx2x_check_mac_add(struct bnx2x *bp,
467                                struct bnx2x_vlan_mac_obj *o,
468                                union bnx2x_classification_ramrod_data *data)
469 {
470         struct bnx2x_vlan_mac_registry_elem *pos;
471
472         DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
473
474         if (!is_valid_ether_addr(data->mac.mac))
475                 return -EINVAL;
476
477         /* Check if a requested MAC already exists */
478         list_for_each_entry(pos, &o->head, link)
479                 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
480                     (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
481                         return -EEXIST;
482
483         return 0;
484 }
485
486 static int bnx2x_check_vlan_add(struct bnx2x *bp,
487                                 struct bnx2x_vlan_mac_obj *o,
488                                 union bnx2x_classification_ramrod_data *data)
489 {
490         struct bnx2x_vlan_mac_registry_elem *pos;
491
492         DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
493
494         list_for_each_entry(pos, &o->head, link)
495                 if (data->vlan.vlan == pos->u.vlan.vlan)
496                         return -EEXIST;
497
498         return 0;
499 }
500
501 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
502                                     struct bnx2x_vlan_mac_obj *o,
503                                    union bnx2x_classification_ramrod_data *data)
504 {
505         struct bnx2x_vlan_mac_registry_elem *pos;
506
507         DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
508            data->vlan_mac.mac, data->vlan_mac.vlan);
509
510         list_for_each_entry(pos, &o->head, link)
511                 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
512                     (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
513                                   ETH_ALEN)) &&
514                     (data->vlan_mac.is_inner_mac ==
515                      pos->u.vlan_mac.is_inner_mac))
516                         return -EEXIST;
517
518         return 0;
519 }
520
521
522 /* check_del() callbacks */
523 static struct bnx2x_vlan_mac_registry_elem *
524         bnx2x_check_mac_del(struct bnx2x *bp,
525                             struct bnx2x_vlan_mac_obj *o,
526                             union bnx2x_classification_ramrod_data *data)
527 {
528         struct bnx2x_vlan_mac_registry_elem *pos;
529
530         DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
531
532         list_for_each_entry(pos, &o->head, link)
533                 if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
534                     (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
535                         return pos;
536
537         return NULL;
538 }
539
540 static struct bnx2x_vlan_mac_registry_elem *
541         bnx2x_check_vlan_del(struct bnx2x *bp,
542                              struct bnx2x_vlan_mac_obj *o,
543                              union bnx2x_classification_ramrod_data *data)
544 {
545         struct bnx2x_vlan_mac_registry_elem *pos;
546
547         DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
548
549         list_for_each_entry(pos, &o->head, link)
550                 if (data->vlan.vlan == pos->u.vlan.vlan)
551                         return pos;
552
553         return NULL;
554 }
555
556 static struct bnx2x_vlan_mac_registry_elem *
557         bnx2x_check_vlan_mac_del(struct bnx2x *bp,
558                                  struct bnx2x_vlan_mac_obj *o,
559                                  union bnx2x_classification_ramrod_data *data)
560 {
561         struct bnx2x_vlan_mac_registry_elem *pos;
562
563         DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
564            data->vlan_mac.mac, data->vlan_mac.vlan);
565
566         list_for_each_entry(pos, &o->head, link)
567                 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
568                     (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
569                              ETH_ALEN)) &&
570                     (data->vlan_mac.is_inner_mac ==
571                      pos->u.vlan_mac.is_inner_mac))
572                         return pos;
573
574         return NULL;
575 }
576
577 /* check_move() callback */
578 static bool bnx2x_check_move(struct bnx2x *bp,
579                              struct bnx2x_vlan_mac_obj *src_o,
580                              struct bnx2x_vlan_mac_obj *dst_o,
581                              union bnx2x_classification_ramrod_data *data)
582 {
583         struct bnx2x_vlan_mac_registry_elem *pos;
584         int rc;
585
586         /* Check if we can delete the requested configuration from the first
587          * object.
588          */
589         pos = src_o->check_del(bp, src_o, data);
590
591         /*  check if configuration can be added */
592         rc = dst_o->check_add(bp, dst_o, data);
593
594         /* If this classification can not be added (is already set)
595          * or can't be deleted - return an error.
596          */
597         if (rc || !pos)
598                 return false;
599
600         return true;
601 }
602
/* check_move() callback that unconditionally fails - installed on objects
 * that do not support the MOVE command.
 */
static bool bnx2x_check_move_always_err(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}
611
612
613 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
614 {
615         struct bnx2x_raw_obj *raw = &o->raw;
616         u8 rx_tx_flag = 0;
617
618         if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
619             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
620                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
621
622         if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
623             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
624                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
625
626         return rx_tx_flag;
627 }
628
629
/**
 * bnx2x_set_mac_in_nig - program or clear an LLH CAM MAC entry in the NIG
 *
 * @bp:       device handle
 * @add:      true to program the MAC, false to only disable the line
 * @dev_addr: MAC address to program (used only when @add)
 * @index:    LLH CAM line to use
 *
 * Only takes effect in switch-independent (SI) or AFEX multi-function
 * modes; silently ignores out-of-range CAM lines.
 */
void bnx2x_set_mac_in_nig(struct bnx2x *bp,
			  bool add, unsigned char *dev_addr, int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
		return;

	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
			 (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		/* Pack the MAC: bytes 2-5 in word 0, bytes 0-1 in word 1 */
		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] <<  8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	/* Enable (or disable, on delete) the CAM line */
	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
660
661 /**
662  * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
663  *
664  * @bp:         device handle
665  * @o:          queue for which we want to configure this rule
666  * @add:        if true the command is an ADD command, DEL otherwise
667  * @opcode:     CLASSIFY_RULE_OPCODE_XXX
668  * @hdr:        pointer to a header to setup
669  *
670  */
671 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
672         struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
673         struct eth_classify_cmd_header *hdr)
674 {
675         struct bnx2x_raw_obj *raw = &o->raw;
676
677         hdr->client_id = raw->cl_id;
678         hdr->func_id = raw->func_id;
679
680         /* Rx or/and Tx (internal switching) configuration ? */
681         hdr->cmd_general_data |=
682                 bnx2x_vlan_mac_get_rx_tx_flag(o);
683
684         if (add)
685                 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
686
687         hdr->cmd_general_data |=
688                 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
689 }
690
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:        connection id
 * @type:       BNX2X_FILTER_XXX_PENDING
 * @hdr:        pointer to header to setup
 * @rule_cnt:   number of rules in the ramrod data
 *
 * currently we always configure one rule and echo field to contain a CID and an
 * opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
	hdr->rule_cnt = (u8)rule_cnt;
}
709
710
711 /* hw_config() callbacks */
/**
 * bnx2x_set_one_mac_e2 - fill a single MAC classification rule (E2 flow)
 *
 * @bp:         device handle
 * @o:          vlan_mac object the rule belongs to
 * @elem:       command element holding the MAC and the ADD/DEL/MOVE cmd
 * @rule_idx:   index of the rule inside the ramrod data buffer
 * @cam_offset: unused by the E2 flow (kept for the common callback signature)
 *
 * For a MOVE command a second rule is appended that adds the MAC to the
 * target object's queue.
 */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/*
	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There is also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
	   (add ? "add" : "delete"), mac, raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);
	rule_entry->mac.inner_mac =
		cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
		rule_entry->mac.inner_mac =
			cpu_to_le16(elem->cmd_data.vlan_mac.
						u.mac.is_inner_mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
797
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp:         device handle
 * @o:          queue
 * @type:       BNX2X_FILTER_XXX_PENDING opcode type, echoed back on completion
 * @cam_offset: offset in cam memory
 * @hdr:        pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	/* One CAM entry per ramrod */
	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	/* NOTE(review): 0xff looks like a "no specific client" marker - confirm */
	hdr->client_id = cpu_to_le16(0xff);
	/* echo carries the SW CID plus the pending-op type for the completion */
	hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
}
821
/* Fill a single E1x CAM configuration entry: on ADD program the client
 * bit-vector, VLAN-filtering mode (@opcode) and the MAC; on DEL just mark
 * the entry invalidated.
 */
static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct bnx2x_raw_obj *r = &o->raw;
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = cpu_to_le16(vlan_id);

	if (add) {
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		SET_FLAG(cfg_entry->flags,
			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

		/* Set a MAC in a ramrod data */
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
}
847
/* Fill both the header and the single config-table entry of an E1x MAC
 * configuration ramrod.
 */
static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct bnx2x_raw_obj *raw = &o->raw;

	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
					 &config->hdr);
	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
			 (add ? "setting" : "clearing"),
			 mac, raw->cl_id, cam_offset);
}
864
865 /**
866  * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
867  *
868  * @bp:         device handle
869  * @o:          bnx2x_vlan_mac_obj
870  * @elem:       bnx2x_exeq_elem
871  * @rule_idx:   rule_idx
872  * @cam_offset: cam_offset
873  */
874 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
875                                   struct bnx2x_vlan_mac_obj *o,
876                                   struct bnx2x_exeq_elem *elem, int rule_idx,
877                                   int cam_offset)
878 {
879         struct bnx2x_raw_obj *raw = &o->raw;
880         struct mac_configuration_cmd *config =
881                 (struct mac_configuration_cmd *)(raw->rdata);
882         /*
883          * 57710 and 57711 do not support MOVE command,
884          * so it's either ADD or DEL
885          */
886         bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
887                 true : false;
888
889         /* Reset the ramrod data buffer */
890         memset(config, 0, sizeof(*config));
891
892         bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
893                                      cam_offset, add,
894                                      elem->cmd_data.vlan_mac.u.mac.mac, 0,
895                                      ETH_VLAN_FILTER_ANY_VLAN, config);
896 }
897
898 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
899                                   struct bnx2x_vlan_mac_obj *o,
900                                   struct bnx2x_exeq_elem *elem, int rule_idx,
901                                   int cam_offset)
902 {
903         struct bnx2x_raw_obj *raw = &o->raw;
904         struct eth_classify_rules_ramrod_data *data =
905                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
906         int rule_cnt = rule_idx + 1;
907         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
908         enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
909         bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
910         u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
911
912         /* Reset the ramrod data buffer for the first rule */
913         if (rule_idx == 0)
914                 memset(data, 0, sizeof(*data));
915
916         /* Set a rule header */
917         bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
918                                       &rule_entry->vlan.header);
919
920         DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
921                          vlan);
922
923         /* Set a VLAN itself */
924         rule_entry->vlan.vlan = cpu_to_le16(vlan);
925
926         /* MOVE: Add a rule that will add this MAC to the target Queue */
927         if (cmd == BNX2X_VLAN_MAC_MOVE) {
928                 rule_entry++;
929                 rule_cnt++;
930
931                 /* Setup ramrod data */
932                 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
933                                         elem->cmd_data.vlan_mac.target_obj,
934                                               true, CLASSIFY_RULE_OPCODE_VLAN,
935                                               &rule_entry->vlan.header);
936
937                 /* Set a VLAN itself */
938                 rule_entry->vlan.vlan = cpu_to_le16(vlan);
939         }
940
941         /* Set the ramrod data header */
942         /* TODO: take this to the higher level in order to prevent multiple
943                  writing */
944         bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
945                                         rule_cnt);
946 }
947
948 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
949                                       struct bnx2x_vlan_mac_obj *o,
950                                       struct bnx2x_exeq_elem *elem,
951                                       int rule_idx, int cam_offset)
952 {
953         struct bnx2x_raw_obj *raw = &o->raw;
954         struct eth_classify_rules_ramrod_data *data =
955                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
956         int rule_cnt = rule_idx + 1;
957         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
958         enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
959         bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
960         u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
961         u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
962
963
964         /* Reset the ramrod data buffer for the first rule */
965         if (rule_idx == 0)
966                 memset(data, 0, sizeof(*data));
967
968         /* Set a rule header */
969         bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
970                                       &rule_entry->pair.header);
971
972         /* Set VLAN and MAC themselvs */
973         rule_entry->pair.vlan = cpu_to_le16(vlan);
974         bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
975                               &rule_entry->pair.mac_mid,
976                               &rule_entry->pair.mac_lsb, mac);
977         rule_entry->pair.inner_mac =
978                 cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
979         /* MOVE: Add a rule that will add this MAC to the target Queue */
980         if (cmd == BNX2X_VLAN_MAC_MOVE) {
981                 rule_entry++;
982                 rule_cnt++;
983
984                 /* Setup ramrod data */
985                 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
986                                         elem->cmd_data.vlan_mac.target_obj,
987                                               true, CLASSIFY_RULE_OPCODE_PAIR,
988                                               &rule_entry->pair.header);
989
990                 /* Set a VLAN itself */
991                 rule_entry->pair.vlan = cpu_to_le16(vlan);
992                 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
993                                       &rule_entry->pair.mac_mid,
994                                       &rule_entry->pair.mac_lsb, mac);
995                 rule_entry->pair.inner_mac =
996                         cpu_to_le16(elem->cmd_data.vlan_mac.u.
997                                                 vlan_mac.is_inner_mac);
998         }
999
1000         /* Set the ramrod data header */
1001         /* TODO: take this to the higher level in order to prevent multiple
1002                  writing */
1003         bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1004                                         rule_cnt);
1005 }
1006
1007 /**
1008  * bnx2x_set_one_vlan_mac_e1h -
1009  *
1010  * @bp:         device handle
1011  * @o:          bnx2x_vlan_mac_obj
1012  * @elem:       bnx2x_exeq_elem
1013  * @rule_idx:   rule_idx
1014  * @cam_offset: cam_offset
1015  */
1016 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1017                                        struct bnx2x_vlan_mac_obj *o,
1018                                        struct bnx2x_exeq_elem *elem,
1019                                        int rule_idx, int cam_offset)
1020 {
1021         struct bnx2x_raw_obj *raw = &o->raw;
1022         struct mac_configuration_cmd *config =
1023                 (struct mac_configuration_cmd *)(raw->rdata);
1024         /*
1025          * 57710 and 57711 do not support MOVE command,
1026          * so it's either ADD or DEL
1027          */
1028         bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1029                 true : false;
1030
1031         /* Reset the ramrod data buffer */
1032         memset(config, 0, sizeof(*config));
1033
1034         bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1035                                      cam_offset, add,
1036                                      elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1037                                      elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1038                                      ETH_VLAN_FILTER_CLASSIFY, config);
1039 }
1040
/**
 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @bp:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into an account
 *
 * pointer to the cookie - should be given back in the next call to make
 * function handle the next element. If *ppos is set to NULL it will restart
 * the iterator. If returned *ppos == NULL this means that the last element
 * has been handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
			   struct bnx2x_vlan_mac_ramrod_params *p,
			   struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step: start from the head or advance past the last cookie */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL so the caller stops iterating */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' from the registry element's stored data */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command - restore means re-ADDing the element */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit so the element is re-used, not re-allocated */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}
1101
1102 /*
1103  * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1104  * pointer to an element with a specific criteria and NULL if such an element
1105  * hasn't been found.
1106  */
1107 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1108         struct bnx2x_exe_queue_obj *o,
1109         struct bnx2x_exeq_elem *elem)
1110 {
1111         struct bnx2x_exeq_elem *pos;
1112         struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1113
1114         /* Check pending for execution commands */
1115         list_for_each_entry(pos, &o->exe_queue, link)
1116                 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1117                               sizeof(*data)) &&
1118                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1119                         return pos;
1120
1121         return NULL;
1122 }
1123
1124 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1125         struct bnx2x_exe_queue_obj *o,
1126         struct bnx2x_exeq_elem *elem)
1127 {
1128         struct bnx2x_exeq_elem *pos;
1129         struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1130
1131         /* Check pending for execution commands */
1132         list_for_each_entry(pos, &o->exe_queue, link)
1133                 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1134                               sizeof(*data)) &&
1135                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1136                         return pos;
1137
1138         return NULL;
1139 }
1140
1141 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1142         struct bnx2x_exe_queue_obj *o,
1143         struct bnx2x_exeq_elem *elem)
1144 {
1145         struct bnx2x_exeq_elem *pos;
1146         struct bnx2x_vlan_mac_ramrod_data *data =
1147                 &elem->cmd_data.vlan_mac.u.vlan_mac;
1148
1149         /* Check pending for execution commands */
1150         list_for_each_entry(pos, &o->exe_queue, link)
1151                 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1152                               sizeof(*data)) &&
1153                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1154                         return pos;
1155
1156         return NULL;
1157 }
1158
1159 /**
1160  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1161  *
1162  * @bp:         device handle
1163  * @qo:         bnx2x_qable_obj
1164  * @elem:       bnx2x_exeq_elem
1165  *
1166  * Checks that the requested configuration can be added. If yes and if
1167  * requested, consume CAM credit.
1168  *
1169  * The 'validate' is run after the 'optimize'.
1170  *
1171  */
1172 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1173                                               union bnx2x_qable_obj *qo,
1174                                               struct bnx2x_exeq_elem *elem)
1175 {
1176         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1177         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1178         int rc;
1179
1180         /* Check the registry */
1181         rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1182         if (rc) {
1183                 DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1184                 return rc;
1185         }
1186
1187         /*
1188          * Check if there is a pending ADD command for this
1189          * MAC/VLAN/VLAN-MAC. Return an error if there is.
1190          */
1191         if (exeq->get(exeq, elem)) {
1192                 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1193                 return -EEXIST;
1194         }
1195
1196         /*
1197          * TODO: Check the pending MOVE from other objects where this
1198          * object is a destination object.
1199          */
1200
1201         /* Consume the credit if not requested not to */
1202         if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1203                        &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1204             o->get_credit(o)))
1205                 return -EINVAL;
1206
1207         return 0;
1208 }
1209
1210 /**
1211  * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1212  *
1213  * @bp:         device handle
1214  * @qo:         quable object to check
1215  * @elem:       element that needs to be deleted
1216  *
1217  * Checks that the requested configuration can be deleted. If yes and if
1218  * requested, returns a CAM credit.
1219  *
1220  * The 'validate' is run after the 'optimize'.
1221  */
1222 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1223                                               union bnx2x_qable_obj *qo,
1224                                               struct bnx2x_exeq_elem *elem)
1225 {
1226         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1227         struct bnx2x_vlan_mac_registry_elem *pos;
1228         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1229         struct bnx2x_exeq_elem query_elem;
1230
1231         /* If this classification can not be deleted (doesn't exist)
1232          * - return a BNX2X_EXIST.
1233          */
1234         pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1235         if (!pos) {
1236                 DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1237                 return -EEXIST;
1238         }
1239
1240         /*
1241          * Check if there are pending DEL or MOVE commands for this
1242          * MAC/VLAN/VLAN-MAC. Return an error if so.
1243          */
1244         memcpy(&query_elem, elem, sizeof(query_elem));
1245
1246         /* Check for MOVE commands */
1247         query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1248         if (exeq->get(exeq, &query_elem)) {
1249                 BNX2X_ERR("There is a pending MOVE command already\n");
1250                 return -EINVAL;
1251         }
1252
1253         /* Check for DEL commands */
1254         if (exeq->get(exeq, elem)) {
1255                 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1256                 return -EEXIST;
1257         }
1258
1259         /* Return the credit to the credit pool if not requested not to */
1260         if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1261                        &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1262             o->put_credit(o))) {
1263                 BNX2X_ERR("Failed to return a credit\n");
1264                 return -EINVAL;
1265         }
1266
1267         return 0;
1268 }
1269
/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:		device handle
 * @qo:		quable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, consumes a CAM credit on the destination object and returns
 * one on the source object.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/*
	 * Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(bp, src_o, dest_o,
			       &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
		return -EINVAL;
	}

	/*
	 * Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
		return -EINVAL;
	}

	/* Consume the destination credit first (if not requested not to);
	 * if the source credit cannot be returned afterwards, the
	 * destination credit is rolled back below.
	 */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}
1345
1346 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1347                                    union bnx2x_qable_obj *qo,
1348                                    struct bnx2x_exeq_elem *elem)
1349 {
1350         switch (elem->cmd_data.vlan_mac.cmd) {
1351         case BNX2X_VLAN_MAC_ADD:
1352                 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1353         case BNX2X_VLAN_MAC_DEL:
1354                 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1355         case BNX2X_VLAN_MAC_MOVE:
1356                 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1357         default:
1358                 return -EINVAL;
1359         }
1360 }
1361
1362 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1363                                   union bnx2x_qable_obj *qo,
1364                                   struct bnx2x_exeq_elem *elem)
1365 {
1366         int rc = 0;
1367
1368         /* If consumption wasn't required, nothing to do */
1369         if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1370                      &elem->cmd_data.vlan_mac.vlan_mac_flags))
1371                 return 0;
1372
1373         switch (elem->cmd_data.vlan_mac.cmd) {
1374         case BNX2X_VLAN_MAC_ADD:
1375         case BNX2X_VLAN_MAC_MOVE:
1376                 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1377                 break;
1378         case BNX2X_VLAN_MAC_DEL:
1379                 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1380                 break;
1381         default:
1382                 return -EINVAL;
1383         }
1384
1385         if (rc != true)
1386                 return -EINVAL;
1387
1388         return 0;
1389 }
1390
1391 /**
1392  * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes.
1393  *
1394  * @bp:         device handle
1395  * @o:          bnx2x_vlan_mac_obj
1396  *
1397  */
1398 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1399                                struct bnx2x_vlan_mac_obj *o)
1400 {
1401         int cnt = 5000, rc;
1402         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1403         struct bnx2x_raw_obj *raw = &o->raw;
1404
1405         while (cnt--) {
1406                 /* Wait for the current command to complete */
1407                 rc = raw->wait_comp(bp, raw);
1408                 if (rc)
1409                         return rc;
1410
1411                 /* Wait until there are no pending commands */
1412                 if (!bnx2x_exe_queue_empty(exeq))
1413                         usleep_range(1000, 2000);
1414                 else
1415                         return 0;
1416         }
1417
1418         return -EBUSY;
1419 }
1420
/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @cqe:	completion element from the event ring; cqe->message.error
 *		carries the firmware-reported status
 * @ramrod_flags: if RAMROD_CONT is set, the next pending chunk is executed
 *
 * Returns a negative value on failure, 1 if more commands are still
 * pending in the execution queue, and 0 when all work is done.
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}
1461
1462 /**
1463  * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1464  *
1465  * @bp:         device handle
1466  * @o:          bnx2x_qable_obj
1467  * @elem:       bnx2x_exeq_elem
1468  */
1469 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1470                                    union bnx2x_qable_obj *qo,
1471                                    struct bnx2x_exeq_elem *elem)
1472 {
1473         struct bnx2x_exeq_elem query, *pos;
1474         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1475         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1476
1477         memcpy(&query, elem, sizeof(query));
1478
1479         switch (elem->cmd_data.vlan_mac.cmd) {
1480         case BNX2X_VLAN_MAC_ADD:
1481                 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1482                 break;
1483         case BNX2X_VLAN_MAC_DEL:
1484                 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1485                 break;
1486         default:
1487                 /* Don't handle anything other than ADD or DEL */
1488                 return 0;
1489         }
1490
1491         /* If we found the appropriate element - delete it */
1492         pos = exeq->get(exeq, &query);
1493         if (pos) {
1494
1495                 /* Return the credit of the optimized command */
1496                 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1497                               &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1498                         if ((query.cmd_data.vlan_mac.cmd ==
1499                              BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1500                                 BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1501                                 return -EINVAL;
1502                         } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1503                                 BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1504                                 return -EINVAL;
1505                         }
1506                 }
1507
1508                 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1509                            (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1510                            "ADD" : "DEL");
1511
1512                 list_del(&pos->link);
1513                 bnx2x_exe_queue_free_elem(bp, pos);
1514                 return 1;
1515         }
1516
1517         return 0;
1518 }
1519
/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp:		device handle
 * @o:		vlan_mac object the CAM offset is taken from
 * @elem:	execution queue element being processed
 * @restore:	true for a RESTORE flow (an existing element is reused)
 * @re:		output: the prepared registry element
 *
 * prepare a registry element according to the current command request.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o,
	struct bnx2x_exeq_elem *elem,
	bool restore,
	struct bnx2x_vlan_mac_registry_elem **re)
{
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	struct bnx2x_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
		if (!reg_elem)
			return -ENOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/*
			 * This should never happen, because we have checked
			 * the CAM availability in the 'validate'.
			 */
			WARN_ON(1);
			kfree(reg_elem);
			return -EINVAL;
		}

		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
			  sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
			elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else /* DEL, RESTORE: reuse the element already in the registry */
		reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return 0;
}
1574
1575 /**
1576  * bnx2x_execute_vlan_mac - execute vlan mac command
1577  *
1578  * @bp:                 device handle
1579  * @qo:
1580  * @exe_chunk:
1581  * @ramrod_flags:
1582  *
1583  * go and send a ramrod!
1584  */
1585 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1586                                   union bnx2x_qable_obj *qo,
1587                                   struct list_head *exe_chunk,
1588                                   unsigned long *ramrod_flags)
1589 {
1590         struct bnx2x_exeq_elem *elem;
1591         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1592         struct bnx2x_raw_obj *r = &o->raw;
1593         int rc, idx = 0;
1594         bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1595         bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1596         struct bnx2x_vlan_mac_registry_elem *reg_elem;
1597         enum bnx2x_vlan_mac_cmd cmd;
1598
1599         /*
1600          * If DRIVER_ONLY execution is requested, cleanup a registry
1601          * and exit. Otherwise send a ramrod to FW.
1602          */
1603         if (!drv_only) {
1604                 WARN_ON(r->check_pending(r));
1605
1606                 /* Set pending */
1607                 r->set_pending(r);
1608
1609                 /* Fill tha ramrod data */
1610                 list_for_each_entry(elem, exe_chunk, link) {
1611                         cmd = elem->cmd_data.vlan_mac.cmd;
1612                         /*
1613                          * We will add to the target object in MOVE command, so
1614                          * change the object for a CAM search.
1615                          */
1616                         if (cmd == BNX2X_VLAN_MAC_MOVE)
1617                                 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1618                         else
1619                                 cam_obj = o;
1620
1621                         rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1622                                                               elem, restore,
1623                                                               &reg_elem);
1624                         if (rc)
1625                                 goto error_exit;
1626
1627                         WARN_ON(!reg_elem);
1628
1629                         /* Push a new entry into the registry */
1630                         if (!restore &&
1631                             ((cmd == BNX2X_VLAN_MAC_ADD) ||
1632                             (cmd == BNX2X_VLAN_MAC_MOVE)))
1633                                 list_add(&reg_elem->link, &cam_obj->head);
1634
1635                         /* Configure a single command in a ramrod data buffer */
1636                         o->set_one_rule(bp, o, elem, idx,
1637                                         reg_elem->cam_offset);
1638
1639                         /* MOVE command consumes 2 entries in the ramrod data */
1640                         if (cmd == BNX2X_VLAN_MAC_MOVE)
1641                                 idx += 2;
1642                         else
1643                                 idx++;
1644                 }
1645
1646                 /*
1647                  *  No need for an explicit memory barrier here as long we would
1648                  *  need to ensure the ordering of writing to the SPQ element
1649                  *  and updating of the SPQ producer which involves a memory
1650                  *  read and we will have to put a full memory barrier there
1651                  *  (inside bnx2x_sp_post()).
1652                  */
1653
1654                 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1655                                    U64_HI(r->rdata_mapping),
1656                                    U64_LO(r->rdata_mapping),
1657                                    ETH_CONNECTION_TYPE);
1658                 if (rc)
1659                         goto error_exit;
1660         }
1661
1662         /* Now, when we are done with the ramrod - clean up the registry */
1663         list_for_each_entry(elem, exe_chunk, link) {
1664                 cmd = elem->cmd_data.vlan_mac.cmd;
1665                 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1666                     (cmd == BNX2X_VLAN_MAC_MOVE)) {
1667                         reg_elem = o->check_del(bp, o,
1668                                                 &elem->cmd_data.vlan_mac.u);
1669
1670                         WARN_ON(!reg_elem);
1671
1672                         o->put_cam_offset(o, reg_elem->cam_offset);
1673                         list_del(&reg_elem->link);
1674                         kfree(reg_elem);
1675                 }
1676         }
1677
1678         if (!drv_only)
1679                 return 1;
1680         else
1681                 return 0;
1682
1683 error_exit:
1684         r->clear_pending(r);
1685
1686         /* Cleanup a registry in case of a failure */
1687         list_for_each_entry(elem, exe_chunk, link) {
1688                 cmd = elem->cmd_data.vlan_mac.cmd;
1689
1690                 if (cmd == BNX2X_VLAN_MAC_MOVE)
1691                         cam_obj = elem->cmd_data.vlan_mac.target_obj;
1692                 else
1693                         cam_obj = o;
1694
1695                 /* Delete all newly added above entries */
1696                 if (!restore &&
1697                     ((cmd == BNX2X_VLAN_MAC_ADD) ||
1698                     (cmd == BNX2X_VLAN_MAC_MOVE))) {
1699                         reg_elem = o->check_del(bp, cam_obj,
1700                                                 &elem->cmd_data.vlan_mac.u);
1701                         if (reg_elem) {
1702                                 list_del(&reg_elem->link);
1703                                 kfree(reg_elem);
1704                         }
1705                 }
1706         }
1707
1708         return rc;
1709 }
1710
1711 static inline int bnx2x_vlan_mac_push_new_cmd(
1712         struct bnx2x *bp,
1713         struct bnx2x_vlan_mac_ramrod_params *p)
1714 {
1715         struct bnx2x_exeq_elem *elem;
1716         struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1717         bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1718
1719         /* Allocate the execution queue element */
1720         elem = bnx2x_exe_queue_alloc_elem(bp);
1721         if (!elem)
1722                 return -ENOMEM;
1723
1724         /* Set the command 'length' */
1725         switch (p->user_req.cmd) {
1726         case BNX2X_VLAN_MAC_MOVE:
1727                 elem->cmd_len = 2;
1728                 break;
1729         default:
1730                 elem->cmd_len = 1;
1731         }
1732
1733         /* Fill the object specific info */
1734         memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1735
1736         /* Try to add a new command to the pending list */
1737         return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1738 }
1739
1740 /**
1741  * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1742  *
1743  * @bp:   device handle
1744  * @p:
1745  *
1746  */
1747 int bnx2x_config_vlan_mac(
1748         struct bnx2x *bp,
1749         struct bnx2x_vlan_mac_ramrod_params *p)
1750 {
1751         int rc = 0;
1752         struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1753         unsigned long *ramrod_flags = &p->ramrod_flags;
1754         bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1755         struct bnx2x_raw_obj *raw = &o->raw;
1756
1757         /*
1758          * Add new elements to the execution list for commands that require it.
1759          */
1760         if (!cont) {
1761                 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1762                 if (rc)
1763                         return rc;
1764         }
1765
1766         /*
1767          * If nothing will be executed further in this iteration we want to
1768          * return PENDING if there are pending commands
1769          */
1770         if (!bnx2x_exe_queue_empty(&o->exe_queue))
1771                 rc = 1;
1772
1773         if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
1774                 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1775                 raw->clear_pending(raw);
1776         }
1777
1778         /* Execute commands if required */
1779         if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1780             test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1781                 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1782                 if (rc < 0)
1783                         return rc;
1784         }
1785
1786         /*
1787          * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1788          * then user want to wait until the last command is done.
1789          */
1790         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1791                 /*
1792                  * Wait maximum for the current exe_queue length iterations plus
1793                  * one (for the current pending command).
1794                  */
1795                 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1796
1797                 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1798                        max_iterations--) {
1799
1800                         /* Wait for the current command to complete */
1801                         rc = raw->wait_comp(bp, raw);
1802                         if (rc)
1803                                 return rc;
1804
1805                         /* Make a next step */
1806                         rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1807                                                   ramrod_flags);
1808                         if (rc < 0)
1809                                 return rc;
1810                 }
1811
1812                 return 0;
1813         }
1814
1815         return rc;
1816 }
1817
1818
1819
1820 /**
1821  * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1822  *
1823  * @bp:                 device handle
1824  * @o:
1825  * @vlan_mac_flags:
1826  * @ramrod_flags:       execution flags to be used for this deletion
1827  *
1828  * if the last operation has completed successfully and there are no
1829  * moreelements left, positive value if the last operation has completed
1830  * successfully and there are more previously configured elements, negative
1831  * value is current operation has failed.
1832  */
1833 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1834                                   struct bnx2x_vlan_mac_obj *o,
1835                                   unsigned long *vlan_mac_flags,
1836                                   unsigned long *ramrod_flags)
1837 {
1838         struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1839         int rc = 0;
1840         struct bnx2x_vlan_mac_ramrod_params p;
1841         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1842         struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1843
1844         /* Clear pending commands first */
1845
1846         spin_lock_bh(&exeq->lock);
1847
1848         list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1849                 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1850                     *vlan_mac_flags) {
1851                         rc = exeq->remove(bp, exeq->owner, exeq_pos);
1852                         if (rc) {
1853                                 BNX2X_ERR("Failed to remove command\n");
1854                                 spin_unlock_bh(&exeq->lock);
1855                                 return rc;
1856                         }
1857                         list_del(&exeq_pos->link);
1858                         bnx2x_exe_queue_free_elem(bp, exeq_pos);
1859                 }
1860         }
1861
1862         spin_unlock_bh(&exeq->lock);
1863
1864         /* Prepare a command request */
1865         memset(&p, 0, sizeof(p));
1866         p.vlan_mac_obj = o;
1867         p.ramrod_flags = *ramrod_flags;
1868         p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1869
1870         /*
1871          * Add all but the last VLAN-MAC to the execution queue without actually
1872          * execution anything.
1873          */
1874         __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1875         __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1876         __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1877
1878         list_for_each_entry(pos, &o->head, link) {
1879                 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1880                         p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1881                         memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1882                         rc = bnx2x_config_vlan_mac(bp, &p);
1883                         if (rc < 0) {
1884                                 BNX2X_ERR("Failed to add a new DEL command\n");
1885                                 return rc;
1886                         }
1887                 }
1888         }
1889
1890         p.ramrod_flags = *ramrod_flags;
1891         __set_bit(RAMROD_CONT, &p.ramrod_flags);
1892
1893         return bnx2x_config_vlan_mac(bp, &p);
1894 }
1895
1896 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1897         u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1898         unsigned long *pstate, bnx2x_obj_type type)
1899 {
1900         raw->func_id = func_id;
1901         raw->cid = cid;
1902         raw->cl_id = cl_id;
1903         raw->rdata = rdata;
1904         raw->rdata_mapping = rdata_mapping;
1905         raw->state = state;
1906         raw->pstate = pstate;
1907         raw->obj_type = type;
1908         raw->check_pending = bnx2x_raw_check_pending;
1909         raw->clear_pending = bnx2x_raw_clear_pending;
1910         raw->set_pending = bnx2x_raw_set_pending;
1911         raw->wait_comp = bnx2x_raw_wait;
1912 }
1913
1914 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1915         u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1916         int state, unsigned long *pstate, bnx2x_obj_type type,
1917         struct bnx2x_credit_pool_obj *macs_pool,
1918         struct bnx2x_credit_pool_obj *vlans_pool)
1919 {
1920         INIT_LIST_HEAD(&o->head);
1921
1922         o->macs_pool = macs_pool;
1923         o->vlans_pool = vlans_pool;
1924
1925         o->delete_all = bnx2x_vlan_mac_del_all;
1926         o->restore = bnx2x_vlan_mac_restore;
1927         o->complete = bnx2x_complete_vlan_mac;
1928         o->wait = bnx2x_wait_vlan_mac;
1929
1930         bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1931                            state, pstate, type);
1932 }
1933
1934
1935 void bnx2x_init_mac_obj(struct bnx2x *bp,
1936                         struct bnx2x_vlan_mac_obj *mac_obj,
1937                         u8 cl_id, u32 cid, u8 func_id, void *rdata,
1938                         dma_addr_t rdata_mapping, int state,
1939                         unsigned long *pstate, bnx2x_obj_type type,
1940                         struct bnx2x_credit_pool_obj *macs_pool)
1941 {
1942         union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1943
1944         bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1945                                    rdata_mapping, state, pstate, type,
1946                                    macs_pool, NULL);
1947
1948         /* CAM credit pool handling */
1949         mac_obj->get_credit = bnx2x_get_credit_mac;
1950         mac_obj->put_credit = bnx2x_put_credit_mac;
1951         mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1952         mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1953
1954         if (CHIP_IS_E1x(bp)) {
1955                 mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
1956                 mac_obj->check_del         = bnx2x_check_mac_del;
1957                 mac_obj->check_add         = bnx2x_check_mac_add;
1958                 mac_obj->check_move        = bnx2x_check_move_always_err;
1959                 mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
1960
1961                 /* Exe Queue */
1962                 bnx2x_exe_queue_init(bp,
1963                                      &mac_obj->exe_queue, 1, qable_obj,
1964                                      bnx2x_validate_vlan_mac,
1965                                      bnx2x_remove_vlan_mac,
1966                                      bnx2x_optimize_vlan_mac,
1967                                      bnx2x_execute_vlan_mac,
1968                                      bnx2x_exeq_get_mac);
1969         } else {
1970                 mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
1971                 mac_obj->check_del         = bnx2x_check_mac_del;
1972                 mac_obj->check_add         = bnx2x_check_mac_add;
1973                 mac_obj->check_move        = bnx2x_check_move;
1974                 mac_obj->ramrod_cmd        =
1975                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1976                 mac_obj->get_n_elements    = bnx2x_get_n_elements;
1977
1978                 /* Exe Queue */
1979                 bnx2x_exe_queue_init(bp,
1980                                      &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1981                                      qable_obj, bnx2x_validate_vlan_mac,
1982                                      bnx2x_remove_vlan_mac,
1983                                      bnx2x_optimize_vlan_mac,
1984                                      bnx2x_execute_vlan_mac,
1985                                      bnx2x_exeq_get_mac);
1986         }
1987 }
1988
1989 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1990                          struct bnx2x_vlan_mac_obj *vlan_obj,
1991                          u8 cl_id, u32 cid, u8 func_id, void *rdata,
1992                          dma_addr_t rdata_mapping, int state,
1993                          unsigned long *pstate, bnx2x_obj_type type,
1994                          struct bnx2x_credit_pool_obj *vlans_pool)
1995 {
1996         union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1997
1998         bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1999                                    rdata_mapping, state, pstate, type, NULL,
2000                                    vlans_pool);
2001
2002         vlan_obj->get_credit = bnx2x_get_credit_vlan;
2003         vlan_obj->put_credit = bnx2x_put_credit_vlan;
2004         vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2005         vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2006
2007         if (CHIP_IS_E1x(bp)) {
2008                 BNX2X_ERR("Do not support chips others than E2 and newer\n");
2009                 BUG();
2010         } else {
2011                 vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
2012                 vlan_obj->check_del         = bnx2x_check_vlan_del;
2013                 vlan_obj->check_add         = bnx2x_check_vlan_add;
2014                 vlan_obj->check_move        = bnx2x_check_move;
2015                 vlan_obj->ramrod_cmd        =
2016                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2017                 vlan_obj->get_n_elements    = bnx2x_get_n_elements;
2018
2019                 /* Exe Queue */
2020                 bnx2x_exe_queue_init(bp,
2021                                      &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2022                                      qable_obj, bnx2x_validate_vlan_mac,
2023                                      bnx2x_remove_vlan_mac,
2024                                      bnx2x_optimize_vlan_mac,
2025                                      bnx2x_execute_vlan_mac,
2026                                      bnx2x_exeq_get_vlan);
2027         }
2028 }
2029
2030 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2031                              struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2032                              u8 cl_id, u32 cid, u8 func_id, void *rdata,
2033                              dma_addr_t rdata_mapping, int state,
2034                              unsigned long *pstate, bnx2x_obj_type type,
2035                              struct bnx2x_credit_pool_obj *macs_pool,
2036                              struct bnx2x_credit_pool_obj *vlans_pool)
2037 {
2038         union bnx2x_qable_obj *qable_obj =
2039                 (union bnx2x_qable_obj *)vlan_mac_obj;
2040
2041         bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2042                                    rdata_mapping, state, pstate, type,
2043                                    macs_pool, vlans_pool);
2044
2045         /* CAM pool handling */
2046         vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2047         vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2048         /*
2049          * CAM offset is relevant for 57710 and 57711 chips only which have a
2050          * single CAM for both MACs and VLAN-MAC pairs. So the offset
2051          * will be taken from MACs' pool object only.
2052          */
2053         vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2054         vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2055
2056         if (CHIP_IS_E1(bp)) {
2057                 BNX2X_ERR("Do not support chips others than E2\n");
2058                 BUG();
2059         } else if (CHIP_IS_E1H(bp)) {
2060                 vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
2061                 vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2062                 vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2063                 vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
2064                 vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2065
2066                 /* Exe Queue */
2067                 bnx2x_exe_queue_init(bp,
2068                                      &vlan_mac_obj->exe_queue, 1, qable_obj,
2069                                      bnx2x_validate_vlan_mac,
2070                                      bnx2x_remove_vlan_mac,
2071                                      bnx2x_optimize_vlan_mac,
2072                                      bnx2x_execute_vlan_mac,
2073                                      bnx2x_exeq_get_vlan_mac);
2074         } else {
2075                 vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
2076                 vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2077                 vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2078                 vlan_mac_obj->check_move        = bnx2x_check_move;
2079                 vlan_mac_obj->ramrod_cmd        =
2080                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2081
2082                 /* Exe Queue */
2083                 bnx2x_exe_queue_init(bp,
2084                                      &vlan_mac_obj->exe_queue,
2085                                      CLASSIFY_RULES_COUNT,
2086                                      qable_obj, bnx2x_validate_vlan_mac,
2087                                      bnx2x_remove_vlan_mac,
2088                                      bnx2x_optimize_vlan_mac,
2089                                      bnx2x_execute_vlan_mac,
2090                                      bnx2x_exeq_get_vlan_mac);
2091         }
2092
2093 }
2094
2095 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2096 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2097                         struct tstorm_eth_mac_filter_config *mac_filters,
2098                         u16 pf_id)
2099 {
2100         size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2101
2102         u32 addr = BAR_TSTRORM_INTMEM +
2103                         TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2104
2105         __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2106 }
2107
2108 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2109                                  struct bnx2x_rx_mode_ramrod_params *p)
2110 {
2111         /* update the bp MAC filter structure */
2112         u32 mask = (1 << p->cl_id);
2113
2114         struct tstorm_eth_mac_filter_config *mac_filters =
2115                 (struct tstorm_eth_mac_filter_config *)p->rdata;
2116
2117         /* initial seeting is drop-all */
2118         u8 drop_all_ucast = 1, drop_all_mcast = 1;
2119         u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2120         u8 unmatched_unicast = 0;
2121
2122     /* In e1x there we only take into account rx acceot flag since tx switching
2123      * isn't enabled. */
2124         if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2125                 /* accept matched ucast */
2126                 drop_all_ucast = 0;
2127
2128         if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2129                 /* accept matched mcast */
2130                 drop_all_mcast = 0;
2131
2132         if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2133                 /* accept all mcast */
2134                 drop_all_ucast = 0;
2135                 accp_all_ucast = 1;
2136         }
2137         if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2138                 /* accept all mcast */
2139                 drop_all_mcast = 0;
2140                 accp_all_mcast = 1;
2141         }
2142         if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2143                 /* accept (all) bcast */
2144                 accp_all_bcast = 1;
2145         if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2146                 /* accept unmatched unicasts */
2147                 unmatched_unicast = 1;
2148
2149         mac_filters->ucast_drop_all = drop_all_ucast ?
2150                 mac_filters->ucast_drop_all | mask :
2151                 mac_filters->ucast_drop_all & ~mask;
2152
2153         mac_filters->mcast_drop_all = drop_all_mcast ?
2154                 mac_filters->mcast_drop_all | mask :
2155                 mac_filters->mcast_drop_all & ~mask;
2156
2157         mac_filters->ucast_accept_all = accp_all_ucast ?
2158                 mac_filters->ucast_accept_all | mask :
2159                 mac_filters->ucast_accept_all & ~mask;
2160
2161         mac_filters->mcast_accept_all = accp_all_mcast ?
2162                 mac_filters->mcast_accept_all | mask :
2163                 mac_filters->mcast_accept_all & ~mask;
2164
2165         mac_filters->bcast_accept_all = accp_all_bcast ?
2166                 mac_filters->bcast_accept_all | mask :
2167                 mac_filters->bcast_accept_all & ~mask;
2168
2169         mac_filters->unmatched_unicast = unmatched_unicast ?
2170                 mac_filters->unmatched_unicast | mask :
2171                 mac_filters->unmatched_unicast & ~mask;
2172
2173         DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2174                          "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2175            mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2176            mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2177            mac_filters->bcast_accept_all);
2178
2179         /* write the MAC filter structure*/
2180         __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2181
2182         /* The operation is completed */
2183         clear_bit(p->state, p->pstate);
2184         smp_mb__after_clear_bit();
2185
2186         return 0;
2187 }
2188
2189 /* Setup ramrod data */
2190 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2191                                 struct eth_classify_header *hdr,
2192                                 u8 rule_cnt)
2193 {
2194         hdr->echo = cpu_to_le32(cid);
2195         hdr->rule_cnt = rule_cnt;
2196 }
2197
2198 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2199                                 unsigned long *accept_flags,
2200                                 struct eth_filter_rules_cmd *cmd,
2201                                 bool clear_accept_all)
2202 {
2203         u16 state;
2204
2205         /* start with 'drop-all' */
2206         state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2207                 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2208
2209         if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2210                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2211
2212         if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2213                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2214
2215         if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2216                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2217                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2218         }
2219
2220         if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2221                 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2222                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2223         }
2224
2225         if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2226                 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2227
2228         if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2229                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2230                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2231         }
2232
2233         if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2234                 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2235
2236         /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2237         if (clear_accept_all) {
2238                 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2239                 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2240                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2241                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2242         }
2243
2244         cmd->state = cpu_to_le16(state);
2245
2246 }
2247
/* Configure the Rx mode on E2 and newer chips by building a FILTER_RULES
 * ramrod: up to four rules (Tx/Rx for the L2 client, plus Tx/Rx for the
 * FCoE L2 queue when requested) are packed into p->rdata and a single
 * ramrod is posted.
 *
 * Returns 1 when the ramrod was posted (completion pending), a negative
 * error code if posting failed.
 */
static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
				struct bnx2x_rx_mode_ramrod_params *p)
{
	struct eth_filter_rules_ramrod_data *data = p->rdata;
	int rc;
	u8 rule_idx = 0;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* Setup ramrod data */

	/* Tx (internal switching) */
	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_TX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
					       &(data->rules[rule_idx++]),
					       false);
	}

	/* Rx */
	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_RX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
					       &(data->rules[rule_idx++]),
					       false);
	}


	/*
	 * If FCoE Queue configuration has been requested configure the Rx and
	 * internal switching modes for this queue in separate rules.
	 *
	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
	 */
	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
		/*  Tx (internal switching) */
		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_TX_CMD;

			/* clear_accept_all=true strips the ACCEPT_ALL bits */
			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
						       &(data->rules[rule_idx]),
						       true);
			rule_idx++;
		}

		/* Rx */
		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_RX_CMD;

			/* clear_accept_all=true strips the ACCEPT_ALL bits */
			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
						       &(data->rules[rule_idx]),
						       true);
			rule_idx++;
		}
	}

	/*
	 * Set the ramrod header (most importantly - number of rules to
	 * configure).
	 */
	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);

	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
			 data->header.rule_cnt, p->rx_accept_flags,
			 p->tx_accept_flags);

	/*
	 *  No need for an explicit memory barrier here as long we would
	 *  need to ensure the ordering of writing to the SPQ element
	 *  and updating of the SPQ producer which involves a memory
	 *  read and we will have to put a full memory barrier there
	 *  (inside bnx2x_sp_post()).
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
			   U64_HI(p->rdata_mapping),
			   U64_LO(p->rdata_mapping),
			   ETH_CONNECTION_TYPE);
	if (rc)
		return rc;

	/* Ramrod completion is pending */
	return 1;
}
2353
/* Wait for the rx_mode ramrod completion on E2 and newer: block until the
 * pending bit p->state is cleared in *p->pstate.
 */
static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
				      struct bnx2x_rx_mode_ramrod_params *p)
{
	return bnx2x_state_wait(bp, p->state, p->pstate);
}
2359
/* E1x rx_mode is applied by a direct internal-memory write which clears
 * its own pending bit (see bnx2x_set_rx_mode_e1x), so there is no
 * completion to wait for.
 */
static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
				    struct bnx2x_rx_mode_ramrod_params *p)
{
	/* Do nothing */
	return 0;
}
2366
2367 int bnx2x_config_rx_mode(struct bnx2x *bp,
2368                          struct bnx2x_rx_mode_ramrod_params *p)
2369 {
2370         int rc;
2371
2372         /* Configure the new classification in the chip */
2373         rc = p->rx_mode_obj->config_rx_mode(bp, p);
2374         if (rc < 0)
2375                 return rc;
2376
2377         /* Wait for a ramrod completion if was requested */
2378         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2379                 rc = p->rx_mode_obj->wait_comp(bp, p);
2380                 if (rc)
2381                         return rc;
2382         }
2383
2384         return rc;
2385 }
2386
2387 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2388                             struct bnx2x_rx_mode_obj *o)
2389 {
2390         if (CHIP_IS_E1x(bp)) {
2391                 o->wait_comp      = bnx2x_empty_rx_mode_wait;
2392                 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2393         } else {
2394                 o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
2395                 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2396         }
2397 }
2398
2399 /********************* Multicast verbs: SET, CLEAR ****************************/
2400 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2401 {
2402         return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2403 }
2404
/* A single MAC stored inside a pending ADD command's FIFO list
 * (see bnx2x_mcast_enqueue_cmd()).
 */
struct bnx2x_mcast_mac_elem {
	struct list_head link;
	u8 mac[ETH_ALEN];
	u8 pad[2]; /* For a natural alignment of the following buffer */
};
2410
/* A postponed multicast command kept on the object's
 * pending_cmds_head FIFO until there is room in a ramrod.
 */
struct bnx2x_pending_mcast_cmd {
	struct list_head link;	/* entry in o->pending_cmds_head */
	int type; /* BNX2X_MCAST_CMD_X */
	union {
		struct list_head macs_head;	/* ADD: list of MACs to set */
		u32 macs_num; /* Needed for DEL command */
		int next_bin; /* Needed for RESTORE flow with aprox match */
	} data;

	bool done; /* set to true, when the command has been handled,
		    * practically used in 57712 handling only, where one pending
		    * command may be handled in a few operations. As long as for
		    * other chips every operation handling is completed in a
		    * single ramrod, there is no need to utilize this field.
		    */
};
2427
2428 static int bnx2x_mcast_wait(struct bnx2x *bp,
2429                             struct bnx2x_mcast_obj *o)
2430 {
2431         if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2432                         o->raw.wait_comp(bp, &o->raw))
2433                 return -EBUSY;
2434
2435         return 0;
2436 }
2437
/**
 * bnx2x_mcast_enqueue_cmd - postpone a multicast command for later handling
 *
 * @bp:         device handle
 * @o:          multicast object the command belongs to
 * @p:          ramrod parameters carrying the caller's MAC list/length
 * @cmd:        BNX2X_MCAST_CMD_X to be queued
 *
 * Allocates one buffer holding both the command descriptor and (for ADD)
 * copies of all MACs, then appends it to the object's pending FIFO and
 * raises the SCHEDULED state.
 *
 * Returns 1 when a command was queued, 0 when there was nothing to do,
 * negative errno on failure.
 */
static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
				   struct bnx2x_mcast_obj *o,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	int total_sz;
	struct bnx2x_pending_mcast_cmd *new_cmd;
	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
	struct bnx2x_mcast_list_elem *pos;
	/* Only ADD needs per-MAC storage; DEL/RESTORE carry a counter only */
	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
			     p->mcast_list_len : 0);

	/* If the command is empty ("handle pending commands only"), break */
	if (!p->mcast_list_len)
		return 0;

	/* One allocation: descriptor followed by macs_list_len MAC elements */
	total_sz = sizeof(*new_cmd) +
		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);

	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
	new_cmd = kzalloc(total_sz, GFP_ATOMIC);

	if (!new_cmd)
		return -ENOMEM;

	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
	   cmd, macs_list_len);

	INIT_LIST_HEAD(&new_cmd->data.macs_head);

	new_cmd->type = cmd;
	new_cmd->done = false;

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		/* The MAC elements live in the tail of the same allocation */
		cur_mac = (struct bnx2x_mcast_mac_elem *)
			  ((u8 *)new_cmd + sizeof(*new_cmd));

		/* Push the MACs of the current command into the pendig command
		 * MACs list: FIFO
		 */
		list_for_each_entry(pos, &p->mcast_list, link) {
			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
			cur_mac++;
		}

		break;

	case BNX2X_MCAST_CMD_DEL:
		new_cmd->data.macs_num = p->mcast_list_len;
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		new_cmd->data.next_bin = 0;
		break;

	default:
		kfree(new_cmd);
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* Push the new pending command to the tail of the pending list: FIFO */
	list_add_tail(&new_cmd->link, &o->pending_cmds_head);

	/* Mark the object as having scheduled work */
	o->set_sched(o);

	return 1;
}
2508
2509 /**
2510  * bnx2x_mcast_get_next_bin - get the next set bin (index)
2511  *
2512  * @o:
2513  * @last:       index to start looking from (including)
2514  *
2515  * returns the next found (set) bin or a negative value if none is found.
2516  */
2517 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2518 {
2519         int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2520
2521         for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2522                 if (o->registry.aprox_match.vec[i])
2523                         for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2524                                 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2525                                 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2526                                                        vec, cur_bit)) {
2527                                         return cur_bit;
2528                                 }
2529                         }
2530                 inner_start = 0;
2531         }
2532
2533         /* None found */
2534         return -1;
2535 }
2536
2537 /**
2538  * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2539  *
2540  * @o:
2541  *
2542  * returns the index of the found bin or -1 if none is found
2543  */
2544 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2545 {
2546         int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2547
2548         if (cur_bit >= 0)
2549                 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2550
2551         return cur_bit;
2552 }
2553
2554 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2555 {
2556         struct bnx2x_raw_obj *raw = &o->raw;
2557         u8 rx_tx_flag = 0;
2558
2559         if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2560             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2561                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2562
2563         if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2564             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2565                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2566
2567         return rx_tx_flag;
2568 }
2569
/* Fill rule @idx of the multicast rules ramrod data for 57712+ and
 * update the SW bins registry accordingly:
 *   ADD     - compute the bin from the MAC and set it in the registry;
 *   DEL     - clear the first still-set bin (registry side effect!);
 *   RESTORE - use the bin provided by the caller, registry untouched.
 */
static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
					struct bnx2x_mcast_obj *o, int idx,
					union bnx2x_mcast_config_data *cfg_data,
					enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
	u8 func_id = r->func_id;
	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
	int bin;

	/* ADD and RESTORE set a bin; DEL clears one */
	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;

	data->rules[idx].cmd_general_data |= rx_tx_add_flag;

	/* Get a bin and update a bins' vector */
	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
		break;

	case BNX2X_MCAST_CMD_DEL:
		/* If there were no more bins to clear
		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
		 * clear any (0xff) bin.
		 * See bnx2x_mcast_validate_e2() for explanation when it may
		 * happen.
		 */
		bin = bnx2x_mcast_clear_first_bin(o);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		bin = cfg_data->bin;
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return;
	}

	DP(BNX2X_MSG_SP, "%s bin %d\n",
			 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
			 "Setting"  : "Clearing"), bin);

	data->rules[idx].bin_id    = (u8)bin;
	data->rules[idx].func_id   = func_id;
	data->rules[idx].engine_id = o->engine_id;
}
2621
2622 /**
2623  * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2624  *
2625  * @bp:         device handle
2626  * @o:
2627  * @start_bin:  index in the registry to start from (including)
2628  * @rdata_idx:  index in the ramrod data to start from
2629  *
2630  * returns last handled bin index or -1 if all bins have been handled
2631  */
static inline int bnx2x_mcast_handle_restore_cmd_e2(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
	int *rdata_idx)
{
	int cur_bin, cnt = *rdata_idx;
	union bnx2x_mcast_config_data cfg_data = {NULL};

	/* go through the registry and configure the bins from it */
	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
	    cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {

		/* Emit a RESTORE rule for this bin */
		cfg_data.bin = (u8)cur_bin;
		o->set_one_rule(bp, o, cnt, &cfg_data,
				BNX2X_MCAST_CMD_RESTORE);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);

		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	/* Tell the caller how many ramrod lines were consumed */
	*rdata_idx = cnt;

	/* cur_bin is the last handled bin, or -1 when all bins are done -
	 * the caller resumes from cur_bin + 1 next time (see
	 * bnx2x_mcast_hdl_pending_restore_e2()).
	 */
	return cur_bin;
}
2662
/* Drain a pending ADD command into the ramrod data starting at
 * *line_idx. Handled MACs are unlinked from the command (but their
 * storage is freed only with the command itself); if the ramrod fills
 * up before the list is empty, the command stays queued with the
 * remaining MACs and will be continued by the next ramrod.
 */
static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
	int cnt = *line_idx;
	union bnx2x_mcast_config_data cfg_data = {NULL};

	/* _safe iteration: entries are unlinked as they are consumed */
	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
				 link) {

		cfg_data.mac = &pmac_pos->mac[0];
		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   pmac_pos->mac);

		list_del(&pmac_pos->link);

		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;

	/* if no more MACs to configure - we are done */
	if (list_empty(&cmd_pos->data.macs_head))
		cmd_pos->done = true;
}
2697
2698 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2699         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2700         int *line_idx)
2701 {
2702         int cnt = *line_idx;
2703
2704         while (cmd_pos->data.macs_num) {
2705                 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2706
2707                 cnt++;
2708
2709                 cmd_pos->data.macs_num--;
2710
2711                   DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2712                                    cmd_pos->data.macs_num, cnt);
2713
2714                 /* Break if we reached the maximum
2715                  * number of rules.
2716                  */
2717                 if (cnt >= o->max_cmd_len)
2718                         break;
2719         }
2720
2721         *line_idx = cnt;
2722
2723         /* If we cleared all bins - we are done */
2724         if (!cmd_pos->data.macs_num)
2725                 cmd_pos->done = true;
2726 }
2727
2728 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2729         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2730         int *line_idx)
2731 {
2732         cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2733                                                 line_idx);
2734
2735         if (cmd_pos->data.next_bin < 0)
2736                 /* If o->set_restore returned -1 we are done */
2737                 cmd_pos->done = true;
2738         else
2739                 /* Start from the next bin next time */
2740                 cmd_pos->data.next_bin++;
2741 }
2742
/* Move as many pending multicast commands as possible into the current
 * ramrod data buffer (57712+ aggregates several commands per ramrod).
 *
 * Fully handled commands are removed and freed; a partially handled
 * command (ramrod full) stays at the head of the FIFO with done==false.
 *
 * Returns the number of ramrod lines filled, or -EINVAL on an unknown
 * command type (in which case the queue is left partially consumed).
 */
static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
	int cnt = 0;
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	/* _safe iteration: completed commands are unlinked and freed */
	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
				 link) {
		switch (cmd_pos->type) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_DEL:
			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
							   &cnt);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
			return -EINVAL;
		}

		/* If the command has been completed - remove it from the list
		 * and free the memory
		 */
		if (cmd_pos->done) {
			list_del(&cmd_pos->link);
			kfree(cmd_pos);
		}

		/* Break if we reached the maximum number of rules */
		if (cnt >= o->max_cmd_len)
			break;
	}

	return cnt;
}
2786
2787 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2788         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2789         int *line_idx)
2790 {
2791         struct bnx2x_mcast_list_elem *mlist_pos;
2792         union bnx2x_mcast_config_data cfg_data = {NULL};
2793         int cnt = *line_idx;
2794
2795         list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2796                 cfg_data.mac = mlist_pos->mac;
2797                 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2798
2799                 cnt++;
2800
2801                 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2802                    mlist_pos->mac);
2803         }
2804
2805         *line_idx = cnt;
2806 }
2807
2808 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2809         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2810         int *line_idx)
2811 {
2812         int cnt = *line_idx, i;
2813
2814         for (i = 0; i < p->mcast_list_len; i++) {
2815                 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2816
2817                 cnt++;
2818
2819                 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2820                                  p->mcast_list_len - i - 1);
2821         }
2822
2823         *line_idx = cnt;
2824 }
2825
2826 /**
2827  * bnx2x_mcast_handle_current_cmd -
2828  *
2829  * @bp:         device handle
2830  * @p:
2831  * @cmd:
2832  * @start_cnt:  first line in the ramrod data that may be used
2833  *
2834  * This function is called iff there is enough place for the current command in
2835  * the ramrod data.
2836  * Returns number of lines filled in the ramrod data in total.
2837  */
2838 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2839                         struct bnx2x_mcast_ramrod_params *p,
2840                         enum bnx2x_mcast_cmd cmd,
2841                         int start_cnt)
2842 {
2843         struct bnx2x_mcast_obj *o = p->mcast_obj;
2844         int cnt = start_cnt;
2845
2846         DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2847
2848         switch (cmd) {
2849         case BNX2X_MCAST_CMD_ADD:
2850                 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2851                 break;
2852
2853         case BNX2X_MCAST_CMD_DEL:
2854                 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2855                 break;
2856
2857         case BNX2X_MCAST_CMD_RESTORE:
2858                 o->hdl_restore(bp, o, 0, &cnt);
2859                 break;
2860
2861         default:
2862                 BNX2X_ERR("Unknown command: %d\n", cmd);
2863                 return -EINVAL;
2864         }
2865
2866         /* The current command has been handled */
2867         p->mcast_list_len = 0;
2868
2869         return cnt;
2870 }
2871
/* Validate a multicast command on 57712+ and size the expected amount
 * of work (p->mcast_list_len) before it is either executed directly or
 * queued; also pre-accounts it in o->total_pending_num (undone by
 * bnx2x_mcast_revert_e2() on failure).
 */
static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break - fall through to RESTORE to set the amount
		 * of work from the registry size saved above in reg_sz.
		 */

	/* RESTORE command will restore the entire multicast configuration */
	case BNX2X_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which in
		 * fact may be only less as some MACs in postponed ADD
		 * command(s) scheduled before this command may fall into
		 * the same bin and the actual number of bins set in the
		 * registry would be less than we estimated here. See
		 * bnx2x_mcast_set_one_rule_e2() for further details.
		 */
		p->mcast_list_len = reg_sz;
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Here we assume that all new MACs will fall into new bins.
		 * However we will correct the real registry size after we
		 * handle all pending commands.
		 */
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;

	}

	/* Increase the total number of MACs pending to be configured */
	o->total_pending_num += p->mcast_list_len;

	return 0;
}
2917
2918 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2919                                       struct bnx2x_mcast_ramrod_params *p,
2920                                       int old_num_bins)
2921 {
2922         struct bnx2x_mcast_obj *o = p->mcast_obj;
2923
2924         o->set_registry_size(o, old_num_bins);
2925         o->total_pending_num -= p->mcast_list_len;
2926 }
2927
2928 /**
2929  * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
2930  *
2931  * @bp:         device handle
2932  * @p:
2933  * @len:        number of rules to handle
2934  */
static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					u8 len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);

	/* The echo field carries the CID and the pending-command marker;
	 * it comes back in the completion and is used to match it.
	 */
	data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
					(BNX2X_FILTER_MCAST_PENDING <<
					 BNX2X_SWCID_SHIFT));
	/* Number of valid entries in data->rules[] */
	data->header.rule_cnt = len;
}
2948
2949 /**
2950  * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2951  *
2952  * @bp:         device handle
2953  * @o:
2954  *
2955  * Recalculate the actual number of set bins in the registry using Brian
2956  * Kernighan's algorithm: it's execution complexity is as a number of set bins.
2957  *
2958  * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
2959  */
2960 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2961                                                   struct bnx2x_mcast_obj *o)
2962 {
2963         int i, cnt = 0;
2964         u64 elem;
2965
2966         for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2967                 elem = o->registry.aprox_match.vec[i];
2968                 for (; elem; cnt++)
2969                         elem &= elem - 1;
2970         }
2971
2972         o->set_registry_size(o, cnt);
2973
2974         return 0;
2975 }
2976
/* Build and (unless RAMROD_DRV_CLR_ONLY) post a multicast rules ramrod
 * for 57712+: first drain as many pending commands as fit, then the
 * current command if there is still room, then fix up the accounting.
 *
 * Returns 1 when a ramrod was sent (completion pending), 0 when
 * CLR_ONLY was requested, negative errno on failure.
 */
static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* Previously postponed commands go into the ramrod first */
	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there was enough room in ramrod
	 * data for all pending commands and for the current
	 * command. Otherwise the current command would have been added
	 * to the pending commands and p->mcast_list_len would have been
	 * zeroed.
	 */
	if (p->mcast_list_len > 0)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);

	/* We've pulled out some MACs - update the total number of
	 * outstanding.
	 */
	o->total_pending_num -= cnt;

	/* send a ramrod */
	WARN_ON(o->total_pending_num < 0);
	WARN_ON(cnt > o->max_cmd_len);

	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);

	/* Update a registry size if there are no more pending operations.
	 *
	 * We don't want to change the value of the registry size if there are
	 * pending operations because we want it to always be equal to the
	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
	 * set bins after the last requested operation in order to properly
	 * evaluate the size of the next DEL/RESTORE operation.
	 *
	 * Note that we update the registry itself during command(s) handling
	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
	 * with a limited amount of update commands (per MAC/bin) and we don't
	 * know in this scope what the actual state of bins configuration is
	 * going to be after this ramrod.
	 */
	if (!o->total_pending_num)
		bnx2x_mcast_refresh_registry_e2(bp, o);

	/*
	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/*
		 *  No need for an explicit memory barrier here as long we would
		 *  need to ensure the ordering of writing to the SPQ element
		 *  and updating of the SPQ producer which involves a memory
		 *  read and we will have to put a full memory barrier there
		 *  (inside bnx2x_sp_post()).
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				   raw->cid, U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}
3062
3063 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3064                                     struct bnx2x_mcast_ramrod_params *p,
3065                                     enum bnx2x_mcast_cmd cmd)
3066 {
3067         /* Mark, that there is a work to do */
3068         if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3069                 p->mcast_list_len = 1;
3070
3071         return 0;
3072 }
3073
/* 57711 keeps no bookkeeping in validate, so there is nothing to
 * revert on failure.
 */
static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
				       struct bnx2x_mcast_ramrod_params *p,
				       int old_num_bins)
{
	/* Do nothing */
}
3080
/* Set bit @bit in @filter - the 57711 multicast hash image, laid out
 * as an array of 32-bit words.
 */
#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
do { \
	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
} while (0)
3085
3086 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3087                                            struct bnx2x_mcast_obj *o,
3088                                            struct bnx2x_mcast_ramrod_params *p,
3089                                            u32 *mc_filter)
3090 {
3091         struct bnx2x_mcast_list_elem *mlist_pos;
3092         int bit;
3093
3094         list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3095                 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3096                 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3097
3098                 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3099                    mlist_pos->mac, bit);
3100
3101                 /* bookkeeping... */
3102                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3103                                   bit);
3104         }
3105 }
3106
3107 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3108         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3109         u32 *mc_filter)
3110 {
3111         int bit;
3112
3113         for (bit = bnx2x_mcast_get_next_bin(o, 0);
3114              bit >= 0;
3115              bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3116                 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3117                 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3118         }
3119 }
3120
3121 /* On 57711 we write the multicast MACs' aproximate match
3122  * table by directly into the TSTORM's internal RAM. So we don't
3123  * really need to handle any tricks to make it work.
3124  */
static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
				 struct bnx2x_mcast_ramrod_params *p,
				 enum bnx2x_mcast_cmd cmd)
{
	int i;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* If CLEAR_ONLY has been requested - clear the registry
	 * and clear a pending bit.
	 */
	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		u32 mc_filter[MC_HASH_SIZE] = {0};

		/* Set the multicast filter bits before writing it into
		 * the internal memory.
		 */
		switch (cmd) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
			break;

		case BNX2X_MCAST_CMD_DEL:
			DP(BNX2X_MSG_SP,
			   "Invalidating multicast MACs configuration\n");

			/* clear the registry */
			memset(o->registry.aprox_match.vec, 0,
			       sizeof(o->registry.aprox_match.vec));
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd);
			return -EINVAL;
		}

		/* Set the mcast filter in the internal memory */
		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
	} else
		/* clear the registry */
		memset(o->registry.aprox_match.vec, 0,
		       sizeof(o->registry.aprox_match.vec));

	/* We are done - no ramrod on 57711, the filter is written directly
	 * to internal RAM, so clear the pending bit right away.
	 */
	r->clear_pending(r);

	return 0;
}
3178
/* Validate a multicast command on 57710, where multicast MACs occupy
 * CAM entries and commands are executed strictly one at a time (each
 * non-empty command reserves a full o->max_cmd_len worth of work).
 */
static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break - fall through to RESTORE to set the amount
		 * of work from the registry size saved above in reg_sz.
		 */

	/* RESTORE command will restore the entire multicast configuration */
	case BNX2X_MCAST_CMD_RESTORE:
		p->mcast_list_len = reg_sz;
		  DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
				   cmd, p->mcast_list_len);
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Multicast MACs on 57710 are configured as unicast MACs and
		 * there is only a limited number of CAM entries for that
		 * matter.
		 */
		if (p->mcast_list_len > o->max_cmd_len) {
			BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
				  o->max_cmd_len);
			return -EINVAL;
		}
		/* Every configured MAC should be cleared if DEL command is
		 * called. Only the last ADD command is relevant as long as
		 * every ADD commands overrides the previous configuration.
		 */
		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
		if (p->mcast_list_len > 0)
			o->set_registry_size(o, p->mcast_list_len);

		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;

	}

	/* We want to ensure that commands are executed one by one for 57710.
	 * Therefore each none-empty command will consume o->max_cmd_len.
	 */
	if (p->mcast_list_len)
		o->total_pending_num += o->max_cmd_len;

	return 0;
}
3234
3235 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3236                                       struct bnx2x_mcast_ramrod_params *p,
3237                                       int old_num_macs)
3238 {
3239         struct bnx2x_mcast_obj *o = p->mcast_obj;
3240
3241         o->set_registry_size(o, old_num_macs);
3242
3243         /* If current command hasn't been handled yet and we are
3244          * here means that it's meant to be dropped and we have to
3245          * update the number of outstandling MACs accordingly.
3246          */
3247         if (p->mcast_list_len)
3248                 o->total_pending_num -= o->max_cmd_len;
3249 }
3250
3251 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3252                                         struct bnx2x_mcast_obj *o, int idx,
3253                                         union bnx2x_mcast_config_data *cfg_data,
3254                                         enum bnx2x_mcast_cmd cmd)
3255 {
3256         struct bnx2x_raw_obj *r = &o->raw;
3257         struct mac_configuration_cmd *data =
3258                 (struct mac_configuration_cmd *)(r->rdata);
3259
3260         /* copy mac */
3261         if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3262                 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3263                                       &data->config_table[idx].middle_mac_addr,
3264                                       &data->config_table[idx].lsb_mac_addr,
3265                                       cfg_data->mac);
3266
3267                 data->config_table[idx].vlan_id = 0;
3268                 data->config_table[idx].pf_id = r->func_id;
3269                 data->config_table[idx].clients_bit_vector =
3270                         cpu_to_le32(1 << r->cl_id);
3271
3272                 SET_FLAG(data->config_table[idx].flags,
3273                          MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3274                          T_ETH_MAC_COMMAND_SET);
3275         }
3276 }
3277
3278 /**
3279  * bnx2x_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
3280  *
3281  * @bp:         device handle
3282  * @p:
3283  * @len:        number of rules to handle
3284  */
3285 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3286                                         struct bnx2x_mcast_ramrod_params *p,
3287                                         u8 len)
3288 {
3289         struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3290         struct mac_configuration_cmd *data =
3291                 (struct mac_configuration_cmd *)(r->rdata);
3292
3293         u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3294                      BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3295                      BNX2X_MAX_MULTICAST*(1 + r->func_id));
3296
3297         data->hdr.offset = offset;
3298         data->hdr.client_id = cpu_to_le16(0xff);
3299         data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3300                                      (BNX2X_FILTER_MCAST_PENDING <<
3301                                       BNX2X_SWCID_SHIFT));
3302         data->hdr.length = len;
3303 }
3304
3305 /**
3306  * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3307  *
3308  * @bp:         device handle
3309  * @o:
3310  * @start_idx:  index in the registry to start from
3311  * @rdata_idx:  index in the ramrod data to start from
3312  *
3313  * restore command for 57710 is like all other commands - always a stand alone
3314  * command - start_idx and rdata_idx will always be 0. This function will always
3315  * succeed.
3316  * returns -1 to comply with 57712 variant.
3317  */
3318 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3319         struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3320         int *rdata_idx)
3321 {
3322         struct bnx2x_mcast_mac_elem *elem;
3323         int i = 0;
3324         union bnx2x_mcast_config_data cfg_data = {NULL};
3325
3326         /* go through the registry and configure the MACs from it. */
3327         list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3328                 cfg_data.mac = &elem->mac[0];
3329                 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3330
3331                 i++;
3332
3333                   DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3334                      cfg_data.mac);
3335         }
3336
3337         *rdata_idx = i;
3338
3339         return -1;
3340 }
3341
3342
/* Process the first queued pending mcast command for 57710.
 *
 * Writes the command's rules into the ramrod data buffer (via
 * o->set_one_rule() / o->hdl_restore()), then unlinks and frees the
 * command. Returns the number of rules written, 0 when the pending
 * list is empty, or -EINVAL for an unknown command type.
 */
static inline int bnx2x_mcast_handle_pending_cmds_e1(
	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos;
	struct bnx2x_mcast_mac_elem *pmac_pos;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	union bnx2x_mcast_config_data cfg_data = {NULL};
	int cnt = 0;


	/* If nothing to be done - return */
	if (list_empty(&o->pending_cmds_head))
		return 0;

	/* Handle the first command */
	cmd_pos = list_first_entry(&o->pending_cmds_head,
				   struct bnx2x_pending_mcast_cmd, link);

	switch (cmd_pos->type) {
	case BNX2X_MCAST_CMD_ADD:
		/* Program one rule per MAC queued with the command */
		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
			cfg_data.mac = &pmac_pos->mac[0];
			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

			cnt++;

			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
			   pmac_pos->mac);
		}
		break;

	case BNX2X_MCAST_CMD_DEL:
		/* No per-rule setup needed: the caller pre-invalidated the
		 * whole buffer; only the count matters.
		 */
		cnt = cmd_pos->data.macs_num;
		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		/* Re-program everything from the registry; cnt receives the
		 * number of rules written.
		 */
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
		return -EINVAL;
	}

	/* The command has been consumed - remove and free it */
	list_del(&cmd_pos->link);
	kfree(cmd_pos);

	return cnt;
}
3393
3394 /**
3395  * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3396  *
3397  * @fw_hi:
3398  * @fw_mid:
3399  * @fw_lo:
3400  * @mac:
3401  */
3402 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3403                                          __le16 *fw_lo, u8 *mac)
3404 {
3405         mac[1] = ((u8 *)fw_hi)[0];
3406         mac[0] = ((u8 *)fw_hi)[1];
3407         mac[3] = ((u8 *)fw_mid)[0];
3408         mac[2] = ((u8 *)fw_mid)[1];
3409         mac[5] = ((u8 *)fw_lo)[0];
3410         mac[4] = ((u8 *)fw_lo)[1];
3411 }
3412
/**
 * bnx2x_mcast_refresh_registry_e1 - sync the exact-match registry with rdata
 *
 * @bp:         device handle
 * @o:          mcast object whose registry is refreshed
 *
 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
 * and update the registry correspondingly: if ADD - allocate a memory and add
 * the entries to the registry (list), if DELETE - clear the registry and free
 * the memory.
 *
 * Returns 0 on success, -ENOMEM if the registry allocation fails.
 */
static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
						  struct bnx2x_mcast_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct bnx2x_mcast_mac_elem *elem;
	struct mac_configuration_cmd *data =
			(struct mac_configuration_cmd *)(raw->rdata);

	/* If first entry contains a SET bit - the command was ADD,
	 * otherwise - DEL_ALL
	 */
	if (GET_FLAG(data->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
		int i, len = data->hdr.length;

		/* Break if it was a RESTORE command - a non-empty registry
		 * means these MACs are already listed.
		 */
		if (!list_empty(&o->registry.exact_match.macs))
			return 0;

		/* All list elements share ONE kcalloc() block; the DEL
		 * branch below frees the whole block via its first element.
		 */
		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
		if (!elem) {
			BNX2X_ERR("Failed to allocate registry memory\n");
			return -ENOMEM;
		}

		for (i = 0; i < len; i++, elem++) {
			/* Convert the firmware hi/mid/lo layout back into a
			 * plain 6-byte MAC for the registry entry.
			 */
			bnx2x_get_fw_mac_addr(
				&data->config_table[i].msb_mac_addr,
				&data->config_table[i].middle_mac_addr,
				&data->config_table[i].lsb_mac_addr,
				elem->mac);
			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
			   elem->mac);
			list_add_tail(&elem->link,
				      &o->registry.exact_match.macs);
		}
	} else {
		/* DEL_ALL: the first element is the base of the single
		 * allocation made in the ADD branch, so one kfree() releases
		 * the entire registry. NOTE(review): assumes the list is
		 * non-empty here - confirm a DEL never completes against an
		 * empty registry.
		 */
		elem = list_first_entry(&o->registry.exact_match.macs,
					struct bnx2x_mcast_mac_elem, link);
		DP(BNX2X_MSG_SP, "Deleting a registry\n");
		kfree(elem);
		INIT_LIST_HEAD(&o->registry.exact_match.macs);
	}

	return 0;
}
3470
/* bnx2x_mcast_setup_e1 - execute one mcast command on 57710.
 *
 * Fills the ramrod data buffer from the pending-commands queue (or the
 * current command if nothing was pending), refreshes the registry and,
 * unless RAMROD_DRV_CLR_ONLY is set, posts a SET_MAC ramrod.
 * Returns 1 when a ramrod completion is still pending, 0 when done,
 * or a negative errno.
 */
static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(raw->rdata);
	int cnt = 0, i, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* First set all entries as invalid */
	for (i = 0; i < o->max_cmd_len ; i++)
		SET_FLAG(data->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	/* Handle pending commands first */
	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there were no pending commands */
	if (!cnt)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);

	/* For 57710 every command has o->max_cmd_len length to ensure that
	 * commands are done one at a time.
	 */
	o->total_pending_num -= o->max_cmd_len;

	/* send a ramrod */

	WARN_ON(cnt > o->max_cmd_len);

	/* Set ramrod header (in particular, a number of entries to update) */
	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);

	/* update a registry: we need the registry contents to be always up
	 * to date in order to be able to execute a RESTORE opcode. Here
	 * we use the fact that for 57710 we sent one command at a time
	 * hence we may take the registry update out of the command handling
	 * and do it in a simpler way here.
	 */
	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
	if (rc)
		return rc;

	/*
	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/*
		 *  No need for an explicit memory barrier here as long we would
		 *  need to ensure the ordering of writing to the SPQ element
		 *  and updating of the SPQ producer which involves a memory
		 *  read and we will have to put a full memory barrier there
		 *  (inside bnx2x_sp_post()).
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
				   U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}

}
3552
/* Registry size for exact-match objects (57710): number of MACs listed */
static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
{
	return o->registry.exact_match.num_macs_set;
}
3557
/* Registry size for approximate-match objects: number of bins set */
static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
{
	return o->registry.aprox_match.num_bins_set;
}
3562
/* Record the exact-match registry size (number of MACs) */
static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.exact_match.num_macs_set = n;
}
3568
/* Record the approximate-match registry size (number of bins) */
static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.aprox_match.num_bins_set = n;
}
3574
/**
 * bnx2x_config_mcast - issue a multicast configuration command
 *
 * @bp:         device handle
 * @p:          ramrod parameters (object, MAC list, flags)
 * @cmd:        command to execute (ADD/DEL/RESTORE/CONT)
 *
 * Validates the command, queues it if the chip is busy or the credit is
 * exhausted, and otherwise programs it immediately. On failure the
 * registry size and pending credit are rolled back via o->revert().
 * Returns a negative errno on failure, otherwise the result of
 * o->config_mcast() / o->wait_comp() (0, or 1 for completion pending).
 */
int bnx2x_config_mcast(struct bnx2x *bp,
		       struct bnx2x_mcast_ramrod_params *p,
		       enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc = 0, old_reg_size;

	/* This is needed to recover number of currently configured mcast macs
	 * in case of failure.
	 */
	old_reg_size = o->get_registry_size(o);

	/* Do some calculations and checks */
	rc = o->validate(bp, p, cmd);
	if (rc)
		return rc;

	/* Return if there is no work to do */
	if ((!p->mcast_list_len) && (!o->check_sched(o)))
		return 0;

	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
	   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);

	/* Enqueue the current command to the pending list if we can't complete
	 * it in the current iteration
	 */
	if (r->check_pending(r) ||
	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
		if (rc < 0)
			goto error_exit1;

		/* As long as the current command is in a command list we
		 * don't need to handle it separately.
		 */
		p->mcast_list_len = 0;
	}

	if (!r->check_pending(r)) {

		/* Set 'pending' state */
		r->set_pending(r);

		/* Configure the new classification in the chip */
		rc = o->config_mcast(bp, p, cmd);
		if (rc < 0)
			goto error_exit2;

		/* Wait for a ramrod completion if was requested */
		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
			rc = o->wait_comp(bp, o);
	}

	return rc;

error_exit2:
	/* config_mcast() failed after we claimed the pending state */
	r->clear_pending(r);

error_exit1:
	/* Undo the validate() bookkeeping (registry size, pending credit) */
	o->revert(bp, p, old_reg_size);

	return rc;
}
3640
/* Clear this object's SCHEDULED bit; the barriers order the atomic bit
 * update against surrounding memory accesses.
 */
static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_clear_bit();
}
3647
/* Set this object's SCHEDULED bit.
 * NOTE(review): the clear_bit-flavored barrier helpers are used around
 * set_bit() here - presumably they act as generic barriers for atomic
 * bitops on this kernel; confirm against the arch's bitops docs.
 */
static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_clear_bit();
}
3654
3655 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3656 {
3657         return !!test_bit(o->sched_state, o->raw.pstate);
3658 }
3659
3660 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3661 {
3662         return o->raw.check_pending(&o->raw) || o->check_sched(o);
3663 }
3664
/**
 * bnx2x_init_mcast_obj - bind the chip-specific mcast implementation
 *
 * @bp:            device handle
 * @mcast_obj:     object to initialize
 * @mcast_cl_id:   client id used for mcast rules
 * @mcast_cid:     connection id for mcast ramrods
 * @func_id:       PCI function id
 * @engine_id:     engine this object belongs to
 * @rdata:         ramrod data buffer
 * @rdata_mapping: DMA address of @rdata
 * @state:         pending-state bit owned by this object
 * @pstate:        pointer to the state word holding @state
 * @type:          object type
 *
 * Selects the E1 (57710, exact-match CAM), E1H (57711, approximate
 * match, no ramrod) or E2+ (57712) operation set.
 */
void bnx2x_init_mcast_obj(struct bnx2x *bp,
			  struct bnx2x_mcast_obj *mcast_obj,
			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
			  int state, unsigned long *pstate, bnx2x_obj_type type)
{
	memset(mcast_obj, 0, sizeof(*mcast_obj));

	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
			   rdata, rdata_mapping, state, pstate, type);

	mcast_obj->engine_id = engine_id;

	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);

	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
	mcast_obj->check_sched = bnx2x_mcast_check_sched;
	mcast_obj->set_sched = bnx2x_mcast_set_sched;
	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;

	if (CHIP_IS_E1(bp)) {
		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			bnx2x_mcast_handle_restore_cmd_e1;
		mcast_obj->check_pending     = bnx2x_mcast_check_pending;

		/* Command length is bounded by this function's CAM share */
		if (CHIP_REV_IS_SLOW(bp))
			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
		else
			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;

		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
		mcast_obj->validate          = bnx2x_mcast_validate_e1;
		mcast_obj->revert            = bnx2x_mcast_revert_e1;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_exact;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_exact;

		/* 57710 is the only chip that uses the exact match for mcast
		 * at the moment.
		 */
		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);

	} else if (CHIP_IS_E1H(bp)) {
		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
		mcast_obj->enqueue_cmd   = NULL;
		mcast_obj->hdl_restore   = NULL;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		/* 57711 doesn't send a ramrod, so it has unlimited credit
		 * for one command.
		 */
		mcast_obj->max_cmd_len       = -1;
		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = NULL;
		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	} else {
		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			bnx2x_mcast_handle_restore_cmd_e2;
		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
		/* TODO: There should be a proper HSI define for this number!!!
		 */
		mcast_obj->max_cmd_len       = 16;
		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
		mcast_obj->validate          = bnx2x_mcast_validate_e2;
		mcast_obj->revert            = bnx2x_mcast_revert_e2;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	}
}
3748
3749 /*************************** Credit handling **********************************/
3750
3751 /**
3752  * atomic_add_ifless - add if the result is less than a given value.
3753  *
3754  * @v:  pointer of type atomic_t
3755  * @a:  the amount to add to v...
3756  * @u:  ...if (v + a) is less than u.
3757  *
3758  * returns true if (v + a) was less than u, and false otherwise.
3759  *
3760  */
3761 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3762 {
3763         int c, old;
3764
3765         c = atomic_read(v);
3766         for (;;) {
3767                 if (unlikely(c + a >= u))
3768                         return false;
3769
3770                 old = atomic_cmpxchg((v), c, c + a);
3771                 if (likely(old == c))
3772                         break;
3773                 c = old;
3774         }
3775
3776         return true;
3777 }
3778
3779 /**
3780  * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
3781  *
3782  * @v:  pointer of type atomic_t
3783  * @a:  the amount to dec from v...
3784  * @u:  ...if (v - a) is more or equal than u.
3785  *
3786  * returns true if (v - a) was more or equal than u, and false
3787  * otherwise.
3788  */
3789 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3790 {
3791         int c, old;
3792
3793         c = atomic_read(v);
3794         for (;;) {
3795                 if (unlikely(c - a < u))
3796                         return false;
3797
3798                 old = atomic_cmpxchg((v), c, c - a);
3799                 if (likely(old == c))
3800                         break;
3801                 c = old;
3802         }
3803
3804         return true;
3805 }
3806
3807 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3808 {
3809         bool rc;
3810
3811         smp_mb();
3812         rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3813         smp_mb();
3814
3815         return rc;
3816 }
3817
3818 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3819 {
3820         bool rc;
3821
3822         smp_mb();
3823
3824         /* Don't let to refill if credit + cnt > pool_sz */
3825         rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3826
3827         smp_mb();
3828
3829         return rc;
3830 }
3831
3832 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3833 {
3834         int cur_credit;
3835
3836         smp_mb();
3837         cur_credit = atomic_read(&o->credit);
3838
3839         return cur_credit;
3840 }
3841
/* get/put stub for unlimited pools: every request succeeds */
static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
					  int cnt)
{
	return true;
}
3847
3848
/* Allocate one free CAM entry from the pool's mirror bitmap.
 *
 * On success clears the entry's bit, stores its absolute CAM offset
 * (base + index) in *offset and returns true. Returns false (and
 * *offset == -1) when the pool is exhausted.
 */
static bool bnx2x_credit_pool_get_entry(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	int idx, vec, i;

	*offset = -1;

	/* Find "internal cam-offset" then add to base for this object... */
	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {

		/* Skip the current vector if there are no free entries in it */
		if (!o->pool_mirror[vec])
			continue;

		/* If we've got here we are going to find a free entry */
		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
		      i < BIT_VEC64_ELEM_SZ; idx++, i++)

			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
				/* Got one!! */
				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
				*offset = o->base_pool_offset + idx;
				return true;
			}
	}

	return false;
}
3878
3879 static bool bnx2x_credit_pool_put_entry(
3880         struct bnx2x_credit_pool_obj *o,
3881         int offset)
3882 {
3883         if (offset < o->base_pool_offset)
3884                 return false;
3885
3886         offset -= o->base_pool_offset;
3887
3888         if (offset >= o->pool_sz)
3889                 return false;
3890
3891         /* Return the entry to the pool */
3892         BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3893
3894         return true;
3895 }
3896
/* put_entry stub for pools without CAM entry handling: always succeeds */
static bool bnx2x_credit_pool_put_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int offset)
{
	return true;
}
3903
/* get_entry stub for pools without CAM entry handling: succeeds with a
 * sentinel offset of -1 (no real CAM slot is assigned).
 */
static bool bnx2x_credit_pool_get_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	*offset = -1;
	return true;
}
/**
 * bnx2x_init_credit_pool - initialize credit pool internals.
 *
 * @p:          pool object to initialize
 * @base:       Base entry in the CAM to use.
 * @credit:     pool size.
 *
 * If base is negative no CAM entries handling will be performed.
 * If credit is negative pool operations will always succeed (unlimited pool).
 *
 */
static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
					  int base, int credit)
{
	/* Zero the object first */
	memset(p, 0, sizeof(*p));

	/* Set the table to all 1s */
	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));

	/* Init a pool as full */
	atomic_set(&p->credit, credit);

	/* The total pool size */
	p->pool_sz = credit;

	p->base_pool_offset = base;

	/* Commit the change */
	smp_mb();

	p->check = bnx2x_credit_pool_check;

	/* if pool credit is negative - disable the checks */
	if (credit >= 0) {
		p->put      = bnx2x_credit_pool_put;
		p->get      = bnx2x_credit_pool_get;
		p->put_entry = bnx2x_credit_pool_put_entry;
		p->get_entry = bnx2x_credit_pool_get_entry;
	} else {
		p->put      = bnx2x_credit_pool_always_true;
		p->get      = bnx2x_credit_pool_always_true;
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}

	/* If base is negative - disable entries handling */
	if (base < 0) {
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}
}
3963
3964 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3965                                 struct bnx2x_credit_pool_obj *p, u8 func_id,
3966                                 u8 func_num)
3967 {
3968 /* TODO: this will be defined in consts as well... */
3969 #define BNX2X_CAM_SIZE_EMUL 5
3970
3971         int cam_sz;
3972
3973         if (CHIP_IS_E1(bp)) {
3974                 /* In E1, Multicast is saved in cam... */
3975                 if (!CHIP_REV_IS_SLOW(bp))
3976                         cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3977                 else
3978                         cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3979
3980                 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3981
3982         } else if (CHIP_IS_E1H(bp)) {
3983                 /* CAM credit is equaly divided between all active functions
3984                  * on the PORT!.
3985                  */
3986                 if ((func_num > 0)) {
3987                         if (!CHIP_REV_IS_SLOW(bp))
3988                                 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3989                         else
3990                                 cam_sz = BNX2X_CAM_SIZE_EMUL;
3991                         bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3992                 } else {
3993                         /* this should never happen! Block MAC operations. */
3994                         bnx2x_init_credit_pool(p, 0, 0);
3995                 }
3996
3997         } else {
3998
3999                 /*
4000                  * CAM credit is equaly divided between all active functions
4001                  * on the PATH.
4002                  */
4003                 if ((func_num > 0)) {
4004                         if (!CHIP_REV_IS_SLOW(bp))
4005                                 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
4006                         else
4007                                 cam_sz = BNX2X_CAM_SIZE_EMUL;
4008
4009                         /*
4010                          * No need for CAM entries handling for 57712 and
4011                          * newer.
4012                          */
4013                         bnx2x_init_credit_pool(p, -1, cam_sz);
4014                 } else {
4015                         /* this should never happen! Block MAC operations. */
4016                         bnx2x_init_credit_pool(p, 0, 0);
4017                 }
4018
4019         }
4020 }
4021
4022 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4023                                  struct bnx2x_credit_pool_obj *p,
4024                                  u8 func_id,
4025                                  u8 func_num)
4026 {
4027         if (CHIP_IS_E1x(bp)) {
4028                 /*
4029                  * There is no VLAN credit in HW on 57710 and 57711 only
4030                  * MAC / MAC-VLAN can be set
4031                  */
4032                 bnx2x_init_credit_pool(p, 0, -1);
4033         } else {
4034                 /*
4035                  * CAM credit is equaly divided between all active functions
4036                  * on the PATH.
4037                  */
4038                 if (func_num > 0) {
4039                         int credit = MAX_VLAN_CREDIT_E2 / func_num;
4040                         bnx2x_init_credit_pool(p, func_id * credit, credit);
4041                 } else
4042                         /* this should never happen! Block VLAN operations. */
4043                         bnx2x_init_credit_pool(p, 0, 0);
4044         }
4045 }
4046
4047 /****************** RSS Configuration ******************/
4048 /**
4049  * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4050  *
4051  * @bp:         driver hanlde
4052  * @p:          pointer to rss configuration
4053  *
4054  * Prints it when NETIF_MSG_IFUP debug level is configured.
4055  */
4056 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4057                                         struct bnx2x_config_rss_params *p)
4058 {
4059         int i;
4060
4061         DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4062         DP(BNX2X_MSG_SP, "0x0000: ");
4063         for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4064                 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4065
4066                 /* Print 4 bytes in a line */
4067                 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4068                     (((i + 1) & 0x3) == 0)) {
4069                         DP_CONT(BNX2X_MSG_SP, "\n");
4070                         DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4071                 }
4072         }
4073
4074         DP_CONT(BNX2X_MSG_SP, "\n");
4075 }
4076
4077 /**
4078  * bnx2x_setup_rss - configure RSS
4079  *
4080  * @bp:         device handle
4081  * @p:          rss configuration
4082  *
4083  * sends on UPDATE ramrod for that matter.
4084  */
4085 static int bnx2x_setup_rss(struct bnx2x *bp,
4086                            struct bnx2x_config_rss_params *p)
4087 {
4088         struct bnx2x_rss_config_obj *o = p->rss_obj;
4089         struct bnx2x_raw_obj *r = &o->raw;
4090         struct eth_rss_update_ramrod_data *data =
4091                 (struct eth_rss_update_ramrod_data *)(r->rdata);
4092         u8 rss_mode = 0;
4093         int rc;
4094
4095         memset(data, 0, sizeof(*data));
4096
4097         DP(BNX2X_MSG_SP, "Configuring RSS\n");
4098
4099         /* Set an echo field */
4100         data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4101                                  (r->state << BNX2X_SWCID_SHIFT));
4102
4103         /* RSS mode */
4104         if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4105                 rss_mode = ETH_RSS_MODE_DISABLED;
4106         else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4107                 rss_mode = ETH_RSS_MODE_REGULAR;
4108
4109         data->rss_mode = rss_mode;
4110
4111         DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4112
4113         /* RSS capabilities */
4114         if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4115                 data->capabilities |=
4116                         ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4117
4118         if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4119                 data->capabilities |=
4120                         ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4121
4122         if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4123                 data->capabilities |=
4124                         ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4125
4126         if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4127                 data->capabilities |=
4128                         ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4129
4130         if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4131                 data->capabilities |=
4132                         ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4133
4134         if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4135                 data->capabilities |=
4136                         ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4137
4138         /* Hashing mask */
4139         data->rss_result_mask = p->rss_result_mask;
4140
4141         /* RSS engine ID */
4142         data->rss_engine_id = o->engine_id;
4143
4144         DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4145
4146         /* Indirection table */
4147         memcpy(data->indirection_table, p->ind_table,
4148                   T_ETH_INDIRECTION_TABLE_SIZE);
4149
4150         /* Remember the last configuration */
4151         memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4152
4153         /* Print the indirection table */
4154         if (netif_msg_ifup(bp))
4155                 bnx2x_debug_print_ind_table(bp, p);
4156
4157         /* RSS keys */
4158         if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4159                 memcpy(&data->rss_key[0], &p->rss_key[0],
4160                        sizeof(data->rss_key));
4161                 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4162         }
4163
4164         /*
4165          *  No need for an explicit memory barrier here as long we would
4166          *  need to ensure the ordering of writing to the SPQ element
4167          *  and updating of the SPQ producer which involves a memory
4168          *  read and we will have to put a full memory barrier there
4169          *  (inside bnx2x_sp_post()).
4170          */
4171
4172         /* Send a ramrod */
4173         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4174                            U64_HI(r->rdata_mapping),
4175                            U64_LO(r->rdata_mapping),
4176                            ETH_CONNECTION_TYPE);
4177
4178         if (rc < 0)
4179                 return rc;
4180
4181         return 1;
4182 }
4183
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
			     u8 *ind_table)
{
	/* Copy out the last indirection table cached by bnx2x_setup_rss().
	 * @ind_table must be able to hold sizeof(rss_obj->ind_table) bytes
	 * (T_ETH_INDIRECTION_TABLE_SIZE entries are cached there).
	 */
	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
}
4189
4190 int bnx2x_config_rss(struct bnx2x *bp,
4191                      struct bnx2x_config_rss_params *p)
4192 {
4193         int rc;
4194         struct bnx2x_rss_config_obj *o = p->rss_obj;
4195         struct bnx2x_raw_obj *r = &o->raw;
4196
4197         /* Do nothing if only driver cleanup was requested */
4198         if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4199                 return 0;
4200
4201         r->set_pending(r);
4202
4203         rc = o->config_rss(bp, p);
4204         if (rc < 0) {
4205                 r->clear_pending(r);
4206                 return rc;
4207         }
4208
4209         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4210                 rc = r->wait_comp(bp, r);
4211
4212         return rc;
4213 }
4214
4215
void bnx2x_init_rss_config_obj(struct bnx2x *bp,
                               struct bnx2x_rss_config_obj *rss_obj,
                               u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
                               void *rdata, dma_addr_t rdata_mapping,
                               int state, unsigned long *pstate,
                               bnx2x_obj_type type)
{
	/* Initialize the embedded raw object (cid, ramrod data buffer,
	 * state tracking) ...
	 */
	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
			   rdata_mapping, state, pstate, type);

	/* ... then the RSS-specific fields and the config virtual method */
	rss_obj->engine_id  = engine_id;
	rss_obj->config_rss = bnx2x_setup_rss;
}
4229
4230 /********************** Queue state object ***********************************/
4231
4232 /**
4233  * bnx2x_queue_state_change - perform Queue state change transition
4234  *
4235  * @bp:         device handle
4236  * @params:     parameters to perform the transition
4237  *
4238  * returns 0 in case of successfully completed transition, negative error
4239  * code in case of failure, positive (EBUSY) value if there is a completion
4240  * to that is still pending (possible only if RAMROD_COMP_WAIT is
4241  * not set in params->ramrod_flags for asynchronous commands).
4242  *
4243  */
int bnx2x_queue_state_change(struct bnx2x *bp,
			     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	int rc, pending_bit;
	unsigned long *pending = &o->pending;

	/* Check that the requested transition is legal */
	rc = o->check_transition(bp, o, params);
	if (rc) {
		BNX2X_ERR("check transition returned an error. rc %d\n", rc);
		return -EINVAL;
	}

	/* Set "pending" bit - cleared either by the completion handler
	 * or, on a send failure, by the rollback below.
	 */
	DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
	pending_bit = o->set_pending(o, params);
	DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
		o->complete_cmd(bp, o, pending_bit);
	else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);
		if (rc) {
			/* Roll back: drop the planned transition and clear
			 * the pending bit set above so later commands are
			 * not blocked.
			 */
			o->next_state = BNX2X_Q_STATE_MAX;
			clear_bit(pending_bit, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, pending_bit);
			if (rc)
				return rc;

			return 0;
		}
	}

	/* Non-zero (EBUSY-like) when the completion is still pending */
	return !!test_bit(pending_bit, pending);
}
4287
4288
4289 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4290                                    struct bnx2x_queue_state_params *params)
4291 {
4292         enum bnx2x_queue_cmd cmd = params->cmd, bit;
4293
4294         /* ACTIVATE and DEACTIVATE commands are implemented on top of
4295          * UPDATE command.
4296          */
4297         if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4298             (cmd == BNX2X_Q_CMD_DEACTIVATE))
4299                 bit = BNX2X_Q_CMD_UPDATE;
4300         else
4301                 bit = cmd;
4302
4303         set_bit(bit, &obj->pending);
4304         return bit;
4305 }
4306
static int bnx2x_queue_wait_comp(struct bnx2x *bp,
				 struct bnx2x_queue_sp_obj *o,
				 enum bnx2x_queue_cmd cmd)
{
	/* Block until the @cmd bit in o->pending is cleared by the
	 * matching ramrod completion (or the wait fails).
	 */
	return bnx2x_state_wait(bp, cmd, &o->pending);
}
4313
4314 /**
4315  * bnx2x_queue_comp_cmd - complete the state change command.
4316  *
4317  * @bp:         device handle
4318  * @o:
4319  * @cmd:
4320  *
4321  * Checks that the arrived completion is expected.
4322  */
static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				enum bnx2x_queue_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	/* Test against a local snapshot; the real pending bit is only
	 * cleared at the bottom, after the state has been committed.
	 */
	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
			  o->state, cur_pending, o->next_state);
		return -EINVAL;
	}

	if (o->next_tx_only >= o->max_cos)
		/* >= because tx-only must always be smaller than cos since
		 * the primary connection supports COS 0
		 */
		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
			   o->next_tx_only, o->max_cos);

	DP(BNX2X_MSG_SP,
	   "Completing command %d for queue %d, setting state to %d\n",
	   cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);

	if (o->next_tx_only)  /* print num tx-only if any exist */
		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
		   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);

	/* Commit the transition that was planned when the command was sent */
	o->state = o->next_state;
	o->num_tx_only = o->next_tx_only;
	o->next_state = BNX2X_Q_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}
4365
4366 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4367                                 struct bnx2x_queue_state_params *cmd_params,
4368                                 struct client_init_ramrod_data *data)
4369 {
4370         struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4371
4372         /* Rx data */
4373
4374         /* IPv6 TPA supported for E2 and above only */
4375         data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4376                                 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4377 }
4378
4379 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4380                                 struct bnx2x_queue_sp_obj *o,
4381                                 struct bnx2x_general_setup_params *params,
4382                                 struct client_init_general_data *gen_data,
4383                                 unsigned long *flags)
4384 {
4385         gen_data->client_id = o->cl_id;
4386
4387         if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4388                 gen_data->statistics_counter_id =
4389                                         params->stat_id;
4390                 gen_data->statistics_en_flg = 1;
4391                 gen_data->statistics_zero_flg =
4392                         test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4393         } else
4394                 gen_data->statistics_counter_id =
4395                                         DISABLE_STATISTIC_COUNTER_ID_VALUE;
4396
4397         gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4398         gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4399         gen_data->sp_client_id = params->spcl_id;
4400         gen_data->mtu = cpu_to_le16(params->mtu);
4401         gen_data->func_id = o->func_id;
4402
4403
4404         gen_data->cos = params->cos;
4405
4406         gen_data->traffic_type =
4407                 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4408                 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4409
4410         DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4411            gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4412 }
4413
4414 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4415                                 struct bnx2x_txq_setup_params *params,
4416                                 struct client_init_tx_data *tx_data,
4417                                 unsigned long *flags)
4418 {
4419         tx_data->enforce_security_flg =
4420                 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4421         tx_data->default_vlan =
4422                 cpu_to_le16(params->default_vlan);
4423         tx_data->default_vlan_flg =
4424                 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4425         tx_data->tx_switching_flg =
4426                 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4427         tx_data->anti_spoofing_flg =
4428                 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4429         tx_data->force_default_pri_flg =
4430                 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4431
4432         tx_data->tunnel_lso_inc_ip_id =
4433                 test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4434         tx_data->tunnel_non_lso_pcsum_location =
4435                 test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
4436                                                                   PCSUM_ON_BD;
4437
4438         tx_data->tx_status_block_id = params->fw_sb_id;
4439         tx_data->tx_sb_index_number = params->sb_cq_index;
4440         tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4441
4442         tx_data->tx_bd_page_base.lo =
4443                 cpu_to_le32(U64_LO(params->dscr_map));
4444         tx_data->tx_bd_page_base.hi =
4445                 cpu_to_le32(U64_HI(params->dscr_map));
4446
4447         /* Don't configure any Tx switching mode during queue SETUP */
4448         tx_data->state = 0;
4449 }
4450
4451 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4452                                 struct rxq_pause_params *params,
4453                                 struct client_init_rx_data *rx_data)
4454 {
4455         /* flow control data */
4456         rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4457         rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4458         rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4459         rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4460         rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4461         rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4462         rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4463 }
4464
4465 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4466                                 struct bnx2x_rxq_setup_params *params,
4467                                 struct client_init_rx_data *rx_data,
4468                                 unsigned long *flags)
4469 {
4470         rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4471                                 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4472         rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4473                                 CLIENT_INIT_RX_DATA_TPA_MODE;
4474         rx_data->vmqueue_mode_en_flg = 0;
4475
4476         rx_data->cache_line_alignment_log_size =
4477                 params->cache_line_log;
4478         rx_data->enable_dynamic_hc =
4479                 test_bit(BNX2X_Q_FLG_DHC, flags);
4480         rx_data->max_sges_for_packet = params->max_sges_pkt;
4481         rx_data->client_qzone_id = params->cl_qzone_id;
4482         rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4483
4484         /* Always start in DROP_ALL mode */
4485         rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4486                                      CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4487
4488         /* We don't set drop flags */
4489         rx_data->drop_ip_cs_err_flg = 0;
4490         rx_data->drop_tcp_cs_err_flg = 0;
4491         rx_data->drop_ttl0_flg = 0;
4492         rx_data->drop_udp_cs_err_flg = 0;
4493         rx_data->inner_vlan_removal_enable_flg =
4494                 test_bit(BNX2X_Q_FLG_VLAN, flags);
4495         rx_data->outer_vlan_removal_enable_flg =
4496                 test_bit(BNX2X_Q_FLG_OV, flags);
4497         rx_data->status_block_id = params->fw_sb_id;
4498         rx_data->rx_sb_index_number = params->sb_cq_index;
4499         rx_data->max_tpa_queues = params->max_tpa_queues;
4500         rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4501         rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4502         rx_data->bd_page_base.lo =
4503                 cpu_to_le32(U64_LO(params->dscr_map));
4504         rx_data->bd_page_base.hi =
4505                 cpu_to_le32(U64_HI(params->dscr_map));
4506         rx_data->sge_page_base.lo =
4507                 cpu_to_le32(U64_LO(params->sge_map));
4508         rx_data->sge_page_base.hi =
4509                 cpu_to_le32(U64_HI(params->sge_map));
4510         rx_data->cqe_page_base.lo =
4511                 cpu_to_le32(U64_LO(params->rcq_map));
4512         rx_data->cqe_page_base.hi =
4513                 cpu_to_le32(U64_HI(params->rcq_map));
4514         rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4515
4516         if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4517                 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4518                 rx_data->is_approx_mcast = 1;
4519         }
4520
4521         rx_data->rss_engine_id = params->rss_engine_id;
4522
4523         /* silent vlan removal */
4524         rx_data->silent_vlan_removal_flg =
4525                 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4526         rx_data->silent_vlan_value =
4527                 cpu_to_le16(params->silent_removal_value);
4528         rx_data->silent_vlan_mask =
4529                 cpu_to_le16(params->silent_removal_mask);
4530
4531 }
4532
4533 /* initialize the general, tx and rx parts of a queue object */
4534 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4535                                 struct bnx2x_queue_state_params *cmd_params,
4536                                 struct client_init_ramrod_data *data)
4537 {
4538         bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4539                                        &cmd_params->params.setup.gen_params,
4540                                        &data->general,
4541                                        &cmd_params->params.setup.flags);
4542
4543         bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4544                                   &cmd_params->params.setup.txq_params,
4545                                   &data->tx,
4546                                   &cmd_params->params.setup.flags);
4547
4548         bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4549                                   &cmd_params->params.setup.rxq_params,
4550                                   &data->rx,
4551                                   &cmd_params->params.setup.flags);
4552
4553         bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4554                                      &cmd_params->params.setup.pause_params,
4555                                      &data->rx);
4556 }
4557
4558 /* initialize the general and tx parts of a tx-only queue object */
4559 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4560                                 struct bnx2x_queue_state_params *cmd_params,
4561                                 struct tx_queue_init_ramrod_data *data)
4562 {
4563         bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4564                                        &cmd_params->params.tx_only.gen_params,
4565                                        &data->general,
4566                                        &cmd_params->params.tx_only.flags);
4567
4568         bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4569                                   &cmd_params->params.tx_only.txq_params,
4570                                   &data->tx,
4571                                   &cmd_params->params.tx_only.flags);
4572
4573         DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4574                          cmd_params->q_obj->cids[0],
4575                          data->tx.tx_bd_page_base.lo,
4576                          data->tx.tx_bd_page_base.hi);
4577 }
4578
4579 /**
4580  * bnx2x_q_init - init HW/FW queue
4581  *
4582  * @bp:         device handle
4583  * @params:
4584  *
4585  * HW/FW initial Queue configuration:
4586  *      - HC: Rx and Tx
4587  *      - CDU context validation
4588  *
4589  */
4590 static inline int bnx2x_q_init(struct bnx2x *bp,
4591                                struct bnx2x_queue_state_params *params)
4592 {
4593         struct bnx2x_queue_sp_obj *o = params->q_obj;
4594         struct bnx2x_queue_init_params *init = &params->params.init;
4595         u16 hc_usec;
4596         u8 cos;
4597
4598         /* Tx HC configuration */
4599         if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4600             test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4601                 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4602
4603                 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4604                         init->tx.sb_cq_index,
4605                         !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4606                         hc_usec);
4607         }
4608
4609         /* Rx HC configuration */
4610         if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4611             test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4612                 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4613
4614                 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4615                         init->rx.sb_cq_index,
4616                         !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4617                         hc_usec);
4618         }
4619
4620         /* Set CDU context validation values */
4621         for (cos = 0; cos < o->max_cos; cos++) {
4622                 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4623                                  o->cids[cos], cos);
4624                 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4625                 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4626         }
4627
4628         /* As no ramrod is sent, complete the command immediately  */
4629         o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4630
4631         mmiowb();
4632         smp_mb();
4633
4634         return 0;
4635 }
4636
4637 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4638                                         struct bnx2x_queue_state_params *params)
4639 {
4640         struct bnx2x_queue_sp_obj *o = params->q_obj;
4641         struct client_init_ramrod_data *rdata =
4642                 (struct client_init_ramrod_data *)o->rdata;
4643         dma_addr_t data_mapping = o->rdata_mapping;
4644         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4645
4646         /* Clear the ramrod data */
4647         memset(rdata, 0, sizeof(*rdata));
4648
4649         /* Fill the ramrod data */
4650         bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4651
4652         /*
4653          *  No need for an explicit memory barrier here as long we would
4654          *  need to ensure the ordering of writing to the SPQ element
4655          *  and updating of the SPQ producer which involves a memory
4656          *  read and we will have to put a full memory barrier there
4657          *  (inside bnx2x_sp_post()).
4658          */
4659
4660         return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4661                              U64_HI(data_mapping),
4662                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4663 }
4664
4665 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4666                                         struct bnx2x_queue_state_params *params)
4667 {
4668         struct bnx2x_queue_sp_obj *o = params->q_obj;
4669         struct client_init_ramrod_data *rdata =
4670                 (struct client_init_ramrod_data *)o->rdata;
4671         dma_addr_t data_mapping = o->rdata_mapping;
4672         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4673
4674         /* Clear the ramrod data */
4675         memset(rdata, 0, sizeof(*rdata));
4676
4677         /* Fill the ramrod data */
4678         bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4679         bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4680
4681         /*
4682          *  No need for an explicit memory barrier here as long we would
4683          *  need to ensure the ordering of writing to the SPQ element
4684          *  and updating of the SPQ producer which involves a memory
4685          *  read and we will have to put a full memory barrier there
4686          *  (inside bnx2x_sp_post()).
4687          */
4688
4689         return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4690                              U64_HI(data_mapping),
4691                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4692 }
4693
4694 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4695                                   struct bnx2x_queue_state_params *params)
4696 {
4697         struct bnx2x_queue_sp_obj *o = params->q_obj;
4698         struct tx_queue_init_ramrod_data *rdata =
4699                 (struct tx_queue_init_ramrod_data *)o->rdata;
4700         dma_addr_t data_mapping = o->rdata_mapping;
4701         int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4702         struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4703                 &params->params.tx_only;
4704         u8 cid_index = tx_only_params->cid_index;
4705
4706
4707         if (cid_index >= o->max_cos) {
4708                 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4709                           o->cl_id, cid_index);
4710                 return -EINVAL;
4711         }
4712
4713         DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4714                          tx_only_params->gen_params.cos,
4715                          tx_only_params->gen_params.spcl_id);
4716
4717         /* Clear the ramrod data */
4718         memset(rdata, 0, sizeof(*rdata));
4719
4720         /* Fill the ramrod data */
4721         bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4722
4723         DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4724                          o->cids[cid_index], rdata->general.client_id,
4725                          rdata->general.sp_client_id, rdata->general.cos);
4726
4727         /*
4728          *  No need for an explicit memory barrier here as long we would
4729          *  need to ensure the ordering of writing to the SPQ element
4730          *  and updating of the SPQ producer which involves a memory
4731          *  read and we will have to put a full memory barrier there
4732          *  (inside bnx2x_sp_post()).
4733          */
4734
4735         return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4736                              U64_HI(data_mapping),
4737                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4738 }
4739
4740 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4741                                      struct bnx2x_queue_sp_obj *obj,
4742                                      struct bnx2x_queue_update_params *params,
4743                                      struct client_update_ramrod_data *data)
4744 {
4745         /* Client ID of the client to update */
4746         data->client_id = obj->cl_id;
4747
4748         /* Function ID of the client to update */
4749         data->func_id = obj->func_id;
4750
4751         /* Default VLAN value */
4752         data->default_vlan = cpu_to_le16(params->def_vlan);
4753
4754         /* Inner VLAN stripping */
4755         data->inner_vlan_removal_enable_flg =
4756                 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4757         data->inner_vlan_removal_change_flg =
4758                 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4759                          &params->update_flags);
4760
4761         /* Outer VLAN sripping */
4762         data->outer_vlan_removal_enable_flg =
4763                 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4764         data->outer_vlan_removal_change_flg =
4765                 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4766                          &params->update_flags);
4767
4768         /* Drop packets that have source MAC that doesn't belong to this
4769          * Queue.
4770          */
4771         data->anti_spoofing_enable_flg =
4772                 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4773         data->anti_spoofing_change_flg =
4774                 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4775
4776         /* Activate/Deactivate */
4777         data->activate_flg =
4778                 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4779         data->activate_change_flg =
4780                 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4781
4782         /* Enable default VLAN */
4783         data->default_vlan_enable_flg =
4784                 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4785         data->default_vlan_change_flg =
4786                 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4787                          &params->update_flags);
4788
4789         /* silent vlan removal */
4790         data->silent_vlan_change_flg =
4791                 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4792                          &params->update_flags);
4793         data->silent_vlan_removal_flg =
4794                 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4795         data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4796         data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4797 }
4798
4799 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4800                                       struct bnx2x_queue_state_params *params)
4801 {
4802         struct bnx2x_queue_sp_obj *o = params->q_obj;
4803         struct client_update_ramrod_data *rdata =
4804                 (struct client_update_ramrod_data *)o->rdata;
4805         dma_addr_t data_mapping = o->rdata_mapping;
4806         struct bnx2x_queue_update_params *update_params =
4807                 &params->params.update;
4808         u8 cid_index = update_params->cid_index;
4809
4810         if (cid_index >= o->max_cos) {
4811                 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4812                           o->cl_id, cid_index);
4813                 return -EINVAL;
4814         }
4815
4816
4817         /* Clear the ramrod data */
4818         memset(rdata, 0, sizeof(*rdata));
4819
4820         /* Fill the ramrod data */
4821         bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4822
4823         /*
4824          *  No need for an explicit memory barrier here as long we would
4825          *  need to ensure the ordering of writing to the SPQ element
4826          *  and updating of the SPQ producer which involves a memory
4827          *  read and we will have to put a full memory barrier there
4828          *  (inside bnx2x_sp_post()).
4829          */
4830
4831         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4832                              o->cids[cid_index], U64_HI(data_mapping),
4833                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4834 }
4835
4836 /**
4837  * bnx2x_q_send_deactivate - send DEACTIVATE command
4838  *
4839  * @bp:         device handle
4840  * @params:
4841  *
4842  * implemented using the UPDATE command.
4843  */
4844 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4845                                         struct bnx2x_queue_state_params *params)
4846 {
4847         struct bnx2x_queue_update_params *update = &params->params.update;
4848
4849         memset(update, 0, sizeof(*update));
4850
4851         __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4852
4853         return bnx2x_q_send_update(bp, params);
4854 }
4855
4856 /**
4857  * bnx2x_q_send_activate - send ACTIVATE command
4858  *
4859  * @bp:         device handle
4860  * @params:
4861  *
4862  * implemented using the UPDATE command.
4863  */
4864 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4865                                         struct bnx2x_queue_state_params *params)
4866 {
4867         struct bnx2x_queue_update_params *update = &params->params.update;
4868
4869         memset(update, 0, sizeof(*update));
4870
4871         __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4872         __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4873
4874         return bnx2x_q_send_update(bp, params);
4875 }
4876
/**
 * bnx2x_q_send_update_tpa - send an UPDATE_TPA command
 *
 * @bp:         device handle
 * @params:     queue state parameters (currently unused)
 *
 * Not implemented yet. Return a proper negative errno instead of a raw
 * -1, which callers interpreting the kernel errno convention would
 * misread as -EPERM.
 */
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	/* TODO: Not implemented yet. */
	return -EINVAL;
}
4883
4884 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4885                                     struct bnx2x_queue_state_params *params)
4886 {
4887         struct bnx2x_queue_sp_obj *o = params->q_obj;
4888
4889         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4890                              o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4891                              ETH_CONNECTION_TYPE);
4892 }
4893
4894 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4895                                        struct bnx2x_queue_state_params *params)
4896 {
4897         struct bnx2x_queue_sp_obj *o = params->q_obj;
4898         u8 cid_idx = params->params.cfc_del.cid_index;
4899
4900         if (cid_idx >= o->max_cos) {
4901                 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4902                           o->cl_id, cid_idx);
4903                 return -EINVAL;
4904         }
4905
4906         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4907                              o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4908 }
4909
4910 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4911                                         struct bnx2x_queue_state_params *params)
4912 {
4913         struct bnx2x_queue_sp_obj *o = params->q_obj;
4914         u8 cid_index = params->params.terminate.cid_index;
4915
4916         if (cid_index >= o->max_cos) {
4917                 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4918                           o->cl_id, cid_index);
4919                 return -EINVAL;
4920         }
4921
4922         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4923                              o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4924 }
4925
4926 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4927                                      struct bnx2x_queue_state_params *params)
4928 {
4929         struct bnx2x_queue_sp_obj *o = params->q_obj;
4930
4931         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4932                              o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4933                              ETH_CONNECTION_TYPE);
4934 }
4935
/* Dispatch a queue state-machine command to its sender; these commands are
 * handled identically on every chip family. Returns the ramrod-posting
 * result, or -EINVAL for an unknown command.
 */
static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_INIT:
		return bnx2x_q_init(bp, params);
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
		return bnx2x_q_send_setup_tx_only(bp, params);
	case BNX2X_Q_CMD_DEACTIVATE:
		return bnx2x_q_send_deactivate(bp, params);
	case BNX2X_Q_CMD_ACTIVATE:
		return bnx2x_q_send_activate(bp, params);
	case BNX2X_Q_CMD_UPDATE:
		return bnx2x_q_send_update(bp, params);
	case BNX2X_Q_CMD_UPDATE_TPA:
		return bnx2x_q_send_update_tpa(bp, params);
	case BNX2X_Q_CMD_HALT:
		return bnx2x_q_send_halt(bp, params);
	case BNX2X_Q_CMD_CFC_DEL:
		return bnx2x_q_send_cfc_del(bp, params);
	case BNX2X_Q_CMD_TERMINATE:
		return bnx2x_q_send_terminate(bp, params);
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_q_send_empty(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
4965
/* Queue command dispatcher for E1/E1H chips: SETUP uses the e1x-specific
 * ramrod format, every other command falls through to the common handler.
 */
static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e1x(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
4988
/* Queue command dispatcher for E2 and newer chips: SETUP uses the
 * e2-specific ramrod format, every other command falls through to the
 * common handler.
 */
static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
				   struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e2(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
5011
/**
 * bnx2x_queue_chk_transition - check state machine of a regular Queue
 *
 * @bp:         device handle
 * @o:          queue object whose state machine is checked
 * @params:     requested command and its parameters
 *
 * (not Forwarding)
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         -EINVAL otherwise,
 *         -EBUSY if a previous transition is still pending.
 */
static int bnx2x_queue_chk_transition(struct bnx2x *bp,
				      struct bnx2x_queue_sp_obj *o,
				      struct bnx2x_queue_state_params *params)
{
	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
	enum bnx2x_queue_cmd cmd = params->cmd;
	struct bnx2x_queue_update_params *update_params =
		 &params->params.update;
	/* Track the number of tx-only queues; it changes with
	 * SETUP_TX_ONLY and CFC_DEL transitions below.
	 */
	u8 next_tx_only = o->num_tx_only;

	/*
	 * Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_Q_STATE_MAX;
	}

	/*
	 * Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending) {
		BNX2X_ERR("Blocking transition since pending was %lx\n",
			  o->pending);
		return -EBUSY;
	}

	switch (state) {
	case BNX2X_Q_STATE_RESET:
		if (cmd == BNX2X_Q_CMD_INIT)
			next_state = BNX2X_Q_STATE_INITIALIZED;

		break;
	case BNX2X_Q_STATE_INITIALIZED:
		if (cmd == BNX2X_Q_CMD_SETUP) {
			/* SETUP lands in ACTIVE or INACTIVE depending on
			 * whether the caller asked for an active queue.
			 */
			if (test_bit(BNX2X_Q_FLG_ACTIVE,
				     &params->params.setup.flags))
				next_state = BNX2X_Q_STATE_ACTIVE;
			else
				next_state = BNX2X_Q_STATE_INACTIVE;
		}

		break;
	case BNX2X_Q_STATE_ACTIVE:
		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
			next_state = BNX2X_Q_STATE_INACTIVE;

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_ACTIVE;

		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
			/* First tx-only companion queue: enter MULTI_COS */
			next_state = BNX2X_Q_STATE_MULTI_COS;
			next_tx_only = 1;
		}

		else if (cmd == BNX2X_Q_CMD_HALT)
			next_state = BNX2X_Q_STATE_STOPPED;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 *  state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				      &update_params->update_flags))
				next_state = BNX2X_Q_STATE_INACTIVE;
			else
				next_state = BNX2X_Q_STATE_ACTIVE;
		}

		break;
	case BNX2X_Q_STATE_MULTI_COS:
		if (cmd == BNX2X_Q_CMD_TERMINATE)
			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;

		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
			/* Another tx-only queue joins this CoS group */
			next_state = BNX2X_Q_STATE_MULTI_COS;
			next_tx_only = o->num_tx_only + 1;
		}

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_MULTI_COS;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 *  state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				      &update_params->update_flags))
				next_state = BNX2X_Q_STATE_INACTIVE;
			else
				next_state = BNX2X_Q_STATE_MULTI_COS;
		}

		break;
	case BNX2X_Q_STATE_MCOS_TERMINATED:
		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
			/* Releasing a tx-only CID; fall back to ACTIVE once
			 * the last one is gone.
			 */
			next_tx_only = o->num_tx_only - 1;
			if (next_tx_only == 0)
				next_state = BNX2X_Q_STATE_ACTIVE;
			else
				next_state = BNX2X_Q_STATE_MULTI_COS;
		}

		break;
	case BNX2X_Q_STATE_INACTIVE:
		if (cmd == BNX2X_Q_CMD_ACTIVATE)
			next_state = BNX2X_Q_STATE_ACTIVE;

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_INACTIVE;

		else if (cmd == BNX2X_Q_CMD_HALT)
			next_state = BNX2X_Q_STATE_STOPPED;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				     &update_params->update_flags)){
				if (o->num_tx_only == 0)
					next_state = BNX2X_Q_STATE_ACTIVE;
				else /* tx only queues exist for this queue */
					next_state = BNX2X_Q_STATE_MULTI_COS;
			} else
				next_state = BNX2X_Q_STATE_INACTIVE;
		}

		break;
	case BNX2X_Q_STATE_STOPPED:
		if (cmd == BNX2X_Q_CMD_TERMINATE)
			next_state = BNX2X_Q_STATE_TERMINATED;

		break;
	case BNX2X_Q_STATE_TERMINATED:
		if (cmd == BNX2X_Q_CMD_CFC_DEL)
			next_state = BNX2X_Q_STATE_RESET;

		break;
	default:
		BNX2X_ERR("Illegal state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_Q_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
				 state, cmd, next_state);
		o->next_state = next_state;
		o->next_tx_only = next_tx_only;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);

	return -EINVAL;
}
5195
5196 void bnx2x_init_queue_obj(struct bnx2x *bp,
5197                           struct bnx2x_queue_sp_obj *obj,
5198                           u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5199                           void *rdata,
5200                           dma_addr_t rdata_mapping, unsigned long type)
5201 {
5202         memset(obj, 0, sizeof(*obj));
5203
5204         /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5205         BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5206
5207         memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5208         obj->max_cos = cid_cnt;
5209         obj->cl_id = cl_id;
5210         obj->func_id = func_id;
5211         obj->rdata = rdata;
5212         obj->rdata_mapping = rdata_mapping;
5213         obj->type = type;
5214         obj->next_state = BNX2X_Q_STATE_MAX;
5215
5216         if (CHIP_IS_E1x(bp))
5217                 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5218         else
5219                 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5220
5221         obj->check_transition = bnx2x_queue_chk_transition;
5222
5223         obj->complete_cmd = bnx2x_queue_comp_cmd;
5224         obj->wait_comp = bnx2x_queue_wait_comp;
5225         obj->set_pending = bnx2x_queue_set_pending;
5226 }
5227
/* Return a queue object's logical state: collapse the detailed FSM state
 * into ACTIVE (traffic may flow) vs. STOPPED, or -EINVAL for an
 * unrecognized state.
 */
int bnx2x_get_q_logical_state(struct bnx2x *bp,
			       struct bnx2x_queue_sp_obj *obj)
{
	switch (obj->state) {
	case BNX2X_Q_STATE_ACTIVE:
	case BNX2X_Q_STATE_MULTI_COS:
		return BNX2X_Q_LOGICAL_STATE_ACTIVE;
	case BNX2X_Q_STATE_RESET:
	case BNX2X_Q_STATE_INITIALIZED:
	case BNX2X_Q_STATE_MCOS_TERMINATED:
	case BNX2X_Q_STATE_INACTIVE:
	case BNX2X_Q_STATE_STOPPED:
	case BNX2X_Q_STATE_TERMINATED:
	case BNX2X_Q_STATE_FLRED:
		return BNX2X_Q_LOGICAL_STATE_STOPPED;
	default:
		return -EINVAL;
	}
}
5248
5249 /********************** Function state object *********************************/
/* Return the function object's current state, or BNX2X_F_STATE_MAX while a
 * transition is still pending (state would be stale mid-transaction).
 */
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
					   struct bnx2x_func_sp_obj *o)
{
	/* in the middle of transaction - return INVALID state */
	if (o->pending)
		return BNX2X_F_STATE_MAX;

	/*
	 * Ensure the order of reading of o->pending and o->state:
	 * o->pending must be read first (pairs with the wmb() in
	 * bnx2x_func_state_change_comp()).
	 */
	rmb();

	return o->state;
}
5265
/* Block until the given function command's pending bit clears (or the wait
 * times out inside bnx2x_state_wait()).
 */
static int bnx2x_func_wait_comp(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *o,
				enum bnx2x_func_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}
5272
/**
 * bnx2x_func_state_change_comp - complete the state machine transition
 *
 * @bp:         device handle
 * @o:          function state object
 * @cmd:        command whose completion arrived
 *
 * Called on state change transition. Completes the state
 * machine transition only - no HW interaction.
 *
 * Returns 0 on success, -EINVAL if @cmd was not pending (unexpected
 * completion from the MC).
 */
static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
					       struct bnx2x_func_sp_obj *o,
					       enum bnx2x_func_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	/* Work on a local copy first: a legal completion must find its
	 * command bit set in the pending mask.
	 */
	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, BP_FUNC(bp), o->state,
			  cur_pending, o->next_state);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP,
	   "Completing command %d for func %d, setting state to %d\n",
	   cmd, BP_FUNC(bp), o->next_state);

	o->state = o->next_state;
	o->next_state = BNX2X_F_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending. (Pairs with the rmb() in
	 * bnx2x_func_get_state().)
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}
5313
5314 /**
5315  * bnx2x_func_comp_cmd - complete the state change command
5316  *
5317  * @bp:         device handle
5318  * @o:
5319  * @cmd:
5320  *
5321  * Checks that the arrived completion is expected.
5322  */
5323 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5324                                struct bnx2x_func_sp_obj *o,
5325                                enum bnx2x_func_cmd cmd)
5326 {
5327         /* Complete the state machine part first, check if it's a
5328          * legal completion.
5329          */
5330         int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5331         return rc;
5332 }
5333
/**
 * bnx2x_func_chk_transition - perform function state machine transition
 *
 * @bp:         device handle
 * @o:          function state object
 * @params:     requested command and ramrod flags
 *
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         -EINVAL otherwise,
 *         -EBUSY if a previous transition is still pending.
 */
static int bnx2x_func_chk_transition(struct bnx2x *bp,
				     struct bnx2x_func_sp_obj *o,
				     struct bnx2x_func_state_params *params)
{
	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
	enum bnx2x_func_cmd cmd = params->cmd;

	/*
	 * Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_F_STATE_MAX;
	}

	/*
	 * Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return -EBUSY;

	switch (state) {
	case BNX2X_F_STATE_RESET:
		if (cmd == BNX2X_F_CMD_HW_INIT)
			next_state = BNX2X_F_STATE_INITIALIZED;

		break;
	case BNX2X_F_STATE_INITIALIZED:
		if (cmd == BNX2X_F_CMD_START)
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_HW_RESET)
			next_state = BNX2X_F_STATE_RESET;

		break;
	case BNX2X_F_STATE_STARTED:
		if (cmd == BNX2X_F_CMD_STOP)
			next_state = BNX2X_F_STATE_INITIALIZED;
		/* afex ramrods can be sent only in started mode, and only
		 * if not pending for function_stop ramrod completion
		 * for these events - next state remained STARTED.
		 */
		else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		/* Switch_update ramrod can be sent in either started or
		 * tx_stopped state, and it doesn't change the state.
		 */
		else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_TX_STOP)
			next_state = BNX2X_F_STATE_TX_STOPPED;

		break;
	case BNX2X_F_STATE_TX_STOPPED:
		if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
		    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_TX_STOPPED;

		else if (cmd == BNX2X_F_CMD_TX_START)
			next_state = BNX2X_F_STATE_STARTED;

		break;
	default:
		BNX2X_ERR("Unknown state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_F_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
				 state, cmd, next_state);
		o->next_state = next_state;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
			 state, cmd);

	return -EINVAL;
}
5438
/**
 * bnx2x_func_init_func - performs HW init at function stage
 *
 * @bp:         device handle
 * @drv:        driver-supplied HW init callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
 * HW blocks.
 */
static inline int bnx2x_func_init_func(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	return drv->init_hw_func(bp);
}
5454
5455 /**
5456  * bnx2x_func_init_port - performs HW init at port stage
5457  *
5458  * @bp:         device handle
5459  * @drv:
5460  *
5461  * Init HW when the current phase is
5462  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5463  * FUNCTION-only HW blocks.
5464  *
5465  */
5466 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5467                                        const struct bnx2x_func_sp_drv_ops *drv)
5468 {
5469         int rc = drv->init_hw_port(bp);
5470         if (rc)
5471                 return rc;
5472
5473         return bnx2x_func_init_func(bp, drv);
5474 }
5475
5476 /**
5477  * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5478  *
5479  * @bp:         device handle
5480  * @drv:
5481  *
5482  * Init HW when the current phase is
5483  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5484  * PORT-only and FUNCTION-only HW blocks.
5485  */
5486 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5487                                         const struct bnx2x_func_sp_drv_ops *drv)
5488 {
5489         int rc = drv->init_hw_cmn_chip(bp);
5490         if (rc)
5491                 return rc;
5492
5493         return bnx2x_func_init_port(bp, drv);
5494 }
5495
5496 /**
5497  * bnx2x_func_init_cmn - performs HW init at common stage
5498  *
5499  * @bp:         device handle
5500  * @drv:
5501  *
5502  * Init HW when the current phase is
5503  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON,
5504  * PORT-only and FUNCTION-only HW blocks.
5505  */
5506 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5507                                       const struct bnx2x_func_sp_drv_ops *drv)
5508 {
5509         int rc = drv->init_hw_cmn(bp);
5510         if (rc)
5511                 return rc;
5512
5513         return bnx2x_func_init_port(bp, drv);
5514 }
5515
/* Run the HW init chain matching the load phase granted by the MCP:
 * prepare the FW (gunzip buffers + firmware load), run the appropriate
 * init stages, then release the gunzip buffers. On success the command
 * is completed immediately since no ramrods were posted.
 */
static int bnx2x_func_hw_init(struct bnx2x *bp,
			      struct bnx2x_func_state_params *params)
{
	u32 load_code = params->params.hw_init.load_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
	int rc = 0;

	DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
			 BP_ABS_FUNC(bp), load_code);

	/* Prepare buffers for unzipping the FW */
	rc = drv->gunzip_init(bp);
	if (rc)
		return rc;

	/* Prepare FW */
	rc = drv->init_fw(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		goto init_err;
	}

	/* Handle the beginning of COMMON_XXX phases separately;
	 * each case runs its stage plus all the following ones.
	 */
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_func_init_cmn_chip(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_func_init_cmn(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_func_init_port(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_func_init_func(bp, drv);
		if (rc)
			goto init_err;

		break;
	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		rc = -EINVAL;
	}

init_err:
	drv->gunzip_end(bp);

	/* In case of success, complete the command immediately: no ramrods
	 * have been sent.
	 */
	if (!rc)
		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);

	return rc;
}
5581
/**
 * bnx2x_func_reset_func - reset HW at function stage
 *
 * @bp:         device handle
 * @drv:        driver specific HW reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
 * FUNCTION-only HW blocks.
 */
static inline void bnx2x_func_reset_func(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_func(bp);
}
5596
/**
 * bnx2x_func_reset_port - reset HW at port stage
 *
 * @bp:         device handle
 * @drv:        driver specific HW reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
 * FUNCTION-only and PORT-only HW blocks.
 *
 *                 !!!IMPORTANT!!!
 *
 * It's important to call reset_port before reset_func() as the last thing
 * reset_func does is pf_disable() thus disabling PGLUE_B, which
 * makes impossible any DMAE transactions.
 */
static inline void bnx2x_func_reset_port(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_port(bp);
	bnx2x_func_reset_func(bp, drv);
}
5618
/**
 * bnx2x_func_reset_cmn - reset HW at common stage
 *
 * @bp:         device handle
 * @drv:        driver specific HW reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
 */
static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	/* Port (and function) blocks must go down before the common ones */
	bnx2x_func_reset_port(bp, drv);
	drv->reset_hw_cmn(bp);
}
5635
5636
5637 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5638                                       struct bnx2x_func_state_params *params)
5639 {
5640         u32 reset_phase = params->params.hw_reset.reset_phase;
5641         struct bnx2x_func_sp_obj *o = params->f_obj;
5642         const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5643
5644         DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
5645                          reset_phase);
5646
5647         switch (reset_phase) {
5648         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5649                 bnx2x_func_reset_cmn(bp, drv);
5650                 break;
5651         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5652                 bnx2x_func_reset_port(bp, drv);
5653                 break;
5654         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5655                 bnx2x_func_reset_func(bp, drv);
5656                 break;
5657         default:
5658                 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5659                            reset_phase);
5660                 break;
5661         }
5662
5663         /* Complete the comand immediatelly: no ramrods have been sent. */
5664         o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5665
5666         return 0;
5667 }
5668
5669 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5670                                         struct bnx2x_func_state_params *params)
5671 {
5672         struct bnx2x_func_sp_obj *o = params->f_obj;
5673         struct function_start_data *rdata =
5674                 (struct function_start_data *)o->rdata;
5675         dma_addr_t data_mapping = o->rdata_mapping;
5676         struct bnx2x_func_start_params *start_params = &params->params.start;
5677
5678         memset(rdata, 0, sizeof(*rdata));
5679
5680         /* Fill the ramrod data with provided parameters */
5681         rdata->function_mode    = (u8)start_params->mf_mode;
5682         rdata->sd_vlan_tag      = cpu_to_le16(start_params->sd_vlan_tag);
5683         rdata->path_id          = BP_PATH(bp);
5684         rdata->network_cos_mode = start_params->network_cos_mode;
5685         rdata->gre_tunnel_mode  = start_params->gre_tunnel_mode;
5686         rdata->gre_tunnel_rss   = start_params->gre_tunnel_rss;
5687
5688         /* No need for an explicit memory barrier here as long we would
5689          * need to ensure the ordering of writing to the SPQ element
5690          * and updating of the SPQ producer which involves a memory
5691          * read and we will have to put a full memory barrier there
5692          * (inside bnx2x_sp_post()).
5693          */
5694
5695         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5696                              U64_HI(data_mapping),
5697                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5698 }
5699
5700 static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5701                                         struct bnx2x_func_state_params *params)
5702 {
5703         struct bnx2x_func_sp_obj *o = params->f_obj;
5704         struct function_update_data *rdata =
5705                 (struct function_update_data *)o->rdata;
5706         dma_addr_t data_mapping = o->rdata_mapping;
5707         struct bnx2x_func_switch_update_params *switch_update_params =
5708                 &params->params.switch_update;
5709
5710         memset(rdata, 0, sizeof(*rdata));
5711
5712         /* Fill the ramrod data with provided parameters */
5713         rdata->tx_switch_suspend_change_flg = 1;
5714         rdata->tx_switch_suspend = switch_update_params->suspend;
5715         rdata->echo = SWITCH_UPDATE;
5716
5717         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5718                              U64_HI(data_mapping),
5719                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5720 }
5721
5722 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5723                                          struct bnx2x_func_state_params *params)
5724 {
5725         struct bnx2x_func_sp_obj *o = params->f_obj;
5726         struct function_update_data *rdata =
5727                 (struct function_update_data *)o->afex_rdata;
5728         dma_addr_t data_mapping = o->afex_rdata_mapping;
5729         struct bnx2x_func_afex_update_params *afex_update_params =
5730                 &params->params.afex_update;
5731
5732         memset(rdata, 0, sizeof(*rdata));
5733
5734         /* Fill the ramrod data with provided parameters */
5735         rdata->vif_id_change_flg = 1;
5736         rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5737         rdata->afex_default_vlan_change_flg = 1;
5738         rdata->afex_default_vlan =
5739                 cpu_to_le16(afex_update_params->afex_default_vlan);
5740         rdata->allowed_priorities_change_flg = 1;
5741         rdata->allowed_priorities = afex_update_params->allowed_priorities;
5742         rdata->echo = AFEX_UPDATE;
5743
5744         /*  No need for an explicit memory barrier here as long we would
5745          *  need to ensure the ordering of writing to the SPQ element
5746          *  and updating of the SPQ producer which involves a memory
5747          *  read and we will have to put a full memory barrier there
5748          *  (inside bnx2x_sp_post()).
5749          */
5750         DP(BNX2X_MSG_SP,
5751            "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5752            rdata->vif_id,
5753            rdata->afex_default_vlan, rdata->allowed_priorities);
5754
5755         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5756                              U64_HI(data_mapping),
5757                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5758 }
5759
5760 static
5761 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5762                                          struct bnx2x_func_state_params *params)
5763 {
5764         struct bnx2x_func_sp_obj *o = params->f_obj;
5765         struct afex_vif_list_ramrod_data *rdata =
5766                 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5767         struct bnx2x_func_afex_viflists_params *afex_vif_params =
5768                 &params->params.afex_viflists;
5769         u64 *p_rdata = (u64 *)rdata;
5770
5771         memset(rdata, 0, sizeof(*rdata));
5772
5773         /* Fill the ramrod data with provided parameters */
5774         rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5775         rdata->func_bit_map          = afex_vif_params->func_bit_map;
5776         rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5777         rdata->func_to_clear         = afex_vif_params->func_to_clear;
5778
5779         /* send in echo type of sub command */
5780         rdata->echo = afex_vif_params->afex_vif_list_command;
5781
5782         /*  No need for an explicit memory barrier here as long we would
5783          *  need to ensure the ordering of writing to the SPQ element
5784          *  and updating of the SPQ producer which involves a memory
5785          *  read and we will have to put a full memory barrier there
5786          *  (inside bnx2x_sp_post()).
5787          */
5788
5789         DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5790            rdata->afex_vif_list_command, rdata->vif_list_index,
5791            rdata->func_bit_map, rdata->func_to_clear);
5792
5793         /* this ramrod sends data directly and not through DMA mapping */
5794         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5795                              U64_HI(*p_rdata), U64_LO(*p_rdata),
5796                              NONE_CONNECTION_TYPE);
5797 }
5798
/* Post a FUNCTION_STOP ramrod: carries no ramrod data (hi/lo words are 0). */
static inline int bnx2x_func_send_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
5805
/* Post a STOP_TRAFFIC ramrod: carries no ramrod data (hi/lo words are 0). */
static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
5812 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5813                                        struct bnx2x_func_state_params *params)
5814 {
5815         struct bnx2x_func_sp_obj *o = params->f_obj;
5816         struct flow_control_configuration *rdata =
5817                 (struct flow_control_configuration *)o->rdata;
5818         dma_addr_t data_mapping = o->rdata_mapping;
5819         struct bnx2x_func_tx_start_params *tx_start_params =
5820                 &params->params.tx_start;
5821         int i;
5822
5823         memset(rdata, 0, sizeof(*rdata));
5824
5825         rdata->dcb_enabled = tx_start_params->dcb_enabled;
5826         rdata->dcb_version = tx_start_params->dcb_version;
5827         rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5828
5829         for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5830                 rdata->traffic_type_to_priority_cos[i] =
5831                         tx_start_params->traffic_type_to_priority_cos[i];
5832
5833         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5834                              U64_HI(data_mapping),
5835                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5836 }
5837
5838 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5839                                struct bnx2x_func_state_params *params)
5840 {
5841         switch (params->cmd) {
5842         case BNX2X_F_CMD_HW_INIT:
5843                 return bnx2x_func_hw_init(bp, params);
5844         case BNX2X_F_CMD_START:
5845                 return bnx2x_func_send_start(bp, params);
5846         case BNX2X_F_CMD_STOP:
5847                 return bnx2x_func_send_stop(bp, params);
5848         case BNX2X_F_CMD_HW_RESET:
5849                 return bnx2x_func_hw_reset(bp, params);
5850         case BNX2X_F_CMD_AFEX_UPDATE:
5851                 return bnx2x_func_send_afex_update(bp, params);
5852         case BNX2X_F_CMD_AFEX_VIFLISTS:
5853                 return bnx2x_func_send_afex_viflists(bp, params);
5854         case BNX2X_F_CMD_TX_STOP:
5855                 return bnx2x_func_send_tx_stop(bp, params);
5856         case BNX2X_F_CMD_TX_START:
5857                 return bnx2x_func_send_tx_start(bp, params);
5858         case BNX2X_F_CMD_SWITCH_UPDATE:
5859                 return bnx2x_func_send_switch_update(bp, params);
5860         default:
5861                 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5862                 return -EINVAL;
5863         }
5864 }
5865
5866 void bnx2x_init_func_obj(struct bnx2x *bp,
5867                          struct bnx2x_func_sp_obj *obj,
5868                          void *rdata, dma_addr_t rdata_mapping,
5869                          void *afex_rdata, dma_addr_t afex_rdata_mapping,
5870                          struct bnx2x_func_sp_drv_ops *drv_iface)
5871 {
5872         memset(obj, 0, sizeof(*obj));
5873
5874         mutex_init(&obj->one_pending_mutex);
5875
5876         obj->rdata = rdata;
5877         obj->rdata_mapping = rdata_mapping;
5878         obj->afex_rdata = afex_rdata;
5879         obj->afex_rdata_mapping = afex_rdata_mapping;
5880         obj->send_cmd = bnx2x_func_send_cmd;
5881         obj->check_transition = bnx2x_func_chk_transition;
5882         obj->complete_cmd = bnx2x_func_comp_cmd;
5883         obj->wait_comp = bnx2x_func_wait_comp;
5884
5885         obj->drv = drv_iface;
5886 }
5887
/**
 * bnx2x_func_state_change - perform Function state change transition
 *
 * @bp:         device handle
 * @params:     parameters to perform the transaction
 *
 * returns 0 in case of successfully completed transition,
 *         negative error code in case of failure, positive
 *         (EBUSY) value if there is a completion that is
 *         still pending (possible only if RAMROD_COMP_WAIT is
 *         not set in params->ramrod_flags for asynchronous
 *         commands).
 */
int bnx2x_func_state_change(struct bnx2x *bp,
			    struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	int rc, cnt = 300;	/* up to 300 * 10ms retry budget */
	enum bnx2x_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	mutex_lock(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	rc = o->check_transition(bp, o, params);
	if ((rc == -EBUSY) &&
	    (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
		/* A previous command is still pending: poll for its
		 * completion, dropping the mutex while sleeping so the
		 * completion path can make progress.
		 */
		while ((rc == -EBUSY) && (--cnt > 0)) {
			mutex_unlock(&o->one_pending_mutex);
			msleep(10);
			mutex_lock(&o->one_pending_mutex);
			rc = o->check_transition(bp, o, params);
		}
		if (rc == -EBUSY) {
			mutex_unlock(&o->one_pending_mutex);
			BNX2X_ERR("timeout waiting for previous ramrod completion\n");
			return rc;
		}
	} else if (rc) {
		mutex_unlock(&o->one_pending_mutex);
		return rc;
	}

	/* Set "pending" bit */
	set_bit(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		bnx2x_func_state_change_comp(bp, o, cmd);
		mutex_unlock(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);

		mutex_unlock(&o->one_pending_mutex);

		if (rc) {
			/* Sending failed: roll back the pending state */
			o->next_state = BNX2X_F_STATE_MAX;
			clear_bit(cmd, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, cmd);
			if (rc)
				return rc;

			return 0;
		}
	}

	/* Positive value here means the command was sent and its completion
	 * is still outstanding.
	 */
	return !!test_bit(cmd, pending);
}