2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
23 #include "bfa_defs_svc.h"
25 BFA_TRC_FILE(CNA, IOC);
28 * IOC local definitions
30 #define BFA_IOC_TOV 3000 /* msecs */
31 #define BFA_IOC_HWSEM_TOV 500 /* msecs */
32 #define BFA_IOC_HB_TOV 500 /* msecs */
33 #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
34 #define BFA_IOC_POLL_TOV BFA_TIMER_FREQ
36 #define bfa_ioc_timer_start(__ioc) \
37 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
38 bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
39 #define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
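/*
 * The heartbeat timer below re-arms itself from bfa_ioc_hb_check()
 * every BFA_IOC_HB_TOV msecs for as long as the IOC stays operational.
 */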
41 #define bfa_hb_timer_start(__ioc) \
42 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
43 bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
44 #define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
46 #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
49 * ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
52 #define bfa_ioc_firmware_lock(__ioc) \
53 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
54 #define bfa_ioc_firmware_unlock(__ioc) \
55 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
56 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
57 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
58 #define bfa_ioc_notify_fail(__ioc) \
59 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
60 #define bfa_ioc_sync_start(__ioc) \
61 ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
62 #define bfa_ioc_sync_join(__ioc) \
63 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
64 #define bfa_ioc_sync_leave(__ioc) \
65 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
66 #define bfa_ioc_sync_ack(__ioc) \
67 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
68 #define bfa_ioc_sync_complete(__ioc) \
69 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
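/*
 * A mailbox command is pending if the driver-side command queue is
 * non-empty or the doorbell last written to hfn_mbox_cmd has not yet
 * been consumed by firmware.
 */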
71 #define bfa_ioc_mbox_cmd_pending(__ioc) \
72 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
73 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
75 bfa_boolean_t bfa_auto_recover = BFA_TRUE;
78 * forward declarations
80 static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
81 static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
82 static void bfa_ioc_timeout(void *ioc);
83 static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
84 static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
85 static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
86 static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
87 static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
88 static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
89 static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
90 static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
91 static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
92 enum bfa_ioc_event_e event);
93 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
94 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
95 static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
96 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
99 * IOC state machine definitions/declarations
102 IOC_E_RESET = 1, /* IOC reset request */
103 IOC_E_ENABLE = 2, /* IOC enable request */
104 IOC_E_DISABLE = 3, /* IOC disable request */
105 IOC_E_DETACH = 4, /* driver detach cleanup */
106 IOC_E_ENABLED = 5, /* f/w enabled */
107 IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
108 IOC_E_DISABLED = 7, /* f/w disabled */
109 IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */
110 IOC_E_HBFAIL = 9, /* heartbeat failure */
111 IOC_E_HWERROR = 10, /* hardware error interrupt */
112 IOC_E_TIMEOUT = 11, /* timeout */
113 IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
116 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
117 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
118 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
119 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
120 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
121 bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
122 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
123 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
124 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
125 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
127 static struct bfa_sm_table_s ioc_sm_table[] = {
128 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
129 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
130 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
131 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
132 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
133 {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
134 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
135 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
136 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
137 {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
141 * IOCPF state machine definitions/declarations
144 #define bfa_iocpf_timer_start(__ioc) \
145 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
146 bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
147 #define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
149 #define bfa_iocpf_poll_timer_start(__ioc) \
150 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
151 bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
153 #define bfa_sem_timer_start(__ioc) \
154 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
155 bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
156 #define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
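/*
 * Note that the IOC and IOCPF timer macros above arm the same ioc_timer;
 * only the h/w semaphore retry timer uses the separate sem_timer.
 */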
159 * Forward declarations for iocpf state machine
161 static void bfa_iocpf_timeout(void *ioc_arg);
162 static void bfa_iocpf_sem_timeout(void *ioc_arg);
163 static void bfa_iocpf_poll_timeout(void *ioc_arg);
166 * IOCPF state machine events
169 IOCPF_E_ENABLE = 1, /* IOCPF enable request */
170 IOCPF_E_DISABLE = 2, /* IOCPF disable request */
171 IOCPF_E_STOP = 3, /* stop on driver detach */
172 IOCPF_E_FWREADY = 4, /* f/w initialization done */
173 IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */
174 IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */
175 IOCPF_E_FAIL = 7, /* failure notice by ioc sm */
176 IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */
177 IOCPF_E_GETATTRFAIL = 9, /* getattr fail notice by ioc sm */
178 IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
179 IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
180 IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */
186 enum bfa_iocpf_state {
187 BFA_IOCPF_RESET = 1, /* IOC is in reset state */
188 BFA_IOCPF_SEMWAIT = 2, /* Waiting for IOC h/w semaphore */
189 BFA_IOCPF_HWINIT = 3, /* IOC h/w is being initialized */
190 BFA_IOCPF_READY = 4, /* IOCPF is initialized */
191 BFA_IOCPF_INITFAIL = 5, /* IOCPF failed */
192 BFA_IOCPF_FAIL = 6, /* IOCPF failed */
193 BFA_IOCPF_DISABLING = 7, /* IOCPF is being disabled */
194 BFA_IOCPF_DISABLED = 8, /* IOCPF is disabled */
195 BFA_IOCPF_FWMISMATCH = 9, /* IOC f/w different from driver's */
198 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
199 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
200 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
201 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
202 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
203 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
204 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
205 bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
207 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
208 bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
209 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
210 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
211 bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
213 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
215 static struct bfa_sm_table_s iocpf_sm_table[] = {
216 {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
217 {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
218 {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
219 {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
220 {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
221 {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
222 {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
223 {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
224 {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
225 {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
226 {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
227 {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
228 {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
229 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
237 * Beginning state. IOC uninit state.
241 bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
246 * IOC is in uninit state.
249 bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
255 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
259 bfa_sm_fault(ioc, event);
263 * Reset entry actions -- initialize state machine
266 bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
268 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
272 * IOC is in reset state.
275 bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
281 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
285 bfa_ioc_disable_comp(ioc);
289 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
293 bfa_sm_fault(ioc, event);
299 bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
301 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
305 * Host IOC function is being enabled, awaiting response from firmware.
306 * Semaphore is acquired.
309 bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
315 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
319 /* !!! fall through !!! */
321 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
322 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
323 if (event != IOC_E_PFFAILED)
324 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
328 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
329 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
333 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
337 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
338 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
345 bfa_sm_fault(ioc, event);
351 bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
353 bfa_ioc_timer_start(ioc);
354 bfa_ioc_send_getattr(ioc);
358 * IOC configuration in progress. Timer is active.
361 bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
366 case IOC_E_FWRSP_GETATTR:
367 bfa_ioc_timer_stop(ioc);
368 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
373 bfa_ioc_timer_stop(ioc);
374 /* !!! fall through !!! */
376 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
377 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
378 if (event != IOC_E_PFFAILED)
379 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
383 bfa_ioc_timer_stop(ioc);
384 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
391 bfa_sm_fault(ioc, event);
396 bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
398 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
400 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
401 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
402 bfa_ioc_hb_monitor(ioc);
403 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
404 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
408 bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
417 bfa_hb_timer_stop(ioc);
418 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
423 bfa_hb_timer_stop(ioc);
424 /* !!! fall through !!! */
426 if (ioc->iocpf.auto_recover)
427 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
429 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
431 bfa_ioc_fail_notify(ioc);
433 if (event != IOC_E_PFFAILED)
434 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
438 bfa_sm_fault(ioc, event);
444 bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
446 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
447 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
448 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
449 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
453 * IOC is being disabled
456 bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
462 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
467 * No state change. The IOC moves to the disabled state only
468 * after the iocpf sm completes its failure processing and
469 * itself reaches the disabled state.
471 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
475 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
476 bfa_ioc_disable_comp(ioc);
480 bfa_sm_fault(ioc, event);
485 * IOC disable completion entry.
488 bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
490 bfa_ioc_disable_comp(ioc);
494 bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
500 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
504 ioc->cbfn->disable_cbfn(ioc->bfa);
508 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
509 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
513 bfa_sm_fault(ioc, event);
519 bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
525 * Hardware initialization retry.
528 bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
534 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
540 * Initialization retry failed.
542 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
543 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
544 if (event != IOC_E_PFFAILED)
545 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
549 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
550 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
557 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
561 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
562 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
566 bfa_sm_fault(ioc, event);
572 bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
581 bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
588 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
592 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
596 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
597 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
603 * HB failure / HW error notification, ignore.
607 bfa_sm_fault(ioc, event);
612 bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
618 bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
624 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
628 ioc->cbfn->disable_cbfn(ioc->bfa);
632 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
636 /* Ignore - already in hwfail state */
640 bfa_sm_fault(ioc, event);
645 * IOCPF State Machine
649 * Reset entry actions -- initialize state machine
652 bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
654 iocpf->fw_mismatch_notified = BFA_FALSE;
655 iocpf->auto_recover = bfa_auto_recover;
659 * Beginning state. IOC is in reset state.
662 bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
664 struct bfa_ioc_s *ioc = iocpf->ioc;
670 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
677 bfa_sm_fault(ioc, event);
682 * Semaphore should be acquired for version check.
685 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
687 struct bfi_ioc_image_hdr_s fwhdr;
688 u32 r32, fwstate, pgnum, pgoff, loff = 0;
692 * Spin on init semaphore to serialize.
694 r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
697 r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
701 fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
702 if (fwstate == BFI_IOC_UNINIT) {
703 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
707 bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
709 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
710 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
717 pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
718 pgoff = PSS_SMEM_PGOFF(loff);
719 writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
721 for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
722 bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
726 bfa_trc(iocpf->ioc, fwstate);
727 bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
728 writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
729 writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);
732 * Unlock the hw semaphore. This should happen only once per boot.
734 readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
735 writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
738 * unlock init semaphore.
740 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
743 bfa_ioc_hw_sem_get(iocpf->ioc);
747 * Awaiting h/w semaphore to continue with version check.
750 bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
752 struct bfa_ioc_s *ioc = iocpf->ioc;
757 case IOCPF_E_SEMLOCKED:
758 if (bfa_ioc_firmware_lock(ioc)) {
759 if (bfa_ioc_sync_start(ioc)) {
760 bfa_ioc_sync_join(ioc);
761 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
763 bfa_ioc_firmware_unlock(ioc);
764 writel(1, ioc->ioc_regs.ioc_sem_reg);
765 bfa_sem_timer_start(ioc);
768 writel(1, ioc->ioc_regs.ioc_sem_reg);
769 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
773 case IOCPF_E_SEM_ERROR:
774 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
775 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
778 case IOCPF_E_DISABLE:
779 bfa_sem_timer_stop(ioc);
780 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
781 bfa_fsm_send_event(ioc, IOC_E_DISABLED);
785 bfa_sem_timer_stop(ioc);
786 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
790 bfa_sm_fault(ioc, event);
795 * Notify enable completion callback.
798 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
801 * Call only the first time sm enters fwmismatch state.
803 if (iocpf->fw_mismatch_notified == BFA_FALSE)
804 bfa_ioc_pf_fwmismatch(iocpf->ioc);
806 iocpf->fw_mismatch_notified = BFA_TRUE;
807 bfa_iocpf_timer_start(iocpf->ioc);
811 * Awaiting firmware version match.
814 bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
816 struct bfa_ioc_s *ioc = iocpf->ioc;
821 case IOCPF_E_TIMEOUT:
822 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
825 case IOCPF_E_DISABLE:
826 bfa_iocpf_timer_stop(ioc);
827 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
828 bfa_fsm_send_event(ioc, IOC_E_DISABLED);
832 bfa_iocpf_timer_stop(ioc);
833 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
837 bfa_sm_fault(ioc, event);
842 * Request for semaphore.
845 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
847 bfa_ioc_hw_sem_get(iocpf->ioc);
851 * Awaiting semaphore for h/w initialization.
854 bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
856 struct bfa_ioc_s *ioc = iocpf->ioc;
861 case IOCPF_E_SEMLOCKED:
862 if (bfa_ioc_sync_complete(ioc)) {
863 bfa_ioc_sync_join(ioc);
864 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
866 writel(1, ioc->ioc_regs.ioc_sem_reg);
867 bfa_sem_timer_start(ioc);
871 case IOCPF_E_SEM_ERROR:
872 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
873 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
876 case IOCPF_E_DISABLE:
877 bfa_sem_timer_stop(ioc);
878 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
882 bfa_sm_fault(ioc, event);
887 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
889 iocpf->poll_time = 0;
890 bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
894 * Hardware is being initialized. Interrupts are enabled.
895 * Holding hardware semaphore lock.
898 bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
900 struct bfa_ioc_s *ioc = iocpf->ioc;
905 case IOCPF_E_FWREADY:
906 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
909 case IOCPF_E_TIMEOUT:
910 writel(1, ioc->ioc_regs.ioc_sem_reg);
911 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
912 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
915 case IOCPF_E_DISABLE:
916 bfa_iocpf_timer_stop(ioc);
917 bfa_ioc_sync_leave(ioc);
918 writel(1, ioc->ioc_regs.ioc_sem_reg);
919 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
923 bfa_sm_fault(ioc, event);
928 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
930 bfa_iocpf_timer_start(iocpf->ioc);
932 * Enable Interrupts before sending fw IOC ENABLE cmd.
934 iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
935 bfa_ioc_send_enable(iocpf->ioc);
939 * Host IOC function is being enabled, awaiting response from firmware.
940 * Semaphore is acquired.
943 bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
945 struct bfa_ioc_s *ioc = iocpf->ioc;
950 case IOCPF_E_FWRSP_ENABLE:
951 bfa_iocpf_timer_stop(ioc);
952 writel(1, ioc->ioc_regs.ioc_sem_reg);
953 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
956 case IOCPF_E_INITFAIL:
957 bfa_iocpf_timer_stop(ioc);
959 * !!! fall through !!!
962 case IOCPF_E_TIMEOUT:
963 writel(1, ioc->ioc_regs.ioc_sem_reg);
964 if (event == IOCPF_E_TIMEOUT)
965 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
966 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
969 case IOCPF_E_DISABLE:
970 bfa_iocpf_timer_stop(ioc);
971 writel(1, ioc->ioc_regs.ioc_sem_reg);
972 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
976 bfa_sm_fault(ioc, event);
981 bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
983 bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
987 bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
989 struct bfa_ioc_s *ioc = iocpf->ioc;
994 case IOCPF_E_DISABLE:
995 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
998 case IOCPF_E_GETATTRFAIL:
999 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
1003 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
1007 bfa_sm_fault(ioc, event);
1012 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
1014 bfa_iocpf_timer_start(iocpf->ioc);
1015 bfa_ioc_send_disable(iocpf->ioc);
1019 * IOC is being disabled
1022 bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1024 struct bfa_ioc_s *ioc = iocpf->ioc;
1026 bfa_trc(ioc, event);
1029 case IOCPF_E_FWRSP_DISABLE:
1030 bfa_iocpf_timer_stop(ioc);
1031 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1035 bfa_iocpf_timer_stop(ioc);
1037 * !!! fall through !!!
1040 case IOCPF_E_TIMEOUT:
1041 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1042 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1045 case IOCPF_E_FWRSP_ENABLE:
1049 bfa_sm_fault(ioc, event);
1054 bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
1056 bfa_ioc_hw_sem_get(iocpf->ioc);
1060 * IOC hb ack request is being removed.
1063 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1065 struct bfa_ioc_s *ioc = iocpf->ioc;
1067 bfa_trc(ioc, event);
1070 case IOCPF_E_SEMLOCKED:
1071 bfa_ioc_sync_leave(ioc);
1072 writel(1, ioc->ioc_regs.ioc_sem_reg);
1073 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1076 case IOCPF_E_SEM_ERROR:
1077 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1078 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1085 bfa_sm_fault(ioc, event);
1090 * IOC disable completion entry.
1093 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
1095 bfa_ioc_mbox_flush(iocpf->ioc);
1096 bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
1100 bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1102 struct bfa_ioc_s *ioc = iocpf->ioc;
1104 bfa_trc(ioc, event);
1107 case IOCPF_E_ENABLE:
1108 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1112 bfa_ioc_firmware_unlock(ioc);
1113 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1117 bfa_sm_fault(ioc, event);
1122 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
1124 bfa_ioc_debug_save_ftrc(iocpf->ioc);
1125 bfa_ioc_hw_sem_get(iocpf->ioc);
1129 * Hardware initialization failed.
1132 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1134 struct bfa_ioc_s *ioc = iocpf->ioc;
1136 bfa_trc(ioc, event);
1139 case IOCPF_E_SEMLOCKED:
1140 bfa_ioc_notify_fail(ioc);
1141 bfa_ioc_sync_leave(ioc);
1142 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1143 writel(1, ioc->ioc_regs.ioc_sem_reg);
1144 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
1147 case IOCPF_E_SEM_ERROR:
1148 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1149 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1152 case IOCPF_E_DISABLE:
1153 bfa_sem_timer_stop(ioc);
1154 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1158 bfa_sem_timer_stop(ioc);
1159 bfa_ioc_firmware_unlock(ioc);
1160 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1167 bfa_sm_fault(ioc, event);
1172 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
1174 bfa_trc(iocpf->ioc, 0);
1178 * Hardware initialization failed.
1181 bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1183 struct bfa_ioc_s *ioc = iocpf->ioc;
1185 bfa_trc(ioc, event);
1188 case IOCPF_E_DISABLE:
1189 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1193 bfa_ioc_firmware_unlock(ioc);
1194 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1198 bfa_sm_fault(ioc, event);
1203 bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
1206 * Mark IOC as failed in hardware and stop firmware.
1208 bfa_ioc_lpu_stop(iocpf->ioc);
1211 * Flush any queued up mailbox requests.
1213 bfa_ioc_mbox_flush(iocpf->ioc);
1215 bfa_ioc_hw_sem_get(iocpf->ioc);
1219 bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1221 struct bfa_ioc_s *ioc = iocpf->ioc;
1223 bfa_trc(ioc, event);
1226 case IOCPF_E_SEMLOCKED:
1227 bfa_ioc_sync_ack(ioc);
1228 bfa_ioc_notify_fail(ioc);
1229 if (!iocpf->auto_recover) {
1230 bfa_ioc_sync_leave(ioc);
1231 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1232 writel(1, ioc->ioc_regs.ioc_sem_reg);
1233 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1235 if (bfa_ioc_sync_complete(ioc))
1236 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1238 writel(1, ioc->ioc_regs.ioc_sem_reg);
1239 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1244 case IOCPF_E_SEM_ERROR:
1245 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1246 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1249 case IOCPF_E_DISABLE:
1250 bfa_sem_timer_stop(ioc);
1251 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1258 bfa_sm_fault(ioc, event);
1263 bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1265 bfa_trc(iocpf->ioc, 0);
1269 * IOC is in failed state.
1272 bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1274 struct bfa_ioc_s *ioc = iocpf->ioc;
1276 bfa_trc(ioc, event);
1279 case IOCPF_E_DISABLE:
1280 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1284 bfa_sm_fault(ioc, event);
1289 * BFA IOC private functions
1293 * Notify common modules registered for notification.
1296 bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
1298 struct bfa_ioc_notify_s *notify;
1299 struct list_head *qe;
1301 list_for_each(qe, &ioc->notify_q) {
1302 notify = (struct bfa_ioc_notify_s *)qe;
1303 notify->cbfn(notify->cbarg, event);
1308 bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
1310 ioc->cbfn->disable_cbfn(ioc->bfa);
1311 bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
1315 bfa_ioc_sem_get(void __iomem *sem_reg)
1319 #define BFA_SEM_SPINCNT 3000
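/*
 * Bit 0 set means the semaphore is held by another function; spin for
 * up to BFA_SEM_SPINCNT iterations waiting for it to clear.
 */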
1321 r32 = readl(sem_reg);
1323 while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1326 r32 = readl(sem_reg);
1336 bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1341 * First read to the semaphore register will return 0, subsequent reads
1342 * will return 1. Semaphore is released by writing 1 to the register
1344 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1347 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1351 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1355 bfa_sem_timer_start(ioc);
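/* not free yet -- bfa_iocpf_sem_timeout() retries the acquisition later */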
1359 * Initialize LPU local memory (aka secondary memory / SRAM)
1362 bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1366 #define PSS_LMEM_INIT_TIME 10000
1368 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1369 pss_ctl &= ~__PSS_LMEM_RESET;
1370 pss_ctl |= __PSS_LMEM_INIT_EN;
1373 * i2c workaround: 12.5 kHz clock
1375 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1376 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1379 * wait for memory initialization to be complete
1383 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1385 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1388 * If memory initialization is not successful, IOC timeout will catch such failures.
1391 WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1392 bfa_trc(ioc, pss_ctl);
1394 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1395 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1399 bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1404 * Take processor out of reset.
1406 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1407 pss_ctl &= ~__PSS_LPU0_RESET;
1409 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1413 bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1418 * Put processors in reset.
1420 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1421 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1423 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1427 * Get driver and firmware versions.
1430 bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1435 u32 *fwsig = (u32 *) fwhdr;
1437 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1438 pgoff = PSS_SMEM_PGOFF(loff);
1439 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1441 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
1444 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1445 loff += sizeof(u32);
1450 * Returns TRUE if same.
1453 bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1455 struct bfi_ioc_image_hdr_s *drv_fwhdr;
1458 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1459 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1461 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1462 if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) {
1464 bfa_trc(ioc, fwhdr->md5sum[i]);
1465 bfa_trc(ioc, drv_fwhdr->md5sum[i]);
1470 bfa_trc(ioc, fwhdr->md5sum[0]);
1475 * Return true if current running version is valid. Firmware signature and
1476 * execution context (driver/bios) must match.
1478 static bfa_boolean_t
1479 bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1481 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
1483 bfa_ioc_fwver_get(ioc, &fwhdr);
1484 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1485 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1487 if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) {
1488 bfa_trc(ioc, fwhdr.signature);
1489 bfa_trc(ioc, drv_fwhdr->signature);
1493 if (swab32(fwhdr.bootenv) != boot_env) {
1494 bfa_trc(ioc, fwhdr.bootenv);
1495 bfa_trc(ioc, boot_env);
1499 return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1503 * Conditionally flush any pending message from firmware at start.
1506 bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1510 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1512 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1516 bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1518 enum bfi_ioc_state ioc_fwstate;
1519 bfa_boolean_t fwvalid;
1523 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1526 ioc_fwstate = BFI_IOC_UNINIT;
1528 bfa_trc(ioc, ioc_fwstate);
1530 boot_type = BFI_FWBOOT_TYPE_NORMAL;
1531 boot_env = BFI_FWBOOT_ENV_OS;
1534 * check if firmware is valid
1536 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1537 BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
1540 bfa_ioc_boot(ioc, boot_type, boot_env);
1541 bfa_ioc_poll_fwinit(ioc);
1546 * If hardware initialization is in progress (initialized by other IOC),
1547 * just wait for an initialization completion interrupt.
1549 if (ioc_fwstate == BFI_IOC_INITING) {
1550 bfa_ioc_poll_fwinit(ioc);
1555 * If IOC function is disabled and firmware version is same,
1556 * just re-enable IOC.
1558 * If option rom, IOC must not be in operational state. With
1559 * convergence, IOC will be in operational state when 2nd driver is loaded.
1562 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1565 * When using MSI-X any pending firmware ready event should
1566 * be flushed. Otherwise MSI-X interrupts are not delivered.
1568 bfa_ioc_msgflush(ioc);
1569 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1574 * Initialize the h/w for any other states.
1576 bfa_ioc_boot(ioc, boot_type, boot_env);
1577 bfa_ioc_poll_fwinit(ioc);
1581 bfa_ioc_timeout(void *ioc_arg)
1583 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
1586 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1590 bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1592 u32 *msgp = (u32 *) ioc_msg;
1595 bfa_trc(ioc, msgp[0]);
1598 WARN_ON(len > BFI_IOC_MSGLEN_MAX);
1601 * first write msg to mailbox registers
1603 for (i = 0; i < len / sizeof(u32); i++)
1604 writel(cpu_to_le32(msgp[i]),
1605 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1607 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1608 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1611 * write 1 to mailbox CMD to trigger LPU event
1613 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
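/* read back to flush the posted doorbell write */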
1614 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1618 bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1620 struct bfi_ioc_ctrl_req_s enable_req;
1623 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1624 bfa_ioc_portid(ioc));
1625 enable_req.clscode = cpu_to_be16(ioc->clscode);
1626 do_gettimeofday(&tv);
1627 enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
1628 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1632 bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1634 struct bfi_ioc_ctrl_req_s disable_req;
1636 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1637 bfa_ioc_portid(ioc));
1638 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1642 bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1644 struct bfi_ioc_getattr_req_s attr_req;
1646 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1647 bfa_ioc_portid(ioc));
1648 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1649 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1653 bfa_ioc_hb_check(void *cbarg)
1655 struct bfa_ioc_s *ioc = cbarg;
1658 hb_count = readl(ioc->ioc_regs.heartbeat);
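/*
 * If the heartbeat counter has not advanced since the last poll,
 * treat it as a heartbeat failure and start recovery.
 */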
1659 if (ioc->hb_count == hb_count) {
1660 bfa_ioc_recover(ioc);
1663 ioc->hb_count = hb_count;
1666 bfa_ioc_mbox_poll(ioc);
1667 bfa_hb_timer_start(ioc);
1671 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1673 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1674 bfa_hb_timer_start(ioc);
1678 * Initiate a full firmware download.
1681 bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1691 bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
1692 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1694 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1695 pgoff = PSS_SMEM_PGOFF(loff);
1697 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1699 for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
1701 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1702 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1703 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1704 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1710 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1711 cpu_to_le32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]));
1713 loff += sizeof(u32);
1716 * handle page offset wrap around
1718 loff = PSS_SMEM_PGOFF(loff);
1721 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1725 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1726 ioc->ioc_regs.host_page_num_fn);
1729 * Set boot type and device mode at the end.
1731 asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1732 ioc->port0_mode, ioc->port1_mode);
1733 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1735 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
1737 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
1743 * Update BFA configuration from firmware configuration.
1746 bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1748 struct bfi_ioc_attr_s *attr = ioc->attr;
1750 attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
1751 attr->card_type = be32_to_cpu(attr->card_type);
1752 attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
1753 ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
1755 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1759 * Attach time initialization of mbox logic.
1762 bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1764 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1767 INIT_LIST_HEAD(&mod->cmd_q);
1768 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1769 mod->mbhdlr[mc].cbfn = NULL;
1770 mod->mbhdlr[mc].cbarg = ioc->bfa;
1775 * Mbox poll timer -- restarts any pending mailbox requests.
1778 bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1780 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1781 struct bfa_mbox_cmd_s *cmd;
1785 * If no command pending, do nothing
1787 if (list_empty(&mod->cmd_q))
1791 * If previous command is not yet fetched by firmware, do nothing
1793 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1798 * Enqueue command to firmware.
1800 bfa_q_deq(&mod->cmd_q, &cmd);
1801 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1805 * Clean up any pending requests.
1808 bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
1810 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1811 struct bfa_mbox_cmd_s *cmd;
1813 while (!list_empty(&mod->cmd_q))
1814 bfa_q_deq(&mod->cmd_q, &cmd);
1818 * Read data from SMEM to host through PCI memmap
1820 * @param[in] ioc memory for IOC
1821 * @param[in] tbuf app memory to store data from smem
1822 * @param[in] soff smem offset
1823 * @param[in] sz size of smem in bytes
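 *
 * A representative caller is bfa_ioc_debug_fwtrc() below, which pulls the
 * saved firmware trace out of SMEM:
 *
 *	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);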
1826 bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1833 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1834 loff = PSS_SMEM_PGOFF(soff);
1835 bfa_trc(ioc, pgnum);
1840 * Hold semaphore to serialize pll init and fwtrc.
1842 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1844 return BFA_STATUS_FAILED;
1847 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1849 len = sz/sizeof(u32);
1851 for (i = 0; i < len; i++) {
1852 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1853 buf[i] = be32_to_cpu(r32);
1854 loff += sizeof(u32);
1857 * handle page offset wrap around
1859 loff = PSS_SMEM_PGOFF(loff);
1862 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1865 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1866 ioc->ioc_regs.host_page_num_fn);
1868 * release semaphore.
1870 readl(ioc->ioc_regs.ioc_init_sem_reg);
1871 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1873 bfa_trc(ioc, pgnum);
1874 return BFA_STATUS_OK;
1878 * Clear SMEM data from host through PCI memmap
1880 * @param[in] ioc memory for IOC
1881 * @param[in] soff smem offset
1882 * @param[in] sz size of smem in bytes
1885 bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1890 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1891 loff = PSS_SMEM_PGOFF(soff);
1892 bfa_trc(ioc, pgnum);
1897 * Hold semaphore to serialize pll init and fwtrc.
1899 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1901 return BFA_STATUS_FAILED;
1904 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1906 len = sz/sizeof(u32); /* len in words */
1908 for (i = 0; i < len; i++) {
1909 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1910 loff += sizeof(u32);
1913 * handle page offset wrap around
1915 loff = PSS_SMEM_PGOFF(loff);
1918 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1921 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1922 ioc->ioc_regs.host_page_num_fn);
1925 * release semaphore.
1927 readl(ioc->ioc_regs.ioc_init_sem_reg);
1928 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1929 bfa_trc(ioc, pgnum);
1930 return BFA_STATUS_OK;
1934 bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1936 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1939 * Notify driver and common modules registered for notification.
1941 ioc->cbfn->hbfail_cbfn(ioc->bfa);
1942 bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
1944 bfa_ioc_debug_save_ftrc(ioc);
1946 BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1947 "Heart Beat of IOC has failed\n");
1948 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
1953 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1955 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1957 * Provide enable completion callback.
1959 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1960 BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
1961 "Running firmware version is incompatible "
1962 "with the driver version\n");
1963 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
1967 bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1971 * Hold semaphore so that nobody can access the chip during init.
1973 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1975 bfa_ioc_pll_init_asic(ioc);
1977 ioc->pllinit = BFA_TRUE;
1982 bfa_ioc_lmem_init(ioc);
1985 * release semaphore.
1987 readl(ioc->ioc_regs.ioc_init_sem_reg);
1988 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1990 return BFA_STATUS_OK;
1994 * Interface used by diag module to do firmware boot with memory test
1995 * as the entry vector.
1998 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
2000 bfa_ioc_stats(ioc, ioc_boots);
2002 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2006 * Initialize IOC state of all functions on a chip reset.
2008 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2009 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
2010 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
2012 writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
2013 writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
2016 bfa_ioc_msgflush(ioc);
2017 bfa_ioc_download_fw(ioc, boot_type, boot_env);
2018 bfa_ioc_lpu_start(ioc);
2022 * Enable/disable IOC failure auto recovery.
2025 bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2027 bfa_auto_recover = auto_recover;
2033 bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2035 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2039 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2041 u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
2043 return ((r32 != BFI_IOC_UNINIT) &&
2044 (r32 != BFI_IOC_INITING) &&
2045 (r32 != BFI_IOC_MEMTEST));
2049 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2051 __be32 *msgp = mbmsg;
2055 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2062 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2064 r32 = readl(ioc->ioc_regs.lpu_mbox +
2066 msgp[i] = cpu_to_be32(r32);
2070 * turn off mailbox interrupt by clearing mailbox status
2072 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2073 readl(ioc->ioc_regs.lpu_mbox_cmd);
2079 bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2081 union bfi_ioc_i2h_msg_u *msg;
2082 struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2084 msg = (union bfi_ioc_i2h_msg_u *) m;
2086 bfa_ioc_stats(ioc, ioc_isrs);
2088 switch (msg->mh.msg_id) {
2089 case BFI_IOC_I2H_HBEAT:
2092 case BFI_IOC_I2H_ENABLE_REPLY:
2093 ioc->port_mode = ioc->port_mode_cfg =
2094 (enum bfa_mode_s)msg->fw_event.port_mode;
2095 ioc->ad_cap_bm = msg->fw_event.cap_bm;
2096 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2099 case BFI_IOC_I2H_DISABLE_REPLY:
2100 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2103 case BFI_IOC_I2H_GETATTR_REPLY:
2104 bfa_ioc_getattr_reply(ioc);
2108 bfa_trc(ioc, msg->mh.msg_id);
2114 * IOC attach time initialization and setup.
2116 * @param[in] ioc memory for IOC
2117 * @param[in] bfa driver instance structure
2120 bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2121 struct bfa_timer_mod_s *timer_mod)
2125 ioc->timer_mod = timer_mod;
2126 ioc->fcmode = BFA_FALSE;
2127 ioc->pllinit = BFA_FALSE;
2128 ioc->dbg_fwsave_once = BFA_TRUE;
2129 ioc->iocpf.ioc = ioc;
2131 bfa_ioc_mbox_attach(ioc);
2132 INIT_LIST_HEAD(&ioc->notify_q);
2134 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2135 bfa_fsm_send_event(ioc, IOC_E_RESET);
2139 * Driver detach time IOC cleanup.
2142 bfa_ioc_detach(struct bfa_ioc_s *ioc)
2144 bfa_fsm_send_event(ioc, IOC_E_DETACH);
2145 INIT_LIST_HEAD(&ioc->notify_q);
2149 * Setup IOC PCI properties.
2151 * @param[in] pcidev PCI device information for this IOC
2154 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2155 enum bfi_pcifn_class clscode)
2157 ioc->clscode = clscode;
2158 ioc->pcidev = *pcidev;
2161 * Initialize IOC and device personality
2163 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2164 ioc->asic_mode = BFI_ASIC_MODE_FC;
2166 switch (pcidev->device_id) {
2167 case BFA_PCI_DEVICE_ID_FC_8G1P:
2168 case BFA_PCI_DEVICE_ID_FC_8G2P:
2169 ioc->asic_gen = BFI_ASIC_GEN_CB;
2170 ioc->fcmode = BFA_TRUE;
2171 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2172 ioc->ad_cap_bm = BFA_CM_HBA;
2175 case BFA_PCI_DEVICE_ID_CT:
2176 ioc->asic_gen = BFI_ASIC_GEN_CT;
2177 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2178 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2179 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2180 ioc->ad_cap_bm = BFA_CM_CNA;
2183 case BFA_PCI_DEVICE_ID_CT_FC:
2184 ioc->asic_gen = BFI_ASIC_GEN_CT;
2185 ioc->fcmode = BFA_TRUE;
2186 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2187 ioc->ad_cap_bm = BFA_CM_HBA;
2190 case BFA_PCI_DEVICE_ID_CT2:
2191 ioc->asic_gen = BFI_ASIC_GEN_CT2;
2192 if (clscode == BFI_PCIFN_CLASS_FC &&
2193 pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2194 ioc->asic_mode = BFI_ASIC_MODE_FC16;
2195 ioc->fcmode = BFA_TRUE;
2196 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2197 ioc->ad_cap_bm = BFA_CM_HBA;
2199 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2200 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2201 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2203 ioc->port_mode_cfg = BFA_MODE_CNA;
2204 ioc->ad_cap_bm = BFA_CM_CNA;
2207 ioc->port_mode_cfg = BFA_MODE_NIC;
2208 ioc->ad_cap_bm = BFA_CM_NIC;
2218 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2220 if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2221 bfa_ioc_set_cb_hwif(ioc);
2222 else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2223 bfa_ioc_set_ct_hwif(ioc);
2225 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2226 bfa_ioc_set_ct2_hwif(ioc);
2227 bfa_ioc_ct2_poweron(ioc);
2230 bfa_ioc_map_port(ioc);
2231 bfa_ioc_reg_init(ioc);
2235 * Initialize IOC dma memory
2237 * @param[in] dm_kva kernel virtual address of IOC dma memory
2238 * @param[in] dm_pa physical address of IOC dma memory
2241 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
2244 * dma memory for firmware attribute
2246 ioc->attr_dma.kva = dm_kva;
2247 ioc->attr_dma.pa = dm_pa;
2248 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2252 bfa_ioc_enable(struct bfa_ioc_s *ioc)
2254 bfa_ioc_stats(ioc, ioc_enables);
2255 ioc->dbg_fwsave_once = BFA_TRUE;
2257 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2261 bfa_ioc_disable(struct bfa_ioc_s *ioc)
2263 bfa_ioc_stats(ioc, ioc_disables);
2264 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2268 bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2270 ioc->dbg_fwsave_once = BFA_TRUE;
2271 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2275 * Initialize memory for saving firmware trace. Driver must initialize
2276 * trace memory before calling bfa_ioc_enable().
2279 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2281 ioc->dbg_fwsave = dbg_fwsave;
2282 ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
2286 * Register mailbox message handler functions
2288 * @param[in] ioc IOC instance
2289 * @param[in] mcfuncs message class handler functions
2292 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2294 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2297 for (mc = 0; mc < BFI_MC_MAX; mc++)
2298 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2302 * Register mailbox message handler function, to be called by common modules
2305 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2306 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2308 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2310 mod->mbhdlr[mc].cbfn = cbfn;
2311 mod->mbhdlr[mc].cbarg = cbarg;
2315 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
2316 * It is the caller's responsibility to serialize requests.
2318 * @param[in] ioc IOC instance
2319 * @param[in] cmd Mailbox command
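 *
 * A typical caller (e.g. bfa_ioc_send_fwsync() below) builds the request
 * in place inside cmd.msg and then queues it:
 *
 *	struct bfa_mbox_cmd_s cmd;
 *	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, &cmd);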
2322 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2324 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2328 * If a previous command is pending, queue new command
2330 if (!list_empty(&mod->cmd_q)) {
2331 list_add_tail(&cmd->qe, &mod->cmd_q);
2336 * If mailbox is busy, queue command for poll timer
2338 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2340 list_add_tail(&cmd->qe, &mod->cmd_q);
2345 * mailbox is free -- queue command to firmware
2347 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2351 * Handle mailbox interrupts
2354 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2356 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2357 struct bfi_mbmsg_s m;
2360 if (bfa_ioc_msgget(ioc, &m)) {
2362 * Treat IOC message class as special.
2364 mc = m.mh.msg_class;
2365 if (mc == BFI_MC_IOC) {
2366 bfa_ioc_isr(ioc, &m);
2370 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2373 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2376 bfa_ioc_lpu_read_stat(ioc);
2379 * Try to send pending mailbox commands
2381 bfa_ioc_mbox_poll(ioc);
2385 bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2387 bfa_ioc_stats(ioc, ioc_hbfails);
2388 ioc->stats.hb_count = ioc->hb_count;
2389 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2393 * return true if IOC is disabled
2396 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2398 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2399 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2403 * return true if IOC firmware is different from the driver firmware.
2406 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2408 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2409 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2410 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2413 #define bfa_ioc_state_disabled(__sm) \
2414 (((__sm) == BFI_IOC_UNINIT) || \
2415 ((__sm) == BFI_IOC_INITING) || \
2416 ((__sm) == BFI_IOC_HWINIT) || \
2417 ((__sm) == BFI_IOC_DISABLED) || \
2418 ((__sm) == BFI_IOC_FAIL) || \
2419 ((__sm) == BFI_IOC_CFG_DISABLED))
2422 * Check if adapter is disabled -- both IOCs should be in a disabled state.
2426 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2430 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2433 ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
2434 if (!bfa_ioc_state_disabled(ioc_state))
2437 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2438 ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
2439 if (!bfa_ioc_state_disabled(ioc_state))
2447 * Reset IOC fwstate registers.
2450 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2452 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
2453 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
2456 #define BFA_MFG_NAME "Brocade"
2458 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2459 struct bfa_adapter_attr_s *ad_attr)
2461 struct bfi_ioc_attr_s *ioc_attr;
2463 ioc_attr = ioc->attr;
2465 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2466 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2467 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2468 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2469 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2470 sizeof(struct bfa_mfg_vpd_s));
2472 ad_attr->nports = bfa_ioc_get_nports(ioc);
2473 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2475 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2476 /* For now, model descr uses same model string */
2477 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2479 ad_attr->card_type = ioc_attr->card_type;
2480 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2482 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2483 ad_attr->prototype = 1;
2485 ad_attr->prototype = 0;
2487 ad_attr->pwwn = ioc->attr->pwwn;
2488 ad_attr->mac = bfa_ioc_get_mac(ioc);
2490 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2491 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2492 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2493 ad_attr->asic_rev = ioc_attr->asic_rev;
2495 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2497 ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2498 ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2499 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2503 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2505 if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2506 return BFA_IOC_TYPE_LL;
2508 WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2510 return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2511 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2515 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2517 memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2518 memcpy((void *)serial_num,
2519 (void *)ioc->attr->brcd_serialnum,
2520 BFA_ADAPTER_SERIAL_NUM_LEN);
2524 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2526 memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2527 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2531 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2535 memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2541 chip_rev[4] = ioc->attr->asic_rev;
2546 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2548 memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2549 memcpy(optrom_ver, ioc->attr->optrom_version,
2554 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2556 memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2557 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2561 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2563 struct bfi_ioc_attr_s *ioc_attr;
2566 memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2568 ioc_attr = ioc->attr;
2570 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2571 BFA_MFG_NAME, ioc_attr->card_type);
2575 bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2577 enum bfa_iocpf_state iocpf_st;
2578 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2580 if (ioc_st == BFA_IOC_ENABLING ||
2581 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2583 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2586 case BFA_IOCPF_SEMWAIT:
2587 ioc_st = BFA_IOC_SEMWAIT;
2590 case BFA_IOCPF_HWINIT:
2591 ioc_st = BFA_IOC_HWINIT;
2594 case BFA_IOCPF_FWMISMATCH:
2595 ioc_st = BFA_IOC_FWMISMATCH;
2598 case BFA_IOCPF_FAIL:
2599 ioc_st = BFA_IOC_FAIL;
2602 case BFA_IOCPF_INITFAIL:
2603 ioc_st = BFA_IOC_INITFAIL;
2615 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2617 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2619 ioc_attr->state = bfa_ioc_get_state(ioc);
2620 ioc_attr->port_id = ioc->port_id;
2621 ioc_attr->port_mode = ioc->port_mode;
2622 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2623 ioc_attr->cap_bm = ioc->ad_cap_bm;
2625 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2627 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2629 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2630 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2631 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2635 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2638 * Check the IOC type and return the appropriate MAC
2640 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2641 return ioc->attr->fcoe_mac;
2643 return ioc->attr->mac;
2647 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2651 m = ioc->attr->mfg_mac;
2652 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2653 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2655 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2656 bfa_ioc_pcifn(ioc));
2662 * Send AEN notification
2665 bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2667 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2668 struct bfa_aen_entry_s *aen_entry;
2669 enum bfa_ioc_type_e ioc_type;
2671 bfad_get_aen_entry(bfad, aen_entry);
2675 ioc_type = bfa_ioc_get_type(ioc);
2677 case BFA_IOC_TYPE_FC:
2678 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2680 case BFA_IOC_TYPE_FCoE:
2681 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2682 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2684 case BFA_IOC_TYPE_LL:
2685 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2688 WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2692 /* Send the AEN notification */
2693 aen_entry->aen_data.ioc.ioc_type = ioc_type;
2694 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2695 BFA_AEN_CAT_IOC, event);
2699 * Retrieve saved firmware trace from a prior IOC failure.
2702 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2706 if (ioc->dbg_fwsave_len == 0)
2707 return BFA_STATUS_ENOFSAVE;
2710 if (tlen > ioc->dbg_fwsave_len)
2711 tlen = ioc->dbg_fwsave_len;
2713 memcpy(trcdata, ioc->dbg_fwsave, tlen);
2715 return BFA_STATUS_OK;
2720 * Retrieve saved firmware trace from a prior IOC failure.
2723 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2725 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2727 bfa_status_t status;
2729 bfa_trc(ioc, *trclen);
2732 if (tlen > BFA_DBG_FWTRC_LEN)
2733 tlen = BFA_DBG_FWTRC_LEN;
2735 status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2741 bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2743 struct bfa_mbox_cmd_s cmd;
2744 struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2746 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2747 bfa_ioc_portid(ioc));
2748 req->clscode = cpu_to_be16(ioc->clscode);
2749 bfa_ioc_mbox_queue(ioc, &cmd);
2753 bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2755 u32 fwsync_iter = 1000;
2757 bfa_ioc_send_fwsync(ioc);
2760 * After sending a fw sync mbox command wait for it to
2761 * take effect. We will not wait for a response because
2762 * 1. fw_sync mbox cmd doesn't have a response.
2763 * 2. Even if we implement that, interrupts might not
2764 * be enabled when we call this function.
2765 * So, just keep checking if any mbox cmd is pending, and
2766 * after waiting for a reasonable amount of time, go ahead.
2767 * It is possible that fw has crashed and the mbox command
2768 * is never acknowledged.
2770 while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2775 * Dump firmware smem
2778 bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2779 u32 *offset, int *buflen)
2783 bfa_status_t status;
2784 u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
2786 if (*offset >= smem_len) {
2787 *offset = *buflen = 0;
2788 return BFA_STATUS_EINVAL;
2795 * First smem read, sync smem before proceeding
2796 * No need to sync before reading every chunk.
2799 bfa_ioc_fwsync(ioc);
2801 if ((loff + dlen) >= smem_len)
2802 dlen = smem_len - loff;
2804 status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2806 if (status != BFA_STATUS_OK) {
2807 *offset = *buflen = 0;
2813 if (*offset >= smem_len)
2822 * Firmware statistics
2825 bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2827 u32 loff = BFI_IOC_FWSTATS_OFF + \
2828 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2830 bfa_status_t status;
2832 if (ioc->stats_busy) {
2833 bfa_trc(ioc, ioc->stats_busy);
2834 return BFA_STATUS_DEVBUSY;
2836 ioc->stats_busy = BFA_TRUE;
2838 tlen = sizeof(struct bfa_fw_stats_s);
2839 status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2841 ioc->stats_busy = BFA_FALSE;
2846 bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2848 u32 loff = BFI_IOC_FWSTATS_OFF + \
2849 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2851 bfa_status_t status;
2853 if (ioc->stats_busy) {
2854 bfa_trc(ioc, ioc->stats_busy);
2855 return BFA_STATUS_DEVBUSY;
2857 ioc->stats_busy = BFA_TRUE;
2859 tlen = sizeof(struct bfa_fw_stats_s);
2860 status = bfa_ioc_smem_clr(ioc, loff, tlen);
2862 ioc->stats_busy = BFA_FALSE;
2867 * Save firmware trace if configured.
2870 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
2874 if (ioc->dbg_fwsave_once) {
2875 ioc->dbg_fwsave_once = BFA_FALSE;
2876 if (ioc->dbg_fwsave_len) {
2877 tlen = ioc->dbg_fwsave_len;
2878 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2884 * Firmware failure detected. Start recovery actions.
2887 bfa_ioc_recover(struct bfa_ioc_s *ioc)
2889 bfa_ioc_stats(ioc, ioc_hbfails);
2890 ioc->stats.hb_count = ioc->hb_count;
2891 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2895 * BFA IOC PF private functions
2898 bfa_iocpf_timeout(void *ioc_arg)
2900 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2903 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2907 bfa_iocpf_sem_timeout(void *ioc_arg)
2909 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2911 bfa_ioc_hw_sem_get(ioc);
2915 bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2917 u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2919 bfa_trc(ioc, fwstate);
2921 if (fwstate == BFI_IOC_DISABLED) {
2922 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2926 if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
2927 bfa_iocpf_timeout(ioc);
2929 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2930 bfa_iocpf_poll_timer_start(ioc);
2935 bfa_iocpf_poll_timeout(void *ioc_arg)
2937 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2939 bfa_ioc_poll_fwinit(ioc);
2943 * bfa timer function
2946 bfa_timer_beat(struct bfa_timer_mod_s *mod)
2948 struct list_head *qh = &mod->timer_q;
2949 struct list_head *qe, *qe_next;
2950 struct bfa_timer_s *elem;
2951 struct list_head timedout_q;
2953 INIT_LIST_HEAD(&timedout_q);
2955 qe = bfa_q_next(qh);
2958 qe_next = bfa_q_next(qe);
2960 elem = (struct bfa_timer_s *) qe;
2961 if (elem->timeout <= BFA_TIMER_FREQ) {
2963 list_del(&elem->qe);
2964 list_add_tail(&elem->qe, &timedout_q);
2966 elem->timeout -= BFA_TIMER_FREQ;
2969 qe = qe_next; /* go to next elem */
2973 * Pop all the timeout entries
2975 while (!list_empty(&timedout_q)) {
2976 bfa_q_deq(&timedout_q, &elem);
2977 elem->timercb(elem->arg);
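/*
 * A minimal usage sketch for the timer API (the callback and argument
 * names below are illustrative only):
 *
 *	bfa_timer_begin(timer_mod, &my_timer, my_timeout_cb, my_arg, 2000);
 *	...
 *	bfa_timer_stop(&my_timer);
 *
 * bfa_timer_beat() is expected to be driven periodically (every
 * BFA_TIMER_FREQ msecs) so that pending timeouts are decremented and
 * expired callbacks are invoked.
 */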
2982 * Should be called with lock protection
2985 bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2986 void (*timercb) (void *), void *arg, unsigned int timeout)
2989 WARN_ON(timercb == NULL);
2990 WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
2992 timer->timeout = timeout;
2993 timer->timercb = timercb;
2996 list_add_tail(&timer->qe, &mod->timer_q);
3000 * Should be called with lock protection
3003 bfa_timer_stop(struct bfa_timer_s *timer)
3005 WARN_ON(list_empty(&timer->qe));
3007 list_del(&timer->qe);
3011 * ASIC block related
3014 bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
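	/*
	 * The ABLK query response is DMAed by the firmware in big-endian
	 * format; convert every per-PF field to host byte order in place.
	 */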
3016 struct bfa_ablk_cfg_inst_s *cfg_inst;
3021 for (i = 0; i < BFA_ABLK_MAX; i++) {
3022 cfg_inst = &cfg->inst[i];
3023 for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3024 be16 = cfg_inst->pf_cfg[j].pers;
3025 cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3026 be16 = cfg_inst->pf_cfg[j].num_qpairs;
3027 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3028 be16 = cfg_inst->pf_cfg[j].num_vectors;
3029 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3030 be32 = cfg_inst->pf_cfg[j].bw;
3031 cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
3037 bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3039 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3040 struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3041 bfa_ablk_cbfn_t cbfn;
3043 WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3044 bfa_trc(ablk->ioc, msg->mh.msg_id);
3046 switch (msg->mh.msg_id) {
3047 case BFI_ABLK_I2H_QUERY:
3048 if (rsp->status == BFA_STATUS_OK) {
3049 memcpy(ablk->cfg, ablk->dma_addr.kva,
3050 sizeof(struct bfa_ablk_cfg_s));
3051 bfa_ablk_config_swap(ablk->cfg);
3056 case BFI_ABLK_I2H_ADPT_CONFIG:
3057 case BFI_ABLK_I2H_PORT_CONFIG:
3058 /* update config port mode */
3059 ablk->ioc->port_mode_cfg = rsp->port_mode;
3061 case BFI_ABLK_I2H_PF_DELETE:
3062 case BFI_ABLK_I2H_PF_UPDATE:
3063 case BFI_ABLK_I2H_OPTROM_ENABLE:
3064 case BFI_ABLK_I2H_OPTROM_DISABLE:
3068 case BFI_ABLK_I2H_PF_CREATE:
3069 *(ablk->pcifn) = rsp->pcifn;
3077 ablk->busy = BFA_FALSE;
3081 cbfn(ablk->cbarg, rsp->status);
3086 bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3088 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3090 bfa_trc(ablk->ioc, event);
3093 case BFA_IOC_E_ENABLED:
3094 WARN_ON(ablk->busy != BFA_FALSE);
3097 case BFA_IOC_E_DISABLED:
3098 case BFA_IOC_E_FAILED:
3099 /* Fail any pending requests */
3103 ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3105 ablk->busy = BFA_FALSE;
3116 bfa_ablk_meminfo(void)
3118 return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3122 bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3124 ablk->dma_addr.kva = dma_kva;
3125 ablk->dma_addr.pa = dma_pa;
3129 bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3133 bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3134 bfa_q_qe_init(&ablk->ioc_notify);
3135 bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3136 list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3140 bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3141 bfa_ablk_cbfn_t cbfn, void *cbarg)
3143 struct bfi_ablk_h2i_query_s *m;
3147 if (!bfa_ioc_is_operational(ablk->ioc)) {
3148 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3149 return BFA_STATUS_IOC_FAILURE;
3153 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3154 return BFA_STATUS_DEVBUSY;
3157 ablk->cfg = ablk_cfg;
3159 ablk->cbarg = cbarg;
3160 ablk->busy = BFA_TRUE;
3162 m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3163 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3164 bfa_ioc_portid(ablk->ioc));
3165 bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3166 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3168 return BFA_STATUS_OK;
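/*
 * All bfa_ablk_* requests below follow the same pattern seen in
 * bfa_ablk_query(): verify the IOC is operational, reject the call with
 * BFA_STATUS_DEVBUSY if a request is already outstanding, save the
 * caller's callback, mark the block busy and queue a mailbox message.
 * Completion (and clearing of the busy flag) happens in bfa_ablk_isr()
 * when the firmware response arrives.
 */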
3172 bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3173 u8 port, enum bfi_pcifn_class personality, int bw,
3174 bfa_ablk_cbfn_t cbfn, void *cbarg)
3176 struct bfi_ablk_h2i_pf_req_s *m;
3178 if (!bfa_ioc_is_operational(ablk->ioc)) {
3179 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3180 return BFA_STATUS_IOC_FAILURE;
3184 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3185 return BFA_STATUS_DEVBUSY;
3188 ablk->pcifn = pcifn;
3190 ablk->cbarg = cbarg;
3191 ablk->busy = BFA_TRUE;
3193 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3194 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3195 bfa_ioc_portid(ablk->ioc));
3196 m->pers = cpu_to_be16((u16)personality);
3197 m->bw = cpu_to_be32(bw);
3199 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3201 return BFA_STATUS_OK;
3205 bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3206 bfa_ablk_cbfn_t cbfn, void *cbarg)
3208 struct bfi_ablk_h2i_pf_req_s *m;
3210 if (!bfa_ioc_is_operational(ablk->ioc)) {
3211 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3212 return BFA_STATUS_IOC_FAILURE;
3216 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3217 return BFA_STATUS_DEVBUSY;
3221 ablk->cbarg = cbarg;
3222 ablk->busy = BFA_TRUE;
3224 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3225 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3226 bfa_ioc_portid(ablk->ioc));
3227 m->pcifn = (u8)pcifn;
3228 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3230 return BFA_STATUS_OK;
3234 bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3235 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3237 struct bfi_ablk_h2i_cfg_req_s *m;
3239 if (!bfa_ioc_is_operational(ablk->ioc)) {
3240 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3241 return BFA_STATUS_IOC_FAILURE;
3245 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3246 return BFA_STATUS_DEVBUSY;
3250 ablk->cbarg = cbarg;
3251 ablk->busy = BFA_TRUE;
3253 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3254 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3255 bfa_ioc_portid(ablk->ioc));
3257 m->max_pf = (u8)max_pf;
3258 m->max_vf = (u8)max_vf;
3259 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3261 return BFA_STATUS_OK;
3265 bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3266 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3268 struct bfi_ablk_h2i_cfg_req_s *m;
3270 if (!bfa_ioc_is_operational(ablk->ioc)) {
3271 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3272 return BFA_STATUS_IOC_FAILURE;
3276 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3277 return BFA_STATUS_DEVBUSY;
3281 ablk->cbarg = cbarg;
3282 ablk->busy = BFA_TRUE;
3284 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3285 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3286 bfa_ioc_portid(ablk->ioc));
3289 m->max_pf = (u8)max_pf;
3290 m->max_vf = (u8)max_vf;
3291 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3293 return BFA_STATUS_OK;
3297 bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
3298 bfa_ablk_cbfn_t cbfn, void *cbarg)
3300 struct bfi_ablk_h2i_pf_req_s *m;
3302 if (!bfa_ioc_is_operational(ablk->ioc)) {
3303 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3304 return BFA_STATUS_IOC_FAILURE;
3308 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3309 return BFA_STATUS_DEVBUSY;
3313 ablk->cbarg = cbarg;
3314 ablk->busy = BFA_TRUE;
3316 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3317 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3318 bfa_ioc_portid(ablk->ioc));
3319 m->pcifn = (u8)pcifn;
3320 m->bw = cpu_to_be32(bw);
3321 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3323 return BFA_STATUS_OK;
3327 bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3329 struct bfi_ablk_h2i_optrom_s *m;
3331 if (!bfa_ioc_is_operational(ablk->ioc)) {
3332 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3333 return BFA_STATUS_IOC_FAILURE;
3337 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3338 return BFA_STATUS_DEVBUSY;
3342 ablk->cbarg = cbarg;
3343 ablk->busy = BFA_TRUE;
3345 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3346 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3347 bfa_ioc_portid(ablk->ioc));
3348 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3350 return BFA_STATUS_OK;
3354 bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3356 struct bfi_ablk_h2i_optrom_s *m;
3358 if (!bfa_ioc_is_operational(ablk->ioc)) {
3359 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3360 return BFA_STATUS_IOC_FAILURE;
3364 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3365 return BFA_STATUS_DEVBUSY;
3369 ablk->cbarg = cbarg;
3370 ablk->busy = BFA_TRUE;
3372 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3373 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3374 bfa_ioc_portid(ablk->ioc));
3375 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3377 return BFA_STATUS_OK;
3381 * SFP module specific
3384 /* forward declarations */
3385 static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3386 static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3387 static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3388 enum bfa_port_speed portspeed);
3391 bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3393 bfa_trc(sfp, sfp->lock);
3395 sfp->cbfn(sfp->cbarg, sfp->status);
3401 bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3403 bfa_trc(sfp, sfp->portspeed);
3405 bfa_sfp_media_get(sfp);
3406 if (sfp->state_query_cbfn)
3407 sfp->state_query_cbfn(sfp->state_query_cbarg,
3412 if (sfp->portspeed) {
3413 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3414 if (sfp->state_query_cbfn)
3415 sfp->state_query_cbfn(sfp->state_query_cbarg,
3417 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3420 sfp->state_query_lock = 0;
3421 sfp->state_query_cbfn = NULL;
3425 * IOC event handler.
3428 bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3430 struct bfa_sfp_s *sfp = sfp_arg;
3432 bfa_trc(sfp, event);
3433 bfa_trc(sfp, sfp->lock);
3434 bfa_trc(sfp, sfp->state_query_lock);
3437 case BFA_IOC_E_DISABLED:
3438 case BFA_IOC_E_FAILED:
3440 sfp->status = BFA_STATUS_IOC_FAILURE;
3441 bfa_cb_sfp_show(sfp);
3444 if (sfp->state_query_lock) {
3445 sfp->status = BFA_STATUS_IOC_FAILURE;
3446 bfa_cb_sfp_state_query(sfp);
3456 * Post SFP State Change Notification (SCN) events to the AEN
3459 bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3461 struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3462 struct bfa_aen_entry_s *aen_entry;
3463 enum bfa_port_aen_event aen_evt = 0;
3465 bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3468 bfad_get_aen_entry(bfad, aen_entry);
3472 aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3473 aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3474 aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3476 switch (rsp->event) {
3477 case BFA_SFP_SCN_INSERTED:
3478 aen_evt = BFA_PORT_AEN_SFP_INSERT;
3480 case BFA_SFP_SCN_REMOVED:
3481 aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3483 case BFA_SFP_SCN_FAILED:
3484 aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3486 case BFA_SFP_SCN_UNSUPPORT:
3487 aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3489 case BFA_SFP_SCN_POM:
3490 aen_evt = BFA_PORT_AEN_SFP_POM;
3491 aen_entry->aen_data.port.level = rsp->pomlvl;
3494 bfa_trc(sfp, rsp->event);
3498 /* Send the AEN notification */
3499 bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3500 BFA_AEN_CAT_PORT, aen_evt);
3507 bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3509 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3511 bfa_trc(sfp, req->memtype);
3513 /* build host command */
3514 bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3515 bfa_ioc_portid(sfp->ioc));
3518 bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3522 * SFP is valid, read sfp data
3525 bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3527 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3529 WARN_ON(sfp->lock != 0);
3530 bfa_trc(sfp, sfp->state);
3533 sfp->memtype = memtype;
3534 req->memtype = memtype;
3537 bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3539 bfa_sfp_getdata_send(sfp);
3546 bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3548 struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3550 switch (rsp->event) {
3551 case BFA_SFP_SCN_INSERTED:
3552 sfp->state = BFA_SFP_STATE_INSERTED;
3553 sfp->data_valid = 0;
3554 bfa_sfp_scn_aen_post(sfp, rsp);
3556 case BFA_SFP_SCN_REMOVED:
3557 sfp->state = BFA_SFP_STATE_REMOVED;
3558 sfp->data_valid = 0;
3559 bfa_sfp_scn_aen_post(sfp, rsp);
3561 case BFA_SFP_SCN_FAILED:
3562 sfp->state = BFA_SFP_STATE_FAILED;
3563 sfp->data_valid = 0;
3564 bfa_sfp_scn_aen_post(sfp, rsp);
3566 case BFA_SFP_SCN_UNSUPPORT:
3567 sfp->state = BFA_SFP_STATE_UNSUPPORT;
3568 bfa_sfp_scn_aen_post(sfp, rsp);
3570 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3572 case BFA_SFP_SCN_POM:
3573 bfa_sfp_scn_aen_post(sfp, rsp);
3575 case BFA_SFP_SCN_VALID:
3576 sfp->state = BFA_SFP_STATE_VALID;
3578 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3581 bfa_trc(sfp, rsp->event);
3590 bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3592 struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3596 * receiving response after ioc failure
3598 bfa_trc(sfp, sfp->lock);
3602 bfa_trc(sfp, rsp->status);
3603 if (rsp->status == BFA_STATUS_OK) {
3604 sfp->data_valid = 1;
3605 if (sfp->state == BFA_SFP_STATE_VALID)
3606 sfp->status = BFA_STATUS_OK;
3607 else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3608 sfp->status = BFA_STATUS_SFP_UNSUPP;
3610 bfa_trc(sfp, sfp->state);
3612 sfp->data_valid = 0;
3613 sfp->status = rsp->status;
3614 /* sfpshow shouldn't change sfp state */
3617 bfa_trc(sfp, sfp->memtype);
3618 if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3619 bfa_trc(sfp, sfp->data_valid);
3620 if (sfp->data_valid) {
3621 u32 size = sizeof(struct sfp_mem_s);
3622 u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
3623 memcpy(des, sfp->dbuf_kva, size);
3626 * Queue completion callback.
3628 bfa_cb_sfp_show(sfp);
3632 bfa_trc(sfp, sfp->state_query_lock);
3633 if (sfp->state_query_lock) {
3634 sfp->state = rsp->state;
3635 /* Complete callback */
3636 bfa_cb_sfp_state_query(sfp);
3641 * Query firmware for the current SFP state
3644 bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3646 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3648 /* Should not be doing query if not in _INIT state */
3649 WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3650 WARN_ON(sfp->state_query_lock != 0);
3651 bfa_trc(sfp, sfp->state);
3653 sfp->state_query_lock = 1;
3657 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3661 bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3663 enum bfa_defs_sfp_media_e *media = sfp->media;
3665 *media = BFA_SFP_MEDIA_UNKNOWN;
3667 if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3668 *media = BFA_SFP_MEDIA_UNSUPPORT;
3669 else if (sfp->state == BFA_SFP_STATE_VALID) {
3670 union sfp_xcvr_e10g_code_u e10g;
3671 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3672 u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3673 (sfpmem->srlid_base.xcvr[5] >> 1);
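		/*
		 * xmtr_tech is the 9-bit FC transmitter technology field:
		 * the low two bits of xcvr[4] form bits 8:7 and the upper
		 * seven bits of xcvr[5] form bits 6:0.
		 */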
3675 e10g.b = sfpmem->srlid_base.xcvr[0];
3676 bfa_trc(sfp, e10g.b);
3677 bfa_trc(sfp, xmtr_tech);
3678 /* check fc transmitter tech */
3679 if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3680 (xmtr_tech & SFP_XMTR_TECH_CP) ||
3681 (xmtr_tech & SFP_XMTR_TECH_CA))
3682 *media = BFA_SFP_MEDIA_CU;
3683 else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3684 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3685 *media = BFA_SFP_MEDIA_EL;
3686 else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3687 (xmtr_tech & SFP_XMTR_TECH_LC))
3688 *media = BFA_SFP_MEDIA_LW;
3689 else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3690 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3691 (xmtr_tech & SFP_XMTR_TECH_SA))
3692 *media = BFA_SFP_MEDIA_SW;
3693 /* Check 10G Ethernet Compliance code */
3694 else if (e10g.r.e10g_sr)
3695 *media = BFA_SFP_MEDIA_SW;
3696 else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3697 *media = BFA_SFP_MEDIA_LW;
3698 else if (e10g.r.e10g_unall)
3699 *media = BFA_SFP_MEDIA_UNKNOWN;
3703 bfa_trc(sfp, sfp->state);
3707 bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3709 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3710 struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3711 union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3712 union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3714 if (portspeed == BFA_PORT_SPEED_10GBPS) {
3715 if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3716 return BFA_STATUS_OK;
3718 bfa_trc(sfp, e10g.b);
3719 return BFA_STATUS_UNSUPP_SPEED;
3722 if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3723 ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3724 ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3725 ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3726 ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3727 return BFA_STATUS_OK;
3729 bfa_trc(sfp, portspeed);
3730 bfa_trc(sfp, fc3.b);
3731 bfa_trc(sfp, e10g.b);
3732 return BFA_STATUS_UNSUPP_SPEED;
3740 bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3742 struct bfa_sfp_s *sfp = sfparg;
3744 switch (msg->mh.msg_id) {
3745 case BFI_SFP_I2H_SHOW:
3746 bfa_sfp_show_comp(sfp, msg);
3749 case BFI_SFP_I2H_SCN:
3750 bfa_sfp_scn(sfp, msg);
3754 bfa_trc(sfp, msg->mh.msg_id);
3760 * Return DMA memory needed by sfp module.
3763 bfa_sfp_meminfo(void)
3765 return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3769 * Attach virtual and physical memory for SFP.
3772 bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
3773 struct bfa_trc_mod_s *trcmod)
3777 sfp->trcmod = trcmod;
3783 sfp->data_valid = 0;
3784 sfp->state = BFA_SFP_STATE_INIT;
3785 sfp->state_query_lock = 0;
3786 sfp->state_query_cbfn = NULL;
3787 sfp->state_query_cbarg = NULL;
3789 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3790 sfp->is_elb = BFA_FALSE;
3792 bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
3793 bfa_q_qe_init(&sfp->ioc_notify);
3794 bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
3795 list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
3799 * Claim Memory for SFP
3802 bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
3804 sfp->dbuf_kva = dm_kva;
3805 sfp->dbuf_pa = dm_pa;
3806 memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
3808 dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3809 dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3813 * Show SFP eeprom content
3815 * @param[in] sfp - bfa sfp module
3817 * @param[out] sfpmem - sfp eeprom data
3821 bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
3822 bfa_cb_sfp_t cbfn, void *cbarg)
3825 if (!bfa_ioc_is_operational(sfp->ioc)) {
3827 return BFA_STATUS_IOC_NON_OP;
3832 return BFA_STATUS_DEVBUSY;
3837 sfp->sfpmem = sfpmem;
3839 bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
3840 return BFA_STATUS_OK;
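/*
 * A minimal usage sketch for bfa_sfp_show() (the completion callback and
 * its argument names are illustrative only):
 *
 *	if (bfa_sfp_show(sfp, &sfpmem, my_sfp_show_cb, my_arg) !=
 *	    BFA_STATUS_OK)
 *		handle the busy or non-operational case;
 *
 * The EEPROM contents are copied into 'sfpmem' and the callback is
 * invoked once the firmware response is processed in bfa_sfp_show_comp().
 */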
3844 * Return SFP Media type
3846 * @param[in] sfp - bfa sfp module
3848 * @param[out] media - detected SFP media type
3852 bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
3853 bfa_cb_sfp_t cbfn, void *cbarg)
3855 if (!bfa_ioc_is_operational(sfp->ioc)) {
3857 return BFA_STATUS_IOC_NON_OP;
3861 if (sfp->state == BFA_SFP_STATE_INIT) {
3862 if (sfp->state_query_lock) {
3864 return BFA_STATUS_DEVBUSY;
3866 sfp->state_query_cbfn = cbfn;
3867 sfp->state_query_cbarg = cbarg;
3868 bfa_sfp_state_query(sfp);
3869 return BFA_STATUS_SFP_NOT_READY;
3873 bfa_sfp_media_get(sfp);
3874 return BFA_STATUS_OK;
3878 * Check whether the user-configured port speed is supported by the SFP
3880 * @param[in] sfp - bfa sfp module
3881 * @param[in] portspeed - port speed from user
3885 bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3886 bfa_cb_sfp_t cbfn, void *cbarg)
3888 WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
3890 if (!bfa_ioc_is_operational(sfp->ioc))
3891 return BFA_STATUS_IOC_NON_OP;
3893 /* For Mezz cards, all speeds are allowed */
3894 if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
3895 return BFA_STATUS_OK;
3897 /* Check SFP state */
3898 sfp->portspeed = portspeed;
3899 if (sfp->state == BFA_SFP_STATE_INIT) {
3900 if (sfp->state_query_lock) {
3902 return BFA_STATUS_DEVBUSY;
3904 sfp->state_query_cbfn = cbfn;
3905 sfp->state_query_cbarg = cbarg;
3906 bfa_sfp_state_query(sfp);
3907 return BFA_STATUS_SFP_NOT_READY;
3911 if (sfp->state == BFA_SFP_STATE_REMOVED ||
3912 sfp->state == BFA_SFP_STATE_FAILED) {
3913 bfa_trc(sfp, sfp->state);
3914 return BFA_STATUS_NO_SFP_DEV;
3917 if (sfp->state == BFA_SFP_STATE_INSERTED) {
3918 bfa_trc(sfp, sfp->state);
3919 return BFA_STATUS_DEVBUSY; /* sfp is reading data */
3922 /* For electrical loopback (eloopback) SFPs, all speeds are allowed */
3924 return BFA_STATUS_OK;
3926 return bfa_sfp_speed_valid(sfp, portspeed);
3930 * Flash module specific
3934 * The FLASH DMA buffer should be big enough to hold both the MFG block and
3935 * the ASIC block (64k) at the same time, and should be 2k aligned so that a
3936 * write segment never crosses a sector boundary.
3938 #define BFA_FLASH_SEG_SZ 2048
3939 #define BFA_FLASH_DMA_BUF_SZ \
3940 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
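/*
 * i.e. the 64KB (0x010000) ASIC block plus the MFG block, rounded up to
 * the 2KB segment size, so a single buffer can hold both at once.
 */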
3943 bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
3946 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
3947 struct bfa_aen_entry_s *aen_entry;
3949 bfad_get_aen_entry(bfad, aen_entry);
3953 aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
3954 aen_entry->aen_data.audit.partition_inst = inst;
3955 aen_entry->aen_data.audit.partition_type = type;
3957 /* Send the AEN notification */
3958 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
3959 BFA_AEN_CAT_AUDIT, event);
3963 bfa_flash_cb(struct bfa_flash_s *flash)
3967 flash->cbfn(flash->cbarg, flash->status);
3971 bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
3973 struct bfa_flash_s *flash = cbarg;
3975 bfa_trc(flash, event);
3977 case BFA_IOC_E_DISABLED:
3978 case BFA_IOC_E_FAILED:
3979 if (flash->op_busy) {
3980 flash->status = BFA_STATUS_IOC_FAILURE;
3981 flash->cbfn(flash->cbarg, flash->status);
3992 * Send flash attribute query request.
3994 * @param[in] cbarg - callback argument
3997 bfa_flash_query_send(void *cbarg)
3999 struct bfa_flash_s *flash = cbarg;
4000 struct bfi_flash_query_req_s *msg =
4001 (struct bfi_flash_query_req_s *) flash->mb.msg;
4003 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4004 bfa_ioc_portid(flash->ioc));
4005 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4007 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4011 * Send flash write request.
4013 * @param[in] cbarg - callback argument
4016 bfa_flash_write_send(struct bfa_flash_s *flash)
4018 struct bfi_flash_write_req_s *msg =
4019 (struct bfi_flash_write_req_s *) flash->mb.msg;
4022 msg->type = be32_to_cpu(flash->type);
4023 msg->instance = flash->instance;
4024 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4025 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4026 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4027 msg->length = be32_to_cpu(len);
4029 /* indicate if it's the last msg of the whole write operation */
4030 msg->last = (len == flash->residue) ? 1 : 0;
4032 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4033 bfa_ioc_portid(flash->ioc));
4034 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4035 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4036 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4038 flash->residue -= len;
4039 flash->offset += len;
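/*
 * Writes larger than BFA_FLASH_DMA_BUF_SZ are sent in chunks: 'residue'
 * and 'offset' track the remaining bytes, and the next chunk is queued
 * from the BFI_FLASH_I2H_WRITE_RSP handler in bfa_flash_intr() until
 * residue reaches zero.
 */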
4043 * Send flash read request.
4045 * @param[in] cbarg - callback argument
4048 bfa_flash_read_send(void *cbarg)
4050 struct bfa_flash_s *flash = cbarg;
4051 struct bfi_flash_read_req_s *msg =
4052 (struct bfi_flash_read_req_s *) flash->mb.msg;
4055 msg->type = be32_to_cpu(flash->type);
4056 msg->instance = flash->instance;
4057 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4058 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4059 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4060 msg->length = be32_to_cpu(len);
4061 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4062 bfa_ioc_portid(flash->ioc));
4063 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4064 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4068 * Send flash erase request.
4070 * @param[in] cbarg - callback argument
4073 bfa_flash_erase_send(void *cbarg)
4075 struct bfa_flash_s *flash = cbarg;
4076 struct bfi_flash_erase_req_s *msg =
4077 (struct bfi_flash_erase_req_s *) flash->mb.msg;
4079 msg->type = be32_to_cpu(flash->type);
4080 msg->instance = flash->instance;
4081 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4082 bfa_ioc_portid(flash->ioc));
4083 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4087 * Process flash response messages upon receiving interrupts.
4089 * @param[in] flasharg - flash structure
4090 * @param[in] msg - message structure
4093 bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4095 struct bfa_flash_s *flash = flasharg;
4099 struct bfi_flash_query_rsp_s *query;
4100 struct bfi_flash_erase_rsp_s *erase;
4101 struct bfi_flash_write_rsp_s *write;
4102 struct bfi_flash_read_rsp_s *read;
4103 struct bfi_flash_event_s *event;
4104 struct bfi_mbmsg_s *msg;
4108 bfa_trc(flash, msg->mh.msg_id);
4110 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4111 /* receiving response after ioc failure */
4112 bfa_trc(flash, 0x9999);
4116 switch (msg->mh.msg_id) {
4117 case BFI_FLASH_I2H_QUERY_RSP:
4118 status = be32_to_cpu(m.query->status);
4119 bfa_trc(flash, status);
4120 if (status == BFA_STATUS_OK) {
4122 struct bfa_flash_attr_s *attr, *f;
4124 attr = (struct bfa_flash_attr_s *) flash->ubuf;
4125 f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4126 attr->status = be32_to_cpu(f->status);
4127 attr->npart = be32_to_cpu(f->npart);
4128 bfa_trc(flash, attr->status);
4129 bfa_trc(flash, attr->npart);
4130 for (i = 0; i < attr->npart; i++) {
4131 attr->part[i].part_type =
4132 be32_to_cpu(f->part[i].part_type);
4133 attr->part[i].part_instance =
4134 be32_to_cpu(f->part[i].part_instance);
4135 attr->part[i].part_off =
4136 be32_to_cpu(f->part[i].part_off);
4137 attr->part[i].part_size =
4138 be32_to_cpu(f->part[i].part_size);
4139 attr->part[i].part_len =
4140 be32_to_cpu(f->part[i].part_len);
4141 attr->part[i].part_status =
4142 be32_to_cpu(f->part[i].part_status);
4145 flash->status = status;
4146 bfa_flash_cb(flash);
4148 case BFI_FLASH_I2H_ERASE_RSP:
4149 status = be32_to_cpu(m.erase->status);
4150 bfa_trc(flash, status);
4151 flash->status = status;
4152 bfa_flash_cb(flash);
4154 case BFI_FLASH_I2H_WRITE_RSP:
4155 status = be32_to_cpu(m.write->status);
4156 bfa_trc(flash, status);
4157 if (status != BFA_STATUS_OK || flash->residue == 0) {
4158 flash->status = status;
4159 bfa_flash_cb(flash);
4161 bfa_trc(flash, flash->offset);
4162 bfa_flash_write_send(flash);
4165 case BFI_FLASH_I2H_READ_RSP:
4166 status = be32_to_cpu(m.read->status);
4167 bfa_trc(flash, status);
4168 if (status != BFA_STATUS_OK) {
4169 flash->status = status;
4170 bfa_flash_cb(flash);
4172 u32 len = be32_to_cpu(m.read->length);
4173 bfa_trc(flash, flash->offset);
4174 bfa_trc(flash, len);
4175 memcpy(flash->ubuf + flash->offset,
4176 flash->dbuf_kva, len);
4177 flash->residue -= len;
4178 flash->offset += len;
4179 if (flash->residue == 0) {
4180 flash->status = status;
4181 bfa_flash_cb(flash);
4183 bfa_flash_read_send(flash);
4186 case BFI_FLASH_I2H_BOOT_VER_RSP:
4188 case BFI_FLASH_I2H_EVENT:
4189 status = be32_to_cpu(m.event->status);
4190 bfa_trc(flash, status);
4191 if (status == BFA_STATUS_BAD_FWCFG)
4192 bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4193 else if (status == BFA_STATUS_INVALID_VENDOR) {
4195 param = be32_to_cpu(m.event->param);
4196 bfa_trc(flash, param);
4197 bfa_ioc_aen_post(flash->ioc,
4198 BFA_IOC_AEN_INVALID_VENDOR);
4208 * Flash memory info API.
4210 * @param[in] mincfg - minimal cfg variable
4213 bfa_flash_meminfo(bfa_boolean_t mincfg)
4215 /* min driver doesn't need flash */
4218 return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4224 * @param[in] flash - flash structure
4225 * @param[in] ioc - ioc structure
4226 * @param[in] dev - device structure
4227 * @param[in] trcmod - trace module
4228 * @param[in] mincfg - minimal cfg variable
4231 bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4232 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4235 flash->trcmod = trcmod;
4237 flash->cbarg = NULL;
4240 bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4241 bfa_q_qe_init(&flash->ioc_notify);
4242 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4243 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4245 /* min driver doesn't need flash */
4247 flash->dbuf_kva = NULL;
4253 * Claim memory for flash
4255 * @param[in] flash - flash structure
4256 * @param[in] dm_kva - pointer to virtual memory address
4257 * @param[in] dm_pa - physical memory address
4258 * @param[in] mincfg - minimal cfg variable
4261 bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4262 bfa_boolean_t mincfg)
4267 flash->dbuf_kva = dm_kva;
4268 flash->dbuf_pa = dm_pa;
4269 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4270 dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4271 dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4275 * Get flash attribute.
4277 * @param[in] flash - flash structure
4278 * @param[in] attr - flash attribute structure
4279 * @param[in] cbfn - callback function
4280 * @param[in] cbarg - callback argument
4285 bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4286 bfa_cb_flash_t cbfn, void *cbarg)
4288 bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4290 if (!bfa_ioc_is_operational(flash->ioc))
4291 return BFA_STATUS_IOC_NON_OP;
4293 if (flash->op_busy) {
4294 bfa_trc(flash, flash->op_busy);
4295 return BFA_STATUS_DEVBUSY;
4300 flash->cbarg = cbarg;
4301 flash->ubuf = (u8 *) attr;
4302 bfa_flash_query_send(flash);
4304 return BFA_STATUS_OK;
4308 * Erase flash partition.
4310 * @param[in] flash - flash structure
4311 * @param[in] type - flash partition type
4312 * @param[in] instance - flash partition instance
4313 * @param[in] cbfn - callback function
4314 * @param[in] cbarg - callback argument
4319 bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4320 u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4322 bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4323 bfa_trc(flash, type);
4324 bfa_trc(flash, instance);
4326 if (!bfa_ioc_is_operational(flash->ioc))
4327 return BFA_STATUS_IOC_NON_OP;
4329 if (flash->op_busy) {
4330 bfa_trc(flash, flash->op_busy);
4331 return BFA_STATUS_DEVBUSY;
4336 flash->cbarg = cbarg;
4338 flash->instance = instance;
4340 bfa_flash_erase_send(flash);
4341 bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4343 return BFA_STATUS_OK;
4347 * Update flash partition.
4349 * @param[in] flash - flash structure
4350 * @param[in] type - flash partition type
4351 * @param[in] instance - flash partition instance
4352 * @param[in] buf - update data buffer
4353 * @param[in] len - data buffer length
4354 * @param[in] offset - offset relative to the partition starting address
4355 * @param[in] cbfn - callback function
4356 * @param[in] cbarg - callback argument
4361 bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4362 u8 instance, void *buf, u32 len, u32 offset,
4363 bfa_cb_flash_t cbfn, void *cbarg)
4365 bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4366 bfa_trc(flash, type);
4367 bfa_trc(flash, instance);
4368 bfa_trc(flash, len);
4369 bfa_trc(flash, offset);
4371 if (!bfa_ioc_is_operational(flash->ioc))
4372 return BFA_STATUS_IOC_NON_OP;
4375 * 'len' must be on a word (4-byte) boundary
4376 * 'offset' must be on a sector (16KB) boundary
4378 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4379 return BFA_STATUS_FLASH_BAD_LEN;
4381 if (type == BFA_FLASH_PART_MFG)
4382 return BFA_STATUS_EINVAL;
4384 if (flash->op_busy) {
4385 bfa_trc(flash, flash->op_busy);
4386 return BFA_STATUS_DEVBUSY;
4391 flash->cbarg = cbarg;
4393 flash->instance = instance;
4394 flash->residue = len;
4396 flash->addr_off = offset;
4399 bfa_flash_write_send(flash);
4400 return BFA_STATUS_OK;
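/*
 * A minimal usage sketch for bfa_flash_update_part() (the partition type,
 * buffer, length and callback names are illustrative only; any partition
 * type other than BFA_FLASH_PART_MFG may be written):
 *
 *	status = bfa_flash_update_part(flash, part_type, instance,
 *				       img_buf, img_len, 0,
 *				       my_flash_cb, my_arg);
 *
 * 'img_len' must be a multiple of 4 and the offset must be 16KB aligned;
 * my_flash_cb() is called with the final status once the last chunk is
 * acknowledged by the firmware.
 */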
4404 * Read flash partition.
4406 * @param[in] flash - flash structure
4407 * @param[in] type - flash partition type
4408 * @param[in] instance - flash partition instance
4409 * @param[in] buf - read data buffer
4410 * @param[in] len - data buffer length
4411 * @param[in] offset - offset relative to the partition starting address
4412 * @param[in] cbfn - callback function
4413 * @param[in] cbarg - callback argument
4418 bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4419 u8 instance, void *buf, u32 len, u32 offset,
4420 bfa_cb_flash_t cbfn, void *cbarg)
4422 bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4423 bfa_trc(flash, type);
4424 bfa_trc(flash, instance);
4425 bfa_trc(flash, len);
4426 bfa_trc(flash, offset);
4428 if (!bfa_ioc_is_operational(flash->ioc))
4429 return BFA_STATUS_IOC_NON_OP;
4432 * 'len' must be on a word (4-byte) boundary
4433 * 'offset' must be on a sector (16KB) boundary
4435 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4436 return BFA_STATUS_FLASH_BAD_LEN;
4438 if (flash->op_busy) {
4439 bfa_trc(flash, flash->op_busy);
4440 return BFA_STATUS_DEVBUSY;
4445 flash->cbarg = cbarg;
4447 flash->instance = instance;
4448 flash->residue = len;
4450 flash->addr_off = offset;
4452 bfa_flash_read_send(flash);
4454 return BFA_STATUS_OK;
4458 * DIAG module specific
4461 #define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
4462 #define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */
4464 /* IOC event handler */
4466 bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4468 struct bfa_diag_s *diag = diag_arg;
4470 bfa_trc(diag, event);
4471 bfa_trc(diag, diag->block);
4472 bfa_trc(diag, diag->fwping.lock);
4473 bfa_trc(diag, diag->tsensor.lock);
4476 case BFA_IOC_E_DISABLED:
4477 case BFA_IOC_E_FAILED:
4478 if (diag->fwping.lock) {
4479 diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4480 diag->fwping.cbfn(diag->fwping.cbarg,
4481 diag->fwping.status);
4482 diag->fwping.lock = 0;
4485 if (diag->tsensor.lock) {
4486 diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4487 diag->tsensor.cbfn(diag->tsensor.cbarg,
4488 diag->tsensor.status);
4489 diag->tsensor.lock = 0;
4493 if (diag->timer_active) {
4494 bfa_timer_stop(&diag->timer);
4495 diag->timer_active = 0;
4498 diag->status = BFA_STATUS_IOC_FAILURE;
4499 diag->cbfn(diag->cbarg, diag->status);
4510 bfa_diag_memtest_done(void *cbarg)
4512 struct bfa_diag_s *diag = cbarg;
4513 struct bfa_ioc_s *ioc = diag->ioc;
4514 struct bfa_diag_memtest_result *res = diag->result;
4515 u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
4516 u32 pgnum, pgoff, i;
4518 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4519 pgoff = PSS_SMEM_PGOFF(loff);
4521 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4523 for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4524 sizeof(u32)); i++) {
4525 /* read test result from smem */
4526 *((u32 *) res + i) =
4527 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4528 loff += sizeof(u32);
4531 /* Reset IOC fwstates to BFI_IOC_UNINIT */
4532 bfa_ioc_reset_fwstate(ioc);
4534 res->status = swab32(res->status);
4535 bfa_trc(diag, res->status);
4537 if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4538 diag->status = BFA_STATUS_OK;
4540 diag->status = BFA_STATUS_MEMTEST_FAILED;
4541 res->addr = swab32(res->addr);
4542 res->exp = swab32(res->exp);
4543 res->act = swab32(res->act);
4544 res->err_status = swab32(res->err_status);
4545 res->err_status1 = swab32(res->err_status1);
4546 res->err_addr = swab32(res->err_addr);
4547 bfa_trc(diag, res->addr);
4548 bfa_trc(diag, res->exp);
4549 bfa_trc(diag, res->act);
4550 bfa_trc(diag, res->err_status);
4551 bfa_trc(diag, res->err_status1);
4552 bfa_trc(diag, res->err_addr);
4554 diag->timer_active = 0;
4555 diag->cbfn(diag->cbarg, diag->status);
4564 * Perform DMA test directly
4567 diag_fwping_send(struct bfa_diag_s *diag)
4569 struct bfi_diag_fwping_req_s *fwping_req;
4572 bfa_trc(diag, diag->fwping.dbuf_pa);
4574 /* fill DMA area with pattern */
4575 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4576 *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4579 fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4582 bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4583 diag->fwping.dbuf_pa);
4584 /* Set up dma count */
4585 fwping_req->count = cpu_to_be32(diag->fwping.count);
4586 /* Set up data pattern */
4587 fwping_req->data = diag->fwping.data;
4589 /* build host command */
4590 bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4591 bfa_ioc_portid(diag->ioc));
4594 bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
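/*
 * The host fills the DMA area with the test pattern and hands its address
 * and the loop count to the firmware; diag_fwping_comp() then verifies
 * both the pattern echoed in the mailbox response and the contents of the
 * DMA buffer to detect PCIe/DMA data corruption.
 */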
4598 diag_fwping_comp(struct bfa_diag_s *diag,
4599 struct bfi_diag_fwping_rsp_s *diag_rsp)
4601 u32 rsp_data = diag_rsp->data;
4602 u8 rsp_dma_status = diag_rsp->dma_status;
4604 bfa_trc(diag, rsp_data);
4605 bfa_trc(diag, rsp_dma_status);
4607 if (rsp_dma_status == BFA_STATUS_OK) {
4609 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4611 /* Check mbox data */
4612 if (diag->fwping.data != rsp_data) {
4613 bfa_trc(diag, rsp_data);
4614 diag->fwping.result->dmastatus =
4615 BFA_STATUS_DATACORRUPTED;
4616 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4617 diag->fwping.cbfn(diag->fwping.cbarg,
4618 diag->fwping.status);
4619 diag->fwping.lock = 0;
4622 /* Check dma pattern */
4623 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4624 if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4628 *((u32 *)diag->fwping.dbuf_kva + i));
4629 diag->fwping.result->dmastatus =
4630 BFA_STATUS_DATACORRUPTED;
4631 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4632 diag->fwping.cbfn(diag->fwping.cbarg,
4633 diag->fwping.status);
4634 diag->fwping.lock = 0;
4638 diag->fwping.result->dmastatus = BFA_STATUS_OK;
4639 diag->fwping.status = BFA_STATUS_OK;
4640 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4641 diag->fwping.lock = 0;
4643 diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4644 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4645 diag->fwping.lock = 0;
4650 * Temperature Sensor
4654 diag_tempsensor_send(struct bfa_diag_s *diag)
4656 struct bfi_diag_ts_req_s *msg;
4658 msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4659 bfa_trc(diag, msg->temp);
4660 /* build host command */
4661 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4662 bfa_ioc_portid(diag->ioc));
4664 bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4668 diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4670 if (!diag->tsensor.lock) {
4671 /* receiving response after ioc failure */
4672 bfa_trc(diag, diag->tsensor.lock);
4677 * ASIC junction tempsensor is a register read operation;
4678 * it will always return OK.
4680 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4681 diag->tsensor.temp->ts_junc = rsp->ts_junc;
4682 diag->tsensor.temp->ts_brd = rsp->ts_brd;
4683 diag->tsensor.temp->status = BFA_STATUS_OK;
4686 if (rsp->status == BFA_STATUS_OK) {
4687 diag->tsensor.temp->brd_temp =
4688 be16_to_cpu(rsp->brd_temp);
4690 bfa_trc(diag, rsp->status);
4691 diag->tsensor.temp->brd_temp = 0;
4692 diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
4695 bfa_trc(diag, rsp->ts_junc);
4696 bfa_trc(diag, rsp->temp);
4697 bfa_trc(diag, rsp->ts_brd);
4698 bfa_trc(diag, rsp->brd_temp);
4699 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4700 diag->tsensor.lock = 0;
4707 diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4709 struct bfi_diag_ledtest_req_s *msg;
4711 msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4712 /* build host command */
4713 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4714 bfa_ioc_portid(diag->ioc));
4717 * convert the freq from N blinks per 10 sec to
4718 * crossbow ontime value. We do it here because a division is needed.
4721 ledtest->freq = 500 / ledtest->freq;
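	/* e.g. a request of 10 blinks per 10 sec becomes 500 / 10 = 50 */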
4723 if (ledtest->freq == 0)
4726 bfa_trc(diag, ledtest->freq);
4727 /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4728 msg->cmd = (u8) ledtest->cmd;
4729 msg->color = (u8) ledtest->color;
4730 msg->portid = bfa_ioc_portid(diag->ioc);
4731 msg->led = ledtest->led;
4732 msg->freq = cpu_to_be16(ledtest->freq);
4735 bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4739 diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4741 bfa_trc(diag, diag->ledtest.lock);
4742 diag->ledtest.lock = BFA_FALSE;
4743 /* no bfa_cb_queue is needed because driver is not waiting */
4750 diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4752 struct bfi_diag_portbeacon_req_s *msg;
4754 msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
4755 /* build host command */
4756 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
4757 bfa_ioc_portid(diag->ioc));
4758 msg->beacon = beacon;
4759 msg->period = cpu_to_be32(sec);
4761 bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
4765 diag_portbeacon_comp(struct bfa_diag_s *diag)
4767 bfa_trc(diag, diag->beacon.state);
4768 diag->beacon.state = BFA_FALSE;
4769 if (diag->cbfn_beacon)
4770 diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
4774 * Diag mailbox message handler
4777 bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
4779 struct bfa_diag_s *diag = diagarg;
4781 switch (msg->mh.msg_id) {
4782 case BFI_DIAG_I2H_PORTBEACON:
4783 diag_portbeacon_comp(diag);
4785 case BFI_DIAG_I2H_FWPING:
4786 diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
4788 case BFI_DIAG_I2H_TEMPSENSOR:
4789 diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
4791 case BFI_DIAG_I2H_LEDTEST:
4792 diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
4795 bfa_trc(diag, msg->mh.msg_id);
4803 * @param[in] *diag - diag data struct
4804 * @param[in] *memtest - mem test params input from upper layer
4805 * @param[in] pattern - mem test pattern
4806 * @param[in] *result - mem test result
4807 * @param[in] cbfn - mem test callback function
4808 * @param[in] cbarg - callback function arg
4813 bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4814 u32 pattern, struct bfa_diag_memtest_result *result,
4815 bfa_cb_diag_t cbfn, void *cbarg)
4819 bfa_trc(diag, pattern);
4821 if (!bfa_ioc_adapter_is_disabled(diag->ioc))
4822 return BFA_STATUS_ADAPTER_ENABLED;
4824 /* check to see if there is another destructive diag cmd running */
4826 bfa_trc(diag, diag->block);
4827 return BFA_STATUS_DEVBUSY;
4831 diag->result = result;
4833 diag->cbarg = cbarg;
4835 /* download memtest code and take LPU0 out of reset */
4836 bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
4838 memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
4839 CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
4840 bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
4841 bfa_diag_memtest_done, diag, memtest_tov);
4842 diag->timer_active = 1;
4843 return BFA_STATUS_OK;
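/*
 * The memtest boots a dedicated firmware image (BFI_FWBOOT_TYPE_MEMTEST)
 * instead of the normal one, which is why the adapter must be disabled
 * first.  When the timer fires, bfa_diag_memtest_done() reads the result
 * block back from smem and resets the IOC firmware state to BFI_IOC_UNINIT.
 */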
4847 * DIAG firmware ping command
4849 * @param[in] *diag - diag data struct
4850 * @param[in] cnt - dma loop count for testing PCIE
4851 * @param[in] data - data pattern to pass in fw
4852 * @param[in] *result - pointer to bfa_diag_fwping_result_t data struct
4853 * @param[in] cbfn - callback function
4854 * @param[in] *cbarg - callback function arg
4859 bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
4860 struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
4864 bfa_trc(diag, data);
4866 if (!bfa_ioc_is_operational(diag->ioc))
4867 return BFA_STATUS_IOC_NON_OP;
4869 if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
4870 ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
4871 return BFA_STATUS_CMD_NOTSUPP;
4873 /* check to see if there is another destructive diag cmd running */
4874 if (diag->block || diag->fwping.lock) {
4875 bfa_trc(diag, diag->block);
4876 bfa_trc(diag, diag->fwping.lock);
4877 return BFA_STATUS_DEVBUSY;
4880 /* Initialization */
4881 diag->fwping.lock = 1;
4882 diag->fwping.cbfn = cbfn;
4883 diag->fwping.cbarg = cbarg;
4884 diag->fwping.result = result;
4885 diag->fwping.data = data;
4886 diag->fwping.count = cnt;
4888 /* Init test results */
4889 diag->fwping.result->data = 0;
4890 diag->fwping.result->status = BFA_STATUS_OK;
4892 /* kick off the first ping */
4893 diag_fwping_send(diag);
4894 return BFA_STATUS_OK;
4898 * Read Temperature Sensor
4900 * @param[in] *diag - diag data struct
4901 * @param[in] *result - pointer to bfa_diag_temp_t data struct
4902 * @param[in] cbfn - callback function
4903 * @param[in] *cbarg - callback function arg
4908 bfa_diag_tsensor_query(struct bfa_diag_s *diag,
4909 struct bfa_diag_results_tempsensor_s *result,
4910 bfa_cb_diag_t cbfn, void *cbarg)
4912 /* check to see if there is a destructive diag cmd running */
4913 if (diag->block || diag->tsensor.lock) {
4914 bfa_trc(diag, diag->block);
4915 bfa_trc(diag, diag->tsensor.lock);
4916 return BFA_STATUS_DEVBUSY;
4919 if (!bfa_ioc_is_operational(diag->ioc))
4920 return BFA_STATUS_IOC_NON_OP;
4922 /* Init diag mod params */
4923 diag->tsensor.lock = 1;
4924 diag->tsensor.temp = result;
4925 diag->tsensor.cbfn = cbfn;
4926 diag->tsensor.cbarg = cbarg;
4928 /* Send msg to fw */
4929 diag_tempsensor_send(diag);
4931 return BFA_STATUS_OK;
4937 * @param[in] *diag - diag data struct
4938 * @param[in] *ledtest - pointer to ledtest data structure
4943 bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4945 bfa_trc(diag, ledtest->cmd);
4947 if (!bfa_ioc_is_operational(diag->ioc))
4948 return BFA_STATUS_IOC_NON_OP;
4950 if (diag->beacon.state)
4951 return BFA_STATUS_BEACON_ON;
4953 if (diag->ledtest.lock)
4954 return BFA_STATUS_LEDTEST_OP;
4956 /* Send msg to fw */
4957 diag->ledtest.lock = BFA_TRUE;
4958 diag_ledtest_send(diag, ledtest);
4960 return BFA_STATUS_OK;
4964 * Port beaconing command
4966 * @param[in] *diag - diag data struct
4967 * @param[in] beacon - port beaconing 1:ON 0:OFF
4968 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
4969 * @param[in] sec - beaconing duration in seconds
4974 bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
4975 bfa_boolean_t link_e2e_beacon, uint32_t sec)
4977 bfa_trc(diag, beacon);
4978 bfa_trc(diag, link_e2e_beacon);
4981 if (!bfa_ioc_is_operational(diag->ioc))
4982 return BFA_STATUS_IOC_NON_OP;
4984 if (diag->ledtest.lock)
4985 return BFA_STATUS_LEDTEST_OP;
4987 if (diag->beacon.state && beacon) /* beacon already on */
4988 return BFA_STATUS_BEACON_ON;
4990 diag->beacon.state = beacon;
4991 diag->beacon.link_e2e = link_e2e_beacon;
4992 if (diag->cbfn_beacon)
4993 diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
4995 /* Send msg to fw */
4996 diag_portbeacon_send(diag, beacon, sec);
4998 return BFA_STATUS_OK;
5002 * Return DMA memory needed by diag module.
5005 bfa_diag_meminfo(void)
5007 return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5011 * Attach virtual and physical memory for Diag.
5014 bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5015 bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5019 diag->trcmod = trcmod;
5024 diag->result = NULL;
5025 diag->cbfn_beacon = cbfn_beacon;
5027 bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5028 bfa_q_qe_init(&diag->ioc_notify);
5029 bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5030 list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5034 bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5036 diag->fwping.dbuf_kva = dm_kva;
5037 diag->fwping.dbuf_pa = dm_pa;
5038 memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5042 * PHY module specific
5044 #define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
5045 #define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
5048 bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5052 for (i = 0; i < m; i++)
5053 obuf[i] = be32_to_cpu(ibuf[i]);
5056 static bfa_boolean_t
5057 bfa_phy_present(struct bfa_phy_s *phy)
5059 return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5063 bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5065 struct bfa_phy_s *phy = cbarg;
5067 bfa_trc(phy, event);
5070 case BFA_IOC_E_DISABLED:
5071 case BFA_IOC_E_FAILED:
5073 phy->status = BFA_STATUS_IOC_FAILURE;
5074 phy->cbfn(phy->cbarg, phy->status);
5085 * Send phy attribute query request.
5087 * @param[in] cbarg - callback argument
5090 bfa_phy_query_send(void *cbarg)
5092 struct bfa_phy_s *phy = cbarg;
5093 struct bfi_phy_query_req_s *msg =
5094 (struct bfi_phy_query_req_s *) phy->mb.msg;
5096 msg->instance = phy->instance;
5097 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5098 bfa_ioc_portid(phy->ioc));
5099 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5100 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5104 * Send phy write request.
5106 * @param[in] cbarg - callback argument
5109 bfa_phy_write_send(void *cbarg)
5111 struct bfa_phy_s *phy = cbarg;
5112 struct bfi_phy_write_req_s *msg =
5113 (struct bfi_phy_write_req_s *) phy->mb.msg;
5118 msg->instance = phy->instance;
5119 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5120 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5121 phy->residue : BFA_PHY_DMA_BUF_SZ;
5122 msg->length = cpu_to_be32(len);
5124 /* indicate if it's the last msg of the whole write operation */
5125 msg->last = (len == phy->residue) ? 1 : 0;
5127 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5128 bfa_ioc_portid(phy->ioc));
5129 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5131 buf = (u16 *) (phy->ubuf + phy->offset);
5132 dbuf = (u16 *)phy->dbuf_kva;
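	/* the write payload moves through the DMA staging buffer as 16-bit big-endian words */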
5134 for (i = 0; i < sz; i++)
5135 dbuf[i] = cpu_to_be16(buf[i]);
5137 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5139 phy->residue -= len;
5144 * Send phy read request.
5146 * @param[in] cbarg - callback argument
5149 bfa_phy_read_send(void *cbarg)
5151 struct bfa_phy_s *phy = cbarg;
5152 struct bfi_phy_read_req_s *msg =
5153 (struct bfi_phy_read_req_s *) phy->mb.msg;
5156 msg->instance = phy->instance;
5157 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5158 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5159 phy->residue : BFA_PHY_DMA_BUF_SZ;
5160 msg->length = cpu_to_be32(len);
5161 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5162 bfa_ioc_portid(phy->ioc));
5163 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5164 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5168 * Send phy stats request.
5170 * @param[in] cbarg - callback argument
5173 bfa_phy_stats_send(void *cbarg)
5175 struct bfa_phy_s *phy = cbarg;
5176 struct bfi_phy_stats_req_s *msg =
5177 (struct bfi_phy_stats_req_s *) phy->mb.msg;
5179 msg->instance = phy->instance;
5180 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5181 bfa_ioc_portid(phy->ioc));
5182 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5183 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5187 * PHY memory info API.
5189 * @param[in] mincfg - minimal cfg variable
5192 bfa_phy_meminfo(bfa_boolean_t mincfg)
5194 /* min driver doesn't need phy */
5198 return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5204 * @param[in] phy - phy structure
5205 * @param[in] ioc - ioc structure
5206 * @param[in] dev - device structure
5207 * @param[in] trcmod - trace module
5208 * @param[in] mincfg - minimal cfg variable
5211 bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5212 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5215 phy->trcmod = trcmod;
5220 bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5221 bfa_q_qe_init(&phy->ioc_notify);
5222 bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5223 list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5225 /* min driver doesn't need phy */
5227 phy->dbuf_kva = NULL;
5233 * Claim memory for phy
5235 * @param[in] phy - phy structure
5236 * @param[in] dm_kva - pointer to virtual memory address
5237 * @param[in] dm_pa - physical memory address
5238 * @param[in] mincfg - minimal cfg variable
5241 bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5242 bfa_boolean_t mincfg)
5247 phy->dbuf_kva = dm_kva;
5248 phy->dbuf_pa = dm_pa;
5249 memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5250 dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5251 dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5255 bfa_phy_busy(struct bfa_ioc_s *ioc)
5259 rb = bfa_ioc_bar0(ioc);
5260 return readl(rb + BFA_PHY_LOCK_STATUS);
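/*
 * Every bfa_phy_* request below first checks bfa_phy_present() and that
 * the IOC is operational, then checks both the driver-side op_busy flag
 * and the PHY semaphore register (bfa_phy_busy()) before building and
 * queueing a mailbox request.
 */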

/*
 * Get phy attribute.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] attr - phy attribute structure
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
		struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
	bfa_trc(phy, instance);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->ubuf = (uint8_t *) attr;
	bfa_phy_query_send(phy);

	return BFA_STATUS_OK;
}

/*
 * Get phy stats.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] stats - pointer to phy stats
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
		struct bfa_phy_stats_s *stats,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
	bfa_trc(phy, instance);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->ubuf = (u8 *) stats;
	bfa_phy_stats_send(phy);

	return BFA_STATUS_OK;
}

/*
 * Update phy image.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
		void *buf, u32 len, u32 offset,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
	bfa_trc(phy, instance);
	bfa_trc(phy, len);
	bfa_trc(phy, offset);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* 'len' must be on a word (4-byte) boundary */
	if (!len || (len & 0x03))
		return BFA_STATUS_FAILED;

	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->residue = len;
	phy->offset = 0;
	phy->addr_off = offset;
	phy->ubuf = buf;

	bfa_phy_write_send(phy);
	return BFA_STATUS_OK;
}

/*
 * Read phy image.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] buf - read data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
		void *buf, u32 len, u32 offset,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
	bfa_trc(phy, instance);
	bfa_trc(phy, len);
	bfa_trc(phy, offset);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* 'len' must be on a word (4-byte) boundary */
	if (!len || (len & 0x03))
		return BFA_STATUS_FAILED;

	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->residue = len;
	phy->offset = 0;
	phy->addr_off = offset;
	phy->ubuf = buf;
	bfa_phy_read_send(phy);

	return BFA_STATUS_OK;
}
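
/*
 * Reads larger than BFA_PHY_DMA_BUF_SZ are completed in pieces: the
 * BFI_PHY_I2H_READ_RSP branch of bfa_phy_intr() copies each chunk out of
 * the DMA buffer and issues the next bfa_phy_read_send() until residue
 * reaches zero, at which point the caller's callback is invoked.
 */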

/*
 * Process phy response messages upon receiving interrupts.
 *
 * @param[in] phyarg - phy structure
 * @param[in] msg - message structure
 */
void
bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_phy_s *phy = phyarg;
	u32	status;

	union {
		struct bfi_phy_query_rsp_s *query;
		struct bfi_phy_stats_rsp_s *stats;
		struct bfi_phy_write_rsp_s *write;
		struct bfi_phy_read_rsp_s *read;
		struct bfi_mbmsg_s   *msg;
	} m;

	m.msg = msg;
	bfa_trc(phy, msg->mh.msg_id);

	if (!phy->op_busy) {
		/* receiving response after ioc failure */
		bfa_trc(phy, 0x9999);
		return;
	}

	switch (msg->mh.msg_id) {
	case BFI_PHY_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		bfa_trc(phy, status);

		if (status == BFA_STATUS_OK) {
			struct bfa_phy_attr_s *attr =
				(struct bfa_phy_attr_s *) phy->ubuf;
			bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
					sizeof(struct bfa_phy_attr_s));
			bfa_trc(phy, attr->status);
			bfa_trc(phy, attr->length);
		}

		phy->status = status;
		phy->op_busy = 0;
		if (phy->cbfn)
			phy->cbfn(phy->cbarg, phy->status);
		break;
	case BFI_PHY_I2H_STATS_RSP:
		status = be32_to_cpu(m.stats->status);
		bfa_trc(phy, status);

		if (status == BFA_STATUS_OK) {
			struct bfa_phy_stats_s *stats =
				(struct bfa_phy_stats_s *) phy->ubuf;
			bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
					sizeof(struct bfa_phy_stats_s));
			bfa_trc(phy, stats->status);
		}

		phy->status = status;
		phy->op_busy = 0;
		if (phy->cbfn)
			phy->cbfn(phy->cbarg, phy->status);
		break;
	case BFI_PHY_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		bfa_trc(phy, status);

		if (status != BFA_STATUS_OK || phy->residue == 0) {
			phy->status = status;
			phy->op_busy = 0;
			if (phy->cbfn)
				phy->cbfn(phy->cbarg, phy->status);
		} else {
			/* this chunk is done; send the next one */
			bfa_trc(phy, phy->offset);
			bfa_phy_write_send(phy);
		}
		break;
	case BFI_PHY_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		bfa_trc(phy, status);

		if (status != BFA_STATUS_OK) {
			phy->status = status;
			phy->op_busy = 0;
			if (phy->cbfn)
				phy->cbfn(phy->cbarg, phy->status);
		} else {
			u32 len = be32_to_cpu(m.read->length);
			u16 *buf = (u16 *)(phy->ubuf + phy->offset);
			u16 *dbuf = (u16 *)phy->dbuf_kva;
			int i, sz = len >> 1;

			bfa_trc(phy, phy->offset);
			bfa_trc(phy, len);

			/* copy this chunk out of the DMA buffer */
			for (i = 0; i < sz; i++)
				buf[i] = be16_to_cpu(dbuf[i]);

			phy->residue -= len;
			phy->offset += len;

			if (phy->residue == 0) {
				phy->status = status;
				phy->op_busy = 0;
				if (phy->cbfn)
					phy->cbfn(phy->cbarg, phy->status);
			} else
				bfa_phy_read_send(phy);
		}
		break;
	default:
		WARN_ON(1);
	}
}

/*
 *	DCONF module specific
 */

BFA_MODULE(dconf);

/*
 * DCONF state machine events
 */
enum bfa_dconf_event {
	BFA_DCONF_SM_INIT		= 1,	/* dconf Init */
	BFA_DCONF_SM_FLASH_COMP		= 2,	/* read/write to flash */
	BFA_DCONF_SM_WR			= 3,	/* binding change, map */
	BFA_DCONF_SM_TIMEOUT		= 4,	/* flush timer expiry */
	BFA_DCONF_SM_EXIT		= 5,	/* exit dconf module */
	BFA_DCONF_SM_IOCDISABLE		= 6,	/* IOC disable event */
};
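
/*
 * DCONF state machine overview:
 *
 *   uninit --INIT--> flash_read --FLASH_COMP--> ready
 *   ready --WR--> dirty --TIMEOUT--> sync --FLASH_COMP--> ready
 *   dirty/sync --EXIT--> final_sync --FLASH_COMP/TIMEOUT--> uninit
 *   dirty/sync --IOCDISABLE--> iocdown_dirty --INIT--> dirty
 */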

/* forward declaration of DCONF state machine */
static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);

static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
static void bfa_dconf_timer(void *cbarg);
static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
static void bfa_dconf_init_cb(void *arg, bfa_status_t status);

/*
 * Beginning state of dconf module. Waiting for an event to start.
 */
static void
bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_status_t bfa_status;
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_INIT:
		if (dconf->min_cfg) {
			bfa_trc(dconf->bfa, dconf->min_cfg);
			bfa_fsm_send_event(&dconf->bfa->iocfc,
					IOCFC_E_DCONF_DONE);
			return;
		}
		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
					BFA_FLASH_PART_DRV, dconf->instance,
					dconf->dconf,
					sizeof(struct bfa_dconf_s), 0,
					bfa_dconf_init_cb, dconf->bfa);
		if (bfa_status != BFA_STATUS_OK) {
			bfa_timer_stop(&dconf->timer);
			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
			bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
			return;
		}
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
		/* fall through */
	case BFA_DCONF_SM_IOCDISABLE:
	case BFA_DCONF_SM_WR:
	case BFA_DCONF_SM_FLASH_COMP:
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}

/*
 * Read flash for dconf entries and make a call back to the driver once done.
 */
static void
bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
			enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_FLASH_COMP:
		bfa_timer_stop(&dconf->timer);
		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
		break;
	case BFA_DCONF_SM_TIMEOUT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED);
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_timer_stop(&dconf->timer);
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		bfa_timer_stop(&dconf->timer);
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}

/*
 * DCONF Module is in ready state. Has completed the initialization.
 */
static void
bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_WR:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
		break;
	case BFA_DCONF_SM_INIT:
	case BFA_DCONF_SM_IOCDISABLE:
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}

/*
 * DCONF entries are dirty; write them back to the flash.
 */
static void
bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_TIMEOUT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
		bfa_dconf_flash_write(dconf);
		break;
	case BFA_DCONF_SM_WR:
		bfa_timer_stop(&dconf->timer);
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_timer_stop(&dconf->timer);
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
		bfa_dconf_flash_write(dconf);
		break;
	case BFA_DCONF_SM_FLASH_COMP:
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		bfa_timer_stop(&dconf->timer);
		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}

/*
 * Sync the dconf entries to the flash.
 */
static void
bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
			enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_IOCDISABLE:
	case BFA_DCONF_SM_FLASH_COMP:
		bfa_timer_stop(&dconf->timer);
		/* fall through */
	case BFA_DCONF_SM_TIMEOUT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}

static void
bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_FLASH_COMP:
		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
		break;
	case BFA_DCONF_SM_WR:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}

static void
bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
			enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_INIT:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}

/*
 * Compute and return memory needed by DRV_CFG module.
 */
static void
bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		  struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);

	if (cfg->drvcfg.min_cfg)
		bfa_mem_kva_setup(meminfo, dconf_kva,
				sizeof(struct bfa_dconf_hdr_s));
	else
		bfa_mem_kva_setup(meminfo, dconf_kva,
				sizeof(struct bfa_dconf_s));
}

static void
bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	dconf->bfad = bfad;
	dconf->bfa = bfa;
	dconf->instance = bfa->ioc.port_id;
	bfa_trc(bfa, dconf->instance);

	dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
	if (cfg->drvcfg.min_cfg) {
		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
		dconf->min_cfg = BFA_TRUE;
	} else {
		dconf->min_cfg = BFA_FALSE;
		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
	}

	bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
	bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
}

static void
bfa_dconf_init_cb(void *arg, bfa_status_t status)
{
	struct bfa_s *bfa = arg;
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
	if (status == BFA_STATUS_OK) {
		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
			dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
	}
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
}
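
/*
 * Note: if the flash read succeeds but the dconf header signature or
 * version does not match the driver's, the header is simply re-stamped
 * here; the read data itself is kept as-is.
 */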

void
bfa_dconf_modinit(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
	bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
}

static void
bfa_dconf_start(struct bfa_s *bfa)
{
}

static void
bfa_dconf_stop(struct bfa_s *bfa)
{
}

static void bfa_dconf_timer(void *cbarg)
{
	struct bfa_dconf_mod_s *dconf = cbarg;
	bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
}

static void
bfa_dconf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
	bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
}

static void
bfa_dconf_detach(struct bfa_s *bfa)
{
}

static bfa_status_t
bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
{
	bfa_status_t bfa_status;
	bfa_trc(dconf->bfa, 0);

	bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
				BFA_FLASH_PART_DRV, dconf->instance,
				dconf->dconf, sizeof(struct bfa_dconf_s), 0,
				bfa_dconf_cbfn, dconf);
	WARN_ON(bfa_status != BFA_STATUS_OK);
	bfa_trc(dconf->bfa, bfa_status);

	return bfa_status;
}
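
/*
 * The flash update completes asynchronously; bfa_dconf_cbfn() feeds the
 * result back into the state machine as BFA_DCONF_SM_FLASH_COMP.
 */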

bfa_status_t
bfa_dconf_update(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
	bfa_trc(dconf->bfa, 0);
	if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
		return BFA_STATUS_FAILED;

	if (dconf->min_cfg) {
		bfa_trc(dconf->bfa, dconf->min_cfg);
		return BFA_STATUS_FAILED;
	}

	bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
	return BFA_STATUS_OK;
}
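
/*
 * Callers mark the in-memory dconf dirty with bfa_dconf_update(); the
 * actual flash write is deferred by BFA_DCONF_UPDATE_TOV so that bursts
 * of updates collapse into a single BFA_FLASH_PART_DRV write.
 */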

static void
bfa_dconf_cbfn(void *arg, bfa_status_t status)
{
	struct bfa_dconf_mod_s *dconf = arg;
	WARN_ON(status);
	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
}

void
bfa_dconf_modexit(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);