/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"

/**
 * IOC local definitions
 */

#define bfa_ioc_timer_start(__ioc) \
	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
		msecs_to_jiffies(BFA_IOC_TOV))
#define bfa_ioc_timer_stop(__ioc)	del_timer(&(__ioc)->ioc_timer)

#define bfa_ioc_recovery_timer_start(__ioc) \
	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
		msecs_to_jiffies(BFA_IOC_TOV_RECOVER))

#define bfa_sem_timer_start(__ioc) \
	mod_timer(&(__ioc)->sem_timer, jiffies +	\
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
#define bfa_sem_timer_stop(__ioc)	del_timer(&(__ioc)->sem_timer)

#define bfa_hb_timer_start(__ioc) \
	mod_timer(&(__ioc)->hb_timer, jiffies +	\
		msecs_to_jiffies(BFA_IOC_HB_TOV))
#define bfa_hb_timer_stop(__ioc)	del_timer(&(__ioc)->hb_timer)

/**
 * Asic specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)		\
	((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)		\
	((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)		\
	((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))

#define bfa_ioc_is_optrom(__ioc)	\
	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
	readl((__ioc)->ioc_regs.hfn_mbox_cmd))

static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
			 u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
			 char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
			 char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
			 char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
			 char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
			 char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);

/**
 * IOC state machine events
 */
enum ioc_event {
	IOC_E_ENABLE		= 1,	/*!< IOC enable request */
	IOC_E_DISABLE		= 2,	/*!< IOC disable request */
	IOC_E_TIMEOUT		= 3,	/*!< f/w response timeout */
	IOC_E_FWREADY		= 4,	/*!< f/w initialization done */
	IOC_E_FWRSP_GETATTR	= 5,	/*!< IOC get attribute response */
	IOC_E_FWRSP_ENABLE	= 6,	/*!< enable f/w response */
	IOC_E_FWRSP_DISABLE	= 7,	/*!< disable f/w response */
	IOC_E_HBFAIL		= 8,	/*!< heartbeat failure */
	IOC_E_HWERROR		= 9,	/*!< hardware error interrupt */
	IOC_E_SEMLOCKED		= 10,	/*!< h/w semaphore is locked */
	IOC_E_DETACH		= 11,	/*!< driver detach cleanup */
};

bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);

static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	ioc->retry_count = 0;
	ioc->auto_recover = bfa_nw_auto_recover;
}

/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			ioc->retry_count = 0;
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
		}
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/* fall through */

	case IOC_E_DETACH:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Notify enable completion callback and generate mismatch AEN.
 */
static void
bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification only once.
	 */
	if (ioc->retry_count == 0)
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	ioc->retry_count++;
	bfa_ioc_timer_start(ioc);
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/* fall through */

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Request for semaphore.
 */
static void
bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_SEMLOCKED:
		ioc->retry_count = 0;
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

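/**
 * Entry actions -- start the IOC timer and reset the h/w, with the
 * h/w semaphore held.
 */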
static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_reset(ioc, false);
}

/**
 * @brief
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWREADY:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_ioc_timer_start(ioc);
			bfa_ioc_reset(ioc, true);
			break;
		}

		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

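/**
 * Entry actions -- start the IOC timer and send an enable request
 * to firmware.
 */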
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_ENABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			writel(BFI_IOC_UNINIT,
				      ioc->ioc_regs.ioc_fwstate);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
			break;
		}

		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

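/**
 * Entry actions -- start the IOC timer and request IOC attributes
 * from firmware.
 */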
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/**
 * @brief
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

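/**
 * Entry actions -- notify enable completion to the driver and start
 * heartbeat monitoring. IOC is operational.
 */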
static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
}

static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
	case IOC_E_FWREADY:
		/**
		 * Hard error or IOC recovery by other function.
		 * Treat it same as heartbeat failure.
		 */
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

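/**
 * Entry actions -- start the IOC timer and send a disable request
 * to firmware.
 */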
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

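/**
 * IOC is disabled. Awaiting re-enable or driver detach.
 */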
static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_FWREADY:
		break;

	case IOC_E_DETACH:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

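/**
 * Entry actions -- notify enable failure to the driver and start the
 * retry timer.
 */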
static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	bfa_ioc_timer_start(ioc);
}

/**
 * @brief
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify *notify;

	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(ioc);
	writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);

	/**
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(ioc);

	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify *) qe;
		notify->cbfn(notify->cbarg);
	}

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(ioc);

	/**
	 * Trigger auto-recovery after a delay.
	 */
	if (ioc->auto_recover)
		mod_timer(&ioc->ioc_timer, jiffies +
			msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
}

/**
 * @brief
 * IOC heartbeat failure.
 */
static void
bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		if (ioc->auto_recover)
			bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_FWREADY:
		/**
		 * Recovery is already initiated by other function.
		 */
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify *notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/**
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify *) qe;
		notify->cbfn(notify->cbarg);
	}
}

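/**
 * Semaphore poll timer callback -- retry the h/w semaphore get.
 */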
void
bfa_nw_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

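/**
 * Busy-wait (up to BFA_SEM_SPINCNT spins) for a h/w semaphore.
 * Returns true if the semaphore was acquired (register read 0).
 */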
bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (r32 == 0)
		return true;

	BUG_ON(!(cnt < BFA_SEM_SPINCNT));
	return false;
}

void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	writel(1, sem_reg);
}

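/**
 * Attempt to claim the per-IOC h/w semaphore. On success, post
 * IOC_E_SEMLOCKED to the state machine; otherwise arm the semaphore
 * poll timer.
 */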
static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32 r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}

/**
 * @brief
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/**
 * Get driver and firmware versions.
 */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32 pgnum, pgoff;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		loff += sizeof(u32);
	}
}

/**
 * Returns TRUE if the driver and running firmware versions are the same.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
			return false;
	}

	return true;
}

/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;

	/**
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_is_optrom(ioc))
		return true;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature)
		return false;

	if (fwhdr.exec != drv_fwhdr->exec)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}

/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

/**
 * @img ioc_init_logic.jpg
 */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED ||
	    (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}

void
bfa_nw_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

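/**
 * Write a command to the host-to-firmware mailbox registers and ring
 * the doorbell. Unused mailbox words are zeroed.
 */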
static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			      ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

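/**
 * Send an IOC enable request to firmware, stamped with the current
 * time of day.
 */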
static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	do_gettimeofday(&tv);
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

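/**
 * Heartbeat timer callback -- recover the IOC if the firmware
 * heartbeat counter has not advanced since the last check; otherwise
 * poll the mailbox and re-arm the timer.
 */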
void
bfa_nw_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc *ioc = cbarg;
	u32 hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		pr_crit("Firmware heartbeat failure at %d\n", hb_count);
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}

/**
 * @brief
 *	Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_param)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	/**
	 * Flash based firmware boot
	 */
	if (bfa_ioc_is_optrom(ioc))
		boot_type = BFI_BOOT_TYPE_FLASH;
	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
			      ((ioc->ioc_regs.smem_page_start) + (loff)));

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
				      ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(bfa_ioc_smem_pgnum(ioc, 0),
		      ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and boot param at the end.
	 */
	writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
			+ (BFI_BOOT_TYPE_OFF)));
	writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
			+ (BFI_BOOT_PARAM_OFF)));
}

static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}

/**
 * @brief
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop = ntohl(attr->adapter_prop);
	attr->card_type = ntohl(attr->card_type);
	attr->maxfrsize = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	int mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	u32 stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/**
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

/**
 * IOC public
 */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;
	/*
	 * release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}

/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static void
bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
{
	void __iomem *rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
	} else {
		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_param);

	/**
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}

/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
	bfa_nw_auto_recover = auto_recover;
}

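/**
 * Read one incoming message from the firmware-to-host mailbox and
 * clear the mailbox interrupt.
 */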
static void
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
	u32 *msgp = mbmsg;
	u32 r32;
	int i;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			      i * sizeof(u32));
		msgp[i] = htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);
}

static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
	union bfi_ioc_i2h_msg_u *msg;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		BUG_ON(1);
	}
}

/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 * @param[in]	cbfn	callbacks invoked on IOC events
 */
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->fcmode	= false;
	ioc->pllinit	= false;
	ioc->dbg_fwsave_once = true;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
}

/**
 * Driver detach time IOC cleanup.
 */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}

/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
		    enum bfi_mclass mc)
{
	ioc->ioc_mc	= mc;
	ioc->pcidev	= *pcidev;
	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna	= ioc->ctdev && !ioc->fcmode;

	bfa_nw_ioc_set_ct_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}

/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}

/**
 * Return size of dma memory required.
 */
u32
bfa_nw_ioc_meminfo(void)
{
	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
}

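/**
 * Request IOC enable -- posts IOC_E_ENABLE to the state machine.
 */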
void
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_nw_ioc_disable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

static u32
bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGOFF(fmaddr);
}

/**
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		       bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn	= cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}

/**
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	u32 stat;

	/**
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/**
 * Handle mailbox interrupts
 */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfi_mbmsg m;
	int mc;

	bfa_ioc_msgget(ioc, &m);

	/**
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}

void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

/**
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 */
void
bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
			   struct bfa_ioc_hbfail_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->hb_notify_q);
}

#define BFA_MFG_NAME "Brocade"
static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
			 struct bfa_adapter_attr *ad_attr)
{
	struct bfi_ioc_attr *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac  = bfa_nw_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
}

static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
		return BFA_IOC_TYPE_LL;
	}
}

static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
	memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy(serial_num,
			(void *)ioc->attr->brcd_serialnum,
			BFA_ADAPTER_SERIAL_NUM_LEN);
}

static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memset(fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

static void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
{
	BUG_ON(!(chip_rev));

	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memset(optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
		      BFA_VERSION_LEN);
}

static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
	memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	BUG_ON(!(model));
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/**
	 * model name
	 */
	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}

static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
}

void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

/**
 * WWN public
 */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}

mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
	/*
	 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
		return bfa_ioc_get_mfg_mac(ioc);
	else
		return ioc->attr->mac;
}

static mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
{
	mac_t m;

	m = ioc->attr->mfg_mac;
	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
	else
		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
			bfa_ioc_pcifn(ioc));

	return m;
}

/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

static void
bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}