/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"

/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);

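/* hardware interface function table for Catapult (CT) ASICs */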
struct bfa_ioc_hwif hwif_ct;

/**
 * Called from bfa_ioc_attach() to map ASIC-specific calls.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
        hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
        hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
        hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
        hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
        hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
        hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
        hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
        hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;

        ioc->ioc_hwif = &hwif_ct;
}

/**
 * Return true if the firmware of the current driver matches the running firmware.
 */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
        enum bfi_ioc_state ioc_fwstate;
        u32 usecnt;
        struct bfi_ioc_image_hdr fwhdr;

        /*
         * Firmware match check is relevant only for CNA.
         */
        if (!ioc->cna)
                return true;

        /*
         * If BIOS boot (flash-based), do not increment usage count
         */
        if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return true;

        bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

        /*
         * If usage count is 0, always return true.
         */
        if (usecnt == 0) {
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                return true;
        }

        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        /*
         * Use count cannot be non-zero while the chip is still uninitialized.
         */
        BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);

        /*
         * Check if another driver with a different firmware is active
         */
        bfa_ioc_fwver_get(ioc, &fwhdr);
        if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
                bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                return false;
        }

        /*
         * Same firmware version. Increment the reference count.
         */
        usecnt++;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        return true;
}

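/**
 * Release the firmware usage count taken in bfa_ioc_ct_firmware_lock().
 */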
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
        u32 usecnt;

        /*
         * Firmware lock is relevant only for CNA.
         */
        if (!ioc->cna)
                return;

        /*
         * If BIOS boot (flash-based), do not decrement usage count
         */
        if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return;

        /*
         * Decrement usage count
         */
        bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
        BUG_ON(usecnt == 0);

        usecnt--;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);

        bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/**
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc)
{
        if (ioc->cna) {
                writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
                /* Wait for halt to take effect */
                readl(ioc->ioc_regs.ll_halt);
        } else {
                writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
                readl(ioc->ioc_regs.err_set);
        }
}

/**
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
        { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
        { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/**
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
        { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
        { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
        { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
        { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};

/**
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
        { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
        { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
        { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
        { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
};

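/**
 * Initialize the IOC register address map for this PCI function and port.
 */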
static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
        void __iomem *rb;
        int             pcifn = bfa_ioc_pcifn(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

        if (ioc->port_id == 0) {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
        } else {
                ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
                ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
                ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
        ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
        ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
        ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
        ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
        ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
        ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);

        /*
         * SRAM memory access
         */
        ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err_set register: used to notify heartbeat failure in fcmode
         */
        ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

/**
 * Initialize IOC to port mapping.
 */

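/* each PCI function has an 8-bit field in the FNC_PERS personality register */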
#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32;

        /*
         * For Catapult, base the port id on the personality register and IOC type
         */
        r32 = readl(rb + FNC_PERS_REG);
        r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
        ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}

/**
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32, mode;

        r32 = readl(rb + FNC_PERS_REG);

        mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
                __F0_INTX_STATUS;

        /*
         * If already in desired mode, do not change anything
         */
        if (!msix && mode)
                return;

        if (msix)
                mode = __F0_INTX_STATUS_MSIX;
        else
                mode = __F0_INTX_STATUS_INTA;

        r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

        writel(r32, rb + FNC_PERS_REG);
}

/**
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
        if (ioc->cna) {
                bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_usage_reg);
                bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        }

        /*
         * Read the hw sem reg to make sure that it is locked
         * before we clear it. If it is not locked, writing 1
         * will lock it instead of clearing it.
         */
        readl(ioc->ioc_regs.ioc_sem_reg);
        bfa_ioc_hw_sem_release(ioc);
}

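/**
 * Bring up the ASIC clocks and memories: select the operating mode (FC vs.
 * Ethernet), program the slow (APP_PLL_312) and fast (APP_PLL_425)
 * application PLLs through a logic soft reset/enable sequence, release the
 * local memory reset and run the eDRAM memory BIST.
 */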
enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
{
        u32     pll_sclk, pll_fclk, r32;

        pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
                __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
                __APP_PLL_312_JITLMT0_1(3U) |
                __APP_PLL_312_CNTLMT0_1(1U);
        pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
                __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
                __APP_PLL_425_JITLMT0_1(3U) |
                __APP_PLL_425_CNTLMT0_1(1U);
        if (fcmode) {
                writel(0, (rb + OP_MODE));
                writel(__APP_EMS_CMLCKSEL |
                                __APP_EMS_REFCKBUFEN2 |
                                __APP_EMS_CHANNEL_SEL,
                                (rb + ETH_MAC_SER_REG));
        } else {
                writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
                writel(__APP_EMS_REFCKBUFEN1,
                                (rb + ETH_MAC_SER_REG));
        }
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
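        /* mask host function interrupts and clear any pending status on both functions */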
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
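        /* hold the PLL logic in soft reset, then enable both PLLs */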
        writel(pll_sclk |
                __APP_PLL_312_LOGIC_SOFT_RESET,
                rb + APP_PLL_312_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_425_LOGIC_SOFT_RESET,
                rb + APP_PLL_425_CTL_REG);
        writel(pll_sclk |
                __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
                rb + APP_PLL_312_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
                rb + APP_PLL_425_CTL_REG);
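        /* flush the MMIO writes and give the PLLs time to settle */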
        readl(rb + HOSTFN0_INT_MSK);
        udelay(2000);
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
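        /* release the logic soft resets, leaving both PLLs enabled */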
        writel(pll_sclk |
                __APP_PLL_312_ENABLE,
                rb + APP_PLL_312_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_425_ENABLE,
                rb + APP_PLL_425_CTL_REG);
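        /* in non-FC mode, hold the per-port PMM 1T memories in reset across the LMEM reset release */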
        if (!fcmode) {
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
        }
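        /* release the local memory (LMEM) reset in the PSS controller */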
        r32 = readl((rb + PSS_CTL_REG));
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, (rb + PSS_CTL_REG));
        udelay(1000);
        if (!fcmode) {
                writel(0, (rb + PMM_1T_RESET_REG_P0));
                writel(0, (rb + PMM_1T_RESET_REG_P1));
        }

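        /* run the built-in self test on the eDRAM */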
        writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
        udelay(1000);
        r32 = readl((rb + MBIST_STAT_REG));
        writel(0, (rb + MBIST_CTL_REG));
        return BFA_STATUS_OK;
}