]>
Commit | Line | Data |
---|---|---|
a9083016 GM |
1 | /* |
2 | * QLogic Fibre Channel HBA Driver | |
de7c5d05 | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
a9083016 GM |
4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | |
6 | */ | |
7 | #include "qla_def.h" | |
8 | #include <linux/delay.h> | |
9 | #include <linux/pci.h> | |
10 | ||
/* Bit-mask of the low n bits. */
#define MASK(n)			((1ULL<<(n))-1)
/* Window selector for DDR (MN) side memory: fold address bits into the
 * window register layout. */
#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
	((addr >> 25) & 0x3ff))
/* Window selector for on-chip memory (OCM). */
#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
	((addr >> 25) & 0x3ff))
/* Window selector for QDR (MS) side memory. */
#define MS_WIN(addr) (addr & 0x0ffc0000)
/* Fixed offsets of the memory regions inside the 2M BAR. */
#define QLA82XX_PCI_MN_2M	(0)
#define QLA82XX_PCI_MS_2M	(0x80000)
#define QLA82XX_PCI_OCM0_2M	(0xc0000)
/* OCM addresses with bits 17:11 all set collide with the QM region. */
#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
/* Offset of an address within its 256K (2M-map) window. */
#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
#define BLOCK_PROTECT_BITS 0x0F

/* CRB window related */
/* 1M block index (0..63) and 64K sub-block index (0..15) of a CRB
 * offset in the 128M pci map. */
#define CRB_BLK(off)	((off >> 20) & 0x3f)
#define CRB_SUBBLK(off)	((off >> 16) & 0xf)
/* Register that selects the active CRB window in the 2M BAR. */
#define CRB_WINDOW_2M	(0x130060)
#define QLA82XX_PCI_CAMQM_2M_END	(0x04800800UL)
/* Value to program into CRB_WINDOW_2M for a given 128M-map offset:
 * hub/agent id (from qla82xx_crb_hub_agt) plus the 64K sub-block. */
#define CRB_HI(off)	((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
			((off) & 0xf0000))
#define QLA82XX_PCI_CAMQM_2M_BASE	(0x000ff800UL)
/* Base of the indirect (windowed) access region inside the 2M BAR. */
#define CRB_INDIRECT_2M	(0x1e0000UL)
33 | ||
a9083016 GM |
#define MAX_CRB_XFORM 60
/* Maps a 1M CRB region base (addr & 0xfff00000) to its block index;
 * filled in lazily by qla82xx_crb_addr_transform_setup(). */
static unsigned long crb_addr_xform[MAX_CRB_XFORM];
int qla82xx_crb_table_initialized;

/* Record the hub/agent address of CRB block `name' in the transform
 * table, keyed by the block's map index. */
#define qla82xx_crb_addr_transform(name) \
	(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
	QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)

/*
 * Populate crb_addr_xform[] with one entry per known CRB agent so
 * that qla82xx_decode_crb_addr() can translate agent-relative
 * addresses.  Runs once; guarded by qla82xx_crb_table_initialized.
 */
static void qla82xx_crb_addr_transform_setup(void)
{
	qla82xx_crb_addr_transform(XDMA);
	qla82xx_crb_addr_transform(TIMR);
	qla82xx_crb_addr_transform(SRE);
	qla82xx_crb_addr_transform(SQN3);
	qla82xx_crb_addr_transform(SQN2);
	qla82xx_crb_addr_transform(SQN1);
	qla82xx_crb_addr_transform(SQN0);
	qla82xx_crb_addr_transform(SQS3);
	qla82xx_crb_addr_transform(SQS2);
	qla82xx_crb_addr_transform(SQS1);
	qla82xx_crb_addr_transform(SQS0);
	qla82xx_crb_addr_transform(RPMX7);
	qla82xx_crb_addr_transform(RPMX6);
	qla82xx_crb_addr_transform(RPMX5);
	qla82xx_crb_addr_transform(RPMX4);
	qla82xx_crb_addr_transform(RPMX3);
	qla82xx_crb_addr_transform(RPMX2);
	qla82xx_crb_addr_transform(RPMX1);
	qla82xx_crb_addr_transform(RPMX0);
	qla82xx_crb_addr_transform(ROMUSB);
	qla82xx_crb_addr_transform(SN);
	qla82xx_crb_addr_transform(QMN);
	qla82xx_crb_addr_transform(QMS);
	qla82xx_crb_addr_transform(PGNI);
	qla82xx_crb_addr_transform(PGND);
	qla82xx_crb_addr_transform(PGN3);
	qla82xx_crb_addr_transform(PGN2);
	qla82xx_crb_addr_transform(PGN1);
	qla82xx_crb_addr_transform(PGN0);
	qla82xx_crb_addr_transform(PGSI);
	qla82xx_crb_addr_transform(PGSD);
	qla82xx_crb_addr_transform(PGS3);
	qla82xx_crb_addr_transform(PGS2);
	qla82xx_crb_addr_transform(PGS1);
	qla82xx_crb_addr_transform(PGS0);
	qla82xx_crb_addr_transform(PS);
	qla82xx_crb_addr_transform(PH);
	qla82xx_crb_addr_transform(NIU);
	qla82xx_crb_addr_transform(I2Q);
	qla82xx_crb_addr_transform(EG);
	qla82xx_crb_addr_transform(MN);
	qla82xx_crb_addr_transform(MS);
	qla82xx_crb_addr_transform(CAS2);
	qla82xx_crb_addr_transform(CAS1);
	qla82xx_crb_addr_transform(CAS0);
	qla82xx_crb_addr_transform(CAM);
	qla82xx_crb_addr_transform(C2C1);
	qla82xx_crb_addr_transform(C2C0);
	qla82xx_crb_addr_transform(SMB);
	qla82xx_crb_addr_transform(OCM0);
	/*
	 * Used only in P3 just define it for P2 also.
	 */
	qla82xx_crb_addr_transform(I2C0);

	qla82xx_crb_table_initialized = 1;
}
101 | ||
/*
 * Direct-mapping table: for each of the 64 1M CRB blocks of the 128M
 * pci map, up to 16 sub-block descriptors {valid, start_128M,
 * end_128M, start_2M}.  qla82xx_pci_get_crb_addr_2M() relocates an
 * offset falling inside a valid [start_128M, end_128M) range to
 * start_2M in the 2M BAR; anything else goes through the CRB window.
 */
struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
	{{{0, 0, 0, 0} } },
	{{{1, 0x0100000, 0x0102000, 0x120000},
	{1, 0x0110000, 0x0120000, 0x130000},
	{1, 0x0120000, 0x0122000, 0x124000},
	{1, 0x0130000, 0x0132000, 0x126000},
	{1, 0x0140000, 0x0142000, 0x128000},
	{1, 0x0150000, 0x0152000, 0x12a000},
	{1, 0x0160000, 0x0170000, 0x110000},
	{1, 0x0170000, 0x0172000, 0x12e000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x01e0000, 0x01e0800, 0x122000},
	{0, 0x0000000, 0x0000000, 0x000000} } } ,
	{{{1, 0x0200000, 0x0210000, 0x180000} } },
	{{{0, 0, 0, 0} } },
	{{{1, 0x0400000, 0x0401000, 0x169000} } },
	{{{1, 0x0500000, 0x0510000, 0x140000} } },
	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },
	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },
	{{{1, 0x0800000, 0x0802000, 0x170000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x08f0000, 0x08f2000, 0x172000} } },
	{{{1, 0x0900000, 0x0902000, 0x174000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x09f0000, 0x09f2000, 0x176000} } },
	{{{0, 0x0a00000, 0x0a02000, 0x178000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x0af0000, 0x0af2000, 0x17a000} } },
	{{{0, 0x0b00000, 0x0b02000, 0x17c000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },
	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },
	{{{1, 0x1100000, 0x1101000, 0x160000} } },
	{{{1, 0x1200000, 0x1201000, 0x161000} } },
	{{{1, 0x1300000, 0x1301000, 0x162000} } },
	{{{1, 0x1400000, 0x1401000, 0x163000} } },
	{{{1, 0x1500000, 0x1501000, 0x165000} } },
	{{{1, 0x1600000, 0x1601000, 0x166000} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },
	{{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
	{{{1, 0x1f00000, 0x1f10000, 0x150000} } },
	{{{0} } },
	{{{1, 0x2100000, 0x2102000, 0x120000},
	{1, 0x2110000, 0x2120000, 0x130000},
	{1, 0x2120000, 0x2122000, 0x124000},
	{1, 0x2130000, 0x2132000, 0x126000},
	{1, 0x2140000, 0x2142000, 0x128000},
	{1, 0x2150000, 0x2152000, 0x12a000},
	{1, 0x2160000, 0x2170000, 0x110000},
	{1, 0x2170000, 0x2172000, 0x12e000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },
	{{{0} } },
	{{{0} } },
	{{{0} } },
	{{{0} } },
	{{{0} } },
	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },
	{{{1, 0x2900000, 0x2901000, 0x16b000} } },
	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },
	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },
	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },
	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },
	{{{0} } },
	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },
	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },
	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },
	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },
	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },
	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
	{{{0} } },
	{{{0} } },
	{{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },
	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }
};
258 | ||
/*
 * top 12 bits of crb internal address (hub, agent)
 *
 * Indexed by CRB_BLK(off) (the 1M block number of a 128M-map offset);
 * consumed by the CRB_HI() macro when programming the CRB window
 * register.  Zero entries are blocks with no agent mapping.
 */
unsigned qla82xx_crb_hub_agt[64] = {
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
	QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
	QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
	QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
	QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
	QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
	0,
	0,
	0,
	0,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
	QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
	0,
};
328 | ||
f1af6208 GM |
/* Device states */
/* Human-readable names, indexed by the numeric IDC device-state value.
 * NOTE(review): assumes state codes are 0..7 — callers must bounds-check
 * before indexing; verify against the QLA82XX_DEV_* definitions. */
char *qdev_state[] = {
	"Unknown",
	"Cold",
	"Initializing",
	"Ready",
	"Need Reset",
	"Need Quiescent",
	"Failed",
	"Quiescent",
};
340 | ||
a9083016 GM |
/*
 * In: 'off' is offset from CRB space in 128M pci map
 * Out: 'off' is 2M pci map addr
 * side effect: lock crb window
 */
static void
qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
{
	u32 win_read;

	/* Select the window (hub/agent + 64K sub-block) for *off. */
	ha->crb_win = CRB_HI(*off);
	writel(ha->crb_win,
		(void *)(CRB_WINDOW_2M + ha->nx_pcibase));

	/* Read back value to make sure write has gone through before trying
	 * to use it.
	 */
	win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
	if (win_read != ha->crb_win) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "%s: Written crbwin (0x%x) != Read crbwin (0x%x), "
		    "off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
	}
	/* Rewrite *off as a CPU address: the offset's low 16 bits,
	 * relocated into the indirect-access region of the 2M BAR. */
	*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
366 | ||
/*
 * Legacy (128M-map) CRB window adjustment: returns the possibly
 * relocated offset for the current window setting.
 *
 * NOTE(review): the first range check [PCIX_HOST, DDR_NET) appears to
 * subsume the second [PCIX_HOST, PCIX_HOST2) when DDR_NET >=
 * PCIX_HOST2, which would make the second branch unreachable — the
 * range constants are defined elsewhere, so confirm before changing.
 */
static inline unsigned long
qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
{
	/* See if we are currently pointing to the region we want to use next */
	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
		/* No need to change window. PCIX and PCIEregs are in both
		 * regs are in both windows.
		 */
		return off;
	}

	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
		/* We are in first CRB window */
		if (ha->curr_window != 0)
			WARN_ON(1);
		return off;
	}

	if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
		/* We are in second CRB window */
		off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;

		if (ha->curr_window != 1)
			return off;

		/* We are in the QM or direct access
		 * register region - do nothing
		 */
		if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
			(off < QLA82XX_PCI_CAMQM_MAX))
			return off;
	}
	/* strange address given */
	qla_printk(KERN_WARNING, ha,
	    "%s: Warning: unm_nic_pci_set_crbwindow called with"
	    " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off);
	return off;
}
405 | ||
/*
 * Write a 32-bit value to a CRB register identified by its 128M-map
 * offset.  Directly-mapped offsets are written straight through; others
 * take hw_lock + the CRB window hardware semaphore, switch the window,
 * write, then release.  Always returns 0; BUGs on an invalid offset.
 */
int
qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
{
	unsigned long flags = 0;
	int rv;

	/* rv: 0 = off rewritten to a direct CPU address,
	 *     1 = must go through the CRB window, -1 = bad offset. */
	rv = qla82xx_pci_get_crb_addr_2M(ha, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		write_lock_irqsave(&ha->hw_lock, flags);
		qla82xx_crb_win_lock(ha);
		qla82xx_pci_set_crbwindow_2M(ha, &off);
	}

	writel(data, (void __iomem *)off);

	if (rv == 1) {
		/* Reading the unlock register releases SEM7. */
		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
		write_unlock_irqrestore(&ha->hw_lock, flags);
	}
	return 0;
}
430 | ||
/*
 * Read a 32-bit CRB register identified by its 128M-map offset; the
 * windowed-access counterpart of qla82xx_wr_32().  Returns the value
 * read (declared int, carries a u32); BUGs on an invalid offset.
 */
int
qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
{
	unsigned long flags = 0;
	int rv;
	u32 data;

	/* rv: 0 = direct-mapped, 1 = needs CRB window, -1 = bad offset. */
	rv = qla82xx_pci_get_crb_addr_2M(ha, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		write_lock_irqsave(&ha->hw_lock, flags);
		qla82xx_crb_win_lock(ha);
		qla82xx_pci_set_crbwindow_2M(ha, &off);
	}
	data = RD_REG_DWORD((void __iomem *)off);

	if (rv == 1) {
		/* Reading the unlock register releases SEM7. */
		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
		write_unlock_irqrestore(&ha->hw_lock, flags);
	}
	return data;
}
455 | ||
456 | #define CRB_WIN_LOCK_TIMEOUT 100000000 | |
457 | int qla82xx_crb_win_lock(struct qla_hw_data *ha) | |
458 | { | |
459 | int done = 0, timeout = 0; | |
460 | ||
461 | while (!done) { | |
462 | /* acquire semaphore3 from PCI HW block */ | |
463 | done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK)); | |
464 | if (done == 1) | |
465 | break; | |
466 | if (timeout >= CRB_WIN_LOCK_TIMEOUT) | |
467 | return -1; | |
468 | timeout++; | |
469 | } | |
470 | qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum); | |
471 | return 0; | |
472 | } | |
473 | ||
474 | #define IDC_LOCK_TIMEOUT 100000000 | |
475 | int qla82xx_idc_lock(struct qla_hw_data *ha) | |
476 | { | |
477 | int i; | |
478 | int done = 0, timeout = 0; | |
479 | ||
480 | while (!done) { | |
481 | /* acquire semaphore5 from PCI HW block */ | |
482 | done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK)); | |
483 | if (done == 1) | |
484 | break; | |
485 | if (timeout >= IDC_LOCK_TIMEOUT) | |
486 | return -1; | |
487 | ||
488 | timeout++; | |
489 | ||
490 | /* Yield CPU */ | |
491 | if (!in_interrupt()) | |
492 | schedule(); | |
493 | else { | |
494 | for (i = 0; i < 20; i++) | |
495 | cpu_relax(); | |
496 | } | |
497 | } | |
498 | ||
499 | return 0; | |
500 | } | |
501 | ||
/* Release the IDC hardware semaphore taken by qla82xx_idc_lock();
 * a read of the SEM5 unlock register frees it. */
void qla82xx_idc_unlock(struct qla_hw_data *ha)
{
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
}
506 | ||
/*
 * Translate a CRB offset in the 128M pci map into an address usable
 * through the 2M BAR.
 * Returns 0  - *off rewritten in place to a directly-mapped CPU address,
 *         1  - not direct-mapped; caller must use the CRB window,
 *         -1 - offset outside the valid CRB range.
 */
int
qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
{
	struct crb_128M_2M_sub_block_map *m;

	if (*off >= QLA82XX_CRB_MAX)
		return -1;

	/* The CAM/QM region has a fixed relocation inside the 2M BAR. */
	if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
		*off = (*off - QLA82XX_PCI_CAMQM) +
		    QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
		return 0;
	}

	if (*off < QLA82XX_PCI_CRBSPACE)
		return -1;

	/* Work with the offset relative to the start of CRB space. */
	*off -= QLA82XX_PCI_CRBSPACE;

	/* Try direct map */
	m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];

	if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
		*off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
		return 0;
	}
	/* Not in direct map, use crb window */
	return 1;
}
536 | ||
537 | /* PCI Windowing for DDR regions. */ | |
538 | #define QLA82XX_ADDR_IN_RANGE(addr, low, high) \ | |
539 | (((addr) <= (high)) && ((addr) >= (low))) | |
540 | /* | |
541 | * check memory access boundary. | |
542 | * used by test agent. support ddr access only for now | |
543 | */ | |
544 | static unsigned long | |
545 | qla82xx_pci_mem_bound_check(struct qla_hw_data *ha, | |
546 | unsigned long long addr, int size) | |
547 | { | |
548 | if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, | |
549 | QLA82XX_ADDR_DDR_NET_MAX) || | |
550 | !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET, | |
551 | QLA82XX_ADDR_DDR_NET_MAX) || | |
552 | ((size != 1) && (size != 2) && (size != 4) && (size != 8))) | |
553 | return 0; | |
554 | else | |
555 | return 1; | |
556 | } | |
557 | ||
558 | int qla82xx_pci_set_window_warning_count; | |
559 | ||
560 | unsigned long | |
561 | qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) | |
562 | { | |
563 | int window; | |
564 | u32 win_read; | |
565 | ||
566 | if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, | |
567 | QLA82XX_ADDR_DDR_NET_MAX)) { | |
568 | /* DDR network side */ | |
569 | window = MN_WIN(addr); | |
570 | ha->ddr_mn_window = window; | |
571 | qla82xx_wr_32(ha, | |
572 | ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window); | |
573 | win_read = qla82xx_rd_32(ha, | |
574 | ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); | |
575 | if ((win_read << 17) != window) { | |
576 | qla_printk(KERN_WARNING, ha, | |
577 | "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n", | |
578 | __func__, window, win_read); | |
579 | } | |
580 | addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; | |
581 | } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0, | |
582 | QLA82XX_ADDR_OCM0_MAX)) { | |
583 | unsigned int temp1; | |
584 | if ((addr & 0x00ff800) == 0xff800) { | |
585 | qla_printk(KERN_WARNING, ha, | |
586 | "%s: QM access not handled.\n", __func__); | |
587 | addr = -1UL; | |
588 | } | |
589 | window = OCM_WIN(addr); | |
590 | ha->ddr_mn_window = window; | |
591 | qla82xx_wr_32(ha, | |
592 | ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window); | |
593 | win_read = qla82xx_rd_32(ha, | |
594 | ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); | |
595 | temp1 = ((window & 0x1FF) << 7) | | |
596 | ((window & 0x0FFFE0000) >> 17); | |
597 | if (win_read != temp1) { | |
598 | qla_printk(KERN_WARNING, ha, | |
599 | "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n", | |
600 | __func__, temp1, win_read); | |
601 | } | |
602 | addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; | |
603 | ||
604 | } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, | |
605 | QLA82XX_P3_ADDR_QDR_NET_MAX)) { | |
606 | /* QDR network side */ | |
607 | window = MS_WIN(addr); | |
608 | ha->qdr_sn_window = window; | |
609 | qla82xx_wr_32(ha, | |
610 | ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window); | |
611 | win_read = qla82xx_rd_32(ha, | |
612 | ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); | |
613 | if (win_read != window) { | |
614 | qla_printk(KERN_WARNING, ha, | |
615 | "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n", | |
616 | __func__, window, win_read); | |
617 | } | |
618 | addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET; | |
619 | } else { | |
620 | /* | |
621 | * peg gdb frequently accesses memory that doesn't exist, | |
622 | * this limits the chit chat so debugging isn't slowed down. | |
623 | */ | |
624 | if ((qla82xx_pci_set_window_warning_count++ < 8) || | |
625 | (qla82xx_pci_set_window_warning_count%64 == 0)) { | |
626 | qla_printk(KERN_WARNING, ha, | |
627 | "%s: Warning:%s Unknown address range!\n", __func__, | |
628 | QLA2XXX_DRIVER_NAME); | |
629 | } | |
630 | addr = -1UL; | |
631 | } | |
632 | return addr; | |
633 | } | |
634 | ||
/* check if address is in the same windows as the previous access */
/*
 * Returns 1 when 'addr' is reachable with the window configuration
 * left by the previous qla82xx_pci_set_window() call, 0 otherwise.
 * DDR-net addresses hit BUG() — callers are not expected to reach
 * this path for DDR (handled before the window is switched).
 */
static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
	unsigned long long addr)
{
	int window;
	unsigned long long qdr_max;

	qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;

	/* DDR network side */
	if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX))
		BUG();
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
		QLA82XX_ADDR_OCM0_MAX))
		/* OCM regions are always reachable regardless of window. */
		return 1;
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
		QLA82XX_ADDR_OCM1_MAX))
		return 1;
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
		/* QDR network side */
		/* Same 4M QDR window as last time? */
		window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
		if (ha->qdr_sn_window == window)
			return 1;
	}
	return 0;
}
662 | ||
663 | static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha, | |
664 | u64 off, void *data, int size) | |
665 | { | |
666 | unsigned long flags; | |
f1af6208 | 667 | void *addr = NULL; |
a9083016 GM |
668 | int ret = 0; |
669 | u64 start; | |
670 | uint8_t *mem_ptr = NULL; | |
671 | unsigned long mem_base; | |
672 | unsigned long mem_page; | |
673 | ||
674 | write_lock_irqsave(&ha->hw_lock, flags); | |
675 | ||
676 | /* | |
677 | * If attempting to access unknown address or straddle hw windows, | |
678 | * do not access. | |
679 | */ | |
680 | start = qla82xx_pci_set_window(ha, off); | |
681 | if ((start == -1UL) || | |
682 | (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { | |
683 | write_unlock_irqrestore(&ha->hw_lock, flags); | |
684 | qla_printk(KERN_ERR, ha, | |
685 | "%s out of bound pci memory access. " | |
686 | "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); | |
687 | return -1; | |
688 | } | |
689 | ||
f1af6208 GM |
690 | write_unlock_irqrestore(&ha->hw_lock, flags); |
691 | mem_base = pci_resource_start(ha->pdev, 0); | |
692 | mem_page = start & PAGE_MASK; | |
693 | /* Map two pages whenever user tries to access addresses in two | |
694 | * consecutive pages. | |
695 | */ | |
696 | if (mem_page != ((start + size - 1) & PAGE_MASK)) | |
697 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2); | |
698 | else | |
699 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); | |
700 | if (mem_ptr == 0UL) { | |
701 | *(u8 *)data = 0; | |
702 | return -1; | |
a9083016 | 703 | } |
f1af6208 GM |
704 | addr = mem_ptr; |
705 | addr += start & (PAGE_SIZE - 1); | |
706 | write_lock_irqsave(&ha->hw_lock, flags); | |
a9083016 GM |
707 | |
708 | switch (size) { | |
709 | case 1: | |
710 | *(u8 *)data = readb(addr); | |
711 | break; | |
712 | case 2: | |
713 | *(u16 *)data = readw(addr); | |
714 | break; | |
715 | case 4: | |
716 | *(u32 *)data = readl(addr); | |
717 | break; | |
718 | case 8: | |
719 | *(u64 *)data = readq(addr); | |
720 | break; | |
721 | default: | |
722 | ret = -1; | |
723 | break; | |
724 | } | |
725 | write_unlock_irqrestore(&ha->hw_lock, flags); | |
726 | ||
727 | if (mem_ptr) | |
728 | iounmap(mem_ptr); | |
729 | return ret; | |
730 | } | |
731 | ||
732 | static int | |
733 | qla82xx_pci_mem_write_direct(struct qla_hw_data *ha, | |
734 | u64 off, void *data, int size) | |
735 | { | |
736 | unsigned long flags; | |
f1af6208 | 737 | void *addr = NULL; |
a9083016 GM |
738 | int ret = 0; |
739 | u64 start; | |
740 | uint8_t *mem_ptr = NULL; | |
741 | unsigned long mem_base; | |
742 | unsigned long mem_page; | |
743 | ||
744 | write_lock_irqsave(&ha->hw_lock, flags); | |
745 | ||
746 | /* | |
747 | * If attempting to access unknown address or straddle hw windows, | |
748 | * do not access. | |
749 | */ | |
750 | start = qla82xx_pci_set_window(ha, off); | |
751 | if ((start == -1UL) || | |
752 | (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { | |
753 | write_unlock_irqrestore(&ha->hw_lock, flags); | |
754 | qla_printk(KERN_ERR, ha, | |
755 | "%s out of bound pci memory access. " | |
756 | "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); | |
757 | return -1; | |
758 | } | |
759 | ||
f1af6208 GM |
760 | write_unlock_irqrestore(&ha->hw_lock, flags); |
761 | mem_base = pci_resource_start(ha->pdev, 0); | |
762 | mem_page = start & PAGE_MASK; | |
763 | /* Map two pages whenever user tries to access addresses in two | |
764 | * consecutive pages. | |
765 | */ | |
766 | if (mem_page != ((start + size - 1) & PAGE_MASK)) | |
767 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2); | |
768 | else | |
769 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); | |
770 | if (mem_ptr == 0UL) | |
771 | return -1; | |
a9083016 | 772 | |
f1af6208 GM |
773 | addr = mem_ptr; |
774 | addr += start & (PAGE_SIZE - 1); | |
775 | write_lock_irqsave(&ha->hw_lock, flags); | |
a9083016 GM |
776 | |
777 | switch (size) { | |
778 | case 1: | |
779 | writeb(*(u8 *)data, addr); | |
780 | break; | |
781 | case 2: | |
782 | writew(*(u16 *)data, addr); | |
783 | break; | |
784 | case 4: | |
785 | writel(*(u32 *)data, addr); | |
786 | break; | |
787 | case 8: | |
788 | writeq(*(u64 *)data, addr); | |
789 | break; | |
790 | default: | |
791 | ret = -1; | |
792 | break; | |
793 | } | |
794 | write_unlock_irqrestore(&ha->hw_lock, flags); | |
795 | if (mem_ptr) | |
796 | iounmap(mem_ptr); | |
797 | return ret; | |
798 | } | |
799 | ||
a9083016 GM |
800 | #define MTU_FUDGE_FACTOR 100 |
801 | unsigned long qla82xx_decode_crb_addr(unsigned long addr) | |
802 | { | |
803 | int i; | |
804 | unsigned long base_addr, offset, pci_base; | |
805 | ||
806 | if (!qla82xx_crb_table_initialized) | |
807 | qla82xx_crb_addr_transform_setup(); | |
808 | ||
809 | pci_base = ADDR_ERROR; | |
810 | base_addr = addr & 0xfff00000; | |
811 | offset = addr & 0x000fffff; | |
812 | ||
813 | for (i = 0; i < MAX_CRB_XFORM; i++) { | |
814 | if (crb_addr_xform[i] == base_addr) { | |
815 | pci_base = i << 20; | |
816 | break; | |
817 | } | |
818 | } | |
819 | if (pci_base == ADDR_ERROR) | |
820 | return pci_base; | |
821 | return pci_base + offset; | |
822 | } | |
823 | ||
824 | static long rom_max_timeout = 100; | |
825 | static long qla82xx_rom_lock_timeout = 100; | |
826 | ||
827 | int | |
828 | qla82xx_rom_lock(struct qla_hw_data *ha) | |
829 | { | |
830 | int done = 0, timeout = 0; | |
831 | ||
832 | while (!done) { | |
833 | /* acquire semaphore2 from PCI HW block */ | |
834 | done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); | |
835 | if (done == 1) | |
836 | break; | |
837 | if (timeout >= qla82xx_rom_lock_timeout) | |
838 | return -1; | |
839 | timeout++; | |
840 | } | |
841 | qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER); | |
842 | return 0; | |
843 | } | |
844 | ||
845 | int | |
846 | qla82xx_wait_rom_busy(struct qla_hw_data *ha) | |
847 | { | |
848 | long timeout = 0; | |
849 | long done = 0 ; | |
850 | ||
851 | while (done == 0) { | |
852 | done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); | |
853 | done &= 4; | |
854 | timeout++; | |
855 | if (timeout >= rom_max_timeout) { | |
856 | DEBUG(qla_printk(KERN_INFO, ha, | |
857 | "%s: Timeout reached waiting for rom busy", | |
858 | QLA2XXX_DRIVER_NAME)); | |
859 | return -1; | |
860 | } | |
861 | } | |
862 | return 0; | |
863 | } | |
864 | ||
865 | int | |
866 | qla82xx_wait_rom_done(struct qla_hw_data *ha) | |
867 | { | |
868 | long timeout = 0; | |
869 | long done = 0 ; | |
870 | ||
871 | while (done == 0) { | |
872 | done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); | |
873 | done &= 2; | |
874 | timeout++; | |
875 | if (timeout >= rom_max_timeout) { | |
876 | DEBUG(qla_printk(KERN_INFO, ha, | |
877 | "%s: Timeout reached waiting for rom done", | |
878 | QLA2XXX_DRIVER_NAME)); | |
879 | return -1; | |
880 | } | |
881 | } | |
882 | return 0; | |
883 | } | |
884 | ||
/*
 * Issue a fast-read (opcode 0xb) flash ROM instruction for 'addr' and
 * return the 32-bit data in *valp.  Caller must already hold the ROM
 * hardware semaphore (see qla82xx_rom_fast_read).  The register write
 * sequence below is order-sensitive — do not reorder.
 * Returns 0 on success, -1 on ROM-done timeout.
 */
int
qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
	/* Kick off the instruction (0xb = fast read). */
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "%s: Error waiting for rom done\n",
		    QLA2XXX_DRIVER_NAME);
		return -1;
	}
	/* Reset abyte_cnt and dummy_byte_cnt */
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	udelay(10);
	cond_resched();
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
	*valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
	return 0;
}
907 | ||
908 | int | |
909 | qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) | |
910 | { | |
911 | int ret, loops = 0; | |
912 | ||
913 | while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { | |
914 | udelay(100); | |
915 | schedule(); | |
916 | loops++; | |
917 | } | |
918 | if (loops >= 50000) { | |
919 | qla_printk(KERN_INFO, ha, | |
920 | "%s: qla82xx_rom_lock failed\n", | |
921 | QLA2XXX_DRIVER_NAME); | |
922 | return -1; | |
923 | } | |
924 | ret = qla82xx_do_rom_fast_read(ha, addr, valp); | |
925 | qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); | |
926 | return ret; | |
927 | } | |
928 | ||
929 | int | |
930 | qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) | |
931 | { | |
932 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR); | |
933 | qla82xx_wait_rom_busy(ha); | |
934 | if (qla82xx_wait_rom_done(ha)) { | |
935 | qla_printk(KERN_WARNING, ha, | |
936 | "Error waiting for rom done\n"); | |
937 | return -1; | |
938 | } | |
939 | *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); | |
940 | return 0; | |
941 | } | |
942 | ||
943 | int | |
944 | qla82xx_flash_wait_write_finish(struct qla_hw_data *ha) | |
945 | { | |
946 | long timeout = 0; | |
947 | uint32_t done = 1 ; | |
948 | uint32_t val; | |
949 | int ret = 0; | |
950 | ||
951 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); | |
952 | while ((done != 0) && (ret == 0)) { | |
953 | ret = qla82xx_read_status_reg(ha, &val); | |
954 | done = val & 1; | |
955 | timeout++; | |
956 | udelay(10); | |
957 | cond_resched(); | |
958 | if (timeout >= 50000) { | |
959 | qla_printk(KERN_WARNING, ha, | |
960 | "Timeout reached waiting for write finish"); | |
961 | return -1; | |
962 | } | |
963 | } | |
964 | return ret; | |
965 | } | |
966 | ||
967 | int | |
968 | qla82xx_flash_set_write_enable(struct qla_hw_data *ha) | |
969 | { | |
970 | uint32_t val; | |
971 | qla82xx_wait_rom_busy(ha); | |
972 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); | |
973 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN); | |
974 | qla82xx_wait_rom_busy(ha); | |
975 | if (qla82xx_wait_rom_done(ha)) | |
976 | return -1; | |
977 | if (qla82xx_read_status_reg(ha, &val) != 0) | |
978 | return -1; | |
979 | if ((val & 2) != 2) | |
980 | return -1; | |
981 | return 0; | |
982 | } | |
983 | ||
984 | int | |
985 | qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) | |
986 | { | |
987 | if (qla82xx_flash_set_write_enable(ha)) | |
988 | return -1; | |
989 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val); | |
990 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1); | |
991 | if (qla82xx_wait_rom_done(ha)) { | |
992 | qla_printk(KERN_WARNING, ha, | |
993 | "Error waiting for rom done\n"); | |
994 | return -1; | |
995 | } | |
996 | return qla82xx_flash_wait_write_finish(ha); | |
997 | } | |
998 | ||
999 | int | |
1000 | qla82xx_write_disable_flash(struct qla_hw_data *ha) | |
1001 | { | |
1002 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI); | |
1003 | if (qla82xx_wait_rom_done(ha)) { | |
1004 | qla_printk(KERN_WARNING, ha, | |
1005 | "Error waiting for rom done\n"); | |
1006 | return -1; | |
1007 | } | |
1008 | return 0; | |
1009 | } | |
1010 | ||
1011 | int | |
1012 | ql82xx_rom_lock_d(struct qla_hw_data *ha) | |
1013 | { | |
1014 | int loops = 0; | |
1015 | while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { | |
1016 | udelay(100); | |
1017 | cond_resched(); | |
1018 | loops++; | |
1019 | } | |
1020 | if (loops >= 50000) { | |
1021 | qla_printk(KERN_WARNING, ha, "ROM lock failed\n"); | |
1022 | return -1; | |
1023 | } | |
1024 | return 0;; | |
1025 | } | |
1026 | ||
1027 | int | |
1028 | qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, | |
1029 | uint32_t data) | |
1030 | { | |
1031 | int ret = 0; | |
1032 | ||
1033 | ret = ql82xx_rom_lock_d(ha); | |
1034 | if (ret < 0) { | |
1035 | qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); | |
1036 | return ret; | |
1037 | } | |
1038 | ||
1039 | if (qla82xx_flash_set_write_enable(ha)) | |
1040 | goto done_write; | |
1041 | ||
1042 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data); | |
1043 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr); | |
1044 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); | |
1045 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP); | |
1046 | qla82xx_wait_rom_busy(ha); | |
1047 | if (qla82xx_wait_rom_done(ha)) { | |
1048 | qla_printk(KERN_WARNING, ha, | |
1049 | "Error waiting for rom done\n"); | |
1050 | ret = -1; | |
1051 | goto done_write; | |
1052 | } | |
1053 | ||
1054 | ret = qla82xx_flash_wait_write_finish(ha); | |
1055 | ||
1056 | done_write: | |
1057 | qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); | |
1058 | return ret; | |
1059 | } | |
1060 | ||
/* This routine does CRB initialize sequence
 * to put the ISP into operational state:
 * halt the PEGs, read the CRB init table from flash, replay its
 * addr/value pairs (with a skip-list of registers that must not be
 * touched), then reset caches and clear the protocol engines.
 * Returns 0 on success, -1 on any flash-read or validation failure.
 */
int qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
{
	int addr, val;
	int i ;
	struct crb_addr_pair *buf;
	unsigned long off;
	unsigned offset, n;
	struct qla_hw_data *ha = vha->hw;

	struct crb_addr_pair {
		long addr;
		long data;
	};

	/* Halt all the indiviual PEGs and other blocks of the ISP */
	/* NOTE(review): qla82xx_rom_lock() can return -1 on timeout; its
	 * return value is not checked here — verify that is acceptable. */
	qla82xx_rom_lock(ha);
	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		/* don't reset CAM block on reset */
		qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
	else
		qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));

	/* Read the signature value from the flash.
	 * Offset 0: Contain signature (0xcafecafe)
	 * Offset 4: Offset and number of addr/value pairs
	 * that present in CRB initialize sequence
	 */
	/* NOTE(review): 'n' is unsigned but passed as int* — relies on
	 * same-width representation; confirm no sign issues. */
	if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
	    qla82xx_rom_fast_read(ha, 4, &n) != 0) {
		qla_printk(KERN_WARNING, ha,
		    "[ERROR] Reading crb_init area: n: %08x\n", n);
		return -1;
	}

	/* Offset in flash = lower 16 bits
	 * Number of enteries = upper 16 bits
	 */
	offset = n & 0xffffU;
	n = (n >> 16) & 0xffffU;

	/* number of addr/value pair should not exceed 1024 enteries */
	if (n >= 1024) {
		qla_printk(KERN_WARNING, ha,
		    "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
		    QLA2XXX_DRIVER_NAME, __func__, n);
		return -1;
	}

	qla_printk(KERN_INFO, ha,
	    "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n);

	/* n is bounded above by 1024, so this allocation cannot overflow. */
	buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "%s: [ERROR] Unable to malloc memory.\n",
		    QLA2XXX_DRIVER_NAME);
		return -1;
	}

	/* First pass: read all addr/value pairs out of flash. */
	for (i = 0; i < n; i++) {
		if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
		    qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
			kfree(buf);
			return -1;
		}

		buf[i].addr = addr;
		buf[i].data = val;
	}

	/* Second pass: replay the pairs into the CRB space. */
	for (i = 0; i < n; i++) {
		/* Translate internal CRB initialization
		 * address to PCI bus address
		 */
		off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
		    QLA82XX_PCI_CRBSPACE;
		/* Not all CRB addr/value pair to be written,
		 * some of them are skipped
		 */

		/* skipping cold reboot MAGIC */
		if (off == QLA82XX_CAM_RAM(0x1fc))
			continue;

		/* do not reset PCI */
		if (off == (ROMUSB_GLB + 0xbc))
			continue;

		/* skip core clock, so that firmware can increase the clock */
		if (off == (ROMUSB_GLB + 0xc8))
			continue;

		/* skip the function enable register */
		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
			continue;

		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
			continue;

		if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
			continue;

		if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
			continue;

		if (off == ADDR_ERROR) {
			qla_printk(KERN_WARNING, ha,
			    "%s: [ERROR] Unknown addr: 0x%08lx\n",
			    QLA2XXX_DRIVER_NAME, buf[i].addr);
			continue;
		}

		qla82xx_wr_32(ha, off, buf[i].data);

		/* ISP requires much bigger delay to settle down,
		 * else crb_window returns 0xffffffff
		 */
		if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
			msleep(1000);

		/* ISP requires millisec delay between
		 * successive CRB register updation
		 */
		msleep(1);
	}

	kfree(buf);

	/* Resetting the data and instruction cache */
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);

	/* Clear all protocol processing engines */
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
	return 0;
}
1209 | ||
1210 | int qla82xx_check_for_bad_spd(struct qla_hw_data *ha) | |
1211 | { | |
1212 | u32 val = 0; | |
1213 | val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS); | |
1214 | val &= QLA82XX_BOOT_LOADER_MN_ISSUE; | |
1215 | if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) { | |
1216 | qla_printk(KERN_INFO, ha, | |
1217 | "Memory DIMM SPD not programmed. " | |
1218 | " Assumed valid.\n"); | |
1219 | return 1; | |
1220 | } else if (val) { | |
1221 | qla_printk(KERN_INFO, ha, | |
1222 | "Memory DIMM type incorrect.Info:%08X.\n", val); | |
1223 | return 2; | |
1224 | } | |
1225 | return 0; | |
1226 | } | |
1227 | ||
/*
 * Copy the bootloader image from flash (flt_region_bootload) into ISP
 * memory at BOOTLD_START, 8 bytes at a time, then kick the PEG to start
 * executing it.
 *
 * Returns 0 on success, -1 on a flash read failure.
 */
int
qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
{
	int i;
	long size = 0;
	/* flt_region_bootload is in 4-byte words; convert to bytes. */
	long flashaddr = ha->flt_region_bootload << 2;
	long memaddr = BOOTLD_START;
	u64 data;
	u32 high, low;
	size = (IMAGE_START - BOOTLD_START) / 8;

	for (i = 0; i < size; i++) {
		if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
		    (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
			return -1;
		}
		data = ((u64)high << 32) | low ;
		/* NOTE(review): qla82xx_pci_mem_write_2M() can fail; its
		 * return value is ignored here — confirm acceptable. */
		qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
		flashaddr += 8;
		memaddr += 8;

		/* Breathe every 0x1000 dwords so we don't hog the CPU. */
		if (i % 0x1000 == 0)
			msleep(1);
	}
	udelay(100);
	read_lock(&ha->hw_lock);
	/* Release PEG 0 from reset and start the bootloader. */
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
	read_unlock(&ha->hw_lock);
	return 0;
}
1259 | ||
/*
 * Read 'size' bytes (1, 2, 4 or 8) of ISP memory at 64-bit address
 * 'off' through the MIU test agent, handling reads that straddle a
 * 16-byte agent line.  DDR-net addresses inside the direct-map window
 * are serviced by qla82xx_pci_mem_read_direct() instead.
 *
 * Returns 0 on success, -1 if the test agent stays busy.
 */
int
qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
	u64 off, void *data, int size)
{
	int i, j = 0, k, start, end, loop, sz[2], off0[2];
	int shift_amount;
	uint32_t temp;
	uint64_t off8, val, mem_crb, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */

	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
			return qla82xx_pci_mem_read_direct(ha,
			    off, data, size);
	}

	/* Split the access at a 16-byte boundary: sz[0] bytes from the
	 * first agent line, sz[1] from the next. */
	off8 = off & 0xfffffff0;
	off0[0] = off & 0xf;
	sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
	shift_amount = 4;
	loop = ((off0[0] + size - 1) >> shift_amount) + 1;
	off0[1] = 0;
	sz[1] = size - sz[0];

	/*
	 * don't lock here - write_wx gets the lock if each time
	 * write_lock_irqsave(&adapter->adapter_lock, flags);
	 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
	 */

	for (i = 0; i < loop; i++) {
		/* Program the agent address, then start the read. */
		temp = off8 + (i << shift_amount);
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
		temp = MIU_TA_CTL_ENABLE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);

		/* Poll for the agent's busy bit to clear. */
		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			if (printk_ratelimit())
				dev_err(&ha->pdev->dev,
				    "failed to read through agent\n");
			break;
		}

		/* Gather the dwords covering this chunk into word[i]. */
		start = off0[i] >> 2;
		end = (off0[i] + sz[i] - 1) >> 2;
		for (k = start; k <= end; k++) {
			temp = qla82xx_rd_32(ha,
			    mem_crb + MIU_TEST_AGT_RDDATA(k));
			word[i] |= ((uint64_t)temp << (32 * (k & 1)));
		}
	}

	/*
	 * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
	 * write_unlock_irqrestore(&adapter->adapter_lock, flags);
	 */

	if (j >= MAX_CTL_CHECK)
		return -1;

	/* Re-assemble the (possibly straddling) value from word[0/1]. */
	if ((off0[0] & 7) == 0) {
		val = word[0];
	} else {
		val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
		    ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
	}

	switch (size) {
	case 1:
		*(uint8_t *)data = val;
		break;
	case 2:
		*(uint16_t *)data = val;
		break;
	case 4:
		*(uint32_t *)data = val;
		break;
	case 8:
		*(uint64_t *)data = val;
		break;
	}
	return 0;
}
1359 | ||
/*
 * Write 'size' bytes (1, 2, 4 or 8) to ISP memory at 64-bit address
 * 'off' through the MIU test agent.  Because the agent operates on
 * 16-byte lines, this performs a read-modify-write: the covering lines
 * are first read via qla82xx_pci_mem_read_2M(), merged with the new
 * data, then written back.  DDR-net addresses inside the direct-map
 * window are serviced by qla82xx_pci_mem_write_direct() instead.
 *
 * Returns 0 on success, -1 on read or agent-busy failure.
 */
int
qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
	u64 off, void *data, int size)
{
	int i, j, ret = 0, loop, sz[2], off0;
	int scale, shift_amount, startword;
	uint32_t temp;
	uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */
	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
			return qla82xx_pci_mem_write_direct(ha,
			    off, data, size);
	}

	/* Split at an 8-byte boundary within the 16-byte agent line. */
	off0 = off & 0x7;
	sz[0] = (size < (8 - off0)) ? size : (8 - off0);
	sz[1] = size - sz[0];

	off8 = off & 0xfffffff0;
	loop = (((off & 0xf) + size - 1) >> 4) + 1;
	shift_amount = 4;
	scale = 2;
	startword = (off & 0xf)/8;

	/* Read the covering line(s) so unmodified bytes are preserved. */
	for (i = 0; i < loop; i++) {
		if (qla82xx_pci_mem_read_2M(ha, off8 +
		    (i << shift_amount), &word[i * scale], 8))
			return -1;
	}

	switch (size) {
	case 1:
		tmpw = *((uint8_t *)data);
		break;
	case 2:
		tmpw = *((uint16_t *)data);
		break;
	case 4:
		tmpw = *((uint32_t *)data);
		break;
	case 8:
	default:
		tmpw = *((uint64_t *)data);
		break;
	}

	/* Merge the new bytes into the read-back words. */
	if (sz[0] == 8) {
		word[startword] = tmpw;
	} else {
		word[startword] &=
		    ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
		word[startword] |= tmpw << (off0 * 8);
	}
	if (sz[1] != 0) {
		word[startword+1] &= ~(~0ULL << (sz[1] * 8));
		word[startword+1] |= tmpw >> (sz[0] * 8);
	}

	/*
	 * don't lock here - write_wx gets the lock if each time
	 * write_lock_irqsave(&adapter->adapter_lock, flags);
	 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
	 */
	for (i = 0; i < loop; i++) {
		/* Program address and the full 16-byte payload, then
		 * trigger the agent write. */
		temp = off8 + (i << shift_amount);
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
		temp = word[i * scale] & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
		temp = (word[i * scale] >> 32) & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
		temp = word[i*scale + 1] & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb +
		    MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
		temp = (word[i*scale + 1] >> 32) & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb +
		    MIU_TEST_AGT_WRDATA_UPPER_HI, temp);

		temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);

		/* Poll for the agent's busy bit to clear. */
		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			if (printk_ratelimit())
				dev_err(&ha->pdev->dev,
				    "failed to write through agent\n");
			ret = -1;
			break;
		}
	}

	return ret;
}
1468 | ||
9c2b2975 HZ |
/*
 * Walk the unified ROM image directory at the start of 'unirom' and
 * return the table descriptor whose type matches 'section', or NULL if
 * no entry matches.
 *
 * NOTE(review): values read FROM the image are converted with
 * cpu_to_le32(); le32_to_cpu() looks like the intended direction.  The
 * two are identical on little-endian hosts — verify on big-endian.
 */
static struct qla82xx_uri_table_desc *
qla82xx_get_table_desc(const u8 *unirom, int section)
{
	uint32_t i;
	struct qla82xx_uri_table_desc *directory =
		(struct qla82xx_uri_table_desc *)&unirom[0];
	__le32 offset;
	__le32 tab_type;
	__le32 entries = cpu_to_le32(directory->num_entries);

	for (i = 0; i < entries; i++) {
		offset = cpu_to_le32(directory->findex) +
		    (i * cpu_to_le32(directory->entry_size));
		/* entry type sits 8 dwords (32 bytes) into each entry */
		tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));

		if (tab_type == section)
			return (struct qla82xx_uri_table_desc *)&unirom[offset];
	}

	return NULL;
}
1490 | ||
/*
 * Locate the data descriptor for 'section' in the unified ROM image:
 * read the per-product index at file_prd_off + idx_offset, find the
 * section's table, and return the idx'th entry.  Returns NULL if the
 * section table is absent.
 *
 * NOTE(review): same cpu_to_le32()-on-image-data pattern as
 * qla82xx_get_table_desc(); verify direction on big-endian hosts.
 */
static struct qla82xx_uri_data_desc *
qla82xx_get_data_desc(struct qla_hw_data *ha,
	u32 section, u32 idx_offset)
{
	const u8 *unirom = ha->hablob->fw->data;
	int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
	struct qla82xx_uri_table_desc *tab_desc = NULL;
	__le32 offset;

	tab_desc = qla82xx_get_table_desc(unirom, section);
	if (!tab_desc)
		return NULL;

	offset = cpu_to_le32(tab_desc->findex) +
	    (cpu_to_le32(tab_desc->entry_size) * idx);

	return (struct qla82xx_uri_data_desc *)&unirom[offset];
}
1509 | ||
1510 | static u8 * | |
1511 | qla82xx_get_bootld_offset(struct qla_hw_data *ha) | |
1512 | { | |
1513 | u32 offset = BOOTLD_START; | |
1514 | struct qla82xx_uri_data_desc *uri_desc = NULL; | |
1515 | ||
1516 | if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { | |
1517 | uri_desc = qla82xx_get_data_desc(ha, | |
1518 | QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF); | |
1519 | if (uri_desc) | |
1520 | offset = cpu_to_le32(uri_desc->findex); | |
1521 | } | |
1522 | ||
1523 | return (u8 *)&ha->hablob->fw->data[offset]; | |
1524 | } | |
1525 | ||
1526 | static __le32 | |
1527 | qla82xx_get_fw_size(struct qla_hw_data *ha) | |
1528 | { | |
1529 | struct qla82xx_uri_data_desc *uri_desc = NULL; | |
1530 | ||
1531 | if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { | |
1532 | uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, | |
1533 | QLA82XX_URI_FIRMWARE_IDX_OFF); | |
1534 | if (uri_desc) | |
1535 | return cpu_to_le32(uri_desc->size); | |
1536 | } | |
1537 | ||
1538 | return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]); | |
1539 | } | |
1540 | ||
1541 | static u8 * | |
1542 | qla82xx_get_fw_offs(struct qla_hw_data *ha) | |
1543 | { | |
1544 | u32 offset = IMAGE_START; | |
1545 | struct qla82xx_uri_data_desc *uri_desc = NULL; | |
1546 | ||
1547 | if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { | |
1548 | uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, | |
1549 | QLA82XX_URI_FIRMWARE_IDX_OFF); | |
1550 | if (uri_desc) | |
1551 | offset = cpu_to_le32(uri_desc->findex); | |
1552 | } | |
1553 | ||
1554 | return (u8 *)&ha->hablob->fw->data[offset]; | |
1555 | } | |
1556 | ||
a9083016 GM |
1557 | /* PCI related functions */ |
1558 | char * | |
1559 | qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str) | |
1560 | { | |
1561 | int pcie_reg; | |
1562 | struct qla_hw_data *ha = vha->hw; | |
1563 | char lwstr[6]; | |
1564 | uint16_t lnk; | |
1565 | ||
1566 | pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); | |
1567 | pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk); | |
1568 | ha->link_width = (lnk >> 4) & 0x3f; | |
1569 | ||
1570 | strcpy(str, "PCIe ("); | |
1571 | strcat(str, "2.5Gb/s "); | |
1572 | snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width); | |
1573 | strcat(str, lwstr); | |
1574 | return str; | |
1575 | } | |
1576 | ||
1577 | int qla82xx_pci_region_offset(struct pci_dev *pdev, int region) | |
1578 | { | |
1579 | unsigned long val = 0; | |
1580 | u32 control; | |
1581 | ||
1582 | switch (region) { | |
1583 | case 0: | |
1584 | val = 0; | |
1585 | break; | |
1586 | case 1: | |
1587 | pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control); | |
1588 | val = control + QLA82XX_MSIX_TBL_SPACE; | |
1589 | break; | |
1590 | } | |
1591 | return val; | |
1592 | } | |
1593 | ||
a9083016 GM |
1594 | |
/*
 * Reserve and map the ISP82xx PCI regions: BAR 0 (MMIO register space,
 * including the per-function iobase window) and, unless ql2xdbwr is
 * set, the doorbell window in BAR 4.
 *
 * Returns 0 on success, -ENOMEM on any reservation or mapping failure
 * (regions are released before returning on the mapping paths).
 */
int
qla82xx_iospace_config(struct qla_hw_data *ha)
{
	uint32_t len = 0;

	if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
		qla_printk(KERN_WARNING, ha,
		    "Failed to reserve selected regions (%s)\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		qla_printk(KERN_ERR, ha,
		    "region #0 not an MMIO resource (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	len = pci_resource_len(ha->pdev, 0);
	ha->nx_pcibase =
	    (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
	if (!ha->nx_pcibase) {
		qla_printk(KERN_ERR, ha,
		    "cannot remap pcibase MMIO (%s), aborting\n",
		    pci_name(ha->pdev));
		pci_release_regions(ha->pdev);
		goto iospace_error_exit;
	}

	/* Mapping of IO base pointer */
	ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
	    0xbc000 + (ha->pdev->devfn << 11));

	if (!ql2xdbwr) {
		/* Map this function's doorbell write window in BAR 4. */
		ha->nxdb_wr_ptr =
		    (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
		    (ha->pdev->devfn << 12)), 4);
		if (!ha->nxdb_wr_ptr) {
			qla_printk(KERN_ERR, ha,
			    "cannot remap MMIO (%s), aborting\n",
			    pci_name(ha->pdev));
			pci_release_regions(ha->pdev);
			goto iospace_error_exit;
		}

		/* Mapping of IO base pointer,
		 * door bell read and write pointer
		 */
		ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
		    (ha->pdev->devfn * 8);
	} else {
		/* Doorbell-through-CAMRAM mode: pick the register by
		 * PCI function. */
		ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
		    QLA82XX_CAMRAM_DB1 :
		    QLA82XX_CAMRAM_DB2);
	}

	ha->max_req_queues = ha->max_rsp_queues = 1;
	ha->msix_count = ha->max_rsp_queues + 1;
	return 0;

iospace_error_exit:
	return -ENOMEM;
}
1660 | ||
1661 | /* GS related functions */ | |
1662 | ||
1663 | /* Initialization related functions */ | |
1664 | ||
1665 | /** | |
1666 | * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers. | |
1667 | * @ha: HA context | |
1668 | * | |
1669 | * Returns 0 on success. | |
1670 | */ | |
1671 | int | |
1672 | qla82xx_pci_config(scsi_qla_host_t *vha) | |
1673 | { | |
1674 | struct qla_hw_data *ha = vha->hw; | |
1675 | int ret; | |
1676 | ||
1677 | pci_set_master(ha->pdev); | |
1678 | ret = pci_set_mwi(ha->pdev); | |
1679 | ha->chip_revision = ha->pdev->revision; | |
1680 | return 0; | |
1681 | } | |
1682 | ||
/**
 * qla82xx_reset_chip() - Disable host interrupts on the ISP82xx.
 * @vha: HA context
 *
 * (The previous kernel-doc was a copy-paste of qla82xx_pci_config's:
 * this function configures no PCI registers and returns nothing; it
 * only calls the isp_ops interrupt-disable hook.)
 */
void
qla82xx_reset_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	ha->isp_ops->disable_intrs(ha);
}
1695 | ||
/*
 * Program the request/response ring parameters (lengths and DMA
 * addresses) into the init control block and zero the ring index
 * registers in the 82xx register window.
 */
void qla82xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
	struct init_cb_81xx *icb;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_81xx *)ha->init_cb;
	icb->request_q_outpointer = __constant_cpu_to_le16(0);
	icb->response_q_inpointer = __constant_cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	/* Reset the hardware ring indices. */
	WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0);
	WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0);
	WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
}
1719 | ||
f1af6208 GM |
/*
 * Take the adapter down: mark the host offline, attempt a graceful
 * firmware stop, then disable interrupts (in that order).
 */
void qla82xx_reset_adapter(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	vha->flags.online = 0;
	qla2x00_try_to_stop_firmware(vha);
	ha->isp_ops->disable_intrs(ha);
}
1727 | ||
a9083016 GM |
/*
 * Copy the bootloader and firmware images from the request_firmware
 * blob into ISP memory, write the BDINFO magic so the firmware knows
 * the download is complete, then start PEG 0.
 *
 * Returns 0 on success, -EIO if any 8-byte write into ISP memory
 * fails.
 */
int qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
{
	u64 *ptr64;
	u32 i, flashaddr, size;
	__le64 data;

	size = (IMAGE_START - BOOTLD_START) / 8;

	/* Stage 1: bootloader at BOOTLD_START. */
	ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
	flashaddr = BOOTLD_START;

	for (i = 0; i < size; i++) {
		data = cpu_to_le64(ptr64[i]);
		if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
			return -EIO;
		flashaddr += 8;
	}

	/* Stage 2: firmware image at FLASH_ADDR_START. */
	flashaddr = FLASH_ADDR_START;
	size = (__force u32)qla82xx_get_fw_size(ha) / 8;
	ptr64 = (u64 *)qla82xx_get_fw_offs(ha);

	for (i = 0; i < size; i++) {
		data = cpu_to_le64(ptr64[i]);

		if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
			return -EIO;
		flashaddr += 8;
	}
	udelay(100);

	/* Write a magic value to CAMRAM register
	 * at a specified offset to indicate
	 * that all data is written and
	 * ready for firmware to initialize.
	 */
	qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);

	read_lock(&ha->hw_lock);
	/* Release PEG 0 from reset and start the bootloader. */
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
	read_unlock(&ha->hw_lock);
	return 0;
}
1772 | ||
/*
 * Scan the unified ROM image's product table for the entry matching
 * this chip revision (and the hard-coded "MN absent" flag bit) and
 * record its offset in ha->file_prd_off for later descriptor lookups.
 *
 * Returns 0 when a matching entry is found, -1 otherwise.
 */
static int
qla82xx_set_product_offset(struct qla_hw_data *ha)
{
	struct qla82xx_uri_table_desc *ptab_desc = NULL;
	const uint8_t *unirom = ha->hablob->fw->data;
	uint32_t i;
	__le32 entries;
	__le32 flags, file_chiprev, offset;
	uint8_t chiprev = ha->chip_revision;
	/* Hardcoding mn_present flag for P3P */
	int mn_present = 0;
	uint32_t flagbit;

	ptab_desc = qla82xx_get_table_desc(unirom,
	    QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
	if (!ptab_desc)
		return -1;

	entries = cpu_to_le32(ptab_desc->num_entries);

	for (i = 0; i < entries; i++) {
		offset = cpu_to_le32(ptab_desc->findex) +
		    (i * cpu_to_le32(ptab_desc->entry_size));
		flags = cpu_to_le32(*((int *)&unirom[offset] +
		    QLA82XX_URI_FLAGS_OFF));
		file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
		    QLA82XX_URI_CHIP_REV_OFF));

		/* bit 1 = MN present, bit 2 = MN absent */
		flagbit = mn_present ? 1 : 2;

		if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
			ha->file_prd_off = offset;
			return 0;
		}
	}
	return -1;
}
1810 | ||
1811 | int | |
1812 | qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type) | |
1813 | { | |
1814 | __le32 val; | |
1815 | uint32_t min_size; | |
1816 | struct qla_hw_data *ha = vha->hw; | |
1817 | const struct firmware *fw = ha->hablob->fw; | |
1818 | ||
1819 | ha->fw_type = fw_type; | |
1820 | ||
1821 | if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) { | |
1822 | if (qla82xx_set_product_offset(ha)) | |
1823 | return -EINVAL; | |
1824 | ||
1825 | min_size = QLA82XX_URI_FW_MIN_SIZE; | |
1826 | } else { | |
1827 | val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]); | |
1828 | if ((__force u32)val != QLA82XX_BDINFO_MAGIC) | |
1829 | return -EINVAL; | |
1830 | ||
1831 | min_size = QLA82XX_FW_MIN_SIZE; | |
1832 | } | |
1833 | ||
1834 | if (fw->size < min_size) | |
1835 | return -EINVAL; | |
a9083016 GM |
1836 | return 0; |
1837 | } | |
1838 | ||
/*
 * qla82xx_check_cmdpeg_state() - Wait for the Cmd PEG to finish
 * initializing.
 * @ha: HW context
 *
 * Polls CRB_CMDPEG_STATE every 500 ms, up to 60 tries (~30 s), until
 * the firmware reports PHAN_INITIALIZE_COMPLETE or _ACK.  On timeout
 * the state register is forced to PHAN_INITIALIZE_FAILED.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
{
	u32 val = 0;
	int retries = 60;

	do {
		read_lock(&ha->hw_lock);
		val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
		read_unlock(&ha->hw_lock);

		switch (val) {
		case PHAN_INITIALIZE_COMPLETE:
		case PHAN_INITIALIZE_ACK:
			return QLA_SUCCESS;
		case PHAN_INITIALIZE_FAILED:
			/* Keep polling; the firmware may still come up. */
			break;
		default:
			break;
		}
		qla_printk(KERN_WARNING, ha,
			"CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n",
			val, retries);

		msleep(500);

	} while (--retries);

	qla_printk(KERN_INFO, ha,
		"Cmd Peg initialization failed: 0x%x.\n", val);

	qla82xx_check_for_bad_spd(ha);
	/* NOTE(review): PEGTUNE_DONE is read but the value is never
	 * used; the read may be intended only for its side effect --
	 * confirm against the hardware programming guide.
	 */
	val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
	read_lock(&ha->hw_lock);
	qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
	read_unlock(&ha->hw_lock);
	return QLA_FUNCTION_FAILED;
}
1876 | ||
1877 | int qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) | |
1878 | { | |
1879 | u32 val = 0; | |
1880 | int retries = 60; | |
1881 | ||
1882 | do { | |
1883 | read_lock(&ha->hw_lock); | |
1884 | val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE); | |
1885 | read_unlock(&ha->hw_lock); | |
1886 | ||
1887 | switch (val) { | |
1888 | case PHAN_INITIALIZE_COMPLETE: | |
1889 | case PHAN_INITIALIZE_ACK: | |
1890 | return QLA_SUCCESS; | |
1891 | case PHAN_INITIALIZE_FAILED: | |
1892 | break; | |
1893 | default: | |
1894 | break; | |
1895 | } | |
1896 | ||
1897 | qla_printk(KERN_WARNING, ha, | |
1898 | "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n", | |
1899 | val, retries); | |
1900 | ||
1901 | msleep(500); | |
1902 | ||
1903 | } while (--retries); | |
1904 | ||
1905 | qla_printk(KERN_INFO, ha, | |
1906 | "Rcv Peg initialization failed: 0x%x.\n", val); | |
1907 | read_lock(&ha->hw_lock); | |
1908 | qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED); | |
1909 | read_unlock(&ha->hw_lock); | |
1910 | return QLA_FUNCTION_FAILED; | |
1911 | } | |
1912 | ||
/* ISR related functions */

/* Per-PCI-function target-interrupt mask register offsets, indexed by
 * ha->portnum.
 * NOTE(review): entries [6] and [7] are both ..._F7; index 6 looks
 * like it should be ISR_INT_TARGET_MASK_F6 -- confirm against
 * qla_nx.h and the original intent before changing.
 */
uint32_t qla82xx_isr_int_target_mask_enable[8] = {
	ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
	ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
	ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
	ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
};
1920 | ||
/* Per-PCI-function target-interrupt status register offsets, indexed
 * by ha->portnum.
 * NOTE(review): entries [6] and [7] are both ..._F7; index 6 looks
 * like it should be ISR_INT_TARGET_STATUS_F6 -- confirm.
 */
uint32_t qla82xx_isr_int_target_status[8] = {
	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
	ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
};
1927 | ||
/* Legacy INTx configuration (vector bit plus status/mask/PCI-int
 * register offsets) for each of the eight PCI functions; the entry for
 * this function is copied into ha->nx_legacy_intr by
 * qla82xx_init_flags().
 */
static struct qla82xx_legacy_intr_set legacy_intr[] = \
	QLA82XX_LEGACY_INTR_CONFIG;
1930 | ||
1931 | /* | |
1932 | * qla82xx_mbx_completion() - Process mailbox command completions. | |
1933 | * @ha: SCSI driver HA context | |
1934 | * @mb0: Mailbox0 register | |
1935 | */ | |
1936 | void | |
1937 | qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) | |
1938 | { | |
1939 | uint16_t cnt; | |
1940 | uint16_t __iomem *wptr; | |
1941 | struct qla_hw_data *ha = vha->hw; | |
1942 | struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; | |
1943 | wptr = (uint16_t __iomem *)®->mailbox_out[1]; | |
1944 | ||
1945 | /* Load return mailbox registers. */ | |
1946 | ha->flags.mbox_int = 1; | |
1947 | ha->mailbox_out[0] = mb0; | |
1948 | ||
1949 | for (cnt = 1; cnt < ha->mbx_count; cnt++) { | |
1950 | ha->mailbox_out[cnt] = RD_REG_WORD(wptr); | |
1951 | wptr++; | |
1952 | } | |
1953 | ||
1954 | if (ha->mcp) { | |
1955 | DEBUG3_11(printk(KERN_INFO "%s(%ld): " | |
1956 | "Got mailbox completion. cmd=%x.\n", | |
1957 | __func__, vha->host_no, ha->mcp->mb[0])); | |
1958 | } else { | |
1959 | qla_printk(KERN_INFO, ha, | |
1960 | "%s(%ld): MBX pointer ERROR!\n", | |
1961 | __func__, vha->host_no); | |
1962 | } | |
1963 | } | |
1964 | ||
/*
 * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
 * @irq: interrupt number
 * @dev_id: response queue (struct rsp_que *) registered for the line
 *
 * Called by the system whenever the host adapter generates an
 * interrupt.  For legacy INTx it first verifies this function actually
 * raised the interrupt, then services mailbox completions, async
 * events and response-queue entries, acks the hardware, and wakes any
 * waiter blocked on a mailbox command.
 *
 * Returns IRQ_HANDLED, or IRQ_NONE for a spurious/shared interrupt.
 */
irqreturn_t
qla82xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_82xx __iomem *reg;
	int status = 0, status1 = 0;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint16_t mb[4];

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
			"%s(): NULL response queue pointer\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;

	if (!ha->flags.msi_enabled) {
		/* Legacy INTx is shared: bail out unless our vector bit
		 * is set and the legacy-interrupt state says triggered. */
		status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
		if (!(status & ha->nx_legacy_intr.int_vec_bit))
			return IRQ_NONE;

		status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
		if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
			return IRQ_NONE;
	}

	/* clear the interrupt */
	qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla82xx_rd_32(ha, ISR_INT_VECTOR);
	qla82xx_rd_32(ha, ISR_INT_VECTOR);

	reg = &ha->iobase->isp82;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	/* Single-pass loop (iter starts at 1). */
	for (iter = 1; iter--; ) {

		if (RD_REG_DWORD(&reg->host_int)) {
			stat = RD_REG_DWORD(&reg->host_status);

			switch (stat & 0xff) {
			case 0x1:
			case 0x2:
			case 0x10:
			case 0x11:
				/* Mailbox command completion. */
				qla82xx_mbx_completion(vha, MSW(stat));
				status |= MBX_INTERRUPT;
				break;
			case 0x12:
				/* Asynchronous event notification. */
				mb[0] = MSW(stat);
				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case 0x13:
				/* Response queue entries available. */
				qla24xx_process_response_queue(vha, rsp);
				break;
			default:
				DEBUG2(printk("scsi(%ld): "
					" Unrecognized interrupt type (%d).\n",
					vha->host_no, stat & 0xff));
				break;
			}
		}
		/* Ack the host interrupt. */
		WRT_REG_DWORD(&reg->host_int, 0);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (!ha->flags.msi_enabled)
		/* Re-enable (unmask) legacy target interrupts. */
		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);

#ifdef QL_DEBUG_LEVEL_17
	if (!irq && ha->flags.eeh_busy)
		qla_printk(KERN_WARNING, ha,
			"isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
			status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
#endif

	/* Wake any mailbox waiter. */
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
	return IRQ_HANDLED;
}
2067 | ||
/*
 * qla82xx_msix_default() - MSI-X default-vector interrupt handler.
 * @irq: interrupt number
 * @dev_id: response queue (struct rsp_que *) registered for the vector
 *
 * Under the hardware lock: if host_int is raised, decodes host_status
 * and dispatches mailbox completions (0x1/0x2/0x10/0x11), async events
 * (0x12) or response-queue processing (0x13), then acks by clearing
 * host_int.  Finally wakes any mailbox waiter.
 *
 * Returns IRQ_HANDLED (IRQ_NONE only for a NULL dev_id).
 */
irqreturn_t
qla82xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_82xx __iomem *reg;
	int status = 0;
	unsigned long flags;
	uint32_t stat;
	uint16_t mb[4];

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
			"%s(): NULL response queue pointer\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;

	reg = &ha->iobase->isp82;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	/* Single pass; do/while(0) kept for symmetry with the legacy
	 * handler's loop form. */
	do {
		if (RD_REG_DWORD(&reg->host_int)) {
			stat = RD_REG_DWORD(&reg->host_status);

			switch (stat & 0xff) {
			case 0x1:
			case 0x2:
			case 0x10:
			case 0x11:
				/* Mailbox command completion. */
				qla82xx_mbx_completion(vha, MSW(stat));
				status |= MBX_INTERRUPT;
				break;
			case 0x12:
				/* Asynchronous event notification. */
				mb[0] = MSW(stat);
				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case 0x13:
				/* Response queue entries available. */
				qla24xx_process_response_queue(vha, rsp);
				break;
			default:
				DEBUG2(printk("scsi(%ld): "
					" Unrecognized interrupt type (%d).\n",
					vha->host_no, stat & 0xff));
				break;
			}
		}
		/* Ack the host interrupt. */
		WRT_REG_DWORD(&reg->host_int, 0);
	} while (0);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

#ifdef QL_DEBUG_LEVEL_17
	if (!irq && ha->flags.eeh_busy)
		qla_printk(KERN_WARNING, ha,
			"isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
			status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
#endif

	/* Wake any mailbox waiter. */
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
	return IRQ_HANDLED;
}
2140 | ||
2141 | irqreturn_t | |
2142 | qla82xx_msix_rsp_q(int irq, void *dev_id) | |
2143 | { | |
2144 | scsi_qla_host_t *vha; | |
2145 | struct qla_hw_data *ha; | |
2146 | struct rsp_que *rsp; | |
2147 | struct device_reg_82xx __iomem *reg; | |
2148 | ||
2149 | rsp = (struct rsp_que *) dev_id; | |
2150 | if (!rsp) { | |
2151 | printk(KERN_INFO | |
2152 | "%s(): NULL response queue pointer\n", __func__); | |
2153 | return IRQ_NONE; | |
2154 | } | |
2155 | ||
2156 | ha = rsp->hw; | |
2157 | reg = &ha->iobase->isp82; | |
2158 | spin_lock_irq(&ha->hardware_lock); | |
2159 | vha = pci_get_drvdata(ha->pdev); | |
2160 | qla24xx_process_response_queue(vha, rsp); | |
2161 | WRT_REG_DWORD(®->host_int, 0); | |
2162 | spin_unlock_irq(&ha->hardware_lock); | |
2163 | return IRQ_HANDLED; | |
2164 | } | |
2165 | ||
/*
 * qla82xx_poll() - Polled-mode service routine for the ISP82xx.
 * @irq: interrupt number (unused)
 * @dev_id: response queue (struct rsp_que *)
 *
 * Same dispatch as the interrupt handlers: checks host_int, decodes
 * host_status, services mailbox completions / async events /
 * response-queue entries, then acks host_int.  Unlike the ISRs it does
 * not wake mailbox waiters.
 */
void
qla82xx_poll(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_82xx __iomem *reg;
	int status = 0;
	uint32_t stat;
	uint16_t mb[4];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
			"%s(): NULL response queue pointer\n", __func__);
		return;
	}
	ha = rsp->hw;

	reg = &ha->iobase->isp82;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);

	if (RD_REG_DWORD(&reg->host_int)) {
		stat = RD_REG_DWORD(&reg->host_status);
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			/* Mailbox command completion.
			 * NOTE(review): 'status' is accumulated but never
			 * read in poll mode. */
			qla82xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case 0x12:
			/* Asynchronous event notification. */
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
			mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
			mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			/* Response queue entries available. */
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
				"(%d).\n",
				vha->host_no, stat & 0xff));
			break;
		}
	}
	/* Ack; harmless write of 0 even if host_int was not set. */
	WRT_REG_DWORD(&reg->host_int, 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
2220 | ||
/*
 * qla82xx_enable_intrs() - Enable adapter interrupt delivery.
 * @ha: HW context
 *
 * Asks the firmware (via mailbox) to enable interrupts, then unmasks
 * the legacy target-interrupt register (0xfbff is the same unmask
 * value the ISR writes after servicing) and records the state.
 */
void
qla82xx_enable_intrs(struct qla_hw_data *ha)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	qla82xx_mbx_intr_enable(vha);
	spin_lock_irq(&ha->hardware_lock);
	qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
	spin_unlock_irq(&ha->hardware_lock);
	ha->interrupts_on = 1;
}
2231 | ||
/*
 * qla82xx_disable_intrs() - Disable adapter interrupt delivery.
 * @ha: HW context
 *
 * Asks the firmware (via mailbox) to disable interrupts, then masks
 * the legacy target-interrupt register and records the state.
 */
void
qla82xx_disable_intrs(struct qla_hw_data *ha)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	qla82xx_mbx_intr_disable(vha);
	spin_lock_irq(&ha->hardware_lock);
	qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
	spin_unlock_irq(&ha->hardware_lock);
	ha->interrupts_on = 0;
}
2242 | ||
/*
 * qla82xx_init_flags() - One-time per-adapter ISP82xx initialization.
 * @ha: HW context
 *
 * Initializes the hw_lock and CRB/memory window bookkeeping, then
 * copies this PCI function's legacy INTx register set out of the
 * static legacy_intr table (indexed by portnum).
 */
void qla82xx_init_flags(struct qla_hw_data *ha)
{
	struct qla82xx_legacy_intr_set *nx_legacy_intr;

	/* ISP 8021 initializations */
	rwlock_init(&ha->hw_lock);
	ha->qdr_sn_window = -1;		/* no QDR window mapped yet */
	ha->ddr_mn_window = -1;		/* no DDR window mapped yet */
	ha->curr_window = 255;
	ha->portnum = PCI_FUNC(ha->pdev->devfn);
	nx_legacy_intr = &legacy_intr[ha->portnum];
	ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
	ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
	ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
	ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
}
2259 | ||
2260 | static inline void | |
2261 | qla82xx_set_drv_active(scsi_qla_host_t *vha) | |
2262 | { | |
2263 | uint32_t drv_active; | |
2264 | struct qla_hw_data *ha = vha->hw; | |
2265 | ||
2266 | drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); | |
2267 | ||
2268 | /* If reset value is all FF's, initialize DRV_ACTIVE */ | |
2269 | if (drv_active == 0xffffffff) { | |
2270 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, 0); | |
2271 | drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); | |
2272 | } | |
2273 | drv_active |= (1 << (ha->portnum * 4)); | |
2274 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); | |
2275 | } | |
2276 | ||
2277 | inline void | |
2278 | qla82xx_clear_drv_active(struct qla_hw_data *ha) | |
2279 | { | |
2280 | uint32_t drv_active; | |
2281 | ||
2282 | drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); | |
2283 | drv_active &= ~(1 << (ha->portnum * 4)); | |
2284 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); | |
2285 | } | |
2286 | ||
2287 | static inline int | |
2288 | qla82xx_need_reset(struct qla_hw_data *ha) | |
2289 | { | |
2290 | uint32_t drv_state; | |
2291 | int rval; | |
2292 | ||
2293 | drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); | |
2294 | rval = drv_state & (1 << (ha->portnum * 4)); | |
2295 | return rval; | |
2296 | } | |
2297 | ||
2298 | static inline void | |
2299 | qla82xx_set_rst_ready(struct qla_hw_data *ha) | |
2300 | { | |
2301 | uint32_t drv_state; | |
2302 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); | |
2303 | ||
2304 | drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); | |
2305 | ||
2306 | /* If reset value is all FF's, initialize DRV_STATE */ | |
2307 | if (drv_state == 0xffffffff) { | |
2308 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0); | |
2309 | drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); | |
2310 | } | |
2311 | drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); | |
2312 | qla_printk(KERN_INFO, ha, | |
2313 | "%s(%ld):drv_state = 0x%x\n", | |
2314 | __func__, vha->host_no, drv_state); | |
2315 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); | |
2316 | } | |
2317 | ||
2318 | static inline void | |
2319 | qla82xx_clear_rst_ready(struct qla_hw_data *ha) | |
2320 | { | |
2321 | uint32_t drv_state; | |
2322 | ||
2323 | drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); | |
2324 | drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); | |
2325 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); | |
2326 | } | |
2327 | ||
2328 | static inline void | |
2329 | qla82xx_set_qsnt_ready(struct qla_hw_data *ha) | |
2330 | { | |
2331 | uint32_t qsnt_state; | |
2332 | ||
2333 | qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); | |
2334 | qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4)); | |
2335 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); | |
2336 | } | |
2337 | ||
2338 | int qla82xx_load_fw(scsi_qla_host_t *vha) | |
2339 | { | |
2340 | int rst; | |
2341 | struct fw_blob *blob; | |
2342 | struct qla_hw_data *ha = vha->hw; | |
2343 | ||
a9083016 GM |
2344 | if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) { |
2345 | qla_printk(KERN_ERR, ha, | |
2346 | "%s: Error during CRB Initialization\n", __func__); | |
2347 | return QLA_FUNCTION_FAILED; | |
2348 | } | |
2349 | udelay(500); | |
2350 | ||
2351 | /* Bring QM and CAMRAM out of reset */ | |
2352 | rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET); | |
2353 | rst &= ~((1 << 28) | (1 << 24)); | |
2354 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst); | |
2355 | ||
2356 | /* | |
2357 | * FW Load priority: | |
2358 | * 1) Operational firmware residing in flash. | |
2359 | * 2) Firmware via request-firmware interface (.bin file). | |
2360 | */ | |
2361 | if (ql2xfwloadbin == 2) | |
2362 | goto try_blob_fw; | |
2363 | ||
2364 | qla_printk(KERN_INFO, ha, | |
2365 | "Attempting to load firmware from flash\n"); | |
2366 | ||
2367 | if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { | |
2368 | qla_printk(KERN_ERR, ha, | |
2369 | "Firmware loaded successfully from flash\n"); | |
2370 | return QLA_SUCCESS; | |
2371 | } | |
2372 | try_blob_fw: | |
2373 | qla_printk(KERN_INFO, ha, | |
2374 | "Attempting to load firmware from blob\n"); | |
2375 | ||
2376 | /* Load firmware blob. */ | |
2377 | blob = ha->hablob = qla2x00_request_firmware(vha); | |
2378 | if (!blob) { | |
2379 | qla_printk(KERN_ERR, ha, | |
2380 | "Firmware image not present.\n"); | |
2381 | goto fw_load_failed; | |
2382 | } | |
2383 | ||
9c2b2975 HZ |
2384 | /* Validating firmware blob */ |
2385 | if (qla82xx_validate_firmware_blob(vha, | |
2386 | QLA82XX_FLASH_ROMIMAGE)) { | |
2387 | /* Fallback to URI format */ | |
2388 | if (qla82xx_validate_firmware_blob(vha, | |
2389 | QLA82XX_UNIFIED_ROMIMAGE)) { | |
2390 | qla_printk(KERN_ERR, ha, | |
2391 | "No valid firmware image found!!!"); | |
2392 | return QLA_FUNCTION_FAILED; | |
2393 | } | |
2394 | } | |
2395 | ||
a9083016 GM |
2396 | if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) { |
2397 | qla_printk(KERN_ERR, ha, | |
2398 | "%s: Firmware loaded successfully " | |
2399 | " from binary blob\n", __func__); | |
2400 | return QLA_SUCCESS; | |
2401 | } else { | |
2402 | qla_printk(KERN_ERR, ha, | |
2403 | "Firmware load failed from binary blob\n"); | |
2404 | blob->fw = NULL; | |
2405 | blob = NULL; | |
2406 | goto fw_load_failed; | |
2407 | } | |
2408 | return QLA_SUCCESS; | |
2409 | ||
2410 | fw_load_failed: | |
2411 | return QLA_FUNCTION_FAILED; | |
2412 | } | |
2413 | ||
2414 | static int | |
2415 | qla82xx_start_firmware(scsi_qla_host_t *vha) | |
2416 | { | |
2417 | int pcie_cap; | |
2418 | uint16_t lnk; | |
2419 | struct qla_hw_data *ha = vha->hw; | |
2420 | ||
2421 | /* scrub dma mask expansion register */ | |
2422 | qla82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555); | |
2423 | ||
3711333d GM |
2424 | /* Put both the PEG CMD and RCV PEG to default state |
2425 | * of 0 before resetting the hardware | |
2426 | */ | |
2427 | qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0); | |
2428 | qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0); | |
2429 | ||
a9083016 GM |
2430 | /* Overwrite stale initialization register values */ |
2431 | qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0); | |
2432 | qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); | |
2433 | ||
2434 | if (qla82xx_load_fw(vha) != QLA_SUCCESS) { | |
2435 | qla_printk(KERN_INFO, ha, | |
2436 | "%s: Error trying to start fw!\n", __func__); | |
2437 | return QLA_FUNCTION_FAILED; | |
2438 | } | |
2439 | ||
2440 | /* Handshake with the card before we register the devices. */ | |
2441 | if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) { | |
2442 | qla_printk(KERN_INFO, ha, | |
2443 | "%s: Error during card handshake!\n", __func__); | |
2444 | return QLA_FUNCTION_FAILED; | |
2445 | } | |
2446 | ||
2447 | /* Negotiated Link width */ | |
2448 | pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); | |
2449 | pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk); | |
2450 | ha->link_width = (lnk >> 4) & 0x3f; | |
2451 | ||
2452 | /* Synchronize with Receive peg */ | |
2453 | return qla82xx_check_rcvpeg_state(ha); | |
2454 | } | |
2455 | ||
/*
 * qla2xx_build_scsi_type_6_iocbs() - Build the DSD chains for a
 * Command Type 6 IOCB.
 * @sp: SCSI request block
 * @cmd_pkt: Command Type 6 packet being constructed
 * @tot_dsds: total number of data segment descriptors needed
 *
 * Takes pre-allocated DSD list buffers off ha->gbl_dsd_list (the
 * caller must have ensured enough are available -- see the accounting
 * in qla82xx_start_scsi), chains them together (the first chain
 * pointer lives in the IOCB itself, each later one terminates the
 * previous list) and fills them with the command's scatter/gather
 * entries.
 *
 * Always returns 0.
 */
static inline int
qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
		ha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
		ha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	cur_seg = scsi_sglist(cmd);
	ctx = sp->ctx;

	while (tot_dsds) {
		/* Each DSD list holds at most QLA_DSDS_PER_IOCB entries;
		 * +1 below leaves room for the chain/terminator entry. */
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		/* Move one pre-allocated list from the global pool to
		 * this command's context, updating the accounting. */
		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			/* First chain pointer lives in the IOCB itself. */
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = dsd_list_len;
		} else {
			/* Later chain pointers terminate the previous
			 * DSD list. */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			/* NOTE(review): cur_seg++ assumes a flat
			 * (non-chained) S/G list; sg_next() would be
			 * safer -- confirm the mapping never chains. */
			cur_seg++;
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}
2548 | ||
2549 | /* | |
2550 | * qla82xx_calc_dsd_lists() - Determine number of DSD list required | |
2551 | * for Command Type 6. | |
2552 | * | |
2553 | * @dsds: number of data segment decriptors needed | |
2554 | * | |
2555 | * Returns the number of dsd list needed to store @dsds. | |
2556 | */ | |
2557 | inline uint16_t | |
2558 | qla82xx_calc_dsd_lists(uint16_t dsds) | |
2559 | { | |
2560 | uint16_t dsd_lists = 0; | |
2561 | ||
2562 | dsd_lists = (dsds/QLA_DSDS_PER_IOCB); | |
2563 | if (dsds % QLA_DSDS_PER_IOCB) | |
2564 | dsd_lists++; | |
2565 | return dsd_lists; | |
2566 | } | |
2567 | ||
2568 | /* | |
2569 | * qla82xx_start_scsi() - Send a SCSI command to the ISP | |
2570 | * @sp: command to send to the ISP | |
2571 | * | |
2572 | * Returns non-zero if a failure occured, else zero. | |
2573 | */ | |
2574 | int | |
2575 | qla82xx_start_scsi(srb_t *sp) | |
2576 | { | |
2577 | int ret, nseg; | |
2578 | unsigned long flags; | |
2579 | struct scsi_cmnd *cmd; | |
2580 | uint32_t *clr_ptr; | |
2581 | uint32_t index; | |
2582 | uint32_t handle; | |
2583 | uint16_t cnt; | |
2584 | uint16_t req_cnt; | |
2585 | uint16_t tot_dsds; | |
2586 | struct device_reg_82xx __iomem *reg; | |
2587 | uint32_t dbval; | |
2588 | uint32_t *fcp_dl; | |
2589 | uint8_t additional_cdb_len; | |
2590 | struct ct6_dsd *ctx; | |
2591 | struct scsi_qla_host *vha = sp->fcport->vha; | |
2592 | struct qla_hw_data *ha = vha->hw; | |
2593 | struct req_que *req = NULL; | |
2594 | struct rsp_que *rsp = NULL; | |
2595 | ||
2596 | /* Setup device pointers. */ | |
2597 | ret = 0; | |
2598 | reg = &ha->iobase->isp82; | |
2599 | cmd = sp->cmd; | |
2600 | req = vha->req; | |
2601 | rsp = ha->rsp_q_map[0]; | |
2602 | ||
2603 | /* So we know we haven't pci_map'ed anything yet */ | |
2604 | tot_dsds = 0; | |
2605 | ||
2606 | dbval = 0x04 | (ha->portnum << 5); | |
2607 | ||
2608 | /* Send marker if required */ | |
2609 | if (vha->marker_needed != 0) { | |
2610 | if (qla2x00_marker(vha, req, | |
2611 | rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) | |
2612 | return QLA_FUNCTION_FAILED; | |
2613 | vha->marker_needed = 0; | |
2614 | } | |
2615 | ||
2616 | /* Acquire ring specific lock */ | |
2617 | spin_lock_irqsave(&ha->hardware_lock, flags); | |
2618 | ||
2619 | /* Check for room in outstanding command list. */ | |
2620 | handle = req->current_outstanding_cmd; | |
2621 | for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { | |
2622 | handle++; | |
2623 | if (handle == MAX_OUTSTANDING_COMMANDS) | |
2624 | handle = 1; | |
2625 | if (!req->outstanding_cmds[handle]) | |
2626 | break; | |
2627 | } | |
2628 | if (index == MAX_OUTSTANDING_COMMANDS) | |
2629 | goto queuing_error; | |
2630 | ||
2631 | /* Map the sg table so we have an accurate count of sg entries needed */ | |
2632 | if (scsi_sg_count(cmd)) { | |
2633 | nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), | |
2634 | scsi_sg_count(cmd), cmd->sc_data_direction); | |
2635 | if (unlikely(!nseg)) | |
2636 | goto queuing_error; | |
2637 | } else | |
2638 | nseg = 0; | |
2639 | ||
2640 | tot_dsds = nseg; | |
2641 | ||
2642 | if (tot_dsds > ql2xshiftctondsd) { | |
2643 | struct cmd_type_6 *cmd_pkt; | |
2644 | uint16_t more_dsd_lists = 0; | |
2645 | struct dsd_dma *dsd_ptr; | |
2646 | uint16_t i; | |
2647 | ||
2648 | more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds); | |
2649 | if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) | |
2650 | goto queuing_error; | |
2651 | ||
2652 | if (more_dsd_lists <= ha->gbl_dsd_avail) | |
2653 | goto sufficient_dsds; | |
2654 | else | |
2655 | more_dsd_lists -= ha->gbl_dsd_avail; | |
2656 | ||
2657 | for (i = 0; i < more_dsd_lists; i++) { | |
2658 | dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); | |
2659 | if (!dsd_ptr) | |
2660 | goto queuing_error; | |
2661 | ||
2662 | dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, | |
2663 | GFP_ATOMIC, &dsd_ptr->dsd_list_dma); | |
2664 | if (!dsd_ptr->dsd_addr) { | |
2665 | kfree(dsd_ptr); | |
2666 | goto queuing_error; | |
2667 | } | |
2668 | list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list); | |
2669 | ha->gbl_dsd_avail++; | |
2670 | } | |
2671 | ||
2672 | sufficient_dsds: | |
2673 | req_cnt = 1; | |
2674 | ||
1bd58b89 GM |
2675 | if (req->cnt < (req_cnt + 2)) { |
2676 | cnt = (uint16_t)RD_REG_DWORD_RELAXED( | |
2677 | ®->req_q_out[0]); | |
2678 | if (req->ring_index < cnt) | |
2679 | req->cnt = cnt - req->ring_index; | |
2680 | else | |
2681 | req->cnt = req->length - | |
2682 | (req->ring_index - cnt); | |
2683 | } | |
2684 | ||
2685 | if (req->cnt < (req_cnt + 2)) | |
2686 | goto queuing_error; | |
2687 | ||
a9083016 GM |
2688 | ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); |
2689 | if (!sp->ctx) { | |
2690 | DEBUG(printk(KERN_INFO | |
2691 | "%s(%ld): failed to allocate" | |
2692 | " ctx.\n", __func__, vha->host_no)); | |
2693 | goto queuing_error; | |
2694 | } | |
2695 | memset(ctx, 0, sizeof(struct ct6_dsd)); | |
2696 | ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool, | |
2697 | GFP_ATOMIC, &ctx->fcp_cmnd_dma); | |
2698 | if (!ctx->fcp_cmnd) { | |
2699 | DEBUG2_3(printk("%s(%ld): failed to allocate" | |
2700 | " fcp_cmnd.\n", __func__, vha->host_no)); | |
2701 | goto queuing_error_fcp_cmnd; | |
2702 | } | |
2703 | ||
2704 | /* Initialize the DSD list and dma handle */ | |
2705 | INIT_LIST_HEAD(&ctx->dsd_list); | |
2706 | ctx->dsd_use_cnt = 0; | |
2707 | ||
2708 | if (cmd->cmd_len > 16) { | |
2709 | additional_cdb_len = cmd->cmd_len - 16; | |
2710 | if ((cmd->cmd_len % 4) != 0) { | |
2711 | /* SCSI command bigger than 16 bytes must be | |
2712 | * multiple of 4 | |
2713 | */ | |
2714 | goto queuing_error_fcp_cmnd; | |
2715 | } | |
2716 | ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; | |
2717 | } else { | |
2718 | additional_cdb_len = 0; | |
2719 | ctx->fcp_cmnd_len = 12 + 16 + 4; | |
2720 | } | |
2721 | ||
2722 | cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; | |
2723 | cmd_pkt->handle = MAKE_HANDLE(req->id, handle); | |
2724 | ||
2725 | /* Zero out remaining portion of packet. */ | |
2726 | /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ | |
2727 | clr_ptr = (uint32_t *)cmd_pkt + 2; | |
2728 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); | |
2729 | cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); | |
2730 | ||
2731 | /* Set NPORT-ID and LUN number*/ | |
2732 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); | |
2733 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; | |
2734 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; | |
2735 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; | |
2736 | cmd_pkt->vp_index = sp->fcport->vp_idx; | |
2737 | ||
2738 | /* Build IOCB segments */ | |
2739 | if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) | |
2740 | goto queuing_error_fcp_cmnd; | |
2741 | ||
2742 | int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); | |
2743 | ||
2744 | /* build FCP_CMND IU */ | |
2745 | memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); | |
2746 | int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); | |
2747 | ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; | |
2748 | ||
2749 | if (cmd->sc_data_direction == DMA_TO_DEVICE) | |
2750 | ctx->fcp_cmnd->additional_cdb_len |= 1; | |
2751 | else if (cmd->sc_data_direction == DMA_FROM_DEVICE) | |
2752 | ctx->fcp_cmnd->additional_cdb_len |= 2; | |
2753 | ||
2754 | memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); | |
2755 | ||
2756 | fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + | |
2757 | additional_cdb_len); | |
2758 | *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); | |
2759 | ||
2760 | cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); | |
2761 | cmd_pkt->fcp_cmnd_dseg_address[0] = | |
2762 | cpu_to_le32(LSD(ctx->fcp_cmnd_dma)); | |
2763 | cmd_pkt->fcp_cmnd_dseg_address[1] = | |
2764 | cpu_to_le32(MSD(ctx->fcp_cmnd_dma)); | |
2765 | ||
2766 | sp->flags |= SRB_FCP_CMND_DMA_VALID; | |
2767 | cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); | |
2768 | /* Set total data segment count. */ | |
2769 | cmd_pkt->entry_count = (uint8_t)req_cnt; | |
2770 | /* Specify response queue number where | |
2771 | * completion should happen | |
2772 | */ | |
2773 | cmd_pkt->entry_status = (uint8_t) rsp->id; | |
2774 | } else { | |
2775 | struct cmd_type_7 *cmd_pkt; | |
2776 | req_cnt = qla24xx_calc_iocbs(tot_dsds); | |
2777 | if (req->cnt < (req_cnt + 2)) { | |
2778 | cnt = (uint16_t)RD_REG_DWORD_RELAXED( | |
2779 | ®->req_q_out[0]); | |
2780 | if (req->ring_index < cnt) | |
2781 | req->cnt = cnt - req->ring_index; | |
2782 | else | |
2783 | req->cnt = req->length - | |
2784 | (req->ring_index - cnt); | |
2785 | } | |
2786 | if (req->cnt < (req_cnt + 2)) | |
2787 | goto queuing_error; | |
2788 | ||
2789 | cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; | |
2790 | cmd_pkt->handle = MAKE_HANDLE(req->id, handle); | |
2791 | ||
2792 | /* Zero out remaining portion of packet. */ | |
2793 | /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ | |
2794 | clr_ptr = (uint32_t *)cmd_pkt + 2; | |
2795 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); | |
2796 | cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); | |
2797 | ||
2798 | /* Set NPORT-ID and LUN number*/ | |
2799 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); | |
2800 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; | |
2801 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; | |
2802 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; | |
2803 | cmd_pkt->vp_index = sp->fcport->vp_idx; | |
2804 | ||
2805 | int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); | |
2806 | host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, | |
2807 | sizeof(cmd_pkt->lun)); | |
2808 | ||
2809 | /* Load SCSI command packet. */ | |
2810 | memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); | |
2811 | host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); | |
2812 | ||
2813 | cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); | |
2814 | ||
2815 | /* Build IOCB segments */ | |
2816 | qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds); | |
2817 | ||
2818 | /* Set total data segment count. */ | |
2819 | cmd_pkt->entry_count = (uint8_t)req_cnt; | |
2820 | /* Specify response queue number where | |
2821 | * completion should happen. | |
2822 | */ | |
2823 | cmd_pkt->entry_status = (uint8_t) rsp->id; | |
2824 | ||
2825 | } | |
2826 | /* Build command packet. */ | |
2827 | req->current_outstanding_cmd = handle; | |
2828 | req->outstanding_cmds[handle] = sp; | |
2829 | sp->handle = handle; | |
2830 | sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; | |
2831 | req->cnt -= req_cnt; | |
2832 | wmb(); | |
2833 | ||
2834 | /* Adjust ring index. */ | |
2835 | req->ring_index++; | |
2836 | if (req->ring_index == req->length) { | |
2837 | req->ring_index = 0; | |
2838 | req->ring_ptr = req->ring; | |
2839 | } else | |
2840 | req->ring_ptr++; | |
2841 | ||
2842 | sp->flags |= SRB_DMA_VALID; | |
2843 | ||
2844 | /* Set chip new ring index. */ | |
2845 | /* write, read and verify logic */ | |
2846 | dbval = dbval | (req->id << 8) | (req->ring_index << 16); | |
2847 | if (ql2xdbwr) | |
2848 | qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); | |
2849 | else { | |
2850 | WRT_REG_DWORD( | |
2851 | (unsigned long __iomem *)ha->nxdb_wr_ptr, | |
2852 | dbval); | |
2853 | wmb(); | |
2854 | while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { | |
2855 | WRT_REG_DWORD( | |
2856 | (unsigned long __iomem *)ha->nxdb_wr_ptr, | |
2857 | dbval); | |
2858 | wmb(); | |
2859 | } | |
2860 | } | |
2861 | ||
2862 | /* Manage unprocessed RIO/ZIO commands in response queue. */ | |
2863 | if (vha->flags.process_response_queue && | |
2864 | rsp->ring_ptr->signature != RESPONSE_PROCESSED) | |
2865 | qla24xx_process_response_queue(vha, rsp); | |
2866 | ||
2867 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | |
2868 | return QLA_SUCCESS; | |
2869 | ||
2870 | queuing_error_fcp_cmnd: | |
2871 | dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); | |
2872 | queuing_error: | |
2873 | if (tot_dsds) | |
2874 | scsi_dma_unmap(cmd); | |
2875 | ||
2876 | if (sp->ctx) { | |
2877 | mempool_free(sp->ctx, ha->ctx_mempool); | |
2878 | sp->ctx = NULL; | |
2879 | } | |
2880 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | |
2881 | ||
2882 | return QLA_FUNCTION_FAILED; | |
2883 | } | |
2884 | ||
2885 | uint32_t * | |
2886 | qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, | |
2887 | uint32_t length) | |
2888 | { | |
2889 | uint32_t i; | |
2890 | uint32_t val; | |
2891 | struct qla_hw_data *ha = vha->hw; | |
2892 | ||
2893 | /* Dword reads to flash. */ | |
2894 | for (i = 0; i < length/4; i++, faddr += 4) { | |
2895 | if (qla82xx_rom_fast_read(ha, faddr, &val)) { | |
2896 | qla_printk(KERN_WARNING, ha, | |
2897 | "Do ROM fast read failed\n"); | |
2898 | goto done_read; | |
2899 | } | |
2900 | dwptr[i] = __constant_cpu_to_le32(val); | |
2901 | } | |
2902 | done_read: | |
2903 | return dwptr; | |
2904 | } | |
2905 | ||
2906 | int | |
2907 | qla82xx_unprotect_flash(struct qla_hw_data *ha) | |
2908 | { | |
2909 | int ret; | |
2910 | uint32_t val; | |
2911 | ||
2912 | ret = ql82xx_rom_lock_d(ha); | |
2913 | if (ret < 0) { | |
2914 | qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); | |
2915 | return ret; | |
2916 | } | |
2917 | ||
2918 | ret = qla82xx_read_status_reg(ha, &val); | |
2919 | if (ret < 0) | |
2920 | goto done_unprotect; | |
2921 | ||
0547fb37 | 2922 | val &= ~(BLOCK_PROTECT_BITS << 2); |
a9083016 GM |
2923 | ret = qla82xx_write_status_reg(ha, val); |
2924 | if (ret < 0) { | |
0547fb37 | 2925 | val |= (BLOCK_PROTECT_BITS << 2); |
a9083016 GM |
2926 | qla82xx_write_status_reg(ha, val); |
2927 | } | |
2928 | ||
2929 | if (qla82xx_write_disable_flash(ha) != 0) | |
2930 | qla_printk(KERN_WARNING, ha, "Write disable failed\n"); | |
2931 | ||
2932 | done_unprotect: | |
2933 | qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); | |
2934 | return ret; | |
2935 | } | |
2936 | ||
2937 | int | |
2938 | qla82xx_protect_flash(struct qla_hw_data *ha) | |
2939 | { | |
2940 | int ret; | |
2941 | uint32_t val; | |
2942 | ||
2943 | ret = ql82xx_rom_lock_d(ha); | |
2944 | if (ret < 0) { | |
2945 | qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); | |
2946 | return ret; | |
2947 | } | |
2948 | ||
2949 | ret = qla82xx_read_status_reg(ha, &val); | |
2950 | if (ret < 0) | |
2951 | goto done_protect; | |
2952 | ||
0547fb37 | 2953 | val |= (BLOCK_PROTECT_BITS << 2); |
a9083016 GM |
2954 | /* LOCK all sectors */ |
2955 | ret = qla82xx_write_status_reg(ha, val); | |
2956 | if (ret < 0) | |
2957 | qla_printk(KERN_WARNING, ha, "Write status register failed\n"); | |
2958 | ||
2959 | if (qla82xx_write_disable_flash(ha) != 0) | |
2960 | qla_printk(KERN_WARNING, ha, "Write disable failed\n"); | |
2961 | done_protect: | |
2962 | qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); | |
2963 | return ret; | |
2964 | } | |
2965 | ||
/*
 * qla82xx_erase_sector - Erase the flash sector containing byte address
 * @addr (issues the M25P sector-erase instruction).
 *
 * Takes the ROM hardware semaphore; it is released on every exit path.
 * The write-enable/address/opcode register sequence below must be
 * issued in exactly this order for the serial-flash controller.
 *
 * Returns 0 on success, non-zero/negative on failure.
 */
int
qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
{
	int ret = 0;

	ret = ql82xx_rom_lock_d(ha);
	if (ret < 0) {
		qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
		return ret;
	}

	/* Enable writes, then latch the 3-byte sector address and issue
	 * the sector-erase opcode. */
	qla82xx_flash_set_write_enable(ha);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);

	/* Wait for the controller to accept the instruction... */
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "Error waiting for rom done\n");
		ret = -1;
		goto done;
	}
	/* ...then for the flash part itself to finish the erase. */
	ret = qla82xx_flash_wait_write_finish(ha);
done:
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
	return ret;
}
2993 | ||
2994 | /* | |
2995 | * Address and length are byte address | |
2996 | */ | |
2997 | uint8_t * | |
2998 | qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, | |
2999 | uint32_t offset, uint32_t length) | |
3000 | { | |
3001 | scsi_block_requests(vha->host); | |
3002 | qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length); | |
3003 | scsi_unblock_requests(vha->host); | |
3004 | return buf; | |
3005 | } | |
3006 | ||
3007 | static int | |
3008 | qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr, | |
3009 | uint32_t faddr, uint32_t dwords) | |
3010 | { | |
3011 | int ret; | |
3012 | uint32_t liter; | |
3013 | uint32_t sec_mask, rest_addr; | |
3014 | dma_addr_t optrom_dma; | |
3015 | void *optrom = NULL; | |
3016 | int page_mode = 0; | |
3017 | struct qla_hw_data *ha = vha->hw; | |
3018 | ||
3019 | ret = -1; | |
3020 | ||
3021 | /* Prepare burst-capable write on supported ISPs. */ | |
3022 | if (page_mode && !(faddr & 0xfff) && | |
3023 | dwords > OPTROM_BURST_DWORDS) { | |
3024 | optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, | |
3025 | &optrom_dma, GFP_KERNEL); | |
3026 | if (!optrom) { | |
3027 | qla_printk(KERN_DEBUG, ha, | |
3028 | "Unable to allocate memory for optrom " | |
3029 | "burst write (%x KB).\n", | |
3030 | OPTROM_BURST_SIZE / 1024); | |
3031 | } | |
3032 | } | |
3033 | ||
3034 | rest_addr = ha->fdt_block_size - 1; | |
3035 | sec_mask = ~rest_addr; | |
3036 | ||
3037 | ret = qla82xx_unprotect_flash(ha); | |
3038 | if (ret) { | |
3039 | qla_printk(KERN_WARNING, ha, | |
3040 | "Unable to unprotect flash for update.\n"); | |
3041 | goto write_done; | |
3042 | } | |
3043 | ||
3044 | for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) { | |
3045 | /* Are we at the beginning of a sector? */ | |
3046 | if ((faddr & rest_addr) == 0) { | |
3047 | ||
3048 | ret = qla82xx_erase_sector(ha, faddr); | |
3049 | if (ret) { | |
3050 | DEBUG9(qla_printk(KERN_ERR, ha, | |
3051 | "Unable to erase sector: " | |
3052 | "address=%x.\n", faddr)); | |
3053 | break; | |
3054 | } | |
3055 | } | |
3056 | ||
3057 | /* Go with burst-write. */ | |
3058 | if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) { | |
3059 | /* Copy data to DMA'ble buffer. */ | |
3060 | memcpy(optrom, dwptr, OPTROM_BURST_SIZE); | |
3061 | ||
3062 | ret = qla2x00_load_ram(vha, optrom_dma, | |
3063 | (ha->flash_data_off | faddr), | |
3064 | OPTROM_BURST_DWORDS); | |
3065 | if (ret != QLA_SUCCESS) { | |
3066 | qla_printk(KERN_WARNING, ha, | |
3067 | "Unable to burst-write optrom segment " | |
3068 | "(%x/%x/%llx).\n", ret, | |
3069 | (ha->flash_data_off | faddr), | |
3070 | (unsigned long long)optrom_dma); | |
3071 | qla_printk(KERN_WARNING, ha, | |
3072 | "Reverting to slow-write.\n"); | |
3073 | ||
3074 | dma_free_coherent(&ha->pdev->dev, | |
3075 | OPTROM_BURST_SIZE, optrom, optrom_dma); | |
3076 | optrom = NULL; | |
3077 | } else { | |
3078 | liter += OPTROM_BURST_DWORDS - 1; | |
3079 | faddr += OPTROM_BURST_DWORDS - 1; | |
3080 | dwptr += OPTROM_BURST_DWORDS - 1; | |
3081 | continue; | |
3082 | } | |
3083 | } | |
3084 | ||
3085 | ret = qla82xx_write_flash_dword(ha, faddr, | |
3086 | cpu_to_le32(*dwptr)); | |
3087 | if (ret) { | |
3088 | DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program" | |
3089 | "flash address=%x data=%x.\n", __func__, | |
3090 | ha->host_no, faddr, *dwptr)); | |
3091 | break; | |
3092 | } | |
3093 | } | |
3094 | ||
3095 | ret = qla82xx_protect_flash(ha); | |
3096 | if (ret) | |
3097 | qla_printk(KERN_WARNING, ha, | |
3098 | "Unable to protect flash after update.\n"); | |
3099 | write_done: | |
3100 | if (optrom) | |
3101 | dma_free_coherent(&ha->pdev->dev, | |
3102 | OPTROM_BURST_SIZE, optrom, optrom_dma); | |
3103 | return ret; | |
3104 | } | |
3105 | ||
3106 | int | |
3107 | qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, | |
3108 | uint32_t offset, uint32_t length) | |
3109 | { | |
3110 | int rval; | |
3111 | ||
3112 | /* Suspend HBA. */ | |
3113 | scsi_block_requests(vha->host); | |
3114 | rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset, | |
3115 | length >> 2); | |
3116 | scsi_unblock_requests(vha->host); | |
3117 | ||
3118 | /* Convert return ISP82xx to generic */ | |
3119 | if (rval) | |
3120 | rval = QLA_FUNCTION_FAILED; | |
3121 | else | |
3122 | rval = QLA_SUCCESS; | |
3123 | return rval; | |
3124 | } | |
3125 | ||
3126 | void | |
3127 | qla82xx_start_iocbs(srb_t *sp) | |
3128 | { | |
3129 | struct qla_hw_data *ha = sp->fcport->vha->hw; | |
3130 | struct req_que *req = ha->req_q_map[0]; | |
3131 | struct device_reg_82xx __iomem *reg; | |
3132 | uint32_t dbval; | |
3133 | ||
3134 | /* Adjust ring index. */ | |
3135 | req->ring_index++; | |
3136 | if (req->ring_index == req->length) { | |
3137 | req->ring_index = 0; | |
3138 | req->ring_ptr = req->ring; | |
3139 | } else | |
3140 | req->ring_ptr++; | |
3141 | ||
3142 | reg = &ha->iobase->isp82; | |
3143 | dbval = 0x04 | (ha->portnum << 5); | |
3144 | ||
3145 | dbval = dbval | (req->id << 8) | (req->ring_index << 16); | |
6907869d GM |
3146 | if (ql2xdbwr) |
3147 | qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); | |
3148 | else { | |
3149 | WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval); | |
a9083016 | 3150 | wmb(); |
6907869d GM |
3151 | while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { |
3152 | WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, | |
3153 | dbval); | |
3154 | wmb(); | |
3155 | } | |
a9083016 GM |
3156 | } |
3157 | } | |
3158 | ||
/*
 * qla82xx_device_bootstrap
 *    Initialize device, set DEV_READY, start fw
 *
 * Note:
 *    IDC lock must be held upon entry
 *
 * Return:
 *    QLA_SUCCESS : device is (or came) ready
 *    QLA_FUNCTION_FAILED / firmware-start status : otherwise
 */
static int
qla82xx_device_bootstrap(scsi_qla_host_t *vha)
{
	int rval, i, timeout;
	uint32_t old_count, count;
	struct qla_hw_data *ha = vha->hw;

	/* If a reset has been flagged, go straight to reinitialization. */
	if (qla82xx_need_reset(ha))
		goto dev_initialize;

	/* Otherwise sample the firmware heartbeat for up to ~2 seconds;
	 * any change means firmware is already alive. */
	old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);

	for (i = 0; i < 10; i++) {
		timeout = msleep_interruptible(200);
		if (timeout) {
			/* Interrupted by a signal: mark the device failed
			 * and bail out. */
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA82XX_DEV_FAILED);
			return QLA_FUNCTION_FAILED;
		}

		count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
		if (count != old_count)
			goto dev_ready;
	}

dev_initialize:
	/* set to DEV_INITIALIZING */
	qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
	qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);

	/* Driver that sets device state to initializing sets IDC version */
	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);

	/* Firmware start can sleep; drop the IDC lock around it. */
	qla82xx_idc_unlock(ha);
	rval = qla82xx_start_firmware(vha);
	qla82xx_idc_lock(ha);

	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
		qla82xx_clear_drv_active(ha);
		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
		return rval;
	}

dev_ready:
	qla_printk(KERN_INFO, ha, "HW State: READY\n");
	qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);

	return QLA_SUCCESS;
}
3220 | ||
3221 | static void | |
3222 | qla82xx_dev_failed_handler(scsi_qla_host_t *vha) | |
3223 | { | |
3224 | struct qla_hw_data *ha = vha->hw; | |
3225 | ||
3226 | /* Disable the board */ | |
3227 | qla_printk(KERN_INFO, ha, "Disabling the board\n"); | |
3228 | ||
b963752f GM |
3229 | qla82xx_idc_lock(ha); |
3230 | qla82xx_clear_drv_active(ha); | |
3231 | qla82xx_idc_unlock(ha); | |
3232 | ||
a9083016 GM |
3233 | /* Set DEV_FAILED flag to disable timer */ |
3234 | vha->device_flags |= DFLG_DEV_FAILED; | |
3235 | qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); | |
3236 | qla2x00_mark_all_devices_lost(vha, 0); | |
3237 | vha->flags.online = 0; | |
3238 | vha->flags.init_done = 0; | |
3239 | } | |
3240 | ||
/*
 * qla82xx_need_reset_handler
 *    Code to start reset sequence
 *
 * Note:
 *    IDC lock must be held upon entry
 *    (returns nothing; progress/errors are logged and reflected in the
 *    CRB device state)
 */
static void
qla82xx_need_reset_handler(scsi_qla_host_t *vha)
{
	uint32_t dev_state, drv_state, drv_active;
	unsigned long reset_timeout;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Quiesce the adapter first; cleanup can sleep, so the IDC lock
	 * is dropped around it. */
	if (vha->flags.online) {
		qla82xx_idc_unlock(ha);
		qla2x00_abort_isp_cleanup(vha);
		ha->isp_ops->get_flash_version(vha, req->ring);
		ha->isp_ops->nvram_config(vha);
		qla82xx_idc_lock(ha);
	}

	/* Advertise that this function is ready for the reset. */
	qla82xx_set_rst_ready(ha);

	/* wait for 10 seconds for reset ack from all functions */
	reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);

	drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);

	/* Poll once per second (lock dropped while sleeping) until every
	 * active function has acked, or the timeout expires. */
	while (drv_state != drv_active) {
		if (time_after_eq(jiffies, reset_timeout)) {
			qla_printk(KERN_INFO, ha,
			    "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME);
			break;
		}
		qla82xx_idc_unlock(ha);
		msleep(1000);
		qla82xx_idc_lock(ha);
		drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
	}

	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
	qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
	    dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");

	/* Force to DEV_COLD unless someone else is starting a reset */
	if (dev_state != QLA82XX_DEV_INITIALIZING) {
		qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
	}
}
3299 | ||
/*
 * qla82xx_check_fw_alive - Called periodically (from the watchdog, once
 * per second) to detect a hung firmware by watching the peg-alive
 * counter.  If the counter is unchanged for two consecutive samples,
 * schedule either an unrecoverable-failure or an abort via the DPC
 * thread, and prematurely complete any mailbox command in flight so its
 * waiter is not stuck.
 */
static void
qla82xx_check_fw_alive(scsi_qla_host_t *vha)
{
	uint32_t fw_heartbeat_counter, halt_status;
	struct qla_hw_data *ha = vha->hw;

	fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
	if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
		vha->seconds_since_last_heartbeat++;
		/* FW not alive after 2 seconds */
		if (vha->seconds_since_last_heartbeat == 2) {
			vha->seconds_since_last_heartbeat = 0;
			halt_status = qla82xx_rd_32(ha,
			    QLA82XX_PEG_HALT_STATUS1);
			/* Unrecoverable halt -> give up; otherwise request
			 * an ISP abort to recover the firmware. */
			if (halt_status & HALT_STATUS_UNRECOVERABLE) {
				set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
			} else {
				qla_printk(KERN_INFO, ha,
				    "scsi(%ld): %s - detect abort needed\n",
				    vha->host_no, __func__);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
			qla2xxx_wake_dpc(vha);
			ha->flags.fw_hung = 1;
			/* Unblock a mailbox waiter: fake the interrupt and
			 * complete it, since the firmware never will. */
			if (ha->flags.mbox_busy) {
				ha->flags.mbox_int = 1;
				DEBUG2(qla_printk(KERN_ERR, ha,
				    "Due to fw hung, doing premature "
				    "completion of mbx command\n"));
				if (test_bit(MBX_INTR_WAIT,
				    &ha->mbx_cmd_flags))
					complete(&ha->mbx_intr_comp);
			}
		}
	} else
		vha->seconds_since_last_heartbeat = 0;
	/* Remember the sample for the next pass. */
	vha->fw_heartbeat_counter = fw_heartbeat_counter;
}
3338 | ||
/*
 * qla82xx_device_state_handler
 *    Main state handler
 *
 * Drives the inter-driver-coordination (IDC) state machine until the
 * device reaches READY, fails, or the init timeout expires.  Sleeps in
 * one-second steps (IDC lock dropped while sleeping) for transient
 * states.
 *
 * Note:
 *    IDC lock must be held upon entry
 *
 * Return:
 *    Success : 0
 *    Failed : 1
 */
int
qla82xx_device_state_handler(scsi_qla_host_t *vha)
{
	uint32_t dev_state;
	int rval = QLA_SUCCESS;
	unsigned long dev_init_timeout;
	struct qla_hw_data *ha = vha->hw;

	qla82xx_idc_lock(ha);
	/* First pass on this function: register in DRV_ACTIVE. */
	if (!vha->flags.init_done)
		qla82xx_set_drv_active(vha);

	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
	qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
	    dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");

	/* wait for 30 seconds for device to go ready */
	dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);

	while (1) {

		if (time_after_eq(jiffies, dev_init_timeout)) {
			DEBUG(qla_printk(KERN_INFO, ha,
			    "%s: device init failed!\n",
			    QLA2XXX_DRIVER_NAME));
			rval = QLA_FUNCTION_FAILED;
			break;
		}
		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
		qla_printk(KERN_INFO, ha,
		    "2:Device state is 0x%x = %s\n", dev_state,
		    dev_state < MAX_STATES ?
		    qdev_state[dev_state] : "Unknown");

		switch (dev_state) {
		case QLA82XX_DEV_READY:
			goto exit;
		case QLA82XX_DEV_COLD:
			/* No one has brought the device up yet: we do it. */
			rval = qla82xx_device_bootstrap(vha);
			goto exit;
		case QLA82XX_DEV_INITIALIZING:
			/* Another function is initializing; wait it out. */
			qla82xx_idc_unlock(ha);
			msleep(1000);
			qla82xx_idc_lock(ha);
			break;
		case QLA82XX_DEV_NEED_RESET:
			if (!ql2xdontresethba)
				qla82xx_need_reset_handler(vha);
			break;
		case QLA82XX_DEV_NEED_QUIESCENT:
			/* Ack quiescence, then wait like QUIESCENT. */
			qla82xx_set_qsnt_ready(ha);
			/* fallthrough */
		case QLA82XX_DEV_QUIESCENT:
			qla82xx_idc_unlock(ha);
			msleep(1000);
			qla82xx_idc_lock(ha);
			break;
		case QLA82XX_DEV_FAILED:
			qla82xx_dev_failed_handler(vha);
			rval = QLA_FUNCTION_FAILED;
			goto exit;
		default:
			/* Unknown state: wait and re-read. */
			qla82xx_idc_unlock(ha);
			msleep(1000);
			qla82xx_idc_lock(ha);
		}
	}
exit:
	qla82xx_idc_unlock(ha);
	return rval;
}
3420 | ||
/*
 * qla82xx_watchdog - Per-second timer callback for ISP82xx: notice a
 * NEED_RESET request from the IDC state register and schedule an ISP
 * abort, or otherwise check firmware liveness.  Skipped entirely while
 * a reset/abort is already in progress.
 */
void qla82xx_watchdog(scsi_qla_host_t *vha)
{
	uint32_t dev_state;
	struct qla_hw_data *ha = vha->hw;

	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);

	/* don't poll if reset is going on */
	if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) {
		if (dev_state == QLA82XX_DEV_NEED_RESET) {
			qla_printk(KERN_WARNING, ha,
			    "%s(): Adapter reset needed!\n", __func__);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			ha->flags.fw_hung = 1;
			/* Firmware won't answer mailboxes any more: fake
			 * the interrupt and complete any waiter. */
			if (ha->flags.mbox_busy) {
				ha->flags.mbox_int = 1;
				DEBUG2(qla_printk(KERN_ERR, ha,
				    "Need reset, doing premature "
				    "completion of mbx command\n"));
				if (test_bit(MBX_INTR_WAIT,
				    &ha->mbx_cmd_flags))
					complete(&ha->mbx_intr_comp);
			}
		} else {
			/* No reset pending: verify the heartbeat. */
			qla82xx_check_fw_alive(vha);
		}
	}
}
3452 | ||
3453 | int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) | |
3454 | { | |
3455 | int rval; | |
3456 | rval = qla82xx_device_state_handler(vha); | |
3457 | return rval; | |
3458 | } | |
3459 | ||
/*
 * qla82xx_abort_isp
 *    Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *    ha = adapter block pointer.
 *
 * Returns:
 *    0 = success
 */
int
qla82xx_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint32_t dev_state;

	/* Already disabled by the failed-state handler: nothing to do. */
	if (vha->device_flags & DFLG_DEV_FAILED) {
		qla_printk(KERN_WARNING, ha,
		    "%s(%ld): Device in failed state, "
		    "Exiting.\n", __func__, vha->host_no);
		return QLA_SUCCESS;
	}

	/* If the device is READY, request a reset via the IDC state;
	 * otherwise another function has already begun a transition. */
	qla82xx_idc_lock(ha);
	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
	if (dev_state == QLA82XX_DEV_READY) {
		qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
		    QLA82XX_DEV_NEED_RESET);
	} else
		qla_printk(KERN_INFO, ha, "HW State: %s\n",
		    dev_state < MAX_STATES ?
		    qdev_state[dev_state] : "Unknown");
	qla82xx_idc_unlock(ha);

	/* Drive the state machine until READY (or failure). */
	rval = qla82xx_device_state_handler(vha);

	qla82xx_idc_lock(ha);
	qla82xx_clear_rst_ready(ha);
	qla82xx_idc_unlock(ha);

	if (rval == QLA_SUCCESS) {
		ha->flags.fw_hung = 0;
		qla82xx_restart_isp(vha);
	}

	if (rval) {
		vha->flags.online = 1;
		/* Retry-accounting: after MAX_RETRIES_OF_ISP_ABORT failed
		 * attempts, permanently disable the board. */
		if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
			if (ha->isp_abort_cnt == 0) {
				qla_printk(KERN_WARNING, ha,
				    "ISP error recovery failed - "
				    "board disabled\n");
				/*
				 * The next call disables the board
				 * completely.
				 */
				ha->isp_ops->reset_adapter(vha);
				vha->flags.online = 0;
				clear_bit(ISP_ABORT_RETRY,
				    &vha->dpc_flags);
				rval = QLA_SUCCESS;
			} else { /* schedule another ISP abort */
				ha->isp_abort_cnt--;
				DEBUG(qla_printk(KERN_INFO, ha,
				    "qla%ld: ISP abort - retry remaining %d\n",
				    vha->host_no, ha->isp_abort_cnt));
				rval = QLA_FUNCTION_FAILED;
			}
		} else {
			/* First failure: arm the retry counter. */
			ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
			DEBUG(qla_printk(KERN_INFO, ha,
			    "(%ld): ISP error recovery - retrying (%d) "
			    "more times\n", vha->host_no, ha->isp_abort_cnt));
			set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			rval = QLA_FUNCTION_FAILED;
		}
	}
	return rval;
}
3541 | ||
3542 | /* | |
3543 | * qla82xx_fcoe_ctx_reset | |
3544 | * Perform a quick reset and aborts all outstanding commands. | |
3545 | * This will only perform an FCoE context reset and avoids a full blown | |
3546 | * chip reset. | |
3547 | * | |
3548 | * Input: | |
3549 | * ha = adapter block pointer. | |
3550 | * is_reset_path = flag for identifying the reset path. | |
3551 | * | |
3552 | * Returns: | |
3553 | * 0 = success | |
3554 | */ | |
3555 | int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha) | |
3556 | { | |
3557 | int rval = QLA_FUNCTION_FAILED; | |
3558 | ||
3559 | if (vha->flags.online) { | |
3560 | /* Abort all outstanding commands, so as to be requeued later */ | |
3561 | qla2x00_abort_isp_cleanup(vha); | |
3562 | } | |
3563 | ||
3564 | /* Stop currently executing firmware. | |
3565 | * This will destroy existing FCoE context at the F/W end. | |
3566 | */ | |
3567 | qla2x00_try_to_stop_firmware(vha); | |
3568 | ||
3569 | /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */ | |
3570 | rval = qla82xx_restart_isp(vha); | |
3571 | ||
3572 | return rval; | |
3573 | } | |
3574 | ||
3575 | /* | |
3576 | * qla2x00_wait_for_fcoe_ctx_reset | |
3577 | * Wait till the FCoE context is reset. | |
3578 | * | |
3579 | * Note: | |
3580 | * Does context switching here. | |
3581 | * Release SPIN_LOCK (if any) before calling this routine. | |
3582 | * | |
3583 | * Return: | |
3584 | * Success (fcoe_ctx reset is done) : 0 | |
3585 | * Failed (fcoe_ctx reset not completed within max loop timout ) : 1 | |
3586 | */ | |
3587 | int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha) | |
3588 | { | |
3589 | int status = QLA_FUNCTION_FAILED; | |
3590 | unsigned long wait_reset; | |
3591 | ||
3592 | wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ); | |
3593 | while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || | |
3594 | test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) | |
3595 | && time_before(jiffies, wait_reset)) { | |
3596 | ||
3597 | set_current_state(TASK_UNINTERRUPTIBLE); | |
3598 | schedule_timeout(HZ); | |
3599 | ||
3600 | if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) && | |
3601 | !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { | |
3602 | status = QLA_SUCCESS; | |
3603 | break; | |
3604 | } | |
3605 | } | |
3606 | DEBUG2(printk(KERN_INFO | |
3607 | "%s status=%d\n", __func__, status)); | |
3608 | ||
3609 | return status; | |
3610 | } |