/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/etherdevice.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"

static enum vxge_hw_status
__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
                              u32 vp_id, enum vxge_hw_event type);
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
                              u32 skip_alarms);

/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed last in the
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        vp_reg = vpath->vp_reg;

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_status);

        val64 = readq(&vp_reg->vpath_general_int_status);

        /* Mask unwanted interrupts */

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        /* Unmask the individual interrupts */

        writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
                &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
                &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
                &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

        if (vpath->hldev->first_vp_id != vpath->vp_id)
                __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);
        else
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
                VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
                VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
                        &vp_reg->asic_ntwk_vp_err_mask);

        __vxge_hw_pio_mem_write32_upper(0,
                &vp_reg->vpath_general_int_mask);
exit:
        return status;
}

/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
                        struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }
        vp_reg = vpath->vp_reg;

        __vxge_hw_pio_mem_write32_upper(
                (u32)VXGE_HW_INTR_MASK_ALL,
                &vp_reg->vpath_general_int_mask);

        val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);

exit:
        return status;
}

/**
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                &channel->common_reg->set_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                &channel->common_reg->clear_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_device_set_intr_type - Updates the configuration
 *              with new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
        if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
            (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
            (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
            (intr_mode != VXGE_HW_INTR_MODE_DEF))
                intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

        hldev->config.intr_mode = intr_mode;
        return intr_mode;
}

/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed last in the
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
        u32 i;
        u64 val64;
        u32 val32;

        vxge_hw_device_mask_all(hldev);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                vxge_hw_vpath_intr_enable(
                        VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
        }

        if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
                val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

                if (val64 != 0) {
                        writeq(val64, &hldev->common_reg->tim_int_status0);

                        writeq(~val64, &hldev->common_reg->tim_int_mask0);
                }

                val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

                if (val32 != 0) {
                        __vxge_hw_pio_mem_write32_upper(val32,
                                        &hldev->common_reg->tim_int_status1);

                        __vxge_hw_pio_mem_write32_upper(~val32,
                                        &hldev->common_reg->tim_int_mask1);
                }
        }

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        vxge_hw_device_unmask_all(hldev);
}

/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
        u32 i;

        vxge_hw_device_mask_all(hldev);

        /* mask all the tim interrupts */
        writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
        __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
                &hldev->common_reg->tim_int_mask1);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                vxge_hw_vpath_intr_disable(
                        VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
        }
}

/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
        u64 val64;

        val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
                VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
        u64 val64 = 0;

        if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
                val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_flush_io - Flush io writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush io writes.
 *
 * Returns: void
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
        u32 val32;

        val32 = readl(&hldev->common_reg->titan_general_int_status);
}

/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 *      general_int_status register.
 *
 * The function performs two actions: it first checks whether (shared IRQ) the
 * interrupt was raised by the device, and then it masks the device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 * Otherwise, vxge_hw_device_begin_irq() returns a 64-bit general adapter
 * status.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
                                             u32 skip_alarms, u64 *reason)
{
        u32 i;
        u64 val64;
        u64 adapter_status;
        u64 vpath_mask;
        enum vxge_hw_status ret = VXGE_HW_OK;

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        if (unlikely(!val64)) {
                /* not Titan interrupt */
                *reason = 0;
                ret = VXGE_HW_ERR_WRONG_IRQ;
                goto exit;
        }

        if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {

                adapter_status = readq(&hldev->common_reg->adapter_status);

                if (adapter_status == VXGE_HW_ALL_FOXES) {

                        __vxge_hw_device_handle_error(hldev,
                                NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
                        *reason = 0;
                        ret = VXGE_HW_ERR_SLOT_FREEZE;
                        goto exit;
                }
        }

        hldev->stats.sw_dev_info_stats.total_intr_cnt++;

        *reason = val64;

        vpath_mask = hldev->vpaths_deployed >>
                                (64 - VXGE_HW_MAX_VIRTUAL_PATHS);

        if (val64 &
            VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
                hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

                return VXGE_HW_OK;
        }

        hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

        if (unlikely(val64 &
                        VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

                enum vxge_hw_status error_level = VXGE_HW_OK;

                hldev->stats.sw_dev_err_stats.vpath_alarms++;

                for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                        if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                                continue;

                        ret = __vxge_hw_vpath_alarm_process(
                                &hldev->virtual_paths[i], skip_alarms);

                        error_level = VXGE_HW_SET_LEVEL(ret, error_level);

                        if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
                                (ret == VXGE_HW_ERR_SLOT_FREEZE)))
                                break;
                }

                ret = error_level;
        }
exit:
        return ret;
}

/*
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link is up for a programmable amount of time.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
        /*
         * If the link state is already up, return.
         */
        if (hldev->link_state == VXGE_HW_LINK_UP)
                goto exit;

        hldev->link_state = VXGE_HW_LINK_UP;

        /* notify driver */
        if (hldev->uld_callbacks.link_up)
                hldev->uld_callbacks.link_up(hldev);
exit:
        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
        /*
         * If the link state is already down, return.
         */
        if (hldev->link_state == VXGE_HW_LINK_DOWN)
                goto exit;

        hldev->link_state = VXGE_HW_LINK_DOWN;

        /* notify driver */
        if (hldev->uld_callbacks.link_down)
                hldev->uld_callbacks.link_down(hldev);
exit:
        return VXGE_HW_OK;
}

/**
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_error(
                struct __vxge_hw_device *hldev,
                u32 vp_id,
                enum vxge_hw_event type)
{
        switch (type) {
        case VXGE_HW_EVENT_UNKNOWN:
                break;
        case VXGE_HW_EVENT_RESET_START:
        case VXGE_HW_EVENT_RESET_COMPLETE:
        case VXGE_HW_EVENT_LINK_DOWN:
        case VXGE_HW_EVENT_LINK_UP:
                goto out;
        case VXGE_HW_EVENT_ALARM_CLEARED:
                goto out;
        case VXGE_HW_EVENT_ECCERR:
        case VXGE_HW_EVENT_MRPCIM_ECCERR:
                goto out;
        case VXGE_HW_EVENT_FIFO_ERR:
        case VXGE_HW_EVENT_VPATH_ERR:
        case VXGE_HW_EVENT_CRITICAL_ERR:
        case VXGE_HW_EVENT_SERR:
                break;
        case VXGE_HW_EVENT_SRPCIM_SERR:
        case VXGE_HW_EVENT_MRPCIM_SERR:
                goto out;
        case VXGE_HW_EVENT_SLOT_FREEZE:
                break;
        default:
                vxge_assert(0);
                goto out;
        }

        /* notify driver */
        if (hldev->uld_callbacks.crit_err)
                hldev->uld_callbacks.crit_err(
                        (struct __vxge_hw_device *)hldev,
                        type, vp_id);
out:
        return VXGE_HW_OK;
}

/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and Rx interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
        if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
            (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
                writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
                        &hldev->common_reg->tim_int_status0);
        }

        if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
            (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
                __vxge_hw_pio_mem_write32_upper(
                        (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                         hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
                        &hldev->common_reg->tim_int_status1);
        }
}

/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 *
 */
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
        void **tmp_arr;

        if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
                *dtrh = channel->reserve_arr[--channel->reserve_ptr];

                return VXGE_HW_OK;
        }

        /* switch between empty and full arrays */

        /* the idea behind such a design is that by keeping the free and
         * reserve arrays separate we separate the irq and non-irq paths,
         * i.e. no additional locking is needed when a resource is freed */

        if (channel->length - channel->free_ptr > 0) {

                tmp_arr = channel->reserve_arr;
                channel->reserve_arr = channel->free_arr;
                channel->free_arr = tmp_arr;
                channel->reserve_ptr = channel->length;
                channel->reserve_top = channel->free_ptr;
                channel->free_ptr = channel->length;

                channel->stats->reserve_free_swaps_cnt++;

                goto _alloc_after_swap;
        }

        channel->stats->full_cnt++;

        *dtrh = NULL;
        return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}

/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channel: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to the work array.
 *
 */
static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
                                     void *dtrh)
{
        vxge_assert(channel->work_arr[channel->post_index] == NULL);

        channel->work_arr[channel->post_index++] = dtrh;

        /* wrap-around */
        if (channel->post_index == channel->length)
                channel->post_index = 0;
}

/*
 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
 * @channel: Channel
 * @dtrh: Buffer to return the next completed DTR pointer
 *
 * Returns the next completed dtr without removing it from the work array
 *
 */
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
        vxge_assert(channel->compl_index < channel->length);

        *dtrh = channel->work_arr[channel->compl_index];
        prefetch(*dtrh);
}

/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from the work array
 *
 */
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
        channel->work_arr[channel->compl_index] = NULL;

        /* wrap-around */
        if (++channel->compl_index == channel->length)
                channel->compl_index = 0;

        channel->stats->total_compl_cnt++;
}

/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtrh: DTR pointer
 *
 * Returns the dtr to the free array
 *
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
        channel->free_arr[--channel->free_ptr] = dtrh;
}

/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieve the number of DTRs available. This function cannot be called
 * from the data path. ring_initial_replenishi() is the only user.
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
        return (channel->reserve_ptr - channel->reserve_top) +
                (channel->length - channel->free_ptr);
}

/**
 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in by the driver
 * and posting on the corresponding channel
 * via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
                                             void **rxdh)
{
        enum vxge_hw_status status;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        status = vxge_hw_channel_dtr_alloc(channel, rxdh);

        if (status == VXGE_HW_OK) {
                struct vxge_hw_ring_rxd_1 *rxdp =
                        (struct vxge_hw_ring_rxd_1 *)*rxdh;

                rxdp->control_0 = rxdp->control_1 = 0;
        }

        return status;
}

/**
 * vxge_hw_ring_rxd_free - Free descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_ring_rxd_reserve);
 *
 * - posted (vxge_hw_ring_rxd_post);
 *
 * - completed (vxge_hw_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hw_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        vxge_hw_channel_dtr_free(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * This routine prepares an rxd and posts it
 */
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        vxge_hw_channel_dtr_post(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_post_post - Process rxd after post.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post
 */
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        wmb();
        rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

        vxge_hw_channel_dtr_post(channel, rxdh);

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        wmb();
        vxge_hw_ring_rxd_post_post(ring, rxdh);
}

/**
 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 * Receive Descriptor Format. Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the
 * remaining completions (the very first completion is passed by HW via
 * vxge_hw_ring_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Titan will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hw_ring_callback_f{},
 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
        struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
        struct __vxge_hw_channel *channel;
        struct vxge_hw_ring_rxd_1 *rxdp;
        enum vxge_hw_status status = VXGE_HW_OK;
        u64 control_0, own;

        channel = &ring->channel;

        vxge_hw_channel_dtr_try_complete(channel, rxdh);

        rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
        if (rxdp == NULL) {
                status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                goto exit;
        }

        control_0 = rxdp->control_0;
        own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
        *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);

        /* check whether it is not the end */
        if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {

                vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
                                0);

                ++ring->cmpl_cnt;
                vxge_hw_channel_dtr_complete(channel);

                vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

                ring->stats->common_stats.usage_cnt++;
                if (ring->stats->common_stats.usage_max <
                                ring->stats->common_stats.usage_cnt)
                        ring->stats->common_stats.usage_max =
                                ring->stats->common_stats.usage_cnt;

                status = VXGE_HW_OK;
                goto exit;
        }

        /* reset it; we don't want to return garbage to the driver */
        *rxdh = NULL;
        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
        return status;
}

/**
 * vxge_hw_ring_handle_tcode - Handle transfer code.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 * "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when it encounters a critical error.
 */
enum vxge_hw_status vxge_hw_ring_handle_tcode(
        struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &ring->channel;

        /* If the t_code is not supported and if the
         * t_code is other than 0x5 (unparseable packet
         * such as unknown IPv6 header), drop it !!!
         */

        if (t_code == VXGE_HW_RING_T_CODE_OK ||
            t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
                status = VXGE_HW_OK;
                goto exit;
        }

        if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
                status = VXGE_HW_ERR_INVALID_TCODE;
                goto exit;
        }

        ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
        return status;
}

/**
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 * @no_snoop: No snoop flags
 *
 * This function posts a non-offload doorbell to the doorbell FIFO
 *
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
                                          u64 txdl_ptr, u32 num_txds,
                                          u32 no_snoop)
{
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
                VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
                VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
                &fifo->nofl_db->control_0);

        mmiowb();

        writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

        mmiowb();
}

/**
 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
 * the fifo
 * @fifoh: Handle to the fifo object used for non offload send
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
        return vxge_hw_channel_dtr_count(&fifoh->channel);
}

/**
 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by the driver
 * and posting on the corresponding channel
 * via vxge_hw_fifo_txdl_post().
 *
 * Note: it is the responsibility of the driver to reserve multiple descriptors
 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
 * carries up to the configured number (fifo.max_frags) of contiguous buffers.
 *
 * Returns: VXGE_HW_OK - success;
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
        struct __vxge_hw_fifo *fifo,
        void **txdlh, void **txdl_priv)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status;
        int i;

        channel = &fifo->channel;

        status = vxge_hw_channel_dtr_alloc(channel, txdlh);

        if (status == VXGE_HW_OK) {
                struct vxge_hw_fifo_txd *txdp =
                        (struct vxge_hw_fifo_txd *)*txdlh;
                struct __vxge_hw_fifo_txdl_priv *priv;

                priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

                /* reset the TxDL's private */
                priv->align_dma_offset = 0;
                priv->align_vaddr_start = priv->align_vaddr;
                priv->align_used_frags = 0;
                priv->frags = 0;
                priv->alloc_frags = fifo->config->max_frags;
                priv->next_txdl_priv = NULL;

                *txdl_priv = (void *)(size_t)txdp->host_control;

                for (i = 0; i < fifo->config->max_frags; i++) {
                        txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
                        txdp->control_0 = txdp->control_1 = 0;
                }
        }

        return status;
}

/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 * (of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 *
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
                                  void *txdlh, u32 frag_idx,
                                  dma_addr_t dma_pointer, u32 size)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        struct vxge_hw_fifo_txd *txdp, *txdp_last;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
        txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

        if (frag_idx != 0)
                txdp->control_0 = txdp->control_1 = 0;
        else {
                txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
                        VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
                txdp->control_1 |= fifo->interrupt_type;
                txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
                        fifo->tx_intr_num);
                if (txdl_priv->frags) {
                        txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
                                (txdl_priv->frags - 1);
                        txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
                                VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
                }
        }

        vxge_assert(frag_idx < txdl_priv->alloc_frags);

        txdp->buffer_pointer = (u64)dma_pointer;
        txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
        fifo->stats->total_buffers++;
        txdl_priv->frags++;
}

/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        struct vxge_hw_fifo_txd *txdp_last;
        struct vxge_hw_fifo_txd *txdp_first;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
        txdp_first = (struct vxge_hw_fifo_txd *)txdlh;

        txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
        txdp_last->control_0 |=
                VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
        txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

        vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

        __vxge_hw_non_offload_db_post(fifo,
                (u64)txdl_priv->dma_addr,
                txdl_priv->frags - 1,
                fifo->no_snoop_bits);

        fifo->stats->total_posts++;
        fifo->stats->common_stats.usage_cnt++;
        if (fifo->stats->common_stats.usage_max <
                fifo->stats->common_stats.usage_cnt)
                fifo->stats->common_stats.usage_max =
                        fifo->stats->common_stats.usage_cnt;
}

/**
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 * Transmit Descriptor Format.
 * Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the
 * remaining completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
        struct __vxge_hw_fifo *fifo, void **txdlh,
        enum vxge_hw_fifo_tcode *t_code)
{
        struct __vxge_hw_channel *channel;
        struct vxge_hw_fifo_txd *txdp;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &fifo->channel;

        vxge_hw_channel_dtr_try_complete(channel, txdlh);

        txdp = (struct vxge_hw_fifo_txd *)*txdlh;
        if (txdp == NULL) {
                status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                goto exit;
        }

        /* check whether host owns it */
        if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

                vxge_assert(txdp->host_control != 0);

                vxge_hw_channel_dtr_complete(channel);

                *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

                if (fifo->stats->common_stats.usage_cnt > 0)
                        fifo->stats->common_stats.usage_cnt--;

                status = VXGE_HW_OK;
                goto exit;
        }

        /* no more completions */
        *txdlh = NULL;
        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
        return status;
}

/**
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 * "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when it encounters a critical error.
 */
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
                                              void *txdlh,
                                              enum vxge_hw_fifo_tcode t_code)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &fifo->channel;

        if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
                status = VXGE_HW_ERR_INVALID_TCODE;
                goto exit;
        }

        fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
        return status;
}

/**
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_fifo_txdl_reserve);
 *
 * - posted (vxge_hw_fifo_txdl_post);
 *
 * - completed (vxge_hw_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hw_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        u32 max_frags;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
                        (struct vxge_hw_fifo_txd *)txdlh);

        max_frags = fifo->config->max_frags;

        vxge_hw_channel_dtr_free(channel, txdlh);
}

/**
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *             enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask into the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
        struct __vxge_hw_vpath_handle *vp,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN],
        enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
        u32 i;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        for (i = 0; i < ETH_ALEN; i++) {
                data1 <<= 8;
                data1 |= (u8)macaddr[i];

                data2 <<= 8;
                data2 |= (u8)macaddr_mask[i];
        }

        switch (duplicate_mode) {
        case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
                i = 0;
                break;
        case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
                i = 1;
                break;
        case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
                i = 2;
                break;
        default:
                i = 0;
                break;
        }

        status = __vxge_hw_vpath_rts_table_set(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0,
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
        return status;
}

/**
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
        struct __vxge_hw_vpath_handle *vp,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN])
{
        u32 i;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_rts_table_get(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0, &data1, &data2);

        if (status != VXGE_HW_OK)
                goto exit;

        data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

        data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

        for (i = ETH_ALEN; i > 0; i--) {
                macaddr[i-1] = (u8)(data1 & 0xFF);
                data1 >>= 8;

                macaddr_mask[i-1] = (u8)(data2 & 0xFF);
                data2 >>= 8;
        }
exit:
        return status;
}

/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 *               vpath from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 *
 */
1521 | enum vxge_hw_status | |
1522 | vxge_hw_vpath_mac_addr_get_next( | |
1523 | struct __vxge_hw_vpath_handle *vp, | |
1524 | u8 (macaddr)[ETH_ALEN], | |
1525 | u8 (macaddr_mask)[ETH_ALEN]) | |
1526 | { | |
1527 | u32 i; | |
1528 | u64 data1 = 0ULL; | |
1529 | u64 data2 = 0ULL; | |
1530 | enum vxge_hw_status status = VXGE_HW_OK; | |
1531 | ||
1532 | if (vp == NULL) { | |
1533 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
1534 | goto exit; | |
1535 | } | |
1536 | ||
1537 | status = __vxge_hw_vpath_rts_table_get(vp, | |
1538 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY, | |
1539 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, | |
1540 | 0, &data1, &data2); | |
1541 | ||
1542 | if (status != VXGE_HW_OK) | |
1543 | goto exit; | |
1544 | ||
1545 | data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1); | |
1546 | ||
1547 | data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2); | |
1548 | ||
1549 | for (i = ETH_ALEN; i > 0; i--) { | |
1550 | macaddr[i-1] = (u8)(data1 & 0xFF); | |
1551 | data1 >>= 8; | |
1552 | ||
1553 | macaddr_mask[i-1] = (u8)(data2 & 0xFF); | |
1554 | data2 >>= 8; | |
1555 | } | |
1556 | ||
1557 | exit: | |
1558 | return status; | |
1559 | } | |
1560 | ||
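/*
 * Illustrative sketch (not part of the driver): a caller can walk the
 * whole per-vpath MAC table by seeding the scan with
 * vxge_hw_vpath_mac_addr_get() and advancing with
 * vxge_hw_vpath_mac_addr_get_next(). The handle "vp" is assumed to come
 * from a successful vxge_hw_vpath_open().
 */
static void example_dump_vpath_mac_table(struct __vxge_hw_vpath_handle *vp)
{
	u8 macaddr[ETH_ALEN], macaddr_mask[ETH_ALEN];
	enum vxge_hw_status status;

	status = vxge_hw_vpath_mac_addr_get(vp, macaddr, macaddr_mask);
	while (status == VXGE_HW_OK) {
		pr_info("vpath mac %pM mask %pM\n", macaddr, macaddr_mask);
		status = vxge_hw_vpath_mac_addr_get_next(vp, macaddr,
							 macaddr_mask);
	}
}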
1561 | /** | |
1562 | * vxge_hw_vpath_mac_addr_delete - Delete a mac address entry for this vpath | |
1563 | * from the MAC address table. | |
1564 | * @vp: Vpath handle. | |
1565 | * @macaddr: MAC address to be deleted for this vpath from the list | |
1566 | * @macaddr_mask: MAC address mask for macaddr | |
1567 | * | |
1568 | * Deletes the given mac address and mac address mask from the list for this | |
1569 | * vpath. | |
1570 | * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and | |
1571 | * vxge_hw_vpath_mac_addr_get_next | |
1572 | * | |
1573 | */ | |
1574 | enum vxge_hw_status | |
1575 | vxge_hw_vpath_mac_addr_delete( | |
1576 | struct __vxge_hw_vpath_handle *vp, | |
1577 | u8 (macaddr)[ETH_ALEN], | |
1578 | u8 (macaddr_mask)[ETH_ALEN]) | |
1579 | { | |
1580 | u32 i; | |
1581 | u64 data1 = 0ULL; | |
1582 | u64 data2 = 0ULL; | |
1583 | enum vxge_hw_status status = VXGE_HW_OK; | |
1584 | ||
1585 | if (vp == NULL) { | |
1586 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
1587 | goto exit; | |
1588 | } | |
1589 | ||
1590 | for (i = 0; i < ETH_ALEN; i++) { | |
1591 | data1 <<= 8; | |
1592 | data1 |= (u8)macaddr[i]; | |
1593 | ||
1594 | data2 <<= 8; | |
1595 | data2 |= (u8)macaddr_mask[i]; | |
1596 | } | |
1597 | ||
1598 | status = __vxge_hw_vpath_rts_table_set(vp, | |
1599 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY, | |
1600 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, | |
1601 | 0, | |
1602 | VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1), | |
1603 | VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)); | |
1604 | exit: | |
1605 | return status; | |
1606 | } | |
1607 | ||
1608 | /** | |
1609 | * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath | |
1610 | * to the vlan id table. | |
1611 | * @vp: Vpath handle. | |
1612 | * @vid: vlan id to be added for this vpath into the list | |
1613 | * | |
1614 | * Adds the given vlan id into the list for this vpath. | |
1615 | * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and | |
1616 | * vxge_hw_vpath_vid_get_next | |
1617 | * | |
1618 | */ | |
1619 | enum vxge_hw_status | |
1620 | vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid) | |
1621 | { | |
1622 | enum vxge_hw_status status = VXGE_HW_OK; | |
1623 | ||
1624 | if (vp == NULL) { | |
1625 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
1626 | goto exit; | |
1627 | } | |
1628 | ||
1629 | status = __vxge_hw_vpath_rts_table_set(vp, | |
1630 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY, | |
1631 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID, | |
1632 | 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0); | |
1633 | exit: | |
1634 | return status; | |
1635 | } | |
1636 | ||
1637 | /** | |
1638 | * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath | |
1639 | * from the vlan id table. | |
1640 | * @vp: Vpath handle. | |
1641 | * @vid: Buffer to return vlan id | |
1642 | * | |
1643 | * Returns the first vlan id in the list for this vpath. | |
1644 | * see also: vxge_hw_vpath_vid_get_next | |
1645 | * | |
1646 | */ | |
1647 | enum vxge_hw_status | |
1648 | vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid) | |
1649 | { | |
1650 | u64 data; | |
1651 | enum vxge_hw_status status = VXGE_HW_OK; | |
1652 | ||
1653 | if (vp == NULL) { | |
1654 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
1655 | goto exit; | |
1656 | } | |
1657 | ||
1658 | status = __vxge_hw_vpath_rts_table_get(vp, | |
1659 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY, | |
1660 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID, | |
1661 | 0, vid, &data); | |
1662 | ||
1663 | *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid); | |
1664 | exit: | |
1665 | return status; | |
1666 | } | |
1667 | ||
1668 | /** |
1669 | * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath | |
1670 | * from the vlan id table. | |
1671 | * @vp: Vpath handle. | |
1672 | * @vid: vlan id to be deleted for this vpath from the list | |
1673 | * | |
1674 | * Deletes the given vlan id from the list for this vpath. | |
1675 | * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and | |
1676 | * vxge_hw_vpath_vid_get_next | |
1677 | * | |
1678 | */ | |
1679 | enum vxge_hw_status | |
1680 | vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid) | |
1681 | { | |
1682 | enum vxge_hw_status status = VXGE_HW_OK; | |
1683 | ||
1684 | if (vp == NULL) { | |
1685 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
1686 | goto exit; | |
1687 | } | |
1688 | ||
1689 | status = __vxge_hw_vpath_rts_table_set(vp, | |
1690 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY, | |
1691 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID, | |
1692 | 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0); | |
1693 | exit: | |
1694 | return status; | |
1695 | } | |
1696 | ||
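/*
 * Illustrative sketch (not part of the driver): keep the hardware vlan
 * table in step with a software filter. Both helpers above take the raw
 * vlan id as a u64; the enable/disable split here is an assumption
 * about how a caller would wire them up.
 */
static enum vxge_hw_status
example_sync_vlan(struct __vxge_hw_vpath_handle *vp, u64 vid, bool enable)
{
	return enable ? vxge_hw_vpath_vid_add(vp, vid) :
			vxge_hw_vpath_vid_delete(vp, vid);
}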
1697 | /** | |
1698 | * vxge_hw_vpath_promisc_enable - Enable promiscuous mode. | |
1699 | * @vp: Vpath handle. | |
1700 | * | |
1701 | * Enable promiscuous mode of Titan-e operation. | |
1702 | * | |
1703 | * See also: vxge_hw_vpath_promisc_disable(). | |
1704 | */ | |
1705 | enum vxge_hw_status vxge_hw_vpath_promisc_enable( | |
1706 | struct __vxge_hw_vpath_handle *vp) | |
1707 | { | |
1708 | u64 val64; | |
1709 | struct __vxge_hw_virtualpath *vpath; | |
1710 | enum vxge_hw_status status = VXGE_HW_OK; | |
1711 | ||
1712 | if ((vp == NULL) || (vp->vpath->ringh == NULL)) { | |
1713 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
1714 | goto exit; | |
1715 | } | |
1716 | ||
1717 | vpath = vp->vpath; | |
1718 | ||
1719 | /* Enable promiscuous mode for function 0 only */ | |
1720 | if (!(vpath->hldev->access_rights & | |
1721 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) | |
1722 | return VXGE_HW_OK; | |
1723 | ||
1724 | val64 = readq(&vpath->vp_reg->rxmac_vcfg0); | |
1725 | ||
1726 | if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) { | |
1727 | ||
1728 | val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN | | |
1729 | VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN | | |
1730 | VXGE_HW_RXMAC_VCFG0_BCAST_EN | | |
1731 | VXGE_HW_RXMAC_VCFG0_ALL_VID_EN; | |
1732 | ||
1733 | writeq(val64, &vpath->vp_reg->rxmac_vcfg0); | |
1734 | } | |
1735 | exit: | |
1736 | return status; | |
1737 | } | |
1738 | ||
1739 | /** | |
1740 | * vxge_hw_vpath_promisc_disable - Disable promiscuous mode. | |
1741 | * @vp: Vpath handle. | |
1742 | * | |
1743 | * Disable promiscuous mode of Titan-e operation. | |
1744 | * | |
1745 | * See also: vxge_hw_vpath_promisc_enable(). | |
1746 | */ | |
1747 | enum vxge_hw_status vxge_hw_vpath_promisc_disable( | |
1748 | struct __vxge_hw_vpath_handle *vp) | |
1749 | { | |
1750 | u64 val64; | |
1751 | struct __vxge_hw_virtualpath *vpath; | |
1752 | enum vxge_hw_status status = VXGE_HW_OK; | |
1753 | ||
1754 | if ((vp == NULL) || (vp->vpath->ringh == NULL)) { | |
1755 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
1756 | goto exit; | |
1757 | } | |
1758 | ||
1759 | vpath = vp->vpath; | |
1760 | ||
1761 | val64 = readq(&vpath->vp_reg->rxmac_vcfg0); | |
1762 | ||
1763 | if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) { | |
1764 | ||
1765 | val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN | | |
1766 | VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN | | |
1767 | VXGE_HW_RXMAC_VCFG0_ALL_VID_EN); | |
1768 | ||
1769 | writeq(val64, &vpath->vp_reg->rxmac_vcfg0); | |
1770 | } | |
1771 | exit: | |
1772 | return status; | |
1773 | } | |
1774 | ||
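/*
 * Illustrative sketch (not part of the driver): honor IFF_PROMISC from
 * the net_device flags. Note that vxge_hw_vpath_promisc_enable() above
 * quietly returns VXGE_HW_OK on functions without MRPCIM access rights,
 * so only the privileged function actually touches rxmac_vcfg0.
 */
static void example_set_promisc(struct __vxge_hw_vpath_handle *vp,
				struct net_device *dev)
{
	if (dev->flags & IFF_PROMISC)
		vxge_hw_vpath_promisc_enable(vp);
	else
		vxge_hw_vpath_promisc_disable(vp);
}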
1775 | /* | |
1776 | * vxge_hw_vpath_bcast_enable - Enable broadcast | |
1777 | * @vp: Vpath handle. | |
1778 | * | |
1779 | * Enable receiving broadcasts. | |
1780 | */ | |
1781 | enum vxge_hw_status vxge_hw_vpath_bcast_enable( | |
1782 | struct __vxge_hw_vpath_handle *vp) | |
1783 | { | |
1784 | u64 val64; | |
1785 | struct __vxge_hw_virtualpath *vpath; | |
1786 | enum vxge_hw_status status = VXGE_HW_OK; | |
1787 | ||
1788 | if ((vp == NULL) || (vp->vpath->ringh == NULL)) { | |
1789 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
1790 | goto exit; | |
1791 | } | |
1792 | ||
1793 | vpath = vp->vpath; | |
1794 | ||
1795 | val64 = readq(&vpath->vp_reg->rxmac_vcfg0); | |
1796 | ||
1797 | if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) { | |
1798 | val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN; | |
1799 | writeq(val64, &vpath->vp_reg->rxmac_vcfg0); | |
1800 | } | |
1801 | exit: | |
1802 | return status; | |
1803 | } | |
1804 | ||
1805 | /** | |
1806 | * vxge_hw_vpath_mcast_enable - Enable multicast addresses. | |
1807 | * @vp: Vpath handle. | |
1808 | * | |
1809 | * Enable Titan-e multicast addresses. | |
1810 | * Returns: VXGE_HW_OK on success. | |
1811 | * | |
1812 | */ | |
1813 | enum vxge_hw_status vxge_hw_vpath_mcast_enable( | |
1814 | struct __vxge_hw_vpath_handle *vp) | |
1815 | { | |
1816 | u64 val64; | |
1817 | struct __vxge_hw_virtualpath *vpath; | |
1818 | enum vxge_hw_status status = VXGE_HW_OK; | |
1819 | ||
1820 | if ((vp == NULL) || (vp->vpath->ringh == NULL)) { | |
1821 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
1822 | goto exit; | |
1823 | } | |
1824 | ||
1825 | vpath = vp->vpath; | |
1826 | ||
1827 | val64 = readq(&vpath->vp_reg->rxmac_vcfg0); | |
1828 | ||
1829 | if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) { | |
1830 | val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN; | |
1831 | writeq(val64, &vpath->vp_reg->rxmac_vcfg0); | |
1832 | } | |
1833 | exit: | |
1834 | return status; | |
1835 | } | |
1836 | ||
1837 | /** | |
1838 | * vxge_hw_vpath_mcast_disable - Disable multicast addresses. | |
1839 | * @vp: Vpath handle. | |
1840 | * | |
1841 | * Disable Titan-e multicast addresses. | |
1842 | * Returns: VXGE_HW_OK - success. | |
1843 | * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle | |
1844 | * | |
1845 | */ | |
1846 | enum vxge_hw_status | |
1847 | vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp) | |
1848 | { | |
1849 | u64 val64; | |
1850 | struct __vxge_hw_virtualpath *vpath; | |
1851 | enum vxge_hw_status status = VXGE_HW_OK; | |
1852 | ||
1853 | if ((vp == NULL) || (vp->vpath->ringh == NULL)) { | |
1854 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
1855 | goto exit; | |
1856 | } | |
1857 | ||
1858 | vpath = vp->vpath; | |
1859 | ||
1860 | val64 = readq(&vpath->vp_reg->rxmac_vcfg0); | |
1861 | ||
1862 | if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) { | |
1863 | val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN; | |
1864 | writeq(val64, &vpath->vp_reg->rxmac_vcfg0); | |
1865 | } | |
1866 | exit: | |
1867 | return status; | |
1868 | } | |
1869 | ||
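/*
 * Illustrative sketch (not part of the driver): a typical open() path
 * enables broadcast reception unconditionally and keys all-multicast on
 * IFF_ALLMULTI, using the three helpers above.
 */
static void example_config_rx_filter(struct __vxge_hw_vpath_handle *vp,
				     struct net_device *dev)
{
	vxge_hw_vpath_bcast_enable(vp);

	if (dev->flags & IFF_ALLMULTI)
		vxge_hw_vpath_mcast_enable(vp);
	else
		vxge_hw_vpath_mcast_disable(vp);
}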
1870 | /* | |
1871 | * __vxge_hw_vpath_alarm_process - Process Alarms. | |
1872 | * @vpath: Virtual Path. | |
1873 | * @skip_alarms: Do not clear the alarms | |
1874 | * | |
1875 | * Process vpath alarms. | |
1876 | * | |
1877 | */ | |
1878 | static enum vxge_hw_status | |
1879 | __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath, | |
1880 | u32 skip_alarms) | |
1881 | { |
1882 | u64 val64; | |
1883 | u64 alarm_status; | |
1884 | u64 pic_status; | |
1885 | struct __vxge_hw_device *hldev = NULL; | |
1886 | enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN; | |
1887 | u64 mask64; | |
1888 | struct vxge_hw_vpath_stats_sw_info *sw_stats; | |
1889 | struct vxge_hw_vpath_reg __iomem *vp_reg; | |
1890 | ||
1891 | if (vpath == NULL) { | |
1892 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, | |
1893 | alarm_event); | |
1894 | goto out2; | |
1895 | } |
1896 | ||
1897 | hldev = vpath->hldev; | |
1898 | vp_reg = vpath->vp_reg; | |
1899 | alarm_status = readq(&vp_reg->vpath_general_int_status); | |
1900 | ||
1901 | if (alarm_status == VXGE_HW_ALL_FOXES) { | |
1902 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE, | |
1903 | alarm_event); |
1904 | goto out; | |
1905 | } | |
1906 | ||
1907 | sw_stats = vpath->sw_stats; | |
1908 | ||
1909 | if (alarm_status & ~( | |
1910 | VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT | | |
1911 | VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT | | |
1912 | VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT | | |
1913 | VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) { | |
1914 | sw_stats->error_stats.unknown_alarms++; | |
1915 | ||
1916 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, | |
1917 | alarm_event); |
1918 | goto out; | |
1919 | } | |
1920 | ||
1921 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) { | |
1922 | ||
1923 | val64 = readq(&vp_reg->xgmac_vp_int_status); | |
1924 | ||
1925 | if (val64 & | |
1926 | VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) { | |
1927 | ||
1928 | val64 = readq(&vp_reg->asic_ntwk_vp_err_reg); | |
1929 | ||
1930 | if (((val64 & | |
1931 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) && | |
1932 | (!(val64 & | |
1933 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) || | |
1934 | ((val64 & | |
1935 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) && | |
1936 | (!(val64 & | |
1937 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) | |
1938 | ))) { | |
1939 | sw_stats->error_stats.network_sustained_fault++; |
1940 | ||
1941 | writeq( | |
1942 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT, | |
1943 | &vp_reg->asic_ntwk_vp_err_mask); | |
1944 | ||
1945 | __vxge_hw_device_handle_link_down_ind(hldev); | |
1946 | alarm_event = VXGE_HW_SET_LEVEL( | |
1947 | VXGE_HW_EVENT_LINK_DOWN, alarm_event); | |
1948 | } |
1949 | ||
1950 | if (((val64 & | |
1951 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) && | |
1952 | (!(val64 & | |
1953 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) || | |
1954 | ((val64 & | |
1955 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) && | |
1956 | (!(val64 & | |
1957 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) | |
1958 | ))) { | |
1959 | |
1960 | sw_stats->error_stats.network_sustained_ok++; | |
1961 | ||
1962 | writeq( | |
1963 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK, | |
1964 | &vp_reg->asic_ntwk_vp_err_mask); | |
1965 | ||
1966 | __vxge_hw_device_handle_link_up_ind(hldev); | |
1967 | alarm_event = VXGE_HW_SET_LEVEL( | |
1968 | VXGE_HW_EVENT_LINK_UP, alarm_event); | |
1969 | } |
1970 | ||
1971 | writeq(VXGE_HW_INTR_MASK_ALL, | |
1972 | &vp_reg->asic_ntwk_vp_err_reg); | |
1973 | ||
1974 | alarm_event = VXGE_HW_SET_LEVEL( | |
1975 | VXGE_HW_EVENT_ALARM_CLEARED, alarm_event); | |
1976 | |
1977 | if (skip_alarms) | |
1978 | return VXGE_HW_OK; | |
1979 | } | |
1980 | } | |
1981 | ||
1982 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) { | |
1983 | ||
1984 | pic_status = readq(&vp_reg->vpath_ppif_int_status); | |
1985 | ||
1986 | if (pic_status & | |
1987 | VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) { | |
1988 | ||
1989 | val64 = readq(&vp_reg->general_errors_reg); | |
1990 | mask64 = readq(&vp_reg->general_errors_mask); | |
1991 | ||
1992 | if ((val64 & | |
1993 | VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) & | |
1994 | ~mask64) { | |
1995 | sw_stats->error_stats.ini_serr_det++; | |
1996 | ||
1997 | alarm_event = VXGE_HW_SET_LEVEL( | |
1998 | VXGE_HW_EVENT_SERR, alarm_event); | |
1999 | } |
2000 | ||
2001 | if ((val64 & | |
2002 | VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) & | |
2003 | ~mask64) { | |
2004 | sw_stats->error_stats.dblgen_fifo0_overflow++; | |
2005 | ||
2006 | alarm_event = VXGE_HW_SET_LEVEL( | |
2007 | VXGE_HW_EVENT_FIFO_ERR, alarm_event); | |
2008 | } |
2009 | ||
2010 | if ((val64 & | |
2011 | VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) & | |
2012 | ~mask64) | |
2013 | sw_stats->error_stats.statsb_pif_chain_error++; | |
2014 | ||
2015 | if ((val64 & | |
2016 | VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) & | |
2017 | ~mask64) | |
2018 | sw_stats->error_stats.statsb_drop_timeout++; | |
2019 | ||
2020 | if ((val64 & | |
2021 | VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) & | |
2022 | ~mask64) | |
2023 | sw_stats->error_stats.target_illegal_access++; | |
2024 | ||
2025 | if (!skip_alarms) { | |
2026 | writeq(VXGE_HW_INTR_MASK_ALL, | |
2027 | &vp_reg->general_errors_reg); | |
2028 | alarm_event = VXGE_HW_SET_LEVEL( | |
2029 | VXGE_HW_EVENT_ALARM_CLEARED, | |
2030 | alarm_event); |
2031 | } | |
2032 | } | |
2033 | ||
2034 | if (pic_status & | |
2035 | VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) { | |
2036 | ||
2037 | val64 = readq(&vp_reg->kdfcctl_errors_reg); | |
2038 | mask64 = readq(&vp_reg->kdfcctl_errors_mask); | |
2039 | ||
2040 | if ((val64 & | |
2041 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) & | |
2042 | ~mask64) { | |
2043 | sw_stats->error_stats.kdfcctl_fifo0_overwrite++; | |
2044 | ||
2045 | alarm_event = VXGE_HW_SET_LEVEL( | |
2046 | VXGE_HW_EVENT_FIFO_ERR, | |
2047 | alarm_event); |
2048 | } | |
2049 | ||
2050 | if ((val64 & | |
2051 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) & | |
2052 | ~mask64) { | |
2053 | sw_stats->error_stats.kdfcctl_fifo0_poison++; | |
2054 | ||
2055 | alarm_event = VXGE_HW_SET_LEVEL( | |
2056 | VXGE_HW_EVENT_FIFO_ERR, | |
2057 | alarm_event); |
2058 | } | |
2059 | ||
2060 | if ((val64 & | |
2061 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) & | |
2062 | ~mask64) { | |
2063 | sw_stats->error_stats.kdfcctl_fifo0_dma_error++; | |
2064 | ||
2065 | alarm_event = VXGE_HW_SET_LEVEL( | |
2066 | VXGE_HW_EVENT_FIFO_ERR, | |
2067 | alarm_event); |
2068 | } | |
2069 | ||
2070 | if (!skip_alarms) { | |
2071 | writeq(VXGE_HW_INTR_MASK_ALL, | |
2072 | &vp_reg->kdfcctl_errors_reg); | |
2073 | alarm_event = VXGE_HW_SET_LEVEL( | |
2074 | VXGE_HW_EVENT_ALARM_CLEARED, | |
2075 | alarm_event); |
2076 | } | |
2077 | } | |
2078 | ||
2079 | } | |
2080 | ||
2081 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) { | |
2082 | ||
2083 | val64 = readq(&vp_reg->wrdma_alarm_status); | |
2084 | ||
2085 | if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) { | |
2086 | ||
2087 | val64 = readq(&vp_reg->prc_alarm_reg); | |
2088 | mask64 = readq(&vp_reg->prc_alarm_mask); | |
2089 | ||
2090 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) & | |
2091 | ~mask64) | |
2092 | sw_stats->error_stats.prc_ring_bumps++; | |
2093 | ||
2094 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) & | |
2095 | ~mask64) { | |
2096 | sw_stats->error_stats.prc_rxdcm_sc_err++; | |
2097 | ||
2098 | alarm_event = VXGE_HW_SET_LEVEL( | |
2099 | VXGE_HW_EVENT_VPATH_ERR, | |
2100 | alarm_event); |
2101 | } | |
2102 | ||
2103 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT) | |
2104 | & ~mask64) { | |
2105 | sw_stats->error_stats.prc_rxdcm_sc_abort++; | |
2106 | ||
2107 | alarm_event = VXGE_HW_SET_LEVEL( | |
2108 | VXGE_HW_EVENT_VPATH_ERR, | |
2109 | alarm_event); |
2110 | } | |
2111 | ||
2112 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR) | |
2113 | & ~mask64) { | |
2114 | sw_stats->error_stats.prc_quanta_size_err++; | |
2115 | ||
2116 | alarm_event = VXGE_HW_SET_LEVEL( | |
2117 | VXGE_HW_EVENT_VPATH_ERR, | |
2118 | alarm_event); |
2119 | } | |
2120 | ||
2121 | if (!skip_alarms) { | |
2122 | writeq(VXGE_HW_INTR_MASK_ALL, | |
2123 | &vp_reg->prc_alarm_reg); | |
2124 | alarm_event = VXGE_HW_SET_LEVEL( | |
2125 | VXGE_HW_EVENT_ALARM_CLEARED, | |
2126 | alarm_event); |
2127 | } | |
2128 | } | |
2129 | } | |
2130 | out: | |
2131 | hldev->stats.sw_dev_err_stats.vpath_alarms++; | |
2132 | out2: | |
2133 | if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) || |
2134 | (alarm_event == VXGE_HW_EVENT_UNKNOWN)) | |
2135 | return VXGE_HW_OK; | |
2136 | ||
2137 | __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event); | |
2138 | ||
2139 | if (alarm_event == VXGE_HW_EVENT_SERR) | |
2140 | return VXGE_HW_ERR_CRITICAL; | |
2141 | ||
2142 | return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ? | |
2143 | VXGE_HW_ERR_SLOT_FREEZE : | |
2144 | (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO : | |
2145 | VXGE_HW_ERR_VPATH; | |
2146 | } | |
2147 | ||
2148 | /* | |
2149 | * vxge_hw_vpath_alarm_process - Process Alarms. | |
2150 | * @vp: Virtual Path handle. | |
2151 | * @skip_alarms: Do not clear the alarms | |
2152 | * | |
2153 | * Process vpath alarms. | |
2154 | * | |
2155 | */ | |
2156 | enum vxge_hw_status vxge_hw_vpath_alarm_process( | |
2157 | struct __vxge_hw_vpath_handle *vp, | |
2158 | u32 skip_alarms) | |
2159 | { | |
2160 | enum vxge_hw_status status = VXGE_HW_OK; | |
2161 | ||
2162 | if (vp == NULL) { | |
2163 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
2164 | goto exit; | |
2165 | } | |
2166 | ||
2167 | status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms); | |
2168 | exit: | |
2169 | return status; | |
2170 | } | |
2171 | ||
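/*
 * Illustrative sketch (not part of the driver): an alarm interrupt
 * handler would call vxge_hw_vpath_alarm_process() with skip_alarms == 0
 * so the helper clears the alarm registers itself, and treat anything
 * other than VXGE_HW_OK as grounds for a device reset. The logging-only
 * reaction below is a placeholder for that reset path.
 */
static void example_handle_alarm(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status;

	status = vxge_hw_vpath_alarm_process(vp, 0);
	if (status != VXGE_HW_OK)
		pr_err("vpath %u: alarm processing returned %d\n",
		       vp->vpath->vp_id, status);
}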
2172 | /** | |
2173 | * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and | |
2174 | * alarms | |
2175 | * @vp: Virtual Path handle. | |
2176 | * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of | |
2177 | * interrupts (can be repeated). If the fifo or ring is not enabled, | |
2178 | * the MSIX vector for it should be set to 0 | |
2179 | * @alarm_msix_id: MSIX vector for alarm. | |
2180 | * | |
2181 | * This API associates the given MSIX vector numbers with the four TIM | |
2182 | * interrupts and the alarm interrupt. | |
2183 | */ | |
2184 | void | |
2185 | vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id, |
2186 | int alarm_msix_id) | |
2187 | { | |
2188 | u64 val64; | |
2189 | struct __vxge_hw_virtualpath *vpath = vp->vpath; | |
2190 | struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; | |
2191 | u32 vp_id = vp->vpath->vp_id; | |
2192 | |
2193 | val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI( | |
2194 | (vp_id * 4) + tim_msix_id[0]) | | |
2195 | VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI( | |
2196 | (vp_id * 4) + tim_msix_id[1]); | |
2197 | |
2198 | writeq(val64, &vp_reg->interrupt_cfg0); | |
2199 | ||
2200 | writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG( | |
2201 | (vpath->hldev->first_vp_id * 4) + alarm_msix_id), | |
2202 | &vp_reg->interrupt_cfg2); |
2203 | ||
2204 | if (vpath->hldev->config.intr_mode == | |
2205 | VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { | |
2206 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( | |
2207 | VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN, | |
2208 | 0, 32), &vp_reg->one_shot_vect1_en); | |
2209 | } | |
2210 | ||
2211 | if (vpath->hldev->config.intr_mode == | |
2212 | VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { | |
2213 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( | |
2214 | VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN, | |
2215 | 0, 32), &vp_reg->one_shot_vect2_en); | |
2216 | ||
2217 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( | |
2218 | VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN, | |
2219 | 0, 32), &vp_reg->one_shot_vect3_en); | |
2220 | } | |
2221 | } |
2222 | ||
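/*
 * Illustrative sketch (not part of the driver): program the common
 * per-vpath MSI-X layout - TIM slot 0 (tx) on vector 0, slot 1 (rx) on
 * vector 1, unused slots left 0, and the alarm on vector 2. The exact
 * vector numbering is an assumption for the example.
 */
static void example_setup_msix(struct __vxge_hw_vpath_handle *vp)
{
	int tim_msix_id[4] = {0, 1, 0, 0};
	int alarm_msix_id = 2;

	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);
}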
2223 | /** | |
2224 | * vxge_hw_vpath_msix_mask - Mask MSIX Vector. | |
2225 | * @vp: Virtual Path handle. | |
2226 | * @msix_id: MSIX ID | |
2227 | * | |
2228 | * The function masks the msix interrupt for the given msix_id | |
2229 | * | |
2230 | * See also: vxge_hw_vpath_msix_unmask() | |
2234 | */ | |
2235 | void | |
2236 | vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id) | |
2237 | { | |
2238 | struct __vxge_hw_device *hldev = vp->vpath->hldev; | |
2239 | __vxge_hw_pio_mem_write32_upper( | |
2240 | (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), | |
2241 | &hldev->common_reg->set_msix_mask_vect[msix_id % 4]); | |
2242 | } |
2243 | ||
2244 | /** |
2245 | * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector. | |
2246 | * @vp: Virtual Path handle. | |
2247 | * @msix_id: MSIX ID | |
2248 | * | |
2249 | * The function unmasks the msix interrupt for the given msix_id | |
2250 | * | |
2251 | * See also: vxge_hw_vpath_msix_mask() | |
2255 | */ | |
2256 | void | |
2257 | vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id) | |
2258 | { | |
2259 | struct __vxge_hw_device *hldev = vp->vpath->hldev; | |
2260 | __vxge_hw_pio_mem_write32_upper( | |
2261 | (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), | |
2262 | &hldev->common_reg->clear_msix_mask_vect[msix_id%4]); | |
2263 | } |
2264 | ||
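/*
 * Illustrative sketch (not part of the driver, assumes
 * <linux/interrupt.h>): a per-vector handler masks its vector while the
 * associated ring or fifo is drained and unmasks it on the way out.
 * Owning vector 1 is an assumption for the example.
 */
static irqreturn_t example_msix_handler(int irq, void *dev_id)
{
	struct __vxge_hw_vpath_handle *vp = dev_id;
	int msix_id = 1;

	vxge_hw_vpath_msix_mask(vp, msix_id);
	/* ... drain completions here ... */
	vxge_hw_vpath_msix_unmask(vp, msix_id);

	return IRQ_HANDLED;
}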
2265 | /** |
2266 | * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts. | |
2267 | * @vp: Virtual Path handle. | |
2268 | * | |
2269 | * Mask Tx and Rx vpath interrupts. | |
2270 | * | |
2271 | * See also: vxge_hw_vpath_inta_unmask_tx_rx() | |
2272 | */ | |
2273 | void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp) | |
2274 | { | |
2275 | u64 tim_int_mask0[4] = {[0 ...3] = 0}; | |
2276 | u32 tim_int_mask1[4] = {[0 ...3] = 0}; | |
2277 | u64 val64; | |
2278 | struct __vxge_hw_device *hldev = vp->vpath->hldev; | |
2279 | ||
2280 | VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0, | |
2281 | tim_int_mask1, vp->vpath->vp_id); | |
2282 | ||
2283 | val64 = readq(&hldev->common_reg->tim_int_mask0); | |
2284 | ||
2285 | if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) || | |
2286 | (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) { | |
2287 | writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] | | |
2288 | tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64), | |
2289 | &hldev->common_reg->tim_int_mask0); | |
2290 | } | |
2291 | ||
2292 | val64 = readl(&hldev->common_reg->tim_int_mask1); | |
2293 | ||
2294 | if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) || | |
2295 | (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) { | |
2296 | __vxge_hw_pio_mem_write32_upper( | |
2297 | (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] | | |
2298 | tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64), | |
2299 | &hldev->common_reg->tim_int_mask1); | |
2300 | } | |
2301 | } |
2302 | ||
2303 | /** | |
2304 | * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts. | |
2305 | * @vp: Virtual Path handle. | |
2306 | * | |
2307 | * Unmask Tx and Rx vpath interrupts. | |
2308 | * | |
2309 | * See also: vxge_hw_vpath_inta_mask_tx_rx() | |
2310 | */ | |
2311 | void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp) | |
2312 | { | |
2313 | u64 tim_int_mask0[4] = {[0 ...3] = 0}; | |
2314 | u32 tim_int_mask1[4] = {[0 ...3] = 0}; | |
2315 | u64 val64; | |
2316 | struct __vxge_hw_device *hldev = vp->vpath->hldev; | |
2317 | ||
2318 | VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0, | |
2319 | tim_int_mask1, vp->vpath->vp_id); | |
2320 | ||
2321 | val64 = readq(&hldev->common_reg->tim_int_mask0); | |
2322 | ||
2323 | if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) || | |
2324 | (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) { | |
2325 | writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] | | |
2326 | tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64, | |
2327 | &hldev->common_reg->tim_int_mask0); | |
2328 | } | |
2329 | ||
2330 | if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) || | |
2331 | (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) { | |
2332 | __vxge_hw_pio_mem_write32_upper( | |
2333 | (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] | | |
2334 | tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64, | |
2335 | &hldev->common_reg->tim_int_mask1); | |
2336 | } | |
2337 | } |
2338 | ||
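/*
 * Illustrative sketch (not part of the driver): in INTA mode a poll
 * routine brackets its Rx work with the two helpers above so the TIM
 * Tx/Rx interrupts stay masked while the ring is drained.
 */
static void example_inta_poll(struct __vxge_hw_vpath_handle *vp,
			      struct __vxge_hw_ring *ring)
{
	vxge_hw_vpath_inta_mask_tx_rx(vp);
	vxge_hw_vpath_poll_rx(ring);	/* defined below */
	vxge_hw_vpath_inta_unmask_tx_rx(vp);
}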
2339 | /** | |
2340 | * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed | |
2341 | * descriptors and process the same. | |
2342 | * @ring: Handle to the ring object used for receive | |
2343 | * | |
2344 | * The function polls the Rx for the completed descriptors and calls | |
2345 | * the driver via supplied completion callback. | |
2346 | * | |
2347 | * Returns: VXGE_HW_OK, if the polling is completed successfully. | |
2348 | * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed | |
2349 | * descriptors available which are yet to be processed. | |
2350 | * | |
2351 | * See also: vxge_hw_vpath_poll_tx() | |
2352 | */ | |
2353 | enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring) | |
2354 | { | |
2355 | u8 t_code; | |
2356 | enum vxge_hw_status status = VXGE_HW_OK; | |
2357 | void *first_rxdh; | |
2358 | u64 val64 = 0; | |
2359 | int new_count = 0; | |
2360 | ||
2361 | ring->cmpl_cnt = 0; | |
2362 | ||
2363 | status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code); | |
2364 | if (status == VXGE_HW_OK) | |
2365 | ring->callback(ring, first_rxdh, | |
2366 | t_code, ring->channel.userdata); | |
2367 | ||
2368 | if (ring->cmpl_cnt != 0) { | |
2369 | ring->doorbell_cnt += ring->cmpl_cnt; | |
2370 | if (ring->doorbell_cnt >= ring->rxds_limit) { | |
2371 | /* | |
2372 | * Each RxD is of 4 qwords, update the number of | |
2373 | * qwords replenished | |
2374 | */ | |
2375 | new_count = (ring->doorbell_cnt * 4); | |
2376 | ||
2377 | /* For each block add 4 more qwords */ | |
2378 | ring->total_db_cnt += ring->doorbell_cnt; | |
2379 | if (ring->total_db_cnt >= ring->rxds_per_block) { | |
2380 | new_count += 4; | |
2381 | /* Reset total count */ | |
2382 | ring->total_db_cnt %= ring->rxds_per_block; | |
2383 | } | |
2384 | writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count), | |
2385 | &ring->vp_reg->prc_rxd_doorbell); | |
2386 | val64 = | |
2387 | readl(&ring->common_reg->titan_general_int_status); | |
2388 | ring->doorbell_cnt = 0; | |
2389 | } | |
2390 | } | |
2391 | ||
2392 | return status; | |
2393 | } | |
2394 | ||
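/*
 * Illustrative sketch (not part of the driver): drain an Rx ring by
 * re-polling while the documented VXGE_HW_COMPLETIONS_REMAIN contract
 * reports leftover descriptors; the registered ring callback runs for
 * each polled batch.
 */
static void example_drain_rx(struct __vxge_hw_ring *ring)
{
	enum vxge_hw_status status;

	do {
		status = vxge_hw_vpath_poll_rx(ring);
	} while (status == VXGE_HW_COMPLETIONS_REMAIN);
}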
2395 | /** | |
2396 | * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process | |
2397 | * the same. | |
2398 | * @fifo: Handle to the fifo object used for non offload send | |
2399 | * | |
2400 | * The function polls the Tx for the completed descriptors and calls | |
2401 | * the driver via supplied completion callback. |
2402 | * | |
2403 | * Returns: VXGE_HW_OK, if the polling is completed successfully. | |
2404 | * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed | |
2405 | * descriptors available which are yet to be processed. | |
2406 | */ |
2407 | enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo, | |
2408 | struct sk_buff ***skb_ptr, int nr_skb, | |
2409 | int *more) | |
2410 | { |
2411 | enum vxge_hw_fifo_tcode t_code; | |
2412 | void *first_txdlh; | |
2413 | enum vxge_hw_status status = VXGE_HW_OK; | |
2414 | struct __vxge_hw_channel *channel; | |
2415 | ||
2416 | channel = &fifo->channel; | |
2417 | ||
2418 | status = vxge_hw_fifo_txdl_next_completed(fifo, | |
2419 | &first_txdlh, &t_code); | |
2420 | if (status == VXGE_HW_OK) | |
2421 | if (fifo->callback(fifo, first_txdlh, t_code, | |
2422 | channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK) | |
2423 | status = VXGE_HW_COMPLETIONS_REMAIN; |
2424 | ||
2425 | return status; | |
2426 | } |
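/*
 * Illustrative sketch (not part of the driver): reap Tx completions.
 * The fifo callback advances *skb_ptr as it hands back completed skbs,
 * so the caller frees everything between the array base and the final
 * cursor. The array size of 16 is an assumption for the example.
 */
static void example_reap_tx(struct __vxge_hw_fifo *fifo)
{
	struct sk_buff *completed[16];
	struct sk_buff **skb_ptr = completed;
	int more = 0;

	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, 16, &more);

	while (skb_ptr != completed)
		dev_kfree_skb(*--skb_ptr);
}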