/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 ******************************************************************************/
#include <linux/etherdevice.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"

/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. This function is to be executed last in the
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;

        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        vp_reg = vpath->vp_reg;

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_status);

        val64 = readq(&vp_reg->vpath_general_int_status);

        /* Mask unwanted interrupts */

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        /* Unmask the individual interrupts */

        writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
                &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
                &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
                &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

        if (vpath->hldev->first_vp_id != vpath->vp_id)
                __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);
        else
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
                VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
                VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
                &vp_reg->asic_ntwk_vp_err_mask);

        __vxge_hw_pio_mem_write32_upper(0,
                &vp_reg->vpath_general_int_mask);
exit:
        return status;
}

/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts. This function is to be executed before the
 * vpath is closed.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
                        struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;

        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }
        vp_reg = vpath->vp_reg;

        __vxge_hw_pio_mem_write32_upper(
                (u32)VXGE_HW_INTR_MASK_ALL,
                &vp_reg->vpath_general_int_mask);

        val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);

exit:
        return status;
}

/**
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel handle (Rx or Tx)
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
                        0, 32),
                &channel->common_reg->set_msix_mask_vect[msix_id%4]);

        return;
}

/**
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel handle (Rx or Tx)
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
                        0, 32),
                &channel->common_reg->clear_msix_mask_vect[msix_id%4]);

        return;
}

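/*
 * Usage sketch (illustrative only, not part of the driver): a common
 * MSI-X pattern masks the vector in the hard-irq handler and unmasks it
 * once deferred processing is done. my_fifo_msix_handle() and my_poll()
 * are hypothetical names.
 *
 *	static irqreturn_t my_fifo_msix_handle(int irq, void *dev_id)
 *	{
 *		struct __vxge_hw_channel *channel = dev_id;
 *		int msix_id = ...;	(vector assigned to this channel)
 *
 *		vxge_hw_channel_msix_mask(channel, msix_id);
 *		my_poll(channel);	(process completions)
 *		vxge_hw_channel_msix_unmask(channel, msix_id);
 *		return IRQ_HANDLED;
 *	}
 */
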
/**
 * vxge_hw_device_set_intr_type - Updates the configuration
 *              with the new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
        if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
           (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
           (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
           (intr_mode != VXGE_HW_INTR_MODE_DEF))
                intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

        hldev->config.intr_mode = intr_mode;
        return intr_mode;
}

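/*
 * Usage sketch (illustrative only): falling back to legacy INTA when
 * MSI-X allocation fails. Any mode outside the four supported values is
 * coerced to VXGE_HW_INTR_MODE_IRQLINE by the helper above.
 *
 *	if (pci_enable_msix(pdev, entries, nvec) != 0)
 *		vxge_hw_device_set_intr_type(hldev,
 *				VXGE_HW_INTR_MODE_IRQLINE);
 */
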
/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. This function is to be executed last in the
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
        u32 i;
        u64 val64;
        u32 val32;

        vxge_hw_device_mask_all(hldev);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                vxge_hw_vpath_intr_enable(
                        VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
        }

        if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
                val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

                if (val64 != 0) {
                        writeq(val64, &hldev->common_reg->tim_int_status0);

                        writeq(~val64, &hldev->common_reg->tim_int_mask0);
                }

                val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

                if (val32 != 0) {
                        __vxge_hw_pio_mem_write32_upper(val32,
                                        &hldev->common_reg->tim_int_status1);

                        __vxge_hw_pio_mem_write32_upper(~val32,
                                        &hldev->common_reg->tim_int_mask1);
                }
        }

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        vxge_hw_device_unmask_all(hldev);

        return;
}

/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
        u32 i;

        vxge_hw_device_mask_all(hldev);

        /* mask all the tim interrupts */
        writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
        __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
                &hldev->common_reg->tim_int_mask1);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                vxge_hw_vpath_intr_disable(
                        VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
        }

        return;
}

/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
        u64 val64;

        val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
                VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                                &hldev->common_reg->titan_mask_all_int);

        return;
}

/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
        u64 val64 = 0;

        if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
                val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);

        return;
}

/**
 * vxge_hw_device_flush_io - Flush I/O writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush posted I/O writes.
 *
 * Returns: void
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
        u32 val32;

        val32 = readl(&hldev->common_reg->titan_general_int_status);
}

/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 *      general_int_status register.
 *
 * The function performs two actions: it first checks whether the interrupt
 * was raised by the device (relevant for a shared IRQ line), and then it
 * masks the device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
 * status.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
                                             u32 skip_alarms, u64 *reason)
{
        u32 i;
        u64 val64;
        u64 adapter_status;
        u64 vpath_mask;
        enum vxge_hw_status ret = VXGE_HW_OK;

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        if (unlikely(!val64)) {
                /* not a Titan interrupt */
                *reason = 0;
                ret = VXGE_HW_ERR_WRONG_IRQ;
                goto exit;
        }

        if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {

                adapter_status = readq(&hldev->common_reg->adapter_status);

                if (adapter_status == VXGE_HW_ALL_FOXES) {

                        __vxge_hw_device_handle_error(hldev,
                                NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
                        *reason = 0;
                        ret = VXGE_HW_ERR_SLOT_FREEZE;
                        goto exit;
                }
        }

        hldev->stats.sw_dev_info_stats.total_intr_cnt++;

        *reason = val64;

        vpath_mask = hldev->vpaths_deployed >>
                                (64 - VXGE_HW_MAX_VIRTUAL_PATHS);

        if (val64 &
            VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
                hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

                return VXGE_HW_OK;
        }

        hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

        if (unlikely(val64 &
                        VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

                enum vxge_hw_status error_level = VXGE_HW_OK;

                hldev->stats.sw_dev_err_stats.vpath_alarms++;

                for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                        if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                                continue;

                        ret = __vxge_hw_vpath_alarm_process(
                                &hldev->virtual_paths[i], skip_alarms);

                        error_level = VXGE_HW_SET_LEVEL(ret, error_level);

                        if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
                                (ret == VXGE_HW_ERR_SLOT_FREEZE)))
                                break;
                }

                ret = error_level;
        }
exit:
        return ret;
}

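/*
 * Usage sketch (illustrative only, not part of the driver): a legacy INTA
 * interrupt handler built on the helpers above. my_isr() and
 * my_schedule_napi() are hypothetical names.
 *
 *	static irqreturn_t my_isr(int irq, void *dev_id)
 *	{
 *		struct __vxge_hw_device *hldev = dev_id;
 *		u64 reason;
 *
 *		if (vxge_hw_device_begin_irq(hldev, 0, &reason) ==
 *		    VXGE_HW_ERR_WRONG_IRQ)
 *			return IRQ_NONE;	(shared line, not ours)
 *
 *		vxge_hw_device_mask_all(hldev);
 *		my_schedule_napi(hldev);	(poll rings, then unmask)
 *		return IRQ_HANDLED;
 *	}
 */
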
/*
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link has been up for a programmable amount
 * of time.
 */
enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
        /*
         * If the link state is already up, return.
         */
        if (hldev->link_state == VXGE_HW_LINK_UP)
                goto exit;

        hldev->link_state = VXGE_HW_LINK_UP;

        /* notify driver */
        if (hldev->uld_callbacks.link_up)
                hldev->uld_callbacks.link_up(hldev);
exit:
        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */
enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
        /*
         * If the link state is already down, return.
         */
        if (hldev->link_state == VXGE_HW_LINK_DOWN)
                goto exit;

        hldev->link_state = VXGE_HW_LINK_DOWN;

        /* notify driver */
        if (hldev->uld_callbacks.link_down)
                hldev->uld_callbacks.link_down(hldev);
exit:
        return VXGE_HW_OK;
}

/**
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */
enum vxge_hw_status
__vxge_hw_device_handle_error(
                struct __vxge_hw_device *hldev,
                u32 vp_id,
                enum vxge_hw_event type)
{
        switch (type) {
        case VXGE_HW_EVENT_UNKNOWN:
                break;
        case VXGE_HW_EVENT_RESET_START:
        case VXGE_HW_EVENT_RESET_COMPLETE:
        case VXGE_HW_EVENT_LINK_DOWN:
        case VXGE_HW_EVENT_LINK_UP:
                goto out;
        case VXGE_HW_EVENT_ALARM_CLEARED:
                goto out;
        case VXGE_HW_EVENT_ECCERR:
        case VXGE_HW_EVENT_MRPCIM_ECCERR:
                goto out;
        case VXGE_HW_EVENT_FIFO_ERR:
        case VXGE_HW_EVENT_VPATH_ERR:
        case VXGE_HW_EVENT_CRITICAL_ERR:
        case VXGE_HW_EVENT_SERR:
                break;
        case VXGE_HW_EVENT_SRPCIM_SERR:
        case VXGE_HW_EVENT_MRPCIM_SERR:
                goto out;
        case VXGE_HW_EVENT_SLOT_FREEZE:
                break;
        default:
                vxge_assert(0);
                goto out;
        }

        /* notify driver */
        if (hldev->uld_callbacks.crit_err)
                hldev->uld_callbacks.crit_err(
                        (struct __vxge_hw_device *)hldev,
                        type, vp_id);
out:
        return VXGE_HW_OK;
}

/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and Rx interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
        if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
           (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
                writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                                 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
                                &hldev->common_reg->tim_int_status0);
        }

        if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
           (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
                __vxge_hw_pio_mem_write32_upper(
                                (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                                 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
                                &hldev->common_reg->tim_int_status1);
        }

        return;
}

/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 *
 */
enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
        void **tmp_arr;

        if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
                *dtrh = channel->reserve_arr[--channel->reserve_ptr];

                return VXGE_HW_OK;
        }

        /* switch between empty and full arrays */

        /* The idea behind this design is that by keeping the free and
         * reserve arrays separate we effectively separate the irq and
         * non-irq parts, i.e. no additional locking is needed when a
         * resource is freed. */

        if (channel->length - channel->free_ptr > 0) {

                tmp_arr = channel->reserve_arr;
                channel->reserve_arr = channel->free_arr;
                channel->free_arr = tmp_arr;
                channel->reserve_ptr = channel->length;
                channel->reserve_top = channel->free_ptr;
                channel->free_ptr = channel->length;

                channel->stats->reserve_free_swaps_cnt++;

                goto _alloc_after_swap;
        }

        channel->stats->full_cnt++;

        *dtrh = NULL;
        return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}

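/*
 * Worked example (illustrative only) of the two-array scheme above, for a
 * channel with length = 4. Allocation pops from reserve_arr, while
 * completed descriptors are pushed onto free_arr from irq context via
 * vxge_hw_channel_dtr_free(). Once reserve_arr runs dry, the next
 * vxge_hw_channel_dtr_alloc() swaps the two arrays in one shot, so the
 * allocating and freeing contexts do not contend on the same array:
 *
 *	reserve_arr: [] (empty)      free_arr: [d2, d0, d3]
 *	                 -- swap --
 *	reserve_arr: [d2, d0, d3]    free_arr: [] (refilled by irq side)
 */
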
/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channel: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to the work array.
 *
 */
void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
        vxge_assert(channel->work_arr[channel->post_index] == NULL);

        channel->work_arr[channel->post_index++] = dtrh;

        /* wrap-around */
        if (channel->post_index == channel->length)
                channel->post_index = 0;
}

/*
 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
 * @channel: Channel
 * @dtrh: Buffer to return the next completed DTR pointer
 *
 * Returns the next completed dtr without removing it from the work array
 *
 */
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
        vxge_assert(channel->compl_index < channel->length);

        *dtrh = channel->work_arr[channel->compl_index];
        prefetch(*dtrh);
}

/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from the work array
 *
 */
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
        channel->work_arr[channel->compl_index] = NULL;

        /* wrap-around */
        if (++channel->compl_index == channel->length)
                channel->compl_index = 0;

        channel->stats->total_compl_cnt++;
}

/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtrh: DTR pointer
 *
 * Returns the dtr to the free array
 *
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
        channel->free_arr[--channel->free_ptr] = dtrh;
}

/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieve the number of DTRs available. This function cannot be called
 * from the data path. ring_initial_replenishi() is the only user.
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
        return (channel->reserve_ptr - channel->reserve_top) +
                (channel->length - channel->free_ptr);
}

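/*
 * Worked example (illustrative numbers): for a channel with length = 128,
 * reserve_ptr = 10, reserve_top = 0 and free_ptr = 100, the count is
 * (10 - 0) + (128 - 100) = 38 DTRs available: 10 still sitting in the
 * reserve array plus 28 already returned to the free array and waiting
 * for the next swap.
 */
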
/**
 * vxge_hw_ring_rxd_reserve     - Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 *
 * Reserve an Rx descriptor for subsequent filling-in by the driver and
 * posting on the corresponding channel via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
        void **rxdh)
{
        enum vxge_hw_status status;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        status = vxge_hw_channel_dtr_alloc(channel, rxdh);

        if (status == VXGE_HW_OK) {
                struct vxge_hw_ring_rxd_1 *rxdp =
                        (struct vxge_hw_ring_rxd_1 *)*rxdh;

                rxdp->control_0 = rxdp->control_1 = 0;
        }

        return status;
}

/**
 * vxge_hw_ring_rxd_free - Free descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_ring_rxd_reserve);
 *
 * - posted (vxge_hw_ring_rxd_post);
 *
 * - completed (vxge_hw_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hw_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        vxge_hw_channel_dtr_free(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * This routine prepares an rxd and posts it
 */
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        vxge_hw_channel_dtr_post(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_post_post - Process rxd after post.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post
 */
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        wmb();
        rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

        vxge_hw_channel_dtr_post(channel, rxdh);

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        wmb();
        vxge_hw_ring_rxd_post_post(ring, rxdh);
}

/**
 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *       Receive Descriptor Format. Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the
 * remaining completions (the very first completion is passed by HW via
 * vxge_hw_ring_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to fill in the receive buffer(s)
 * of the descriptor.
 * For instance, a parity error detected during the data transfer.
 * In this case Titan will complete the descriptor and indicate
 * to the host that the received data is not to be used.
 * For details please refer to the Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hw_ring_callback_f{},
 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
        struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
        struct __vxge_hw_channel *channel;
        struct vxge_hw_ring_rxd_1 *rxdp;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &ring->channel;

        vxge_hw_channel_dtr_try_complete(channel, rxdh);

        rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
        if (rxdp == NULL) {
                status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                goto exit;
        }

        /* check whether it is not the end */
        if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {

                vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
                                0);

                ++ring->cmpl_cnt;
                vxge_hw_channel_dtr_complete(channel);

                *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);

                vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

                ring->stats->common_stats.usage_cnt++;
                if (ring->stats->common_stats.usage_max <
                                ring->stats->common_stats.usage_cnt)
                        ring->stats->common_stats.usage_max =
                                ring->stats->common_stats.usage_cnt;

                status = VXGE_HW_OK;
                goto exit;
        }

        /* reset it, since we don't want to return
         * garbage to the driver */
        *rxdh = NULL;
        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
        return status;
}

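/*
 * Usage sketch (illustrative only): draining receive completions with the
 * helpers above. my_rx_process() is a hypothetical per-packet handler.
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) ==
 *		    VXGE_HW_OK)
 *			my_rx_process(ring, rxdh);	(consume or drop)
 *		vxge_hw_ring_rxd_free(ring, rxdh);	(recycle descriptor)
 *	}
 */
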
/**
 * vxge_hw_ring_handle_tcode - Handle transfer code.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 * "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK                   - for success.
 * VXGE_HW_ERR_CRITICAL         - when a critical error is encountered.
 */
enum vxge_hw_status vxge_hw_ring_handle_tcode(
        struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &ring->channel;

        /* t_code 0 means success. t_code 0x5 flags an unparseable packet
         * (such as an unknown IPv6 header); the frame is simply dropped.
         * Any other t_code up to 0xF is counted as a receive error.
         */
        if (t_code == 0 || t_code == 5) {
                status = VXGE_HW_OK;
                goto exit;
        }

        if (t_code > 0xF) {
                status = VXGE_HW_ERR_INVALID_TCODE;
                goto exit;
        }

        ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
        return status;
}

/**
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 * @no_snoop: No snoop flags
 *
 * This function posts a non-offload doorbell to the doorbell FIFO
 *
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
        u64 txdl_ptr, u32 num_txds, u32 no_snoop)
{
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
                VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
                VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
                &fifo->nofl_db->control_0);

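        /* mmiowb() is used so the control-word write above is seen by the
         * adapter before the TxDL pointer write below, on platforms where
         * MMIO writes can otherwise be reordered.
         */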
        mmiowb();

        writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

        mmiowb();
}

/**
 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
 * the fifo
 * @fifoh: Handle to the fifo object used for non offload send
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
        return vxge_hw_channel_dtr_count(&fifoh->channel);
}

/**
 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
 * @fifoh: Handle to the fifo object used for non offload send
 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
 *        with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for subsequent filling-in by the driver
 * and posting on the corresponding channel
 * via vxge_hw_fifo_txdl_post().
 *
 * Note: it is the responsibility of the driver to reserve multiple
 * descriptors for a lengthy (e.g., LSO) transmit operation. A single fifo
 * descriptor carries up to a configured number (fifo.max_frags) of
 * contiguous buffers.
 *
 * Returns: VXGE_HW_OK - success;
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
        struct __vxge_hw_fifo *fifo,
        void **txdlh, void **txdl_priv)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status;
        int i;

        channel = &fifo->channel;

        status = vxge_hw_channel_dtr_alloc(channel, txdlh);

        if (status == VXGE_HW_OK) {
                struct vxge_hw_fifo_txd *txdp =
                        (struct vxge_hw_fifo_txd *)*txdlh;
                struct __vxge_hw_fifo_txdl_priv *priv;

                priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

                /* reset the TxDL's private */
                priv->align_dma_offset = 0;
                priv->align_vaddr_start = priv->align_vaddr;
                priv->align_used_frags = 0;
                priv->frags = 0;
                priv->alloc_frags = fifo->config->max_frags;
                priv->next_txdl_priv = NULL;

                *txdl_priv = (void *)(size_t)txdp->host_control;

                for (i = 0; i < fifo->config->max_frags; i++) {
                        txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
                        txdp->control_0 = txdp->control_1 = 0;
                }
        }

        return status;
}

/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *            (of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 *
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
                                  void *txdlh, u32 frag_idx,
                                  dma_addr_t dma_pointer, u32 size)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        struct vxge_hw_fifo_txd *txdp, *txdp_last;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
        txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

        if (frag_idx != 0)
                txdp->control_0 = txdp->control_1 = 0;
        else {
                txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
                        VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
                txdp->control_1 |= fifo->interrupt_type;
                txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
                        fifo->tx_intr_num);
                if (txdl_priv->frags) {
                        txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
                                (txdl_priv->frags - 1);
                        txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
                                VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
                }
        }

        vxge_assert(frag_idx < txdl_priv->alloc_frags);

        txdp->buffer_pointer = (u64)dma_pointer;
        txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
        fifo->stats->total_buffers++;
        txdl_priv->frags++;
}

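/*
 * Usage sketch (illustrative only): the typical transmit flow built on
 * the fifo APIs above, for an skb whose linear part and nr_frags
 * fragments have already been DMA-mapped. The dma[] and len[] arrays are
 * hypothetical placeholders.
 *
 *	void *txdlh, *txdl_priv;
 *	int i;
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) !=
 *	    VXGE_HW_OK)
 *		return NETDEV_TX_BUSY;	(out of descriptors)
 *
 *	for (i = 0; i <= nr_frags; i++)
 *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i,
 *					     dma[i], len[i]);
 *
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);	(rings the doorbell)
 */
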
/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        struct vxge_hw_fifo_txd *txdp_last;
        struct vxge_hw_fifo_txd *txdp_first;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
        txdp_first = (struct vxge_hw_fifo_txd *)txdlh;

        txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
        txdp_last->control_0 |=
              VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
        txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

        vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

        __vxge_hw_non_offload_db_post(fifo,
                (u64)txdl_priv->dma_addr,
                txdl_priv->frags - 1,
                fifo->no_snoop_bits);

        fifo->stats->total_posts++;
        fifo->stats->common_stats.usage_cnt++;
        if (fifo->stats->common_stats.usage_max <
                fifo->stats->common_stats.usage_cnt)
                fifo->stats->common_stats.usage_max =
                        fifo->stats->common_stats.usage_cnt;
}

/**
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *          Transmit Descriptor Format.
 *          Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the
 * remaining completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to the Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
        struct __vxge_hw_fifo *fifo, void **txdlh,
        enum vxge_hw_fifo_tcode *t_code)
{
        struct __vxge_hw_channel *channel;
        struct vxge_hw_fifo_txd *txdp;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &fifo->channel;

        vxge_hw_channel_dtr_try_complete(channel, txdlh);

        txdp = (struct vxge_hw_fifo_txd *)*txdlh;
        if (txdp == NULL) {
                status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                goto exit;
        }

        /* check whether host owns it */
        if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

                vxge_assert(txdp->host_control != 0);

                vxge_hw_channel_dtr_complete(channel);

                *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

                if (fifo->stats->common_stats.usage_cnt > 0)
                        fifo->stats->common_stats.usage_cnt--;

                status = VXGE_HW_OK;
                goto exit;
        }

        /* no more completions */
        *txdlh = NULL;
        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
        return status;
}

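/*
 * Usage sketch (illustrative only): reclaiming transmitted descriptors.
 * my_unmap_and_free_skb() is a hypothetical completion handler.
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		(void)vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		my_unmap_and_free_skb(fifo, txdlh);
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);	(recycle TxDL)
 *	}
 */
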
/**
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when a critical error is encountered.
 */
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
                                              void *txdlh,
                                              enum vxge_hw_fifo_tcode t_code)
{
        struct __vxge_hw_channel *channel;

        enum vxge_hw_status status = VXGE_HW_OK;
        channel = &fifo->channel;

        /* valid transmit t_codes occupy the low three bits and range
         * from 0x0 to 0x4 */
        if ((t_code & 0x7) > 0x4) {
                status = VXGE_HW_ERR_INVALID_TCODE;
                goto exit;
        }

        fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
        return status;
}

/**
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_fifo_txdl_reserve);
 *
 * - posted (vxge_hw_fifo_txdl_post);
 *
 * - completed (vxge_hw_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hw_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        u32 max_frags;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
                        (struct vxge_hw_fifo_txd *)txdlh);

        max_frags = fifo->config->max_frags;

        vxge_hw_channel_dtr_free(channel, txdlh);
}

/**
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to the MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *             enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask to the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
        struct __vxge_hw_vpath_handle *vp,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN],
        enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
        u32 i;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        for (i = 0; i < ETH_ALEN; i++) {
                data1 <<= 8;
                data1 |= (u8)macaddr[i];

                data2 <<= 8;
                data2 |= (u8)macaddr_mask[i];
        }

        switch (duplicate_mode) {
        case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
                i = 0;
                break;
        case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
                i = 1;
                break;
        case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
                i = 2;
                break;
        default:
                i = 0;
                break;
        }

        status = __vxge_hw_vpath_rts_table_set(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0,
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
        return status;
}

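/*
 * Worked example (illustrative only) of the byte packing performed above:
 * for macaddr = 00:11:22:33:44:55 and an all-ones mask, the loop yields
 *
 *	data1 = 0x0000001122334455	(address, most significant byte first)
 *	data2 = 0x0000FFFFFFFFFFFF	(mask)
 *
 * before the two values are shifted into the DATA0/DATA1 steering-table
 * fields by the VXGE_HW_RTS_ACCESS_STEER_DATA* macros.
 */
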
/**
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from the MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
        struct __vxge_hw_vpath_handle *vp,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN])
{
        u32 i;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_rts_table_get(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0, &data1, &data2);

        if (status != VXGE_HW_OK)
                goto exit;

        data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

        data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

        for (i = ETH_ALEN; i > 0; i--) {
                macaddr[i-1] = (u8)(data1 & 0xFF);
                data1 >>= 8;

                macaddr_mask[i-1] = (u8)(data2 & 0xFF);
                data2 >>= 8;
        }
exit:
        return status;
}

/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 *               vpath from the MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
        struct __vxge_hw_vpath_handle *vp,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN])
{
        u32 i;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_rts_table_get(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0, &data1, &data2);

        if (status != VXGE_HW_OK)
                goto exit;

        data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

        data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

        for (i = ETH_ALEN; i > 0; i--) {
                macaddr[i-1] = (u8)(data1 & 0xFF);
                data1 >>= 8;

                macaddr_mask[i-1] = (u8)(data2 & 0xFF);
                data2 >>= 8;
        }

exit:
        return status;
}

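/*
 * Usage sketch (illustrative only): walking the whole MAC address table
 * for a vpath by pairing the two calls above.
 *
 *	u8 mac[ETH_ALEN], mask[ETH_ALEN];
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_get(vp, mac, mask);
 *	while (status == VXGE_HW_OK) {
 *		(use the mac/mask entry)
 *		status = vxge_hw_vpath_mac_addr_get_next(vp, mac, mask);
 *	}
 */
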
/**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from the MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Delete the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
        struct __vxge_hw_vpath_handle *vp,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN])
{
        u32 i;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        for (i = 0; i < ETH_ALEN; i++) {
                data1 <<= 8;
                data1 |= (u8)macaddr[i];

                data2 <<= 8;
                data2 |= (u8)macaddr_mask[i];
        }

        status = __vxge_hw_vpath_rts_table_set(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0,
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
        return status;
}

1612 /**
1613  * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1614  *               to vlan id table.
1615  * @vp: Vpath handle.
1616  * @vid: vlan id to be added for this vpath into the list
1617  *
1618  * Adds the given vlan id into the list for this vpath.
1619  * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
1620  * vxge_hw_vpath_vid_get_next
1621  *
1622  */
1623 enum vxge_hw_status
1624 vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1625 {
1626         enum vxge_hw_status status = VXGE_HW_OK;
1627
1628         if (vp == NULL) {
1629                 status = VXGE_HW_ERR_INVALID_HANDLE;
1630                 goto exit;
1631         }
1632
1633         status = __vxge_hw_vpath_rts_table_set(vp,
1634                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1635                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1636                         0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1637 exit:
1638         return status;
1639 }
1640
1641 /**
1642  * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
1643  *               from vlan id table.
1644  * @vp: Vpath handle.
1645  * @vid: Buffer to return vlan id
1646  *
1647  * Returns the first vlan id in the list for this vpath.
1648  * see also: vxge_hw_vpath_vid_get_next
1649  *
1650  */
1651 enum vxge_hw_status
1652 vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1653 {
1654         u64 data;
1655         enum vxge_hw_status status = VXGE_HW_OK;
1656
1657         if (vp == NULL) {
1658                 status = VXGE_HW_ERR_INVALID_HANDLE;
1659                 goto exit;
1660         }
1661
1662         status = __vxge_hw_vpath_rts_table_get(vp,
1663                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1664                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1665                         0, vid, &data);
1666
1667         *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1668 exit:
1669         return status;
1670 }
1671
1672 /**
1673  * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
1674  *               from vlan id table.
1675  * @vp: Vpath handle.
1676  * @vid: Buffer to return vlan id
1677  *
1678  * Returns the next vlan id in the list for this vpath.
1679  * see also: vxge_hw_vpath_vid_get
1680  *
1681  */
1682 enum vxge_hw_status
1683 vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1684 {
1685         u64 data;
1686         enum vxge_hw_status status = VXGE_HW_OK;
1687
1688         if (vp == NULL) {
1689                 status = VXGE_HW_ERR_INVALID_HANDLE;
1690                 goto exit;
1691         }
1692
1693         status = __vxge_hw_vpath_rts_table_get(vp,
1694                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1695                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1696                         0, vid, &data);
1697
1698         *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1699 exit:
1700         return status;
1701 }
1702
1703 /**
1704  * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1705  *               from vlan id table.
1706  * @vp: Vpath handle.
1707  * @vid: vlan id to be deleted for this vpath from the list
1708  *
1709  * Deletes the given vlan id from the list for this vpath.
1710  * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
1711  * vxge_hw_vpath_vid_get_next
1712  *
1713  */
1714 enum vxge_hw_status
1715 vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1716 {
1717         enum vxge_hw_status status = VXGE_HW_OK;
1718
1719         if (vp == NULL) {
1720                 status = VXGE_HW_ERR_INVALID_HANDLE;
1721                 goto exit;
1722         }
1723
1724         status = __vxge_hw_vpath_rts_table_set(vp,
1725                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1726                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1727                         0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1728 exit:
1729         return status;
1730 }
1731
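/*
 * Usage sketch (illustrative only): a driver mirroring stack VLAN
 * registrations into the vpath table would roughly do, for an open
 * vpath handle "vp" and a 12-bit VLAN id "vid":
 *
 *	status = vxge_hw_vpath_vid_add(vp, vid);
 *	...
 *	status = vxge_hw_vpath_vid_delete(vp, vid);
 *
 * Both calls program the same RTS VID table; only the steer action
 * (ADD_ENTRY vs DELETE_ENTRY) differs.
 */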
1732 /**
1733  * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1734  * @vp: Vpath handle.
1735  *
1736  * Enable promiscuous mode of Titan-e operation.
1737  *
1738  * See also: vxge_hw_vpath_promisc_disable().
1739  */
1740 enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1741                         struct __vxge_hw_vpath_handle *vp)
1742 {
1743         u64 val64;
1744         struct __vxge_hw_virtualpath *vpath;
1745         enum vxge_hw_status status = VXGE_HW_OK;
1746
1747         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1748                 status = VXGE_HW_ERR_INVALID_HANDLE;
1749                 goto exit;
1750         }
1751
1752         vpath = vp->vpath;
1753
1754         /* Enable promiscuous mode for function 0 only */
1755         if (!(vpath->hldev->access_rights &
1756                 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
1757                 return VXGE_HW_OK;
1758
1759         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1760
1761         if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
1762
1763                 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1764                          VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1765                          VXGE_HW_RXMAC_VCFG0_BCAST_EN |
1766                          VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
1767
1768                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1769         }
1770 exit:
1771         return status;
1772 }
1773
1774 /**
1775  * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
1776  * @vp: Vpath handle.
1777  *
1778  * Disable promiscuous mode of Titan-e operation.
1779  *
1780  * See also: vxge_hw_vpath_promisc_enable().
1781  */
1782 enum vxge_hw_status vxge_hw_vpath_promisc_disable(
1783                         struct __vxge_hw_vpath_handle *vp)
1784 {
1785         u64 val64;
1786         struct __vxge_hw_virtualpath *vpath;
1787         enum vxge_hw_status status = VXGE_HW_OK;
1788
1789         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1790                 status = VXGE_HW_ERR_INVALID_HANDLE;
1791                 goto exit;
1792         }
1793
1794         vpath = vp->vpath;
1795
1796         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1797
1798         if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
1799
1800                 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1801                            VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1802                            VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
1803
1804                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1805         }
1806 exit:
1807         return status;
1808 }
1809
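/*
 * Usage sketch (illustrative only): a hypothetical rx-mode handler might
 * toggle promiscuous operation per vpath like this:
 *
 *	if (netdev->flags & IFF_PROMISC)
 *		status = vxge_hw_vpath_promisc_enable(vp);
 *	else
 *		status = vxge_hw_vpath_promisc_disable(vp);
 *
 * Note that the enable path above is honoured only for the privileged
 * (mrpcim) function; other functions return VXGE_HW_OK without touching
 * rxmac_vcfg0.
 */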
1810 /*
1811  * vxge_hw_vpath_bcast_enable - Enable broadcast
1812  * @vp: Vpath handle.
1813  *
1814  * Enable receiving broadcasts.
1815  */
1816 enum vxge_hw_status vxge_hw_vpath_bcast_enable(
1817                         struct __vxge_hw_vpath_handle *vp)
1818 {
1819         u64 val64;
1820         struct __vxge_hw_virtualpath *vpath;
1821         enum vxge_hw_status status = VXGE_HW_OK;
1822
1823         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1824                 status = VXGE_HW_ERR_INVALID_HANDLE;
1825                 goto exit;
1826         }
1827
1828         vpath = vp->vpath;
1829
1830         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1831
1832         if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
1833                 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
1834                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1835         }
1836 exit:
1837         return status;
1838 }
1839
1840 /**
1841  * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
1842  * @vp: Vpath handle.
1843  *
1844  * Enable Titan-e multicast addresses.
1845  * Returns: VXGE_HW_OK on success.
1846  *
1847  */
1848 enum vxge_hw_status vxge_hw_vpath_mcast_enable(
1849                         struct __vxge_hw_vpath_handle *vp)
1850 {
1851         u64 val64;
1852         struct __vxge_hw_virtualpath *vpath;
1853         enum vxge_hw_status status = VXGE_HW_OK;
1854
1855         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1856                 status = VXGE_HW_ERR_INVALID_HANDLE;
1857                 goto exit;
1858         }
1859
1860         vpath = vp->vpath;
1861
1862         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1863
1864         if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
1865                 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1866                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1867         }
1868 exit:
1869         return status;
1870 }
1871
1872 /**
1873  * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
1874  * @vp: Vpath handle.
1875  *
1876  * Disable Titan-e multicast addresses.
1877  * Returns: VXGE_HW_OK - success.
1878  * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
1879  *
1880  */
1881 enum vxge_hw_status
1882 vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
1883 {
1884         u64 val64;
1885         struct __vxge_hw_virtualpath *vpath;
1886         enum vxge_hw_status status = VXGE_HW_OK;
1887
1888         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1889                 status = VXGE_HW_ERR_INVALID_HANDLE;
1890                 goto exit;
1891         }
1892
1893         vpath = vp->vpath;
1894
1895         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1896
1897         if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
1898                 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1899                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1900         }
1901 exit:
1902         return status;
1903 }
1904
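/*
 * Usage sketch (illustrative only): all-multicast handling follows the
 * same pattern in a hypothetical rx-mode handler:
 *
 *	if (netdev->flags & IFF_ALLMULTI)
 *		status = vxge_hw_vpath_mcast_enable(vp);
 *	else
 *		status = vxge_hw_vpath_mcast_disable(vp);
 *
 * Both helpers touch VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN only when the
 * bit actually needs to change, so repeated calls are cheap.
 */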
1905 /*
1906  * __vxge_hw_vpath_alarm_process - Process Alarms.
1907  * @vpath: Virtual Path.
1908  * @skip_alarms: Do not clear the alarms
1909  *
1910  * Process vpath alarms.
1911  *
1912  */
1913 enum vxge_hw_status __vxge_hw_vpath_alarm_process(
1914                         struct __vxge_hw_virtualpath *vpath,
1915                         u32 skip_alarms)
1916 {
1917         u64 val64;
1918         u64 alarm_status;
1919         u64 pic_status;
1920         struct __vxge_hw_device *hldev = NULL;
1921         enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
1922         u64 mask64;
1923         struct vxge_hw_vpath_stats_sw_info *sw_stats;
1924         struct vxge_hw_vpath_reg __iomem *vp_reg;
1925
1926         if (vpath == NULL) {
1927                 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1928                         alarm_event);
1929                 goto out2;
1930         }
1931
1932         hldev = vpath->hldev;
1933         vp_reg = vpath->vp_reg;
1934         alarm_status = readq(&vp_reg->vpath_general_int_status);
1935
1936         if (alarm_status == VXGE_HW_ALL_FOXES) {
1937                 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
1938                         alarm_event);
1939                 goto out;
1940         }
1941
1942         sw_stats = vpath->sw_stats;
1943
1944         if (alarm_status & ~(
1945                 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
1946                 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
1947                 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
1948                 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
1949                 sw_stats->error_stats.unknown_alarms++;
1950
1951                 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1952                         alarm_event);
1953                 goto out;
1954         }
1955
1956         if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
1957
1958                 val64 = readq(&vp_reg->xgmac_vp_int_status);
1959
1960                 if (val64 &
1961                 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
1962
1963                         val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
1964
1965                         if (((val64 &
1966                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
1967                             (!(val64 &
1968                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
1969                             ((val64 &
1970                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
1971                                 && (!(val64 &
1972                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
1973                         ))) {
1974                                 sw_stats->error_stats.network_sustained_fault++;
1975
1976                                 writeq(
1977                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
1978                                         &vp_reg->asic_ntwk_vp_err_mask);
1979
1980                                 __vxge_hw_device_handle_link_down_ind(hldev);
1981                                 alarm_event = VXGE_HW_SET_LEVEL(
1982                                         VXGE_HW_EVENT_LINK_DOWN, alarm_event);
1983                         }
1984
1985                         if (((val64 &
1986                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
1987                             (!(val64 &
1988                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
1989                             ((val64 &
1990                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
1991                                 && (!(val64 &
1992                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
1993                         ))) {
1994
1995                                 sw_stats->error_stats.network_sustained_ok++;
1996
1997                                 writeq(
1998                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
1999                                         &vp_reg->asic_ntwk_vp_err_mask);
2000
2001                                 __vxge_hw_device_handle_link_up_ind(hldev);
2002                                 alarm_event = VXGE_HW_SET_LEVEL(
2003                                         VXGE_HW_EVENT_LINK_UP, alarm_event);
2004                         }
2005
2006                         writeq(VXGE_HW_INTR_MASK_ALL,
2007                                 &vp_reg->asic_ntwk_vp_err_reg);
2008
2009                         alarm_event = VXGE_HW_SET_LEVEL(
2010                                 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
2011
2012                         if (skip_alarms)
2013                                 return VXGE_HW_OK;
2014                 }
2015         }
2016
2017         if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
2018
2019                 pic_status = readq(&vp_reg->vpath_ppif_int_status);
2020
2021                 if (pic_status &
2022                     VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
2023
2024                         val64 = readq(&vp_reg->general_errors_reg);
2025                         mask64 = readq(&vp_reg->general_errors_mask);
2026
2027                         if ((val64 &
2028                                 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
2029                                 ~mask64) {
2030                                 sw_stats->error_stats.ini_serr_det++;
2031
2032                                 alarm_event = VXGE_HW_SET_LEVEL(
2033                                         VXGE_HW_EVENT_SERR, alarm_event);
2034                         }
2035
2036                         if ((val64 &
2037                             VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
2038                                 ~mask64) {
2039                                 sw_stats->error_stats.dblgen_fifo0_overflow++;
2040
2041                                 alarm_event = VXGE_HW_SET_LEVEL(
2042                                         VXGE_HW_EVENT_FIFO_ERR, alarm_event);
2043                         }
2044
2045                         if ((val64 &
2046                             VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
2047                                 ~mask64)
2048                                 sw_stats->error_stats.statsb_pif_chain_error++;
2049
2050                         if ((val64 &
2051                            VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
2052                                 ~mask64)
2053                                 sw_stats->error_stats.statsb_drop_timeout++;
2054
2055                         if ((val64 &
2056                                 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
2057                                 ~mask64)
2058                                 sw_stats->error_stats.target_illegal_access++;
2059
2060                         if (!skip_alarms) {
2061                                 writeq(VXGE_HW_INTR_MASK_ALL,
2062                                         &vp_reg->general_errors_reg);
2063                                 alarm_event = VXGE_HW_SET_LEVEL(
2064                                         VXGE_HW_EVENT_ALARM_CLEARED,
2065                                         alarm_event);
2066                         }
2067                 }
2068
2069                 if (pic_status &
2070                     VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
2071
2072                         val64 = readq(&vp_reg->kdfcctl_errors_reg);
2073                         mask64 = readq(&vp_reg->kdfcctl_errors_mask);
2074
2075                         if ((val64 &
2076                             VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
2077                                 ~mask64) {
2078                                 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
2079
2080                                 alarm_event = VXGE_HW_SET_LEVEL(
2081                                         VXGE_HW_EVENT_FIFO_ERR,
2082                                         alarm_event);
2083                         }
2084
2085                         if ((val64 &
2086                             VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
2087                                 ~mask64) {
2088                                 sw_stats->error_stats.kdfcctl_fifo0_poison++;
2089
2090                                 alarm_event = VXGE_HW_SET_LEVEL(
2091                                         VXGE_HW_EVENT_FIFO_ERR,
2092                                         alarm_event);
2093                         }
2094
2095                         if ((val64 &
2096                             VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
2097                                 ~mask64) {
2098                                 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
2099
2100                                 alarm_event = VXGE_HW_SET_LEVEL(
2101                                         VXGE_HW_EVENT_FIFO_ERR,
2102                                         alarm_event);
2103                         }
2104
2105                         if (!skip_alarms) {
2106                                 writeq(VXGE_HW_INTR_MASK_ALL,
2107                                         &vp_reg->kdfcctl_errors_reg);
2108                                 alarm_event = VXGE_HW_SET_LEVEL(
2109                                         VXGE_HW_EVENT_ALARM_CLEARED,
2110                                         alarm_event);
2111                         }
2112                 }
2113
2114         }
2115
2116         if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
2117
2118                 val64 = readq(&vp_reg->wrdma_alarm_status);
2119
2120                 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
2121
2122                         val64 = readq(&vp_reg->prc_alarm_reg);
2123                         mask64 = readq(&vp_reg->prc_alarm_mask);
2124
2125                         if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
2126                                 ~mask64)
2127                                 sw_stats->error_stats.prc_ring_bumps++;
2128
2129                         if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
2130                                 ~mask64) {
2131                                 sw_stats->error_stats.prc_rxdcm_sc_err++;
2132
2133                                 alarm_event = VXGE_HW_SET_LEVEL(
2134                                         VXGE_HW_EVENT_VPATH_ERR,
2135                                         alarm_event);
2136                         }
2137
2138                         if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
2139                                 & ~mask64) {
2140                                 sw_stats->error_stats.prc_rxdcm_sc_abort++;
2141
2142                                 alarm_event = VXGE_HW_SET_LEVEL(
2143                                                 VXGE_HW_EVENT_VPATH_ERR,
2144                                                 alarm_event);
2145                         }
2146
2147                         if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
2148                                  & ~mask64) {
2149                                 sw_stats->error_stats.prc_quanta_size_err++;
2150
2151                                 alarm_event = VXGE_HW_SET_LEVEL(
2152                                         VXGE_HW_EVENT_VPATH_ERR,
2153                                         alarm_event);
2154                         }
2155
2156                         if (!skip_alarms) {
2157                                 writeq(VXGE_HW_INTR_MASK_ALL,
2158                                         &vp_reg->prc_alarm_reg);
2159                                 alarm_event = VXGE_HW_SET_LEVEL(
2160                                                 VXGE_HW_EVENT_ALARM_CLEARED,
2161                                                 alarm_event);
2162                         }
2163                 }
2164         }
2165 out:
2166         hldev->stats.sw_dev_err_stats.vpath_alarms++;
2167 out2:
2168         if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
2169                 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
2170                 return VXGE_HW_OK;
2171
2172         __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
2173
2174         if (alarm_event == VXGE_HW_EVENT_SERR)
2175                 return VXGE_HW_ERR_CRITICAL;
2176
2177         return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
2178                 VXGE_HW_ERR_SLOT_FREEZE :
2179                 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
2180                 VXGE_HW_ERR_VPATH;
2181 }
2182
2183 /*
2184  * vxge_hw_vpath_alarm_process - Process Alarms.
2185  * @vp: Virtual Path handle.
2186  * @skip_alarms: Do not clear the alarms
2187  *
2188  * Process vpath alarms.
2189  *
2190  */
2191 enum vxge_hw_status vxge_hw_vpath_alarm_process(
2192                         struct __vxge_hw_vpath_handle *vp,
2193                         u32 skip_alarms)
2194 {
2195         enum vxge_hw_status status = VXGE_HW_OK;
2196
2197         if (vp == NULL) {
2198                 status = VXGE_HW_ERR_INVALID_HANDLE;
2199                 goto exit;
2200         }
2201
2202         status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2203 exit:
2204         return status;
2205 }
2206
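/*
 * Usage sketch (illustrative only): an alarm interrupt handler would
 * typically invoke this once per opened vpath:
 *
 *	status = vxge_hw_vpath_alarm_process(vp, 0);
 *
 * Passing skip_alarms == 0 lets the helper clear the alarm registers it
 * has handled; a non-zero value leaves them set, which is useful when
 * the caller only wants to inspect the cause.
 */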
2207 /**
2208  * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2209  *                            alarms
2210  * @vp: Virtual Path handle.
2211  * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2212  *             interrupts (can be repeated). If fifo or ring are not enabled,
2213  *             the MSIX vector for that entry should be set to 0
2214  * @alarm_msix_id: MSIX vector for alarm.
2215  *
2216  * This API associates the given MSIX vector numbers with the four TIM
2217  * interrupts and the alarm interrupt.
2218  */
2219 enum vxge_hw_status
2220 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2221                        int alarm_msix_id)
2222 {
2223         u64 val64;
2224         struct __vxge_hw_virtualpath *vpath = vp->vpath;
2225         struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2226         u32 first_vp_id = vpath->hldev->first_vp_id;
2227
2228         /* Map the four TIM traffic interrupt groups to their MSIX vectors */
2229         val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2230                         (first_vp_id * 4) + tim_msix_id[0]) |
2231                 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2232                         (first_vp_id * 4) + tim_msix_id[1]) |
2233                 VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
2234                         (first_vp_id * 4) + tim_msix_id[2]) |
2235                 VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
2236                         (first_vp_id * 4) + tim_msix_id[3]);
2237
2238         writeq(val64, &vp_reg->interrupt_cfg0);
2239
2240         writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2241                         (first_vp_id * 4) + alarm_msix_id),
2242                         &vp_reg->interrupt_cfg2);
2243
2244         /* In one-shot mode, enable one-shot behavior on vectors 1-3 */
2245         if (vpath->hldev->config.intr_mode ==
2246                                         VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2247                 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2248                                 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2249                                 0, 32), &vp_reg->one_shot_vect1_en);
2250
2251                 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2252                                 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2253                                 0, 32), &vp_reg->one_shot_vect2_en);
2254
2255                 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2256                                 VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2257                                 0, 32), &vp_reg->one_shot_vect3_en);
2258         }
2261
2262         return VXGE_HW_OK;
2263 }
2264
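/*
 * Usage sketch (illustrative only): with a "two traffic vectors per
 * vpath" layout, a caller might map TX TIM traffic to vector 0, RX to
 * vector 1, and alarms to a separate vector ("alarm_vector" here is a
 * hypothetical name):
 *
 *	int tim_msix_id[4] = {0, 1, 0, 0};
 *
 *	status = vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_vector);
 *
 * The exact layout is driver policy; unused TIM entries stay 0 as the
 * comment above requires.
 */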
2265 /**
2266  * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2267  * @vp: Virtual Path handle.
2268  * @msix_id:  MSIX ID
2269  *
2270  * The function masks the msix interrupt for the given msix_id.
2271  *
2272  * Returns: nothing, since the function has a void return type.
2273  *
2274  * See also: vxge_hw_vpath_msix_unmask() and
2275  * vxge_hw_vpath_msix_clear()
2276  */
2277 void
2278 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2279 {
2280         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2281         __vxge_hw_pio_mem_write32_upper(
2282                 (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2283                         (msix_id  / 4)), 0, 32),
2284                 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2287 }
2288
2289 /**
2290  * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2291  * @vp: Virtual Path handle.
2292  * @msix_id:  MSIX ID
2293  *
2294  * The function clears the msix interrupt for the given msix_id. In
2295  * one-shot mode this also re-arms the vector; otherwise it simply
2296  * unmasks it.
2297  *
2298  * Returns: nothing, since the function has a void return type.
2299  * See also: vxge_hw_vpath_msix_mask() and vxge_hw_vpath_msix_unmask()
2300  */
2301 void
2302 vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2303 {
2304         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2305         if (hldev->config.intr_mode ==
2306                         VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2307                 __vxge_hw_pio_mem_write32_upper(
2308                         (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2309                                 (msix_id/4)), 0, 32),
2310                                 &hldev->common_reg->
2311                                         clr_msix_one_shot_vec[msix_id%4]);
2312         } else {
2313                 __vxge_hw_pio_mem_write32_upper(
2314                         (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2315                                 (msix_id/4)), 0, 32),
2316                                 &hldev->common_reg->
2317                                         clear_msix_mask_vect[msix_id%4]);
2318         }
2321 }
2322
2323 /**
2324  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2325  * @vp: Virtual Path handle.
2326  * @msix_id:  MSIX ID
2327  *
2328  * The function unmasks the msix interrupt for the given msix_id.
2329  *
2330  * Returns: nothing, since the function has a void return type.
2331  *
2332  * See also: vxge_hw_vpath_msix_mask() and
2333  * vxge_hw_vpath_msix_clear()
2334  */
2335 void
2336 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2337 {
2338         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2339         __vxge_hw_pio_mem_write32_upper(
2340                         (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2341                         (msix_id/4)), 0, 32),
2342                         &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2345 }
2346
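/*
 * Usage sketch (illustrative only): a per-ring MSIX handler commonly
 * brackets its work with the helpers above:
 *
 *	vxge_hw_vpath_msix_mask(vp, msix_id);
 *	... process the ring or fifo ...
 *	vxge_hw_vpath_msix_clear(vp, msix_id);
 *
 * In one-shot mode the clear also re-arms the vector; otherwise it is
 * equivalent to vxge_hw_vpath_msix_unmask().
 */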
2347 /**
2348  * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
2349  * @vp: Virtual Path handle.
2350  *
2351  * The function masks all msix interrupts for the given vpath.
2352  *
2353  */
2354 void
2355 vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
2356 {
2357
2358         __vxge_hw_pio_mem_write32_upper(
2359                 (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
2360                 &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
2363 }
2364
2365 /**
2366  * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2367  * @vp: Virtual Path handle.
2368  *
2369  * Mask Tx and Rx vpath interrupts.
2370  *
2371  * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2372  */
2373 void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2374 {
2375         u64     tim_int_mask0[4] = {[0 ...3] = 0};
2376         u32     tim_int_mask1[4] = {[0 ...3] = 0};
2377         u64     val64;
2378         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2379
2380         VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2381                 tim_int_mask1, vp->vpath->vp_id);
2382
2383         val64 = readq(&hldev->common_reg->tim_int_mask0);
2384
2385         if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2386                 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2387                 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2388                         tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2389                         &hldev->common_reg->tim_int_mask0);
2390         }
2391
2392         val64 = readl(&hldev->common_reg->tim_int_mask1);
2393
2394         if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2395                 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2396                 __vxge_hw_pio_mem_write32_upper(
2397                         (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2398                         tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2399                         &hldev->common_reg->tim_int_mask1);
2400         }
2403 }
2404
2405 /**
2406  * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2407  * @vp: Virtual Path handle.
2408  *
2409  * Unmask Tx and Rx vpath interrupts.
2410  *
2411  * See also: vxge_hw_vpath_inta_mask_tx_rx()
2412  */
2413 void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2414 {
2415         u64     tim_int_mask0[4] = {[0 ...3] = 0};
2416         u32     tim_int_mask1[4] = {[0 ...3] = 0};
2417         u64     val64;
2418         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2419
2420         VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2421                 tim_int_mask1, vp->vpath->vp_id);
2422
2423         val64 = readq(&hldev->common_reg->tim_int_mask0);
2424
2425         if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2426            (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2427                 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2428                         tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2429                         &hldev->common_reg->tim_int_mask0);
2430         }
2431
2432         if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2433            (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2434                 __vxge_hw_pio_mem_write32_upper(
2435                         (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2436                           tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2437                         &hldev->common_reg->tim_int_mask1);
2438         }
2441 }
2442
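/*
 * Usage sketch (illustrative only): an INTA interrupt handler can use
 * the pair above to keep traffic interrupts quiet while it polls:
 *
 *	vxge_hw_vpath_inta_mask_tx_rx(vp);
 *	... poll the ring and fifo ...
 *	vxge_hw_vpath_inta_unmask_tx_rx(vp);
 *
 * The masking is applied in the shared tim_int_mask0/tim_int_mask1
 * registers, but only to this vpath's TX/RX bits, so other vpaths and
 * the alarm interrupt are unaffected.
 */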
2443 /**
2444  * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2445  * descriptors and process the same.
2446  * @ring: Handle to the ring object used for receive
2447  *
2448  * The function polls the Rx for the completed descriptors and calls
2449  * the driver via the supplied completion callback.
2450  *
2451  * Returns: VXGE_HW_OK, if the polling completed successfully.
2452  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2453  * descriptors available which are yet to be processed.
2454  *
2455  * See also: vxge_hw_vpath_poll_tx()
2456  */
2457 enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2458 {
2459         u8 t_code;
2460         enum vxge_hw_status status = VXGE_HW_OK;
2461         void *first_rxdh;
2462         u64 val64 = 0;
2463         int new_count = 0;
2464
2465         ring->cmpl_cnt = 0;
2466
2467         status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2468         if (status == VXGE_HW_OK)
2469                 ring->callback(ring, first_rxdh,
2470                         t_code, ring->channel.userdata);
2471
2472         if (ring->cmpl_cnt != 0) {
2473                 ring->doorbell_cnt += ring->cmpl_cnt;
2474                 if (ring->doorbell_cnt >= ring->rxds_limit) {
2475                         /*
2476                          * Each RxD is of 4 qwords, update the number of
2477                          * qwords replenished
2478                          */
2479                         new_count = (ring->doorbell_cnt * 4);
2480
2481                         /* For each block add 4 more qwords */
2482                         ring->total_db_cnt += ring->doorbell_cnt;
2483                         if (ring->total_db_cnt >= ring->rxds_per_block) {
2484                                 new_count += 4;
2485                                 /* Reset total count */
2486                                 ring->total_db_cnt %= ring->rxds_per_block;
2487                         }
2488                         writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2489                                 &ring->vp_reg->prc_rxd_doorbell);
2490                         /* Dummy read to flush the posted doorbell write */
2491                         val64 = readl(&ring->common_reg->titan_general_int_status);
2492                         ring->doorbell_cnt = 0;
2493                 }
2494         }
2495
2496         return status;
2497 }
2498
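/*
 * Usage sketch (illustrative only): a NAPI poll routine would drive the
 * receive side through this entry point, roughly ("pkts_processed",
 * "budget" and "napi" are the usual NAPI bookkeeping, assumed here):
 *
 *	vxge_hw_vpath_poll_rx(ring);
 *	if (pkts_processed < budget) {
 *		napi_complete(napi);
 *		vxge_hw_vpath_inta_unmask_tx_rx(vp);
 *	}
 *
 * The per-packet accounting happens inside ring->callback.
 */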
2499 /**
2500  * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2501  * the same.
2502  * @fifo: Handle to the fifo object used for non offload send
2503  * @skb_ptr: Completed skbs, returned for the caller to free
2504  * @nr_skb: Maximum number of skbs @skb_ptr can hold
2505  * @more: Set when more completed descriptors remain to be processed
2506  *
2507  * The function polls the Tx for the completed descriptors and calls
2508  * the driver via the supplied completion callback.
2509  *
2510  * Returns: VXGE_HW_OK if the polling completed successfully, or
2511  * VXGE_HW_COMPLETIONS_REMAIN if completed descriptors are still pending.
2512  */
2513 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2514                                         struct sk_buff ***skb_ptr, int nr_skb,
2515                                         int *more)
2516 {
2517         enum vxge_hw_fifo_tcode t_code;
2518         void *first_txdlh;
2519         enum vxge_hw_status status = VXGE_HW_OK;
2520         struct __vxge_hw_channel *channel;
2521
2522         channel = &fifo->channel;
2523
2524         status = vxge_hw_fifo_txdl_next_completed(fifo,
2525                                 &first_txdlh, &t_code);
2526         if (status == VXGE_HW_OK)
2527                 if (fifo->callback(fifo, first_txdlh, t_code,
2528                         channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2529                         status = VXGE_HW_COMPLETIONS_REMAIN;
2530
2531         return status;
2532 }
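/*
 * Usage sketch (illustrative only): the transmit-completion side is
 * usually driven right next to the receive poll. "NR_SKB_COMPLETED" is
 * a hypothetical batch size:
 *
 *	struct sk_buff *completed[NR_SKB_COMPLETED];
 *	struct sk_buff **skb_ptr = completed;
 *	int more = 0;
 *
 *	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, NR_SKB_COMPLETED, &more);
 *	... free the skbs collected in completed[] ...
 *
 * The callback stores finished skbs through skb_ptr and sets "more"
 * when further completions remain.
 */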