/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>

#include "vxge-traffic.h"
#include "vxge-config.h"

static enum vxge_hw_status
__vxge_hw_fifo_create(
        struct __vxge_hw_vpath_handle *vpath_handle,
        struct vxge_hw_fifo_attr *attr);

static enum vxge_hw_status
__vxge_hw_fifo_abort(
        struct __vxge_hw_fifo *fifoh);

static enum vxge_hw_status
__vxge_hw_fifo_reset(
        struct __vxge_hw_fifo *ringh);

static enum vxge_hw_status
__vxge_hw_fifo_delete(
        struct __vxge_hw_vpath_handle *vpath_handle);

static struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
                        u32 size);

static void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
                        struct __vxge_hw_blockpool_entry *entry);

static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
                                        void *block_addr,
                                        u32 length,
                                        struct pci_dev *dma_h,
                                        struct pci_dev *acc_handle);

static enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
                        struct __vxge_hw_blockpool *blockpool,
                        u32 pool_size,
                        u32 pool_max);

static void
__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);

static void *
__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
                        u32 size,
                        struct vxge_hw_mempool_dma *dma_object);

static void
__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
                        void *memblock,
                        u32 size,
                        struct vxge_hw_mempool_dma *dma_object);

static struct __vxge_hw_channel*
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
                        enum __vxge_hw_channel_type type, u32 length,
                        u32 per_dtr_space, void *userdata);

static void
__vxge_hw_channel_free(
        struct __vxge_hw_channel *channel);

static enum vxge_hw_status
__vxge_hw_channel_initialize(
        struct __vxge_hw_channel *channel);

static enum vxge_hw_status
__vxge_hw_channel_reset(
        struct __vxge_hw_channel *channel);

static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);

static enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);

static enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);

static void
__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);

static void
__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);

static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
        u32 vp_id,
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
        struct vxge_hw_device_hw_info *hw_info);

static enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);

static void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);

static enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);

static enum vxge_hw_status
__vxge_hw_device_register_poll(
        void __iomem *reg,
        u64 mask, u32 max_millis);

static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
                          u64 mask, u32 max_millis)
{
        __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
        wmb();

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
        wmb();

        return __vxge_hw_device_register_poll(addr, mask, max_millis);
}
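
/*
 * Typical usage (see vxge_hw_mrpcim_stats_access() below): write a
 * 64-bit command word that carries a self-clearing strobe bit, then
 * poll until the hardware clears it:
 *
 *      status = __vxge_hw_pio_mem_write64(val64,
 *                      &hldev->mrpcim_reg->xmac_stats_sys_cmd,
 *                      VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
 *                      hldev->config.device_poll_millis);
 */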

static struct vxge_hw_mempool*
__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
                         u32 item_size, u32 private_size, u32 items_initial,
                         u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
                         void *userdata);
static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);

static enum vxge_hw_status
__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
                          struct vxge_hw_vpath_stats_hw_info *hw_stats);

static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);

static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);

static u64
__vxge_hw_vpath_pci_func_mode_get(u32 vp_id,
                                  struct vxge_hw_vpath_reg __iomem *vpath_reg);

static u32
__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);

static enum vxge_hw_status
__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
                         u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);

static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);

static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);

static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
                           struct vxge_hw_device_hw_info *hw_info);

static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);

static void
__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);

static enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
                             u32 operation, u32 offset, u64 *stat);

static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
                                  struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);

static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
                                  struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);

/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
struct __vxge_hw_channel*
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
                           enum __vxge_hw_channel_type type,
                           u32 length, u32 per_dtr_space, void *userdata)
{
        struct __vxge_hw_channel *channel;
        struct __vxge_hw_device *hldev;
        int size = 0;
        u32 vp_id;

        hldev = vph->vpath->hldev;
        vp_id = vph->vpath->vp_id;

        switch (type) {
        case VXGE_HW_CHANNEL_TYPE_FIFO:
                size = sizeof(struct __vxge_hw_fifo);
                break;
        case VXGE_HW_CHANNEL_TYPE_RING:
                size = sizeof(struct __vxge_hw_ring);
                break;
        default:
                break;
        }

        channel = kzalloc(size, GFP_KERNEL);
        if (channel == NULL)
                goto exit0;
        INIT_LIST_HEAD(&channel->item);

        channel->common_reg = hldev->common_reg;
        channel->first_vp_id = hldev->first_vp_id;
        channel->type = type;
        channel->devh = hldev;
        channel->vph = vph;
        channel->userdata = userdata;
        channel->per_dtr_space = per_dtr_space;
        channel->length = length;
        channel->vp_id = vp_id;

        channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->work_arr == NULL)
                goto exit1;

        channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->free_arr == NULL)
                goto exit1;
        channel->free_ptr = length;

        channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->reserve_arr == NULL)
                goto exit1;
        channel->reserve_ptr = length;
        channel->reserve_top = 0;

        channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->orig_arr == NULL)
                goto exit1;

        return channel;
exit1:
        __vxge_hw_channel_free(channel);

exit0:
        return NULL;
}

/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
        kfree(channel->work_arr);
        kfree(channel->free_arr);
        kfree(channel->reserve_arr);
        kfree(channel->orig_arr);
        kfree(channel);
}

/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
        u32 i;
        struct __vxge_hw_virtualpath *vpath;

        vpath = channel->vph->vpath;

        if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
                for (i = 0; i < channel->length; i++)
                        channel->orig_arr[i] = channel->reserve_arr[i];
        }

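        /* The casts below rely on 'struct __vxge_hw_channel' being the
         * first member of both __vxge_hw_fifo and __vxge_hw_ring, so a
         * pointer to the channel doubles as a pointer to its container. */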
        switch (channel->type) {
        case VXGE_HW_CHANNEL_TYPE_FIFO:
                vpath->fifoh = (struct __vxge_hw_fifo *)channel;
                channel->stats = &((struct __vxge_hw_fifo *)
                                channel)->stats->common_stats;
                break;
        case VXGE_HW_CHANNEL_TYPE_RING:
                vpath->ringh = (struct __vxge_hw_ring *)channel;
                channel->stats = &((struct __vxge_hw_ring *)
                                channel)->stats->common_stats;
                break;
        default:
                break;
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
        u32 i;

        for (i = 0; i < channel->length; i++) {
                if (channel->reserve_arr != NULL)
                        channel->reserve_arr[i] = channel->orig_arr[i];
                if (channel->free_arr != NULL)
                        channel->free_arr[i] = NULL;
                if (channel->work_arr != NULL)
                        channel->work_arr[i] = NULL;
        }
        channel->free_ptr = channel->length;
        channel->reserve_ptr = channel->length;
        channel->reserve_top = 0;
        channel->post_index = 0;
        channel->compl_index = 0;

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
        u16 cmd = 0;

        /* Set the PErr Response and SERR bits in the PCI command register
         * (0x140 == PCI_COMMAND_PARITY | PCI_COMMAND_SERR). */
        pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
        cmd |= 0x140;
        pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

        pci_save_state(hldev->pdev);
}

/*
 * __vxge_hw_device_register_poll
 * Polls the given register until the bits set in @mask are cleared,
 * first busy-waiting for roughly a millisecond and then backing off to
 * 1 ms delays for up to @max_millis iterations.
 */
static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
        u64 val64;
        u32 i = 0;
        enum vxge_hw_status ret = VXGE_HW_FAIL;

        udelay(10);

        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                udelay(100);
        } while (++i <= 9);

        i = 0;
        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                mdelay(1);
        } while (++i <= max_millis);

        return ret;
}

/*
 * __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * is in progress
 * This routine polls until the vpath reset-in-progress register reads zero
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
        enum vxge_hw_status status;

        status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
                        VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
                        VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        return status;
}

/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper and reads the toc pointer and returns the
 * memory mapped address of the toc
 */
static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc = NULL;
        enum vxge_hw_status status;

        struct vxge_hw_legacy_reg __iomem *legacy_reg =
                (struct vxge_hw_legacy_reg __iomem *)bar0;

        status = __vxge_hw_legacy_swapper_set(legacy_reg);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&legacy_reg->toc_first_pointer);
        toc = (struct vxge_hw_toc_reg __iomem *)(bar0 + val64);
exit:
        return toc;
}

/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper, reads the toc pointer and initializes the
 * register location pointers in the device object. It then waits until any
 * in-progress vpath reset completes, i.e. until the hardware has finished
 * initializing its registers.
 */
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;
        enum vxge_hw_status status = VXGE_HW_OK;

        hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

        hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
        if (hldev->toc_reg == NULL) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        val64 = readq(&hldev->toc_reg->toc_common_pointer);
        hldev->common_reg =
                (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

        val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
        hldev->mrpcim_reg =
                (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

        for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
                hldev->srpcim_reg[i] =
                        (struct vxge_hw_srpcim_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
                hldev->vpmgmt_reg[i] =
                        (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
        }

        for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
                hldev->vpath_reg[i] =
                        (struct vxge_hw_vpath_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        val64 = readq(&hldev->toc_reg->toc_kdfc);

        switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
        case 0:
                hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
                        VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
                break;
        default:
                break;
        }

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                        (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
        return status;
}

/*
 * __vxge_hw_device_id_get
 * This routine reads the device id and revision numbers into the device
 * structure
 */
void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
{
        u64 val64;

        val64 = readq(&hldev->common_reg->titan_asic_id);
        hldev->device_id =
                (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);

        hldev->major_revision =
                (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);

        hldev->minor_revision =
                (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
}

/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
        u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

        switch (host_type) {
        case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
                if (func_id == 0) {
                        access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                        VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                }
                break;
        case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
        case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
        case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
                break;
        case VXGE_HW_SR_VH_FUNCTION0:
        case VXGE_HW_VH_NORMAL_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        }

        return access_rights;
}
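
/*
 * Example (derived from the switch above): a NO_MR_NO_SR_NORMAL_FUNCTION
 * host with func_id 0 ends up with VPATH | MRPCIM | SRPCIM rights, while
 * any other func_id of that host type keeps plain VPATH access.
 */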

/*
 * __vxge_hw_device_is_privilaged
 * This routine checks if the device function is privileged or not
 */
enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
{
        if (__vxge_hw_device_access_rights_get(host_type,
                func_id) &
                VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
                return VXGE_HW_OK;
        else
                return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}

/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments
 */
void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;

        val64 = readq(&hldev->common_reg->host_type_assignments);

        hldev->host_type =
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;

                hldev->func_id =
                        __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);

                hldev->access_rights = __vxge_hw_device_access_rights_get(
                        hldev->host_type, hldev->func_id);

                hldev->first_vp_id = i;
                break;
        }
}

/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
        int exp_cap;
        u16 lnk;

        /* Get the negotiated link width and speed from PCI config space */
        exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
        pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);

        if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
                return VXGE_HW_ERR_INVALID_PCI_INFO;

        switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
        case PCIE_LNK_WIDTH_RESRV:
        case PCIE_LNK_X1:
        case PCIE_LNK_X2:
        case PCIE_LNK_X4:
        case PCIE_LNK_X8:
                break;
        default:
                return VXGE_HW_ERR_INVALID_PCI_INFO;
        }

        return VXGE_HW_OK;
}
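
/*
 * Example: a LNKSTA value of 0x0041 decodes to CLS == 1 (2.5 GT/s) and
 * NLW == 4 (an x4 link), which the check above accepts; any link speed
 * other than 2.5 GT/s is rejected.
 */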

/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
                                hldev->func_id)) {
                /* Validate the pci-e link width and speed */
                status = __vxge_hw_verify_pci_e_info(hldev);
                if (status != VXGE_HW_OK)
                        goto exit;
        }

exit:
        return status;
}

/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information, and the first mac address for
 * each vpath
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
                           struct vxge_hw_device_hw_info *hw_info)
{
        u32 i;
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc;
        struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
        struct vxge_hw_common_reg __iomem *common_reg;
        struct vxge_hw_vpath_reg __iomem *vpath_reg;
        struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
        enum vxge_hw_status status;

        memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

        toc = __vxge_hw_device_toc_get(bar0);
        if (toc == NULL) {
                status = VXGE_HW_ERR_CRITICAL;
                goto exit;
        }

        val64 = readq(&toc->toc_common_pointer);
        common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                (u64 __iomem *)&common_reg->vpath_rst_in_prog);
        if (status != VXGE_HW_OK)
                goto exit;

        hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

        val64 = readq(&common_reg->host_type_assignments);

        hw_info->host_type =
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpmgmt_pointer[i]);

                vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
                                (bar0 + val64);

                hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
                if (__vxge_hw_device_access_rights_get(hw_info->host_type,
                        hw_info->func_id) &
                        VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

                        val64 = readq(&toc->toc_mrpcim_pointer);

                        mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
                                        (bar0 + val64);

                        writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
                        wmb();
                }

                val64 = readq(&toc->toc_vpath_pointer[i]);

                vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

                hw_info->function_mode =
                        __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);

                status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                break;
        }

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpath_pointer[i]);
                vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

                status = __vxge_hw_vpath_addr_get(i, vpath_reg,
                                hw_info->mac_addrs[i],
                                hw_info->mac_addr_masks[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}
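
/*
 * A minimal probe-time usage sketch (assumes @bar0 was already mapped,
 * e.g. with pci_ioremap_bar(), before any device object exists):
 *
 *      struct vxge_hw_device_hw_info hw_info;
 *
 *      if (vxge_hw_device_hw_info_get(bar0, &hw_info) != VXGE_HW_OK)
 *              goto fail;
 *      // hw_info.vpath_mask and the per-vpath MAC addresses are now valid
 */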

/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Driver cooperates with the OS to find the new Titan device and map its
 * PCI and memory spaces. This routine then allocates and initializes a
 * struct __vxge_hw_device of sizeof(struct __vxge_hw_device) bytes and,
 * on success, returns it via @devh so that HW can perform the Titan
 * hardware initialization.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
        struct __vxge_hw_device **devh,
        struct vxge_hw_device_attr *attr,
        struct vxge_hw_device_config *device_config)
{
        u32 i;
        u32 nblocks = 0;
        struct __vxge_hw_device *hldev = NULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        status = __vxge_hw_device_config_check(device_config);
        if (status != VXGE_HW_OK)
                goto exit;

        hldev = (struct __vxge_hw_device *)
                        vmalloc(sizeof(struct __vxge_hw_device));
        if (hldev == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        memset(hldev, 0, sizeof(struct __vxge_hw_device));
        hldev->magic = VXGE_HW_DEVICE_MAGIC;

        vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

        /* apply config */
        memcpy(&hldev->config, device_config,
                sizeof(struct vxge_hw_device_config));

        hldev->bar0 = attr->bar0;
        hldev->pdev = attr->pdev;

        hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
        hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
        hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

        __vxge_hw_device_pci_e_init(hldev);

        status = __vxge_hw_device_reg_addr_get(hldev);
        if (status != VXGE_HW_OK) {
                vfree(hldev);
                goto exit;
        }
        __vxge_hw_device_id_get(hldev);

        __vxge_hw_device_host_info_get(hldev);

        /* Incrementing for stats blocks */
        nblocks++;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;

                if (device_config->vp_config[i].ring.enable ==
                        VXGE_HW_RING_ENABLE)
                        nblocks += device_config->vp_config[i].ring.ring_blocks;

                if (device_config->vp_config[i].fifo.enable ==
                        VXGE_HW_FIFO_ENABLE)
                        nblocks += device_config->vp_config[i].fifo.fifo_blocks;
                nblocks++;
        }

        if (__vxge_hw_blockpool_create(hldev,
                &hldev->block_pool,
                device_config->dma_blockpool_initial + nblocks,
                device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

                vxge_hw_device_terminate(hldev);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        status = __vxge_hw_device_initialize(hldev);

        if (status != VXGE_HW_OK) {
                vxge_hw_device_terminate(hldev);
                goto exit;
        }

        *devh = hldev;
exit:
        return status;
}
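
/*
 * Illustrative call sequence (a sketch, not the driver's actual probe
 * path; the callback names are hypothetical):
 *
 *      struct vxge_hw_device_attr attr;
 *      struct __vxge_hw_device *hldev;
 *
 *      attr.bar0 = bar0;
 *      attr.pdev = pdev;
 *      attr.uld_callbacks.link_up = my_link_up;
 *      attr.uld_callbacks.link_down = my_link_down;
 *      attr.uld_callbacks.crit_err = my_crit_err;
 *      status = vxge_hw_device_initialize(&hldev, &attr, device_config);
 */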

/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
        vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

        hldev->magic = VXGE_HW_DEVICE_DEAD;
        __vxge_hw_blockpool_destroy(&hldev->block_pool);
        vfree(hldev);
}

/*
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
                        struct vxge_hw_device_stats_hw_info *hw_stats)
{
        u32 i;
        enum vxge_hw_status status = VXGE_HW_OK;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
                        (hldev->virtual_paths[i].vp_open ==
                                VXGE_HW_VP_NOT_OPEN))
                        continue;

                memcpy(hldev->virtual_paths[i].hw_stats_sav,
                                hldev->virtual_paths[i].hw_stats,
                                sizeof(struct vxge_hw_vpath_stats_hw_info));

                status = __vxge_hw_vpath_stats_get(
                        &hldev->virtual_paths[i],
                        hldev->virtual_paths[i].hw_stats);
        }

        memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
                        sizeof(struct vxge_hw_device_stats_hw_info));

        return status;
}

/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
                        struct __vxge_hw_device *hldev,
                        struct vxge_hw_device_stats_sw_info *sw_stats)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
                sizeof(struct vxge_hw_device_stats_sw_info));

        return status;
}

/*
 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
 *                               and offset and perform an operation
 * Get the statistics from the given location and offset.
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
                            u32 operation, u32 location, u32 offset, u64 *stat)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
                VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
                VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
                VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

        status = __vxge_hw_pio_mem_write64(val64,
                                &hldev->mrpcim_reg->xmac_stats_sys_cmd,
                                VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
                                hldev->config.device_poll_millis);

        if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
                *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
        else
                *stat = 0;
exit:
        return status;
}

/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Get the Statistics on aggregate port
 */
static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
                                   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
        u64 *val64;
        int i;
        u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
        enum vxge_hw_status status = VXGE_HW_OK;

        val64 = (u64 *)aggr_stats;

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

        for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
                status = vxge_hw_mrpcim_stats_access(hldev,
                                        VXGE_HW_STATS_OP_READ,
                                        VXGE_HW_STATS_LOC_AGGR,
                                        ((offset + (104 * port)) >> 3), val64);
                if (status != VXGE_HW_OK)
                        goto exit;

                offset += 8;
                val64++;
        }
exit:
        return status;
}

/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Get the Statistics on port
 */
static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
                                   struct vxge_hw_xmac_port_stats *port_stats)
{
        u64 *val64;
        enum vxge_hw_status status = VXGE_HW_OK;
        int i;
        u32 offset = 0x0;

        val64 = (u64 *)port_stats;

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

        for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
                status = vxge_hw_mrpcim_stats_access(hldev,
                                        VXGE_HW_STATS_OP_READ,
                                        VXGE_HW_STATS_LOC_AGGR,
                                        ((offset + (608 * port)) >> 3), val64);
                if (status != VXGE_HW_OK)
                        goto exit;

                offset += 8;
                val64++;
        }

exit:
        return status;
}

/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Get the XMAC Statistics
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
                              struct vxge_hw_xmac_stats *xmac_stats)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        u32 i;

        status = vxge_hw_device_xmac_aggr_stats_get(hldev,
                                        0, &xmac_stats->aggr_stats[0]);

        if (status != VXGE_HW_OK)
                goto exit;

        status = vxge_hw_device_xmac_aggr_stats_get(hldev,
                                1, &xmac_stats->aggr_stats[1]);
        if (status != VXGE_HW_OK)
                goto exit;

        for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

                status = vxge_hw_device_xmac_port_stats_get(hldev,
                                        i, &xmac_stats->port_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                status = __vxge_hw_vpath_xmac_tx_stats_get(
                                        &hldev->virtual_paths[i],
                                        &xmac_stats->vpath_tx_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_xmac_rx_stats_get(
                                        &hldev->virtual_paths[i],
                                        &xmac_stats->vpath_rx_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}

/*
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
 * This routine is used to dynamically change the debug output
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
                              enum vxge_debug_level level, u32 mask)
{
        if (hldev == NULL)
                return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
        defined(VXGE_DEBUG_ERR_MASK)
        hldev->debug_module_mask = mask;
        hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
        hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
        hldev->level_trace = level & VXGE_TRACE;
#endif
}

/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
        if (hldev == NULL)
                return VXGE_ERR;
        else
                return hldev->level_err;
#else
        return 0;
#endif
}

/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
        if (hldev == NULL)
                return VXGE_TRACE;
        else
                return hldev->level_trace;
#else
        return 0;
#endif
}

/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the Pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
                                                 u32 port, u32 *tx, u32 *rx)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }

        if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
                status = VXGE_HW_ERR_INVALID_PORT;
                goto exit;
        }

        if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
                status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
                goto exit;
        }

        val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
        if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
                *tx = 1;
        if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
                *rx = 1;
exit:
        return status;
}

/*
 * vxge_hw_device_setpause_data - set/reset pause frame generation.
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
                                                 u32 port, u32 tx, u32 rx)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }

        if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
                status = VXGE_HW_ERR_INVALID_PORT;
                goto exit;
        }

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
        if (tx)
                val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
        else
                val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
        if (rx)
                val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
        else
                val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

        writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
        return status;
}

u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
        int link_width, exp_cap;
        u16 lnk;

        exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
        pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
        link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
        return link_width;
}

/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of memory block
 */
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
        return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}

/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets index to a memory block
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
        *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}

/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * in RxD block
 * Sets the next block pointer in RxD block
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
        *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}

/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 *             first block
 * Returns the dma address of the first RxD block
 */
static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
        struct vxge_hw_mempool_dma *dma_object;

        dma_object = ring->mempool->memblocks_dma_arr;
        vxge_assert(dma_object != NULL);

        return dma_object->addr;
}

/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
                                               void *item)
{
        u32 memblock_idx;
        void *memblock;
        struct vxge_hw_mempool_dma *memblock_dma_object;
        ptrdiff_t dma_item_offset;

        /* get owner memblock index */
        memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

        /* get owner memblock by memblock index */
        memblock = mempoolh->memblocks_arr[memblock_idx];

        /* get memblock DMA object by memblock index */
        memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

        /* calculate offset in the memblock of this item */
        dma_item_offset = (u8 *)item - (u8 *)memblock;

        return memblock_dma_object->addr + dma_item_offset;
}

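/*
 * Worked example: if an item sits 0x200 bytes into memblock 2, the
 * address returned above is memblocks_dma_arr[2].addr + 0x200.
 */
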
/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links the 'from' RxD block to the 'to' RxD block by
 * writing the DMA address of 'to' into the next-block pointer of 'from'
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
                                         struct __vxge_hw_ring *ring, u32 from,
                                         u32 to)
{
        u8 *to_item, *from_item;
        dma_addr_t to_dma;

        /* get "from" RxD block */
        from_item = mempoolh->items_arr[from];
        vxge_assert(from_item);

        /* get "to" RxD block */
        to_item = mempoolh->items_arr[to];
        vxge_assert(to_item);

        /* return address of the beginning of previous RxD block */
        to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

        /* set next pointer for this RxD block to point on
         * previous item's DMA start address */
        __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}

/*
 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
 * block callback
 * This function is callback passed to __vxge_hw_mempool_create to create memory
 * pool for RxD block
 */
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
                                  u32 memblock_index,
                                  struct vxge_hw_mempool_dma *dma_object,
                                  u32 index, u32 is_last)
{
        u32 i;
        void *item = mempoolh->items_arr[index];
        struct __vxge_hw_ring *ring =
                (struct __vxge_hw_ring *)mempoolh->userdata;

        /* format rxds array */
        for (i = 0; i < ring->rxds_per_block; i++) {
                void *rxdblock_priv;
                void *uld_priv;
                struct vxge_hw_ring_rxd_1 *rxdp;

                u32 reserve_index = ring->channel.reserve_ptr -
                                (index * ring->rxds_per_block + i + 1);
                u32 memblock_item_idx;

                ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
                                                i * ring->rxd_size;

                /* Note: memblock_item_idx is index of the item within
                 *       the memblock. For instance, in case of three RxD-blocks
                 *       per memblock this value can be 0, 1 or 2. */
                rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
                                        memblock_index, item,
                                        &memblock_item_idx);

                rxdp = (struct vxge_hw_ring_rxd_1 *)
                                ring->channel.reserve_arr[reserve_index];

                uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);

                /* pre-format Host_Control */
                rxdp->host_control = (u64)(size_t)uld_priv;
        }

        __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

        if (is_last) {
                /* link last one with first one */
                __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
        }

        if (index > 0) {
                /* link this RxD block with previous one */
                __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
        }
}

/*
 * vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from the reserve array to the work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
        void *rxd;
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &ring->channel;

        while (vxge_hw_channel_dtr_count(channel) > 0) {

                status = vxge_hw_ring_rxd_reserve(ring, &rxd);

                vxge_assert(status == VXGE_HW_OK);

                if (ring->rxd_init) {
                        status = ring->rxd_init(rxd, channel->userdata);
                        if (status != VXGE_HW_OK) {
                                vxge_hw_ring_rxd_free(ring, rxd);
                                goto exit;
                        }
                }

                vxge_hw_ring_rxd_post(ring, rxd);
        }
        status = VXGE_HW_OK;
exit:
        return status;
}

/*
 * __vxge_hw_ring_create - Create a Ring
 * This function creates Ring and initializes it.
 */
static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
                      struct vxge_hw_ring_attr *attr)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_ring *ring;
        u32 ring_length;
        struct vxge_hw_ring_config *config;
        struct __vxge_hw_device *hldev;
        u32 vp_id;
        struct vxge_hw_mempool_cbs ring_mp_callback;

        if ((vp == NULL) || (attr == NULL)) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        hldev = vp->vpath->hldev;
        vp_id = vp->vpath->vp_id;

        config = &hldev->config.vp_config[vp_id].ring;

        ring_length = config->ring_blocks *
                        vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

        ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
                                                VXGE_HW_CHANNEL_TYPE_RING,
                                                ring_length,
                                                attr->per_rxd_space,
                                                attr->userdata);

        if (ring == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        vp->vpath->ringh = ring;
        ring->vp_id = vp_id;
        ring->vp_reg = vp->vpath->vp_reg;
        ring->common_reg = hldev->common_reg;
        ring->stats = &vp->vpath->sw_stats->ring_stats;
        ring->config = config;
        ring->callback = attr->callback;
        ring->rxd_init = attr->rxd_init;
        ring->rxd_term = attr->rxd_term;
        ring->buffer_mode = config->buffer_mode;
        ring->rxds_limit = config->rxds_limit;

        ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
        ring->rxd_priv_size =
                sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
        ring->per_rxd_space = attr->per_rxd_space;

        ring->rxd_priv_size =
                ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
                VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
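        /* Example (hypothetical numbers, for illustration only): with a
         * 64-byte VXGE_CACHE_LINE_SIZE, a 120-byte rxd_priv_size rounds
         * up to 128 bytes here. */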

        /* how many RxDs can fit into one block. Depends on configured
         * buffer_mode. */
        ring->rxds_per_block =
                vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

        /* calculate actual RxD block private size */
        ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;

        ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
        ring->mempool = __vxge_hw_mempool_create(hldev,
                                VXGE_HW_BLOCK_SIZE,
                                VXGE_HW_BLOCK_SIZE,
                                ring->rxdblock_priv_size,
                                ring->config->ring_blocks,
                                ring->config->ring_blocks,
                                &ring_mp_callback,
                                ring);

        if (ring->mempool == NULL) {
                __vxge_hw_ring_delete(vp);
                return VXGE_HW_ERR_OUT_OF_MEMORY;
        }

        status = __vxge_hw_channel_initialize(&ring->channel);
        if (status != VXGE_HW_OK) {
                __vxge_hw_ring_delete(vp);
                goto exit;
        }

        /* Note:
         * Specifying rxd_init callback means two things:
         * 1) rxds need to be initialized by driver at channel-open time;
         * 2) rxds need to be posted at channel-open time
         *    (that's what the initial_replenish() below does)
         * Currently we don't have a case when the 1) is done without the 2).
         */
        if (ring->rxd_init) {
                status = vxge_hw_ring_replenish(ring);
                if (status != VXGE_HW_OK) {
                        __vxge_hw_ring_delete(vp);
                        goto exit;
                }
        }

        /* initial replenish will increment the counter in its post() routine,
         * we have to reset it */
        ring->stats->common_stats.usage_cnt = 0;
exit:
        return status;
}

/*
 * __vxge_hw_ring_abort - Return outstanding RxDs
 * This function terminates the outstanding RxDs of the ring
 */
static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
        void *rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        for (;;) {
                vxge_hw_channel_dtr_try_complete(channel, &rxdh);

                if (rxdh == NULL)
                        break;

                vxge_hw_channel_dtr_complete(channel);

                if (ring->rxd_term)
                        ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
                                channel->userdata);

                vxge_hw_channel_dtr_free(channel, rxdh);
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_ring_reset - Resets the ring
 * This function resets the ring during vpath reset operation
 */
static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        __vxge_hw_ring_abort(ring);

        status = __vxge_hw_channel_reset(channel);

        if (status != VXGE_HW_OK)
                goto exit;

        if (ring->rxd_init) {
                status = vxge_hw_ring_replenish(ring);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}

1577 /*
1578  * __vxge_hw_ring_delete - Removes the ring
1579  * This function frees up the memory pool and removes the ring
1580  */
1581 static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1582 {
1583         struct __vxge_hw_ring *ring = vp->vpath->ringh;
1584
1585         __vxge_hw_ring_abort(ring);
1586
1587         if (ring->mempool)
1588                 __vxge_hw_mempool_destroy(ring->mempool);
1589
1590         vp->vpath->ringh = NULL;
1591         __vxge_hw_channel_free(&ring->channel);
1592
1593         return VXGE_HW_OK;
1594 }
1595
1596 /*
1597  * __vxge_hw_mempool_grow
1598  * Grows the mempool by up to %num_allocate memblocks; the count actually added is returned via %num_allocated.
1599  */
1600 static enum vxge_hw_status
1601 __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1602                        u32 *num_allocated)
1603 {
1604         u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
1605         u32 n_items = mempool->items_per_memblock;
1606         u32 start_block_idx = mempool->memblocks_allocated;
1607         u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
1608         enum vxge_hw_status status = VXGE_HW_OK;
1609
1610         *num_allocated = 0;
1611
1612         if (end_block_idx > mempool->memblocks_max) {
1613                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1614                 goto exit;
1615         }
1616
1617         for (i = start_block_idx; i < end_block_idx; i++) {
1618                 u32 j;
1619                 u32 is_last = ((end_block_idx - 1) == i);
1620                 struct vxge_hw_mempool_dma *dma_object =
1621                         mempool->memblocks_dma_arr + i;
1622                 void *the_memblock;
1623
1624                 /* Allocate the memblock's private part. Each DMA memblock
1625                  * has space reserved for the items' private usage at the
1626                  * mempool user's request. Each time the mempool grows, a
1627                  * new memblock and its private part are allocated at once,
1628                  * which helps to minimize memory usage. */
1629                 mempool->memblocks_priv_arr[i] =
1630                                 vmalloc(mempool->items_priv_size * n_items);
1631                 if (mempool->memblocks_priv_arr[i] == NULL) {
1632                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
1633                         goto exit;
1634                 }
1635
1636                 memset(mempool->memblocks_priv_arr[i], 0,
1637                              mempool->items_priv_size * n_items);
1638
1639                 /* allocate DMA-capable memblock */
1640                 mempool->memblocks_arr[i] =
1641                         __vxge_hw_blockpool_malloc(mempool->devh,
1642                                 mempool->memblock_size, dma_object);
1643                 if (mempool->memblocks_arr[i] == NULL) {
1644                         vfree(mempool->memblocks_priv_arr[i]);
1645                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
1646                         goto exit;
1647                 }
1648
1649                 (*num_allocated)++;
1650                 mempool->memblocks_allocated++;
1651
1652                 memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
1653
1654                 the_memblock = mempool->memblocks_arr[i];
1655
1656                 /* fill the items array */
1657                 for (j = 0; j < n_items; j++) {
1658                         u32 index = i * n_items + j;
1659
1660                         if (first_time && index >= mempool->items_initial)
1661                                 break;
1662
1663                         mempool->items_arr[index] =
1664                                 ((char *)the_memblock + j*mempool->item_size);
1665
1666                         /* let the caller do additional setup on each item */
1667                         if (mempool->item_func_alloc != NULL)
1668                                 mempool->item_func_alloc(mempool, i,
1669                                         dma_object, index, is_last);
1670
1671                         mempool->items_current = index + 1;
1672                 }
1673
1674                 if (first_time && mempool->items_current ==
1675                                         mempool->items_initial)
1676                         break;
1677         }
1678 exit:
1679         return status;
1680 }
1681
1682 /*
1683  * __vxge_hw_mempool_create
1684  * This function creates a memory pool object. The pool may grow but will
1685  * never shrink. It consists of a number of dynamically allocated blocks,
1686  * together sized to hold %items_initial items. The memory is DMA-able, but
1687  * the client must map/unmap it before interoperating with the device.
1688  */
1689 static struct vxge_hw_mempool*
1690 __vxge_hw_mempool_create(
1691         struct __vxge_hw_device *devh,
1692         u32 memblock_size,
1693         u32 item_size,
1694         u32 items_priv_size,
1695         u32 items_initial,
1696         u32 items_max,
1697         struct vxge_hw_mempool_cbs *mp_callback,
1698         void *userdata)
1699 {
1700         enum vxge_hw_status status = VXGE_HW_OK;
1701         u32 memblocks_to_allocate;
1702         struct vxge_hw_mempool *mempool = NULL;
1703         u32 allocated;
1704
1705         if (memblock_size < item_size) {
1706                 status = VXGE_HW_FAIL;
1707                 goto exit;
1708         }
1709
1710         mempool = (struct vxge_hw_mempool *)
1711                         vmalloc(sizeof(struct vxge_hw_mempool));
1712         if (mempool == NULL) {
1713                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1714                 goto exit;
1715         }
1716         memset(mempool, 0, sizeof(struct vxge_hw_mempool));
1717
1718         mempool->devh                   = devh;
1719         mempool->memblock_size          = memblock_size;
1720         mempool->items_max              = items_max;
1721         mempool->items_initial          = items_initial;
1722         mempool->item_size              = item_size;
1723         mempool->items_priv_size        = items_priv_size;
1724         mempool->item_func_alloc        = mp_callback->item_func_alloc;
1725         mempool->userdata               = userdata;
1726
1727         mempool->memblocks_allocated = 0;
1728
1729         mempool->items_per_memblock = memblock_size / item_size;
1730
1731         mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
1732                                         mempool->items_per_memblock;
1733
1734         /* allocate array of memblocks */
1735         mempool->memblocks_arr =
1736                 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1737         if (mempool->memblocks_arr == NULL) {
1738                 __vxge_hw_mempool_destroy(mempool);
1739                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1740                 mempool = NULL;
1741                 goto exit;
1742         }
1743         memset(mempool->memblocks_arr, 0,
1744                 sizeof(void *) * mempool->memblocks_max);
1745
1746         /* allocate array of private parts of items per memblocks */
1747         mempool->memblocks_priv_arr =
1748                 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1749         if (mempool->memblocks_priv_arr == NULL) {
1750                 __vxge_hw_mempool_destroy(mempool);
1751                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1752                 mempool = NULL;
1753                 goto exit;
1754         }
1755         memset(mempool->memblocks_priv_arr, 0,
1756                     sizeof(void *) * mempool->memblocks_max);
1757
1758         /* allocate array of memblocks DMA objects */
1759         mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
1760                 vmalloc(sizeof(struct vxge_hw_mempool_dma) *
1761                         mempool->memblocks_max);
1762
1763         if (mempool->memblocks_dma_arr == NULL) {
1764                 __vxge_hw_mempool_destroy(mempool);
1765                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1766                 mempool = NULL;
1767                 goto exit;
1768         }
1769         memset(mempool->memblocks_dma_arr, 0,
1770                         sizeof(struct vxge_hw_mempool_dma) *
1771                         mempool->memblocks_max);
1772
1773         /* allocate the array of items */
1774         mempool->items_arr =
1775                 (void **) vmalloc(sizeof(void *) * mempool->items_max);
1776         if (mempool->items_arr == NULL) {
1777                 __vxge_hw_mempool_destroy(mempool);
1778                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1779                 mempool = NULL;
1780                 goto exit;
1781         }
1782         memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
1783
1784         /* calculate initial number of memblocks */
1785         memblocks_to_allocate = (mempool->items_initial +
1786                                  mempool->items_per_memblock - 1) /
1787                                                 mempool->items_per_memblock;
1788
1789         /* pre-allocate the mempool */
1790         status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
1791                                         &allocated);
1792         if (status != VXGE_HW_OK) {
1793                 __vxge_hw_mempool_destroy(mempool);
1794                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1795                 mempool = NULL;
1796                 goto exit;
1797         }
1798
1799 exit:
1800         return mempool;
1801 }
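
/*
 * Usage sketch (illustrative only): creating and tearing down a pool the
 * way the ring create code above does.  With memblock_size == item_size
 * (both VXGE_HW_BLOCK_SIZE), items_per_memblock is 1 and memblocks_max
 * equals items_max.  The callback and the priv_size/n_blocks/userdata
 * names are placeholders.
 *
 *	struct vxge_hw_mempool_cbs cbs;
 *	struct vxge_hw_mempool *pool;
 *
 *	cbs.item_func_alloc = my_item_alloc;
 *	pool = __vxge_hw_mempool_create(hldev, VXGE_HW_BLOCK_SIZE,
 *				VXGE_HW_BLOCK_SIZE, priv_size,
 *				n_blocks, n_blocks, &cbs, userdata);
 *	if (pool == NULL)
 *		return VXGE_HW_ERR_OUT_OF_MEMORY;
 *	...
 *	__vxge_hw_mempool_destroy(pool);
 */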
1802
1803 /*
1804  * __vxge_hw_mempool_destroy
1805  */
1806 static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1807 {
1808         u32 i;
1809         struct __vxge_hw_device *devh = mempool->devh;
1810
1811         for (i = 0; i < mempool->memblocks_allocated; i++) {
1812                 struct vxge_hw_mempool_dma *dma_object;
1813
1814                 vxge_assert(mempool->memblocks_arr[i]);
1815                 vxge_assert(mempool->memblocks_dma_arr + i);
1816
1817                 dma_object = mempool->memblocks_dma_arr + i;
1818
1827                 vfree(mempool->memblocks_priv_arr[i]);
1828
1829                 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
1830                                 mempool->memblock_size, dma_object);
1831         }
1832
1833         vfree(mempool->items_arr);
1834
1835         vfree(mempool->memblocks_dma_arr);
1836
1837         vfree(mempool->memblocks_priv_arr);
1838
1839         vfree(mempool->memblocks_arr);
1840
1841         vfree(mempool);
1842 }
1843
1844 /*
1845  * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1846  * Check the fifo configuration
1847  */
1848 enum vxge_hw_status
1849 __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1850 {
1851         if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1852              (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1853                 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1854
1855         return VXGE_HW_OK;
1856 }
1857
1858 /*
1859  * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1860  * Check the vpath configuration
1861  */
1862 static enum vxge_hw_status
1863 __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1864 {
1865         enum vxge_hw_status status;
1866
1867         if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1868                 (vp_config->min_bandwidth >
1869                                         VXGE_HW_VPATH_BANDWIDTH_MAX))
1870                 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1871
1872         status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1873         if (status != VXGE_HW_OK)
1874                 return status;
1875
1876         if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1877                 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1878                 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1879                 return VXGE_HW_BADCFG_VPATH_MTU;
1880
1881         if ((vp_config->rpa_strip_vlan_tag !=
1882                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1883                 (vp_config->rpa_strip_vlan_tag !=
1884                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1885                 (vp_config->rpa_strip_vlan_tag !=
1886                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1887                 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1888
1889         return VXGE_HW_OK;
1890 }
1891
1892 /*
1893  * __vxge_hw_device_config_check - Check device configuration.
1894  * Check the device configuration
1895  */
1896 enum vxge_hw_status
1897 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1898 {
1899         u32 i;
1900         enum vxge_hw_status status;
1901
1902         if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1903            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1904            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1905            (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1906                 return VXGE_HW_BADCFG_INTR_MODE;
1907
1908         if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1909            (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1910                 return VXGE_HW_BADCFG_RTS_MAC_EN;
1911
1912         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1913                 status = __vxge_hw_device_vpath_config_check(
1914                                 &new_config->vp_config[i]);
1915                 if (status != VXGE_HW_OK)
1916                         return status;
1917         }
1918
1919         return VXGE_HW_OK;
1920 }
1921
1922 /*
1923  * vxge_hw_device_config_default_get - Initialize device config with defaults.
1924  * Initialize Titan device config with default values.
1925  */
1926 enum vxge_hw_status __devinit
1927 vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
1928 {
1929         u32 i;
1930
1931         device_config->dma_blockpool_initial =
1932                                         VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
1933         device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
1934         device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
1935         device_config->rth_en = VXGE_HW_RTH_DEFAULT;
1936         device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
1937         device_config->device_poll_millis =  VXGE_HW_DEF_DEVICE_POLL_MILLIS;
1938         device_config->rts_mac_en =  VXGE_HW_RTS_MAC_DEFAULT;
1939
1940         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1941
1942                 device_config->vp_config[i].vp_id = i;
1943
1944                 device_config->vp_config[i].min_bandwidth =
1945                                 VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
1946
1947                 device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
1948
1949                 device_config->vp_config[i].ring.ring_blocks =
1950                                 VXGE_HW_DEF_RING_BLOCKS;
1951
1952                 device_config->vp_config[i].ring.buffer_mode =
1953                                 VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
1954
1955                 device_config->vp_config[i].ring.scatter_mode =
1956                                 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
1957
1958                 device_config->vp_config[i].ring.rxds_limit =
1959                                 VXGE_HW_DEF_RING_RXDS_LIMIT;
1960
1961                 device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
1962
1963                 device_config->vp_config[i].fifo.fifo_blocks =
1964                                 VXGE_HW_MIN_FIFO_BLOCKS;
1965
1966                 device_config->vp_config[i].fifo.max_frags =
1967                                 VXGE_HW_MAX_FIFO_FRAGS;
1968
1969                 device_config->vp_config[i].fifo.memblock_size =
1970                                 VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
1971
1972                 device_config->vp_config[i].fifo.alignment_size =
1973                                 VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
1974
1975                 device_config->vp_config[i].fifo.intr =
1976                                 VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
1977
1978                 device_config->vp_config[i].fifo.no_snoop_bits =
1979                                 VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
1980                 device_config->vp_config[i].tti.intr_enable =
1981                                 VXGE_HW_TIM_INTR_DEFAULT;
1982
1983                 device_config->vp_config[i].tti.btimer_val =
1984                                 VXGE_HW_USE_FLASH_DEFAULT;
1985
1986                 device_config->vp_config[i].tti.timer_ac_en =
1987                                 VXGE_HW_USE_FLASH_DEFAULT;
1988
1989                 device_config->vp_config[i].tti.timer_ci_en =
1990                                 VXGE_HW_USE_FLASH_DEFAULT;
1991
1992                 device_config->vp_config[i].tti.timer_ri_en =
1993                                 VXGE_HW_USE_FLASH_DEFAULT;
1994
1995                 device_config->vp_config[i].tti.rtimer_val =
1996                                 VXGE_HW_USE_FLASH_DEFAULT;
1997
1998                 device_config->vp_config[i].tti.util_sel =
1999                                 VXGE_HW_USE_FLASH_DEFAULT;
2000
2001                 device_config->vp_config[i].tti.ltimer_val =
2002                                 VXGE_HW_USE_FLASH_DEFAULT;
2003
2004                 device_config->vp_config[i].tti.urange_a =
2005                                 VXGE_HW_USE_FLASH_DEFAULT;
2006
2007                 device_config->vp_config[i].tti.uec_a =
2008                                 VXGE_HW_USE_FLASH_DEFAULT;
2009
2010                 device_config->vp_config[i].tti.urange_b =
2011                                 VXGE_HW_USE_FLASH_DEFAULT;
2012
2013                 device_config->vp_config[i].tti.uec_b =
2014                                 VXGE_HW_USE_FLASH_DEFAULT;
2015
2016                 device_config->vp_config[i].tti.urange_c =
2017                                 VXGE_HW_USE_FLASH_DEFAULT;
2018
2019                 device_config->vp_config[i].tti.uec_c =
2020                                 VXGE_HW_USE_FLASH_DEFAULT;
2021
2022                 device_config->vp_config[i].tti.uec_d =
2023                                 VXGE_HW_USE_FLASH_DEFAULT;
2024
2025                 device_config->vp_config[i].rti.intr_enable =
2026                                 VXGE_HW_TIM_INTR_DEFAULT;
2027
2028                 device_config->vp_config[i].rti.btimer_val =
2029                                 VXGE_HW_USE_FLASH_DEFAULT;
2030
2031                 device_config->vp_config[i].rti.timer_ac_en =
2032                                 VXGE_HW_USE_FLASH_DEFAULT;
2033
2034                 device_config->vp_config[i].rti.timer_ci_en =
2035                                 VXGE_HW_USE_FLASH_DEFAULT;
2036
2037                 device_config->vp_config[i].rti.timer_ri_en =
2038                                 VXGE_HW_USE_FLASH_DEFAULT;
2039
2040                 device_config->vp_config[i].rti.rtimer_val =
2041                                 VXGE_HW_USE_FLASH_DEFAULT;
2042
2043                 device_config->vp_config[i].rti.util_sel =
2044                                 VXGE_HW_USE_FLASH_DEFAULT;
2045
2046                 device_config->vp_config[i].rti.ltimer_val =
2047                                 VXGE_HW_USE_FLASH_DEFAULT;
2048
2049                 device_config->vp_config[i].rti.urange_a =
2050                                 VXGE_HW_USE_FLASH_DEFAULT;
2051
2052                 device_config->vp_config[i].rti.uec_a =
2053                                 VXGE_HW_USE_FLASH_DEFAULT;
2054
2055                 device_config->vp_config[i].rti.urange_b =
2056                                 VXGE_HW_USE_FLASH_DEFAULT;
2057
2058                 device_config->vp_config[i].rti.uec_b =
2059                                 VXGE_HW_USE_FLASH_DEFAULT;
2060
2061                 device_config->vp_config[i].rti.urange_c =
2062                                 VXGE_HW_USE_FLASH_DEFAULT;
2063
2064                 device_config->vp_config[i].rti.uec_c =
2065                                 VXGE_HW_USE_FLASH_DEFAULT;
2066
2067                 device_config->vp_config[i].rti.uec_d =
2068                                 VXGE_HW_USE_FLASH_DEFAULT;
2069
2070                 device_config->vp_config[i].mtu =
2071                                 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
2072
2073                 device_config->vp_config[i].rpa_strip_vlan_tag =
2074                         VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
2075         }
2076
2077         return VXGE_HW_OK;
2078 }
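
/*
 * Example (illustrative only): the defaults produced above are expected to
 * pass validation, so a probe path can pair the two helpers:
 *
 *	struct vxge_hw_device_config config;
 *
 *	vxge_hw_device_config_default_get(&config);
 *	status = __vxge_hw_device_config_check(&config);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */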
2079
2080 /*
2081  * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
2082  * Set the swapper bits appropriately for the legacy section.
2083  */
2084 static enum vxge_hw_status
2085 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
2086 {
2087         u64 val64;
2088         enum vxge_hw_status status = VXGE_HW_OK;
2089
2090         val64 = readq(&legacy_reg->toc_swapper_fb);
2091
2092         wmb();
2093
2094         switch (val64) {
2095
2096         case VXGE_HW_SWAPPER_INITIAL_VALUE:
2097                 return status;
2098
2099         case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
2100                 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2101                         &legacy_reg->pifm_rd_swap_en);
2102                 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2103                         &legacy_reg->pifm_rd_flip_en);
2104                 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2105                         &legacy_reg->pifm_wr_swap_en);
2106                 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2107                         &legacy_reg->pifm_wr_flip_en);
2108                 break;
2109
2110         case VXGE_HW_SWAPPER_BYTE_SWAPPED:
2111                 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2112                         &legacy_reg->pifm_rd_swap_en);
2113                 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2114                         &legacy_reg->pifm_wr_swap_en);
2115                 break;
2116
2117         case VXGE_HW_SWAPPER_BIT_FLIPPED:
2118                 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2119                         &legacy_reg->pifm_rd_flip_en);
2120                 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2121                         &legacy_reg->pifm_wr_flip_en);
2122                 break;
2123         }
2124
2125         wmb();
2126
2127         val64 = readq(&legacy_reg->toc_swapper_fb);
2128
2129         if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
2130                 status = VXGE_HW_ERR_SWAPPER_CTRL;
2131
2132         return status;
2133 }
2134
2135 /*
2136  * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
2137  * Set the swapper bits appropriately for the vpath.
2138  */
2139 static enum vxge_hw_status
2140 __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
2141 {
2142 #ifndef __BIG_ENDIAN
2143         u64 val64;
2144
2145         val64 = readq(&vpath_reg->vpath_general_cfg1);
2146         wmb();
2147         val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
2148         writeq(val64, &vpath_reg->vpath_general_cfg1);
2149         wmb();
2150 #endif
2151         return VXGE_HW_OK;
2152 }
2153
2154 /*
2155  * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
2156  * Set the swapper bits appropriately for the KDFC.
2157  */
2158 static enum vxge_hw_status
2159 __vxge_hw_kdfc_swapper_set(
2160         struct vxge_hw_legacy_reg __iomem *legacy_reg,
2161         struct vxge_hw_vpath_reg __iomem *vpath_reg)
2162 {
2163         u64 val64;
2164
2165         val64 = readq(&legacy_reg->pifm_wr_swap_en);
2166
2167         if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
2168                 val64 = readq(&vpath_reg->kdfcctl_cfg0);
2169                 wmb();
2170
2171                 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
2172                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1  |
2173                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
2174
2175                 writeq(val64, &vpath_reg->kdfcctl_cfg0);
2176                 wmb();
2177         }
2178
2179         return VXGE_HW_OK;
2180 }
2181
2182 /*
2183  * vxge_hw_mgmt_reg_read - Read Titan register.
2184  */
2185 enum vxge_hw_status
2186 vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
2187                       enum vxge_hw_mgmt_reg_type type,
2188                       u32 index, u32 offset, u64 *value)
2189 {
2190         enum vxge_hw_status status = VXGE_HW_OK;
2191
2192         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2193                 status = VXGE_HW_ERR_INVALID_DEVICE;
2194                 goto exit;
2195         }
2196
2197         switch (type) {
2198         case vxge_hw_mgmt_reg_type_legacy:
2199                 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2200                         status = VXGE_HW_ERR_INVALID_OFFSET;
2201                         break;
2202                 }
2203                 *value = readq((void __iomem *)hldev->legacy_reg + offset);
2204                 break;
2205         case vxge_hw_mgmt_reg_type_toc:
2206                 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2207                         status = VXGE_HW_ERR_INVALID_OFFSET;
2208                         break;
2209                 }
2210                 *value = readq((void __iomem *)hldev->toc_reg + offset);
2211                 break;
2212         case vxge_hw_mgmt_reg_type_common:
2213                 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2214                         status = VXGE_HW_ERR_INVALID_OFFSET;
2215                         break;
2216                 }
2217                 *value = readq((void __iomem *)hldev->common_reg + offset);
2218                 break;
2219         case vxge_hw_mgmt_reg_type_mrpcim:
2220                 if (!(hldev->access_rights &
2221                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2222                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2223                         break;
2224                 }
2225                 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2226                         status = VXGE_HW_ERR_INVALID_OFFSET;
2227                         break;
2228                 }
2229                 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
2230                 break;
2231         case vxge_hw_mgmt_reg_type_srpcim:
2232                 if (!(hldev->access_rights &
2233                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2234                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2235                         break;
2236                 }
2237                 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2238                         status = VXGE_HW_ERR_INVALID_INDEX;
2239                         break;
2240                 }
2241                 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2242                         status = VXGE_HW_ERR_INVALID_OFFSET;
2243                         break;
2244                 }
2245                 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
2246                                 offset);
2247                 break;
2248         case vxge_hw_mgmt_reg_type_vpmgmt:
2249                 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2250                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2251                         status = VXGE_HW_ERR_INVALID_INDEX;
2252                         break;
2253                 }
2254                 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2255                         status = VXGE_HW_ERR_INVALID_OFFSET;
2256                         break;
2257                 }
2258                 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
2259                                 offset);
2260                 break;
2261         case vxge_hw_mgmt_reg_type_vpath:
2262                 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
2263                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2264                         status = VXGE_HW_ERR_INVALID_INDEX;
2265                         break;
2266                 }
2271                 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2272                         status = VXGE_HW_ERR_INVALID_OFFSET;
2273                         break;
2274                 }
2275                 *value = readq((void __iomem *)hldev->vpath_reg[index] +
2276                                 offset);
2277                 break;
2278         default:
2279                 status = VXGE_HW_ERR_INVALID_TYPE;
2280                 break;
2281         }
2282
2283 exit:
2284         return status;
2285 }
2286
2287 /*
2288  * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
2289  */
2290 enum vxge_hw_status
2291 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
2292 {
2293         struct vxge_hw_vpmgmt_reg       __iomem *vpmgmt_reg;
2294         enum vxge_hw_status status = VXGE_HW_OK;
2295         int i = 0, j = 0;
2296
2297         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2298                 if (!((vpath_mask) & vxge_mBIT(i)))
2299                         continue;
2300                 vpmgmt_reg = hldev->vpmgmt_reg[i];
2301                 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
2302                         if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
2303                         & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
2304                                 return VXGE_HW_FAIL;
2305                 }
2306         }
2307         return status;
2308 }

2309 /*
2310  * vxge_hw_mgmt_reg_write - Write Titan register.
2311  */
2312 enum vxge_hw_status
2313 vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
2314                       enum vxge_hw_mgmt_reg_type type,
2315                       u32 index, u32 offset, u64 value)
2316 {
2317         enum vxge_hw_status status = VXGE_HW_OK;
2318
2319         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2320                 status = VXGE_HW_ERR_INVALID_DEVICE;
2321                 goto exit;
2322         }
2323
2324         switch (type) {
2325         case vxge_hw_mgmt_reg_type_legacy:
2326                 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2327                         status = VXGE_HW_ERR_INVALID_OFFSET;
2328                         break;
2329                 }
2330                 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
2331                 break;
2332         case vxge_hw_mgmt_reg_type_toc:
2333                 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2334                         status = VXGE_HW_ERR_INVALID_OFFSET;
2335                         break;
2336                 }
2337                 writeq(value, (void __iomem *)hldev->toc_reg + offset);
2338                 break;
2339         case vxge_hw_mgmt_reg_type_common:
2340                 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2341                         status = VXGE_HW_ERR_INVALID_OFFSET;
2342                         break;
2343                 }
2344                 writeq(value, (void __iomem *)hldev->common_reg + offset);
2345                 break;
2346         case vxge_hw_mgmt_reg_type_mrpcim:
2347                 if (!(hldev->access_rights &
2348                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2349                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2350                         break;
2351                 }
2352                 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2353                         status = VXGE_HW_ERR_INVALID_OFFSET;
2354                         break;
2355                 }
2356                 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
2357                 break;
2358         case vxge_hw_mgmt_reg_type_srpcim:
2359                 if (!(hldev->access_rights &
2360                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2361                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2362                         break;
2363                 }
2364                 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2365                         status = VXGE_HW_ERR_INVALID_INDEX;
2366                         break;
2367                 }
2368                 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2369                         status = VXGE_HW_ERR_INVALID_OFFSET;
2370                         break;
2371                 }
2372                 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
2373                         offset);
2374
2375                 break;
2376         case vxge_hw_mgmt_reg_type_vpmgmt:
2377                 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2378                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2379                         status = VXGE_HW_ERR_INVALID_INDEX;
2380                         break;
2381                 }
2382                 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2383                         status = VXGE_HW_ERR_INVALID_OFFSET;
2384                         break;
2385                 }
2386                 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
2387                         offset);
2388                 break;
2389         case vxge_hw_mgmt_reg_type_vpath:
2390                 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
2391                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2392                         status = VXGE_HW_ERR_INVALID_INDEX;
2393                         break;
2394                 }
2395                 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2396                         status = VXGE_HW_ERR_INVALID_OFFSET;
2397                         break;
2398                 }
2399                 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
2400                         offset);
2401                 break;
2402         default:
2403                 status = VXGE_HW_ERR_INVALID_TYPE;
2404                 break;
2405         }
2406 exit:
2407         return status;
2408 }
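
/*
 * Example (sketch): a read-modify-write of a vpath register through the
 * management API above.  offsetof() keeps the offset inside the bounds
 * checked by both helpers; vp_id must be an assigned vpath.
 *
 *	u64 val;
 *
 *	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_vpath,
 *			vp_id, offsetof(struct vxge_hw_vpath_reg,
 *			vpath_general_cfg1), &val);
 *	if (status == VXGE_HW_OK)
 *		status = vxge_hw_mgmt_reg_write(hldev,
 *				vxge_hw_mgmt_reg_type_vpath, vp_id,
 *				offsetof(struct vxge_hw_vpath_reg,
 *				vpath_general_cfg1), val);
 */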
2409
2410 /*
2411  * __vxge_hw_fifo_mempool_item_alloc - Allocate list blocks for the
2412  * TxD list
2413  * This function is the callback passed to __vxge_hw_mempool_create to
2414  * create the memory pool for the TxD list
2415  */
2416 static void
2417 __vxge_hw_fifo_mempool_item_alloc(
2418         struct vxge_hw_mempool *mempoolh,
2419         u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
2420         u32 index, u32 is_last)
2421 {
2422         u32 memblock_item_idx;
2423         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
2424         struct vxge_hw_fifo_txd *txdp =
2425                 (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
2426         struct __vxge_hw_fifo *fifo =
2427                         (struct __vxge_hw_fifo *)mempoolh->userdata;
2428         void *memblock = mempoolh->memblocks_arr[memblock_index];
2429
2430         vxge_assert(txdp);
2431
2432         txdp->host_control = (u64) (size_t)
2433         __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
2434                                         &memblock_item_idx);
2435
2436         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
2437
2438         vxge_assert(txdl_priv);
2439
2440         fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
2441
2442         /* pre-format HW's TxDL's private */
2443         txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
2444         txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
2445         txdl_priv->dma_handle = dma_object->handle;
2446         txdl_priv->memblock   = memblock;
2447         txdl_priv->first_txdp = txdp;
2448         txdl_priv->next_txdl_priv = NULL;
2449         txdl_priv->alloc_frags = 0;
2450 }
2451
2452 /*
2453  * __vxge_hw_fifo_create - Create a FIFO
2454  * This function creates a FIFO and initializes it.
2455  */
2456 enum vxge_hw_status
2457 __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2458                       struct vxge_hw_fifo_attr *attr)
2459 {
2460         enum vxge_hw_status status = VXGE_HW_OK;
2461         struct __vxge_hw_fifo *fifo;
2462         struct vxge_hw_fifo_config *config;
2463         u32 txdl_size, txdl_per_memblock;
2464         struct vxge_hw_mempool_cbs fifo_mp_callback;
2465         struct __vxge_hw_virtualpath *vpath;
2466
2467         if ((vp == NULL) || (attr == NULL)) {
2468                 status = VXGE_HW_ERR_INVALID_HANDLE;
2469                 goto exit;
2470         }
2471         vpath = vp->vpath;
2472         config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
2473
2474         txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
2475
2476         txdl_per_memblock = config->memblock_size / txdl_size;
2477
2478         fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
2479                                         VXGE_HW_CHANNEL_TYPE_FIFO,
2480                                         config->fifo_blocks * txdl_per_memblock,
2481                                         attr->per_txdl_space, attr->userdata);
2482
2483         if (fifo == NULL) {
2484                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2485                 goto exit;
2486         }
2487
2488         vpath->fifoh = fifo;
2489         fifo->nofl_db = vpath->nofl_db;
2490
2491         fifo->vp_id = vpath->vp_id;
2492         fifo->vp_reg = vpath->vp_reg;
2493         fifo->stats = &vpath->sw_stats->fifo_stats;
2494
2495         fifo->config = config;
2496
2497         /* apply "interrupts per txdl" attribute */
2498         fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
2499
2500         if (fifo->config->intr)
2501                 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
2502
2503         fifo->no_snoop_bits = config->no_snoop_bits;
2504
2505         /*
2506          * FIFO memory management strategy:
2507          *
2508          * A TxDL is split into three independent parts:
2509          *      - the set of TxDs
2510          *      - the TxD HW private part
2511          *      - the driver private part
2512          *
2513          * Adaptive memory allocation is used, i.e. memory is allocated on
2514          * demand with a size that fits into one memory block.
2515          * One memory block may contain more than one TxDL.
2516          *
2517          * During "reserve" operations more memory can be allocated on demand,
2518          * for example due to a FIFO-full condition.
2519          *
2520          * The pool of memblocks never shrinks except in __vxge_hw_fifo_delete,
2521          * which essentially stops the channel and frees its resources.
2522          */
2523
2524         /* TxDL common private size == TxDL private  +  driver private */
2525         fifo->priv_size =
2526                 sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
2527         fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
2528                         VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
2529
2530         fifo->per_txdl_space = attr->per_txdl_space;
2531
2532         /* record the TxDL size and the TxDL count per memblock */
2533         fifo->txdl_size = txdl_size;
2534         fifo->txdl_per_memblock = txdl_per_memblock;
2535
2536         fifo->txdl_term = attr->txdl_term;
2537         fifo->callback = attr->callback;
2538
2539         if (fifo->txdl_per_memblock == 0) {
2540                 __vxge_hw_fifo_delete(vp);
2541                 status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
2542                 goto exit;
2543         }
2544
2545         fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
2546
2547         fifo->mempool =
2548                 __vxge_hw_mempool_create(vpath->hldev,
2549                         fifo->config->memblock_size,
2550                         fifo->txdl_size,
2551                         fifo->priv_size,
2552                         (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2553                         (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2554                         &fifo_mp_callback,
2555                         fifo);
2556
2557         if (fifo->mempool == NULL) {
2558                 __vxge_hw_fifo_delete(vp);
2559                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2560                 goto exit;
2561         }
2562
2563         status = __vxge_hw_channel_initialize(&fifo->channel);
2564         if (status != VXGE_HW_OK) {
2565                 __vxge_hw_fifo_delete(vp);
2566                 goto exit;
2567         }
2568
2569         vxge_assert(fifo->channel.reserve_ptr);
2570 exit:
2571         return status;
2572 }
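
/*
 * Example (illustrative sketch): filling the attributes consumed by
 * __vxge_hw_fifo_create() above.  The callback names and the private
 * struct are hypothetical.
 *
 *	struct vxge_hw_fifo_attr attr = {0};
 *
 *	attr.callback       = my_tx_complete;   (TxDL completion hook)
 *	attr.txdl_term      = my_txdl_term;     (invoked from fifo abort/reset)
 *	attr.per_txdl_space = sizeof(struct my_tx_priv);
 *	attr.userdata       = my_dev;
 *	status = __vxge_hw_fifo_create(vp, &attr);
 */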
2573
2574 /*
2575  * __vxge_hw_fifo_abort - Return the posted TxDs
2576  * This function terminates the TxDs of the fifo
2577  */
2578 static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2579 {
2580         void *txdlh;
2581
2582         for (;;) {
2583                 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
2584
2585                 if (txdlh == NULL)
2586                         break;
2587
2588                 vxge_hw_channel_dtr_complete(&fifo->channel);
2589
2590                 if (fifo->txdl_term) {
2591                         fifo->txdl_term(txdlh,
2592                         VXGE_HW_TXDL_STATE_POSTED,
2593                         fifo->channel.userdata);
2594                 }
2595
2596                 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
2597         }
2598
2599         return VXGE_HW_OK;
2600 }
2601
2602 /*
2603  * __vxge_hw_fifo_reset - Resets the fifo
2604  * This function resets the fifo during vpath reset operation
2605  */
2606 static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2607 {
2608         enum vxge_hw_status status = VXGE_HW_OK;
2609
2610         __vxge_hw_fifo_abort(fifo);
2611         status = __vxge_hw_channel_reset(&fifo->channel);
2612
2613         return status;
2614 }
2615
2616 /*
2617  * __vxge_hw_fifo_delete - Removes the FIFO
2618  * This function frees up the memory pool and removes the FIFO
2619  */
2620 enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2621 {
2622         struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2623
2624         __vxge_hw_fifo_abort(fifo);
2625
2626         if (fifo->mempool)
2627                 __vxge_hw_mempool_destroy(fifo->mempool);
2628
2629         vp->vpath->fifoh = NULL;
2630
2631         __vxge_hw_channel_free(&fifo->channel);
2632
2633         return VXGE_HW_OK;
2634 }
2635
2636 /*
2637  * __vxge_hw_vpath_pci_read - Read the content of the given address
2638  *                          in pci config space.
2639  * Read from the vpath pci config space.
2640  */
2641 static enum vxge_hw_status
2642 __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
2643                          u32 phy_func_0, u32 offset, u32 *val)
2644 {
2645         u64 val64;
2646         enum vxge_hw_status status = VXGE_HW_OK;
2647         struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2648
2649         val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
2650
2651         if (phy_func_0)
2652                 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
2653
2654         writeq(val64, &vp_reg->pci_config_access_cfg1);
2655         wmb();
2656         writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
2657                         &vp_reg->pci_config_access_cfg2);
2658         wmb();
2659
2660         status = __vxge_hw_device_register_poll(
2661                         &vp_reg->pci_config_access_cfg2,
2662                         VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2663
2664         if (status != VXGE_HW_OK)
2665                 goto exit;
2666
2667         val64 = readq(&vp_reg->pci_config_access_status);
2668
2669         if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
2670                 status = VXGE_HW_FAIL;
2671                 *val = 0;
2672         } else
2673                 *val = (u32)vxge_bVALn(val64, 32, 32);
2674 exit:
2675         return status;
2676 }
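
/*
 * Example (sketch): reading the vendor/device ID dword at config-space
 * offset 0 through the window above; the low 16 bits of the result carry
 * the PCI vendor ID.
 *
 *	u32 id;
 *
 *	status = __vxge_hw_vpath_pci_read(vpath, 1, 0, &id);
 *	if (status == VXGE_HW_OK)
 *		vendor = id & 0xffff;
 */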
2677
2678 /*
2679  * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2680  * Returns the function number of the vpath.
2681  */
2682 static u32
2683 __vxge_hw_vpath_func_id_get(u32 vp_id,
2684         struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2685 {
2686         u64 val64;
2687
2688         val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
2689
2690         return
2691          (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
2692 }
2693
2694 /*
2695  * __vxge_hw_read_rts_ds - Program RTS steering criteria
2696  */
2697 static inline void
2698 __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2699                       u64 dta_struct_sel)
2700 {
2701         writeq(0, &vpath_reg->rts_access_steer_ctrl);
2702         wmb();
2703         writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2704         writeq(0, &vpath_reg->rts_access_steer_data1);
2705         wmb();
2706 }
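
/*
 * The helpers below share one access pattern: select the data structure
 * with __vxge_hw_read_rts_ds() (where needed), post a READ_MEMO_ENTRY or
 * READ_ENTRY action with the STROBE bit into rts_access_steer_ctrl via
 * __vxge_hw_pio_mem_write64(), and, once RMACJ_STATUS reports success,
 * collect the result from rts_access_steer_data0/data1.
 */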
2707
2709 /*
2710  * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2711  * part number and product description.
2712  */
2713 static enum vxge_hw_status
2714 __vxge_hw_vpath_card_info_get(
2715         u32 vp_id,
2716         struct vxge_hw_vpath_reg __iomem *vpath_reg,
2717         struct vxge_hw_device_hw_info *hw_info)
2718 {
2719         u32 i, j;
2720         u64 val64;
2721         u64 data1 = 0ULL;
2722         u64 data2 = 0ULL;
2723         enum vxge_hw_status status = VXGE_HW_OK;
2724         u8 *serial_number = hw_info->serial_number;
2725         u8 *part_number = hw_info->part_number;
2726         u8 *product_desc = hw_info->product_desc;
2727
2728         __vxge_hw_read_rts_ds(vpath_reg,
2729                 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
2730
2731         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2732                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2733                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2734                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2735                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2736                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2737
2738         status = __vxge_hw_pio_mem_write64(val64,
2739                                 &vpath_reg->rts_access_steer_ctrl,
2740                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2741                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2742
2743         if (status != VXGE_HW_OK)
2744                 return status;
2745
2746         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2747
2748         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2749                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2750                 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
2751
2752                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2753                 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
2754                 status = VXGE_HW_OK;
2755         } else
2756                 *serial_number = 0;
2757
2758         __vxge_hw_read_rts_ds(vpath_reg,
2759                         VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
2760
2761         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2762                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2763                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2764                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2765                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2766                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2767
2768         status = __vxge_hw_pio_mem_write64(val64,
2769                                 &vpath_reg->rts_access_steer_ctrl,
2770                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2771                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2772
2773         if (status != VXGE_HW_OK)
2774                 return status;
2775
2776         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2777
2778         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2779
2780                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2781                 ((u64 *)part_number)[0] = be64_to_cpu(data1);
2782
2783                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2784                 ((u64 *)part_number)[1] = be64_to_cpu(data2);
2785
2786                 status = VXGE_HW_OK;
2787
2788         } else
2789                 *part_number = 0;
2790
2791         j = 0;
2792
2793         for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
2794              i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
2795
2796                 __vxge_hw_read_rts_ds(vpath_reg, i);
2797
2798                 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2799                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2800                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2801                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2802                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2803                         VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2804
2805                 status = __vxge_hw_pio_mem_write64(val64,
2806                                 &vpath_reg->rts_access_steer_ctrl,
2807                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2808                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2809
2810                 if (status != VXGE_HW_OK)
2811                         return status;
2812
2813                 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2814
2815                 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2816
2817                         data1 = readq(&vpath_reg->rts_access_steer_data0);
2818                         ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
2819
2820                         data2 = readq(&vpath_reg->rts_access_steer_data1);
2821                         ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
2822
2823                         status = VXGE_HW_OK;
2824                 } else
2825                         *product_desc = 0;
2826         }
2827
2828         return status;
2829 }
2830
2831 /*
2832  * __vxge_hw_vpath_fw_ver_get - Get the fw version
2833  * Returns FW Version
2834  */
2835 static enum vxge_hw_status
2836 __vxge_hw_vpath_fw_ver_get(
2837         u32 vp_id,
2838         struct vxge_hw_vpath_reg __iomem *vpath_reg,
2839         struct vxge_hw_device_hw_info *hw_info)
2840 {
2841         u64 val64;
2842         u64 data1 = 0ULL;
2843         u64 data2 = 0ULL;
2844         struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
2845         struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2846         struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2847         struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2848         enum vxge_hw_status status = VXGE_HW_OK;
2849
2850         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2851                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
2852                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2853                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2854                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2855                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2856
2857         status = __vxge_hw_pio_mem_write64(val64,
2858                                 &vpath_reg->rts_access_steer_ctrl,
2859                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2860                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2861
2862         if (status != VXGE_HW_OK)
2863                 goto exit;
2864
2865         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2866
2867         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2868
2869                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2870                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2871
2872                 fw_date->day =
2873                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2874                                                 data1);
2875                 fw_date->month =
2876                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2877                                                 data1);
2878                 fw_date->year =
2879                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2880                                                 data1);
2881
2882                 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
2883                         fw_date->month, fw_date->day, fw_date->year);
2884
2885                 fw_version->major =
2886                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
2887                 fw_version->minor =
2888                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
2889                 fw_version->build =
2890                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
2891
2892                 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2893                     fw_version->major, fw_version->minor, fw_version->build);
2894
2895                 flash_date->day =
2896                   (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
2897                 flash_date->month =
2898                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
2899                 flash_date->year =
2900                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
2901
2902                 snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
2903                         "%2.2d/%2.2d/%4.4d",
2904                         flash_date->month, flash_date->day, flash_date->year);
2905
2906                 flash_version->major =
2907                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
2908                 flash_version->minor =
2909                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
2910                 flash_version->build =
2911                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
2912
2913                 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2914                         flash_version->major, flash_version->minor,
2915                         flash_version->build);
2916
2917                 status = VXGE_HW_OK;
2918
2919         } else
2920                 status = VXGE_HW_FAIL;
2921 exit:
2922         return status;
2923 }
2924
2925 /*
2926  * __vxge_hw_vpath_pci_func_mode_get - Get the PCI function mode
2927  * Returns the PCI function mode read from the firmware memo entry
2928  */
2929 static u64
2930 __vxge_hw_vpath_pci_func_mode_get(
2931         u32  vp_id,
2932         struct vxge_hw_vpath_reg __iomem *vpath_reg)
2933 {
2934         u64 val64;
2935         u64 data1 = 0ULL;
2936         enum vxge_hw_status status = VXGE_HW_OK;
2937
2938         __vxge_hw_read_rts_ds(vpath_reg,
2939                 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
2940
2941         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2942                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2943                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2944                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2945                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2946                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2947
2948         status = __vxge_hw_pio_mem_write64(val64,
2949                                 &vpath_reg->rts_access_steer_ctrl,
2950                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2951                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2952
2953         if (status != VXGE_HW_OK)
2954                 goto exit;
2955
2956         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2957
2958         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2959                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2960                 status = VXGE_HW_OK;
2961         } else {
2962                 data1 = 0;
2963                 status = VXGE_HW_FAIL;
2964         }
2965 exit:
2966         return data1;
2967 }
2968
2969 /**
2970  * vxge_hw_device_flick_link_led - Flick (blink) link LED.
2971  * @hldev: HW device.
2972  * @on_off: TRUE to start blinking the link LED, FALSE to stop it
2973  *
2974  * Flicker (blink) the link LED.
2975  */
2976 enum vxge_hw_status
2977 vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
2978                                u64 on_off)
2979 {
2980         u64 val64;
2981         enum vxge_hw_status status = VXGE_HW_OK;
2982         struct vxge_hw_vpath_reg __iomem *vp_reg;
2983
2984         if (hldev == NULL) {
2985                 status = VXGE_HW_ERR_INVALID_DEVICE;
2986                 goto exit;
2987         }
2988
2989         vp_reg = hldev->vpath_reg[hldev->first_vp_id];
2990
2991         writeq(0, &vp_reg->rts_access_steer_ctrl);
2992         wmb();
2993         writeq(on_off, &vp_reg->rts_access_steer_data0);
2994         writeq(0, &vp_reg->rts_access_steer_data1);
2995         wmb();
2996
2997         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2998                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
2999                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3000                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
3001                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3002                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3003
3004         status = __vxge_hw_pio_mem_write64(val64,
3005                                 &vp_reg->rts_access_steer_ctrl,
3006                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3007                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3008 exit:
3009         return status;
3010 }
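
     /*
      * Editorial usage sketch (not part of the original source): a driver's
      * ethtool "identify" handler could blink the port LED with the call
      * above.  The wrapper below is hypothetical; only
      * vxge_hw_device_flick_link_led() comes from this file.
      */
     #if 0
     static void example_identify_port(struct __vxge_hw_device *hldev, int blink)
     {
             /* 1 starts the firmware-driven blink, 0 stops it */
             (void)vxge_hw_device_flick_link_led(hldev, blink ? 1ULL : 0ULL);
     }
     #endif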
3011
3012 /*
3013  * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3014  */
3015 enum vxge_hw_status
3016 __vxge_hw_vpath_rts_table_get(
3017         struct __vxge_hw_vpath_handle *vp,
3018         u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
3019 {
3020         u64 val64;
3021         struct __vxge_hw_virtualpath *vpath;
3022         struct vxge_hw_vpath_reg __iomem *vp_reg;
3023
3024         enum vxge_hw_status status = VXGE_HW_OK;
3025
3026         if (vp == NULL) {
3027                 status = VXGE_HW_ERR_INVALID_HANDLE;
3028                 goto exit;
3029         }
3030
3031         vpath = vp->vpath;
3032         vp_reg = vpath->vp_reg;
3033
3034         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3035                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3036                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3037                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3038
3039         if ((rts_table ==
3040                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3041             (rts_table ==
3042                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3043             (rts_table ==
3044                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3045             (rts_table ==
3046                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3047                 val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3048         }
3049
3050         status = __vxge_hw_pio_mem_write64(val64,
3051                                 &vp_reg->rts_access_steer_ctrl,
3052                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3053                                 vpath->hldev->config.device_poll_millis);
3054
3055         if (status != VXGE_HW_OK)
3056                 goto exit;
3057
3058         val64 = readq(&vp_reg->rts_access_steer_ctrl);
3059
3060         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3061
3062                 *data1 = readq(&vp_reg->rts_access_steer_data0);
3063
3064                 if ((rts_table ==
3065                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3066                 (rts_table ==
3067                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3068                         *data2 = readq(&vp_reg->rts_access_steer_data1);
3069                 }
3070                 status = VXGE_HW_OK;
3071         } else
3072                 status = VXGE_HW_FAIL;
3073 exit:
3074         return status;
3075 }
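
     /*
      * Editorial sketch: reading the first DA (MAC address) table entry
      * through the accessor above.  The wrapper is hypothetical; the action
      * and table-selector macros are the same ones used later in this file
      * by __vxge_hw_vpath_addr_get().
      */
     #if 0
     static enum vxge_hw_status
     example_read_first_da_entry(struct __vxge_hw_vpath_handle *vp,
                                 u64 *addr, u64 *mask)
     {
             /* data1 returns the address, data2 the address mask */
             return __vxge_hw_vpath_rts_table_get(vp,
                     VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
                     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                     0, addr, mask);
     }
     #endif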
3076
3077 /*
3078  * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3079  */
3080 enum vxge_hw_status
3081 __vxge_hw_vpath_rts_table_set(
3082         struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
3083         u32 offset, u64 data1, u64 data2)
3084 {
3085         u64 val64;
3086         struct __vxge_hw_virtualpath *vpath;
3087         enum vxge_hw_status status = VXGE_HW_OK;
3088         struct vxge_hw_vpath_reg __iomem *vp_reg;
3089
3090         if (vp == NULL) {
3091                 status = VXGE_HW_ERR_INVALID_HANDLE;
3092                 goto exit;
3093         }
3094
3095         vpath = vp->vpath;
3096         vp_reg = vpath->vp_reg;
3097
3098         writeq(data1, &vp_reg->rts_access_steer_data0);
3099         wmb();
3100
3101         if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3102             (rts_table ==
3103                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3104                 writeq(data2, &vp_reg->rts_access_steer_data1);
3105                 wmb();
3106         }
3107
3108         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3109                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3110                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3111                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3112
3113         status = __vxge_hw_pio_mem_write64(val64,
3114                                 &vp_reg->rts_access_steer_ctrl,
3115                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3116                                 vpath->hldev->config.device_poll_millis);
3117
3118         if (status != VXGE_HW_OK)
3119                 goto exit;
3120
3121         val64 = readq(&vp_reg->rts_access_steer_ctrl);
3122
3123         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
3124                 status = VXGE_HW_OK;
3125         else
3126                 status = VXGE_HW_FAIL;
3127 exit:
3128         return status;
3129 }
3130
3131 /*
3132  * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
3133  *               from MAC address table.
3134  */
3135 static enum vxge_hw_status
3136 __vxge_hw_vpath_addr_get(
3137         u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3138         u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
3139 {
3140         u32 i;
3141         u64 val64;
3142         u64 data1 = 0ULL;
3143         u64 data2 = 0ULL;
3144         enum vxge_hw_status status = VXGE_HW_OK;
3145
3146         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3147                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
3148                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3149                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
3150                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3151                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3152
3153         status = __vxge_hw_pio_mem_write64(val64,
3154                                 &vpath_reg->rts_access_steer_ctrl,
3155                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3156                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3157
3158         if (status != VXGE_HW_OK)
3159                 goto exit;
3160
3161         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3162
3163         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3164
3165                 data1 = readq(&vpath_reg->rts_access_steer_data0);
3166                 data2 = readq(&vpath_reg->rts_access_steer_data1);
3167
3168                 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
3169                 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3170                                                         data2);
3171
3172                 for (i = ETH_ALEN; i > 0; i--) {
3173                         macaddr[i-1] = (u8)(data1 & 0xFF);
3174                         data1 >>= 8;
3175
3176                         macaddr_mask[i-1] = (u8)(data2 & 0xFF);
3177                         data2 >>= 8;
3178                 }
3179                 status = VXGE_HW_OK;
3180         } else
3181                 status = VXGE_HW_FAIL;
3182 exit:
3183         return status;
3184 }
3185
3186 /*
3187  * vxge_hw_vpath_rts_rth_set - Set/configure RTH (receive traffic hashing).
3188  */
3189 enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3190                         struct __vxge_hw_vpath_handle *vp,
3191                         enum vxge_hw_rth_algoritms algorithm,
3192                         struct vxge_hw_rth_hash_types *hash_type,
3193                         u16 bucket_size)
3194 {
3195         u64 data0, data1;
3196         enum vxge_hw_status status = VXGE_HW_OK;
3197
3198         if (vp == NULL) {
3199                 status = VXGE_HW_ERR_INVALID_HANDLE;
3200                 goto exit;
3201         }
3202
3203         status = __vxge_hw_vpath_rts_table_get(vp,
3204                      VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3205                      VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3206                         0, &data0, &data1);
3207         if (status != VXGE_HW_OK)
3208                 goto exit;
3209
3210         data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3211                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3212
3213         data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3214         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3215         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3216
3217         if (hash_type->hash_type_tcpipv4_en)
3218                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3219
3220         if (hash_type->hash_type_ipv4_en)
3221                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3222
3223         if (hash_type->hash_type_tcpipv6_en)
3224                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3225
3226         if (hash_type->hash_type_ipv6_en)
3227                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3228
3229         if (hash_type->hash_type_tcpipv6ex_en)
3230                 data0 |=
3231                 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3232
3233         if (hash_type->hash_type_ipv6ex_en)
3234                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3235
3236         if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3237                 data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3238         else
3239                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3240
3241         status = __vxge_hw_vpath_rts_table_set(vp,
3242                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3243                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3244                 0, data0, 0);
3245 exit:
3246         return status;
3247 }
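
     /*
      * Editorial sketch: enabling RTH for TCP/IPv4 flows.  RTH_ALG_JENKINS
      * is assumed to be declared in vxge-config.h; the bucket size of 4
      * (i.e. a 16-entry indirection table) is illustrative, not a value
      * mandated by this file.
      */
     #if 0
     static enum vxge_hw_status
     example_enable_rth(struct __vxge_hw_vpath_handle *vp)
     {
             struct vxge_hw_rth_hash_types hash_types;

             memset(&hash_types, 0, sizeof(hash_types));
             hash_types.hash_type_tcpipv4_en = 1;

             return vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS,
                                              &hash_types, 4);
     }
     #endif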
3248
3249 static void
3250 vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3251                                 u16 flag, u8 *itable)
3252 {
3253         switch (flag) {
3254         case 1:
3255                 *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3256                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3257                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3258                         itable[j]);
                     break;
3259         case 2:
3260                 *data0 |=
3261                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3262                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3263                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3264                         itable[j]);
                     break;
3265         case 3:
3266                 *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3267                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3268                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3269                         itable[j]);
                     break;
3270         case 4:
3271                 *data1 |=
3272                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3273                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3274                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3275                         itable[j]);
                     break;
3276         default:
3277                 return;
3278         }
3279 }
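
     /*
      * Note on the flags above: values 1..4 pack item 0 and item 1 of the
      * data0 word, then item 0 and item 1 of the data1 word, so a single
      * RTS write can program up to four indirection-table buckets.  The
      * caller below gathers up to four buckets destined for the same vpath
      * before issuing each write.
      */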
3280 /*
3281  * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3282  */
3283 enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3284                         struct __vxge_hw_vpath_handle **vpath_handles,
3285                         u32 vpath_count,
3286                         u8 *mtable,
3287                         u8 *itable,
3288                         u32 itable_size)
3289 {
3290         u32 i, j, action, rts_table;
3291         u64 data0;
3292         u64 data1;
3293         u32 max_entries;
3294         enum vxge_hw_status status = VXGE_HW_OK;
3295         struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3296
3297         if (vp == NULL) {
3298                 status = VXGE_HW_ERR_INVALID_HANDLE;
3299                 goto exit;
3300         }
3301
3302         max_entries = (((u32)1) << itable_size);
3303
3304         if (vp->vpath->hldev->config.rth_it_type
3305                                 == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3306                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3307                 rts_table =
3308                         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3309
3310                 for (j = 0; j < max_entries; j++) {
3311
3312                         data1 = 0;
3313
3314                         data0 =
3315                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3316                                 itable[j]);
3317
3318                         status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3319                                 action, rts_table, j, data0, data1);
3320
3321                         if (status != VXGE_HW_OK)
3322                                 goto exit;
3323                 }
3324
3325                 for (j = 0; j < max_entries; j++) {
3326
3327                         data1 = 0;
3328
3329                         data0 =
3330                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3331                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3332                                 itable[j]);
3333
3334                         status = __vxge_hw_vpath_rts_table_set(
3335                                 vpath_handles[mtable[itable[j]]], action,
3336                                 rts_table, j, data0, data1);
3337
3338                         if (status != VXGE_HW_OK)
3339                                 goto exit;
3340                 }
3341         } else {
3342                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3343                 rts_table =
3344                         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3345                 for (i = 0; i < vpath_count; i++) {
3346
3347                         for (j = 0; j < max_entries;) {
3348
3349                                 data0 = 0;
3350                                 data1 = 0;
3351
3352                                 while (j < max_entries) {
3353                                         if (mtable[itable[j]] != i) {
3354                                                 j++;
3355                                                 continue;
3356                                         }
3357                                         vxge_hw_rts_rth_data0_data1_get(j,
3358                                                 &data0, &data1, 1, itable);
3359                                         j++;
3360                                         break;
3361                                 }
3362
3363                                 while (j < max_entries) {
3364                                         if (mtable[itable[j]] != i) {
3365                                                 j++;
3366                                                 continue;
3367                                         }
3368                                         vxge_hw_rts_rth_data0_data1_get(j,
3369                                                 &data0, &data1, 2, itable);
3370                                         j++;
3371                                         break;
3372                                 }
3373
3374                                 while (j < max_entries) {
3375                                         if (mtable[itable[j]] != i) {
3376                                                 j++;
3377                                                 continue;
3378                                         }
3379                                         vxge_hw_rts_rth_data0_data1_get(j,
3380                                                 &data0, &data1, 3, itable);
3381                                         j++;
3382                                         break;
3383                                 }
3384
3385                                 while (j < max_entries) {
3386                                         if (mtable[itable[j]] != i) {
3387                                                 j++;
3388                                                 continue;
3389                                         }
3390                                         vxge_hw_rts_rth_data0_data1_get(j,
3391                                                 &data0, &data1, 4, itable);
3392                                         j++;
3393                                         break;
3394                                 }
3395
3396                                 if (data0 != 0) {
3397                                         status = __vxge_hw_vpath_rts_table_set(
3398                                                         vpath_handles[i],
3399                                                         action, rts_table,
3400                                                         0, data0, data1);
3401
3402                                         if (status != VXGE_HW_OK)
3403                                                 goto exit;
3404                                 }
3405                         }
3406                 }
3407         }
3408 exit:
3409         return status;
3410 }
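
     /*
      * Editorial sketch: preparing the mtable/itable pair consumed by
      * vxge_hw_vpath_rts_rth_itable_set().  itable maps hash buckets to
      * table entries and mtable maps entries back to vpath indices; the
      * round-robin policy below is illustrative (the real assignment lives
      * in the caller, e.g. vxge-main.c).
      */
     #if 0
     static void example_fill_itables(u8 *mtable, u8 *itable,
                                      u32 vpath_count, u32 itable_size)
     {
             u32 j;

             for (j = 0; j < (1U << itable_size); j++) {
                     itable[j] = (u8)j;
                     mtable[itable[j]] = (u8)(j % vpath_count);
             }
     }
     #endif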
3411
3412 /**
3413  * vxge_hw_vpath_check_leak - Check for memory leak
3414  * @ring: Handle to the ring object used for receive
3415  *
3416  * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
3417  * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
3418  * Returns: VXGE_HW_FAIL if a leak has occurred; VXGE_HW_OK otherwise.
3419  *
3420  */
3421 enum vxge_hw_status
3422 vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3423 {
3424         enum vxge_hw_status status = VXGE_HW_OK;
3425         u64 rxd_new_count, rxd_spat;
3426
3427         if (ring == NULL)
3428                 return status;
3429
3430         rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3431         rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3432         rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3433
3434         if (rxd_new_count >= rxd_spat)
3435                 status = VXGE_HW_FAIL;
3436
3437         return status;
3438 }
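
     /*
      * Editorial sketch: a hypothetical RxD-replenish path could consult
      * the check above before ringing the doorbell.  Only
      * vxge_hw_vpath_check_leak() and the doorbell register are from this
      * file; the policy shown is illustrative.
      */
     #if 0
     static void example_post_rxds(struct __vxge_hw_ring *ring, u64 count)
     {
             if (vxge_hw_vpath_check_leak(ring) == VXGE_HW_FAIL)
                     return; /* leak detected: do not post more RxDs */

             writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(count),
                    &ring->vp_reg->prc_rxd_doorbell);
     }
     #endif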
3439
3440 /*
3441  * __vxge_hw_vpath_mgmt_read
3442  * This routine reads the vpath_mgmt registers
3443  */
3444 static enum vxge_hw_status
3445 __vxge_hw_vpath_mgmt_read(
3446         struct __vxge_hw_device *hldev,
3447         struct __vxge_hw_virtualpath *vpath)
3448 {
3449         u32 i, mtu = 0, max_pyld = 0;
3450         u64 val64;
3451         enum vxge_hw_status status = VXGE_HW_OK;
3452
3453         for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3454
3455                 val64 = readq(&vpath->vpmgmt_reg->
3456                                 rxmac_cfg0_port_vpmgmt_clone[i]);
3457                 max_pyld =
3458                         (u32)
3459                         VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3460                         (val64);
3461                 if (mtu < max_pyld)
3462                         mtu = max_pyld;
3463         }
3464
3465         vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3466
3467         val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
3468
3469         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3470                 if (val64 & vxge_mBIT(i))
3471                         vpath->vsport_number = i;
3472         }
3473
3474         val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
3475
3476         if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
3477                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
3478         else
3479                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
3480
3481         return status;
3482 }
3483
3484 /*
3485  * __vxge_hw_vpath_reset_check - Check if the vpath reset has completed
3486  * This routine polls the vpath_rst_in_prog register to see if the
3487  * adapter has completed the reset process for the vpath
3488  */
3489 static enum vxge_hw_status
3490 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3491 {
3492         enum vxge_hw_status status;
3493
3494         status = __vxge_hw_device_register_poll(
3495                         &vpath->hldev->common_reg->vpath_rst_in_prog,
3496                         VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
3497                                 1 << (16 - vpath->vp_id)),
3498                         vpath->hldev->config.device_poll_millis);
3499
3500         return status;
3501 }
3502
3503 /*
3504  * __vxge_hw_vpath_reset
3505  * This routine resets the vpath on the device
3506  */
3507 static enum vxge_hw_status
3508 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3509 {
3510         u64 val64;
3511         enum vxge_hw_status status = VXGE_HW_OK;
3512
3513         val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
3514
3515         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
3516                                 &hldev->common_reg->cmn_rsthdlr_cfg0);
3517
3518         return status;
3519 }
3520
3521 /*
3522  * __vxge_hw_vpath_sw_reset
3523  * This routine resets the vpath structures
3524  */
3525 static enum vxge_hw_status
3526 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3527 {
3528         enum vxge_hw_status status = VXGE_HW_OK;
3529         struct __vxge_hw_virtualpath *vpath;
3530
3531         vpath = &hldev->virtual_paths[vp_id];
3532
3533         if (vpath->ringh) {
3534                 status = __vxge_hw_ring_reset(vpath->ringh);
3535                 if (status != VXGE_HW_OK)
3536                         goto exit;
3537         }
3538
3539         if (vpath->fifoh)
3540                 status = __vxge_hw_fifo_reset(vpath->fifoh);
3541 exit:
3542         return status;
3543 }
3544
3545 /*
3546  * __vxge_hw_vpath_prc_configure
3547  * This routine configures the PRC registers of the virtual path using
3548  * the configuration passed
3549  */
3550 static void
3551 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3552 {
3553         u64 val64;
3554         struct __vxge_hw_virtualpath *vpath;
3555         struct vxge_hw_vp_config *vp_config;
3556         struct vxge_hw_vpath_reg __iomem *vp_reg;
3557
3558         vpath = &hldev->virtual_paths[vp_id];
3559         vp_reg = vpath->vp_reg;
3560         vp_config = vpath->vp_config;
3561
3562         if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
3563                 return;
3564
3565         val64 = readq(&vp_reg->prc_cfg1);
3566         val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
3567         writeq(val64, &vp_reg->prc_cfg1);
3568
3569         val64 = readq(&vpath->vp_reg->prc_cfg6);
3570         val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
3571         writeq(val64, &vpath->vp_reg->prc_cfg6);
3572
3573         val64 = readq(&vp_reg->prc_cfg7);
3574
3575         if (vpath->vp_config->ring.scatter_mode !=
3576                 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
3577
3578                 val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
3579
3580                 switch (vpath->vp_config->ring.scatter_mode) {
3581                 case VXGE_HW_RING_SCATTER_MODE_A:
3582                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3583                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
3584                         break;
3585                 case VXGE_HW_RING_SCATTER_MODE_B:
3586                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3587                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
3588                         break;
3589                 case VXGE_HW_RING_SCATTER_MODE_C:
3590                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3591                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
3592                         break;
3593                 }
3594         }
3595
3596         writeq(val64, &vp_reg->prc_cfg7);
3597
3598         writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
3599                                 __vxge_hw_ring_first_block_address_get(
3600                                         vpath->ringh) >> 3), &vp_reg->prc_cfg5);
3601
3602         val64 = readq(&vp_reg->prc_cfg4);
3603         val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
3604         val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
3605
3606         val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
3607                         VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
3608
3609         if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
3610                 val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
3611         else
3612                 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
3613
3614         writeq(val64, &vp_reg->prc_cfg4);
3615 }
3616
3617 /*
3618  * __vxge_hw_vpath_kdfc_configure
3619  * This routine configures the KDFC registers of the virtual path using
3620  * the configuration passed
3621  */
3622 static enum vxge_hw_status
3623 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3624 {
3625         u64 val64;
3626         u64 vpath_stride;
3627         enum vxge_hw_status status = VXGE_HW_OK;
3628         struct __vxge_hw_virtualpath *vpath;
3629         struct vxge_hw_vpath_reg __iomem *vp_reg;
3630
3631         vpath = &hldev->virtual_paths[vp_id];
3632         vp_reg = vpath->vp_reg;
3633         status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
3634
3635         if (status != VXGE_HW_OK)
3636                 goto exit;
3637
3638         val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
3639
3640         vpath->max_kdfc_db =
3641                 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
3642                         val64+1)/2;
3643
3644         if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3645
3646                 vpath->max_nofl_db = vpath->max_kdfc_db;
3647
3648                 if (vpath->max_nofl_db <
3649                         ((vpath->vp_config->fifo.memblock_size /
3650                         (vpath->vp_config->fifo.max_frags *
3651                         sizeof(struct vxge_hw_fifo_txd))) *
3652                         vpath->vp_config->fifo.fifo_blocks)) {
3653
3654                         return VXGE_HW_BADCFG_FIFO_BLOCKS;
3655                 }
3656                 val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
3657                                 (vpath->max_nofl_db*2)-1);
3658         }
3659
3660         writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
3661
3662         writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
3663                 &vp_reg->kdfc_fifo_trpl_ctrl);
3664
3665         val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
3666
3667         val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
3668                    VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
3669
3670         val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
3671                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
3672 #ifndef __BIG_ENDIAN
3673                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
3674 #endif
3675                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
3676
3677         writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
3678         writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
3679         wmb();
3680         vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
3681
3682         vpath->nofl_db =
3683                 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
3684                 (hldev->kdfc + (vp_id *
3685                 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
3686                                         vpath_stride)));
3687 exit:
3688         return status;
3689 }
3690
3691 /*
3692  * __vxge_hw_vpath_mac_configure
3693  * This routine configures the MAC of the virtual path using the config passed
3694  */
3695 static enum vxge_hw_status
3696 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3697 {
3698         u64 val64;
3699         enum vxge_hw_status status = VXGE_HW_OK;
3700         struct __vxge_hw_virtualpath *vpath;
3701         struct vxge_hw_vp_config *vp_config;
3702         struct vxge_hw_vpath_reg __iomem *vp_reg;
3703
3704         vpath = &hldev->virtual_paths[vp_id];
3705         vp_reg = vpath->vp_reg;
3706         vp_config = vpath->vp_config;
3707
3708         writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
3709                         vpath->vsport_number), &vp_reg->xmac_vsport_choice);
3710
3711         if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
3712
3713                 val64 = readq(&vp_reg->xmac_rpa_vcfg);
3714
3715                 if (vp_config->rpa_strip_vlan_tag !=
3716                         VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
3717                         if (vp_config->rpa_strip_vlan_tag)
3718                                 val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3719                         else
3720                                 val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3721                 }
3722
3723                 writeq(val64, &vp_reg->xmac_rpa_vcfg);
3724                 val64 = readq(&vp_reg->rxmac_vcfg0);
3725
3726                 if (vp_config->mtu !=
3727                                 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
3728                         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
3729                         if ((vp_config->mtu  +
3730                                 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
3731                                 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3732                                         vp_config->mtu  +
3733                                         VXGE_HW_MAC_HEADER_MAX_SIZE);
3734                         else
3735                                 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3736                                         vpath->max_mtu);
3737                 }
3738
3739                 writeq(val64, &vp_reg->rxmac_vcfg0);
3740
3741                 val64 = readq(&vp_reg->rxmac_vcfg1);
3742
3743                 val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
3744                         VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
3745
3746                 if (hldev->config.rth_it_type ==
3747                                 VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
3748                         val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
3749                                 0x2) |
3750                                 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
3751                 }
3752
3753                 writeq(val64, &vp_reg->rxmac_vcfg1);
3754         }
3755         return status;
3756 }
3757
3758 /*
3759  * __vxge_hw_vpath_tim_configure
3760  * This routine configures the TIM registers of the virtual path using
3761  * the configuration passed
3762  */
3763 static enum vxge_hw_status
3764 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3765 {
3766         u64 val64;
3767         enum vxge_hw_status status = VXGE_HW_OK;
3768         struct __vxge_hw_virtualpath *vpath;
3769         struct vxge_hw_vpath_reg __iomem *vp_reg;
3770         struct vxge_hw_vp_config *config;
3771
3772         vpath = &hldev->virtual_paths[vp_id];
3773         vp_reg = vpath->vp_reg;
3774         config = vpath->vp_config;
3775
3776         writeq((u64)0, &vp_reg->tim_dest_addr);
3777         writeq((u64)0, &vp_reg->tim_vpath_map);
3778         writeq((u64)0, &vp_reg->tim_bitmap);
3779         writeq((u64)0, &vp_reg->tim_remap);
3780
3781         if (config->ring.enable == VXGE_HW_RING_ENABLE)
3782                 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
3783                         (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
3784                         VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
3785
3786         val64 = readq(&vp_reg->tim_pci_cfg);
3787         val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
3788         writeq(val64, &vp_reg->tim_pci_cfg);
3789
3790         if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3791
3792                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3793
3794                 if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3795                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3796                                 0x3ffffff);
3797                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3798                                         config->tti.btimer_val);
3799                 }
3800
3801                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3802
3803                 if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3804                         if (config->tti.timer_ac_en)
3805                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3806                         else
3807                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3808                 }
3809
3810                 if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3811                         if (config->tti.timer_ci_en)
3812                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3813                         else
3814                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3815                 }
3816
3817                 if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3818                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3819                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3820                                         config->tti.urange_a);
3821                 }
3822
3823                 if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3824                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3825                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3826                                         config->tti.urange_b);
3827                 }
3828
3829                 if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3830                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3831                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3832                                         config->tti.urange_c);
3833                 }
3834
3835                 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3836                 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3837
3838                 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3839                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3840                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3841                                                 config->tti.uec_a);
3842                 }
3843
3844                 if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3845                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3846                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3847                                                 config->tti.uec_b);
3848                 }
3849
3850                 if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3851                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3852                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3853                                                 config->tti.uec_c);
3854                 }
3855
3856                 if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3857                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3858                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3859                                                 config->tti.uec_d);
3860                 }
3861
3862                 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3863                 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3864
3865                 if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3866                         if (config->tti.timer_ri_en)
3867                                 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3868                         else
3869                                 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3870                 }
3871
3872                 if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3873                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3874                                         0x3ffffff);
3875                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3876                                         config->tti.rtimer_val);
3877                 }
3878
3879                 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3880                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3881                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3882                                         config->tti.util_sel);
3883                 }
3884
3885                 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3886                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3887                                         0x3ffffff);
3888                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3889                                         config->tti.ltimer_val);
3890                 }
3891
3892                 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3893         }
3894
3895         if (config->ring.enable == VXGE_HW_RING_ENABLE) {
3896
3897                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3898
3899                 if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3900                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3901                                         0x3ffffff);
3902                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3903                                         config->rti.btimer_val);
3904                 }
3905
3906                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3907
3908                 if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3909                         if (config->rti.timer_ac_en)
3910                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3911                         else
3912                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3913                 }
3914
3915                 if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3916                         if (config->rti.timer_ci_en)
3917                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3918                         else
3919                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3920                 }
3921
3922                 if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3923                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3924                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3925                                         config->rti.urange_a);
3926                 }
3927
3928                 if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3929                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3930                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3931                                         config->rti.urange_b);
3932                 }
3933
3934                 if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3935                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3936                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3937                                         config->rti.urange_c);
3938                 }
3939
3940                 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3941                 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3942
3943                 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3944                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3945                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3946                                                 config->rti.uec_a);
3947                 }
3948
3949                 if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3950                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3951                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3952                                                 config->rti.uec_b);
3953                 }
3954
3955                 if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3956                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3957                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3958                                                 config->rti.uec_c);
3959                 }
3960
3961                 if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3962                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3963                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3964                                                 config->rti.uec_d);
3965                 }
3966
3967                 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3968                 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
3969
3970                 if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3971                         if (config->rti.timer_ri_en)
3972                                 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3973                         else
3974                                 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3975                 }
3976
3977                 if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3978                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3979                                         0x3ffffff);
3980                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3981                                         config->rti.rtimer_val);
3982                 }
3983
3984                 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3985                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3986                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3987                                         config->rti.util_sel);
3988                 }
3989
3990                 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3991                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3992                                         0x3ffffff);
3993                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3994                                         config->rti.ltimer_val);
3995                 }
3996
3997                 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
3998         }
3999
4000         val64 = 0;
4001         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4002         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4003         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4004         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4005         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4006         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4007
4008         return status;
4009 }
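
     /*
      * Editorial sketch: the tti/rti members consumed above come from the
      * vpath configuration.  A caller wanting auto-cancelling tx interrupt
      * moderation might seed the tx side like this before opening the
      * vpath.  The struct tag (vxge_hw_tim_intr_config) is assumed from
      * vxge-config.h, and all values are illustrative, not defaults
      * mandated by this file.
      */
     #if 0
     static void example_seed_tti(struct vxge_hw_tim_intr_config *tti)
     {
             tti->btimer_val = 250;  /* base timer value */
             tti->timer_ac_en = 1;   /* auto-cancel on */
             tti->timer_ci_en = 0;   /* continuous interrupts off */
             tti->urange_a = 10;     /* utilization ranges */
             tti->urange_b = 20;
             tti->urange_c = 30;
     }
     #endif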
4010
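     /*
      * vxge_hw_vpath_tti_ci_set - Enable continuous interrupts on the tx timer
      * Sets the TIMER_CI bit in the transmit TIM configuration of the vpath
      * if the fifo is enabled and the bit is not already set.
      */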
4011 void
4012 vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4013 {
4014         struct __vxge_hw_virtualpath *vpath;
4015         struct vxge_hw_vpath_reg __iomem *vp_reg;
4016         struct vxge_hw_vp_config *config;
4017         u64 val64;
4018
4019         vpath = &hldev->virtual_paths[vp_id];
4020         vp_reg = vpath->vp_reg;
4021         config = vpath->vp_config;
4022
4023         if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4024                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4025
4026                 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
4027                         config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
4028                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4029                         writeq(val64,
4030                         &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4031                 }
4032         }
4033 }

4034 /*
4035  * __vxge_hw_vpath_initialize
4036  * This routine is the final phase of init which initializes the
4037  * registers of the vpath using the configuration passed.
4038  */
4039 static enum vxge_hw_status
4040 __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4041 {
4042         u64 val64;
4043         u32 val32;
4044         enum vxge_hw_status status = VXGE_HW_OK;
4045         struct __vxge_hw_virtualpath *vpath;
4046         struct vxge_hw_vpath_reg __iomem *vp_reg;
4047
4048         vpath = &hldev->virtual_paths[vp_id];
4049
4050         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4051                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4052                 goto exit;
4053         }
4054         vp_reg = vpath->vp_reg;
4055
4056         status =  __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4057
4058         if (status != VXGE_HW_OK)
4059                 goto exit;
4060
4061         status =  __vxge_hw_vpath_mac_configure(hldev, vp_id);
4062
4063         if (status != VXGE_HW_OK)
4064                 goto exit;
4065
4066         status =  __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4067
4068         if (status != VXGE_HW_OK)
4069                 goto exit;
4070
4071         status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
4072
4073         if (status != VXGE_HW_OK)
4074                 goto exit;
4075
4076         val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
4077
4078         /* Get MRRS value from device control */
4079         status  = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4080
4081         if (status == VXGE_HW_OK) {
4082                 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
4083                 val64 &=
4084                     ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
4085                 val64 |=
4086                     VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
4087
4088                 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
4089         }
4090
4091         val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
4092         val64 |=
4093             VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
4094                     VXGE_HW_MAX_PAYLOAD_SIZE_512);
4095
4096         val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
4097         writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
4098
4099 exit:
4100         return status;
4101 }
4102
4103 /*
4104  * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4105  * This routine is the initial phase of init which resets the vpath and
4106  * initializes the software support structures.
4107  */
4108 static enum vxge_hw_status
4109 __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4110                         struct vxge_hw_vp_config *config)
4111 {
4112         struct __vxge_hw_virtualpath *vpath;
4113         enum vxge_hw_status status = VXGE_HW_OK;
4114
4115         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4116                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4117                 goto exit;
4118         }
4119
4120         vpath = &hldev->virtual_paths[vp_id];
4121
4122         vpath->vp_id = vp_id;
4123         vpath->vp_open = VXGE_HW_VP_OPEN;
4124         vpath->hldev = hldev;
4125         vpath->vp_config = config;
4126         vpath->vp_reg = hldev->vpath_reg[vp_id];
4127         vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4128
4129         __vxge_hw_vpath_reset(hldev, vp_id);
4130
4131         status = __vxge_hw_vpath_reset_check(vpath);
4132
4133         if (status != VXGE_HW_OK) {
4134                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4135                 goto exit;
4136         }
4137
4138         status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4139
4140         if (status != VXGE_HW_OK) {
4141                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4142                 goto exit;
4143         }
4144
4145         INIT_LIST_HEAD(&vpath->vpath_handles);
4146
4147         vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4148
4149         VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4150                 hldev->tim_int_mask1, vp_id);
4151
4152         status = __vxge_hw_vpath_initialize(hldev, vp_id);
4153
4154         if (status != VXGE_HW_OK)
4155                 __vxge_hw_vp_terminate(hldev, vp_id);
4156 exit:
4157         return status;
4158 }
4159
4160 /*
4161  * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4162  * This routine closes all channels it opened and frees up the memory
4163  */
4164 static void
4165 __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4166 {
4167         struct __vxge_hw_virtualpath *vpath;
4168
4169         vpath = &hldev->virtual_paths[vp_id];
4170
4171         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4172                 goto exit;
4173
4174         VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4175                 vpath->hldev->tim_int_mask1, vpath->vp_id);
4176         hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4177
4178         memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4179 exit:
4180         return;
4181 }
4182
4183 /*
4184  * vxge_hw_vpath_mtu_set - Set MTU.
4185  * Set a new MTU value. For example, to use jumbo frames:
4186  * vxge_hw_vpath_mtu_set(my_device, 9600);
4187  */
4188 enum vxge_hw_status
4189 vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4190 {
4191         u64 val64;
4192         enum vxge_hw_status status = VXGE_HW_OK;
4193         struct __vxge_hw_virtualpath *vpath;
4194
4195         if (vp == NULL) {
4196                 status = VXGE_HW_ERR_INVALID_HANDLE;
4197                 goto exit;
4198         }
4199         vpath = vp->vpath;
4200
4201         new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4202
4203         if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
4204                 status = VXGE_HW_ERR_INVALID_MTU_SIZE;
                     goto exit;
             }
4205
4206         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4207
4208         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4209         val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4210
4211         writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4212
4213         vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4214
4215 exit:
4216         return status;
4217 }
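
     /*
      * Editorial sketch: checking the status of an MTU change like the one
      * shown in the comment above; the fallback value is illustrative.
      */
     #if 0
     static void example_set_jumbo(struct __vxge_hw_vpath_handle *vp)
     {
             if (vxge_hw_vpath_mtu_set(vp, 9000) != VXGE_HW_OK)
                     /* fall back to the standard Ethernet MTU */
                     (void)vxge_hw_vpath_mtu_set(vp, 1500);
     }
     #endif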
4218
4219 /*
4220  * vxge_hw_vpath_open - Open a virtual path on a given adapter
4221  * This function is used to open access to a virtual path of an
4222  * adapter for offload and GRO operations. This function returns
4223  * synchronously.
4224  */
4225 enum vxge_hw_status
4226 vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4227                    struct vxge_hw_vpath_attr *attr,
4228                    struct __vxge_hw_vpath_handle **vpath_handle)
4229 {
4230         struct __vxge_hw_virtualpath *vpath;
4231         struct __vxge_hw_vpath_handle *vp;
4232         enum vxge_hw_status status;
4233
4234         vpath = &hldev->virtual_paths[attr->vp_id];
4235
4236         if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4237                 status = VXGE_HW_ERR_INVALID_STATE;
4238                 goto vpath_open_exit1;
4239         }
4240
4241         status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4242                         &hldev->config.vp_config[attr->vp_id]);
4243
4244         if (status != VXGE_HW_OK)
4245                 goto vpath_open_exit1;
4246
4247         vp = vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4249         if (vp == NULL) {
4250                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4251                 goto vpath_open_exit2;
4252         }
4253
4254         memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4255
4256         vp->vpath = vpath;
4257
4258         if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4259                 status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4260                 if (status != VXGE_HW_OK)
4261                         goto vpath_open_exit6;
4262         }
4263
4264         if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4265                 status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4266                 if (status != VXGE_HW_OK)
4267                         goto vpath_open_exit7;
4268
4269                 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4270         }
4271
4272         vpath->fifoh->tx_intr_num =
4273                 (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP)  +
4274                         VXGE_HW_VPATH_INTR_TX;
4275
4276         vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4277                                 VXGE_HW_BLOCK_SIZE);
4278
4279         if (vpath->stats_block == NULL) {
4280                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4281                 goto vpath_open_exit8;
4282         }
4283
4284         vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
4285                         stats_block->memblock;
4286         memset(vpath->hw_stats, 0,
4287                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4288
4289         hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4290                                                 vpath->hw_stats;
4291
4292         vpath->hw_stats_sav =
4293                 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4294         memset(vpath->hw_stats_sav, 0,
4295                         sizeof(struct vxge_hw_vpath_stats_hw_info));
4296
4297         writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4298
4299         status = vxge_hw_vpath_stats_enable(vp);
4300         if (status != VXGE_HW_OK)
4301                 goto vpath_open_exit8;
4302
4303         list_add(&vp->item, &vpath->vpath_handles);
4304
4305         hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4306
4307         *vpath_handle = vp;
4308
4309         attr->fifo_attr.userdata = vpath->fifoh;
4310         attr->ring_attr.userdata = vpath->ringh;
4311
4312         return VXGE_HW_OK;
4313
4314 vpath_open_exit8:
4315         if (vpath->ringh != NULL)
4316                 __vxge_hw_ring_delete(vp);
4317 vpath_open_exit7:
4318         if (vpath->fifoh != NULL)
4319                 __vxge_hw_fifo_delete(vp);
4320 vpath_open_exit6:
4321         vfree(vp);
4322 vpath_open_exit2:
4323         __vxge_hw_vp_terminate(hldev, attr->vp_id);
4324 vpath_open_exit1:
4325
4326         return status;
4327 }
4328
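/*
 * Usage sketch (illustrative only): open vpath 0 and close it again.
 * Assumes "hldev" is an initialized device and that the caller has
 * already filled in attr's fifo/ring configuration and callbacks;
 * the helper name is hypothetical.
 */
static enum vxge_hw_status example_vpath_open_close(
        struct __vxge_hw_device *hldev,
        struct vxge_hw_vpath_attr *attr)
{
        struct __vxge_hw_vpath_handle *vp = NULL;
        enum vxge_hw_status status;

        attr->vp_id = 0;
        status = vxge_hw_vpath_open(hldev, attr, &vp);
        if (status != VXGE_HW_OK)
                return status;

        /* On success attr->fifo_attr.userdata and attr->ring_attr.userdata
         * point at the created fifo and ring. */

        return vxge_hw_vpath_close(vp);
}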
/**
 * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD count to the
 * receive doorbell
 * @vp: Handle obtained from a previous vpath open
 *
 * This function posts the number of RxD qwords available in the vpath's
 * RxD memory to the PRC doorbell and derives the ring's rxds_limit from
 * that size and the PRC_CFG6 SPAT threshold.
 */
4337 void
4338 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4339 {
4340         struct __vxge_hw_virtualpath *vpath = NULL;
4341         u64 new_count, val64, val164;
4342         struct __vxge_hw_ring *ring;
4343
4344         vpath = vp->vpath;
4345         ring = vpath->ringh;
4346
4347         new_count = readq(&vpath->vp_reg->rxdmem_size);
4348         new_count &= 0x1fff;
4349         val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4350
4351         writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4352                 &vpath->vp_reg->prc_rxd_doorbell);
4353         readl(&vpath->vp_reg->prc_rxd_doorbell);
4354
4355         val164 /= 2;
4356         val64 = readq(&vpath->vp_reg->prc_cfg6);
4357         val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4358         val64 &= 0x1ff;
4359
        /*
         * Each RxD is 4 qwords, hence the divide by 4 below
         */
4363         new_count -= (val64 + 1);
4364         val64 = min(val164, new_count) / 4;
4365
4366         ring->rxds_limit = min(ring->rxds_limit, val64);
4367         if (ring->rxds_limit < 4)
4368                 ring->rxds_limit = 4;
4369 }
4370
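/*
 * Worked example of the arithmetic above (purely illustrative, and
 * treating the VXGE_HW_* bit-field macros as identity for readability):
 * if rxdmem_size reports 4096 qwords, 4096 is posted to the doorbell
 * and val164 becomes 2048.  With an RXD_SPAT of 255, new_count becomes
 * 4096 - 256 = 3840, so min(2048, 3840) / 4 = 512 RxDs, which then
 * caps ring->rxds_limit (with a floor of 4).
 */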
/*
 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
 * This function is used to close access to a virtual path opened
 * earlier.
 */
4376 enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4377 {
4378         struct __vxge_hw_virtualpath *vpath = NULL;
4379         struct __vxge_hw_device *devh = NULL;
4380         u32 vp_id = vp->vpath->vp_id;
4381         u32 is_empty = TRUE;
4382         enum vxge_hw_status status = VXGE_HW_OK;
4383
4384         vpath = vp->vpath;
4385         devh = vpath->hldev;
4386
4387         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4388                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4389                 goto vpath_close_exit;
4390         }
4391
4392         list_del(&vp->item);
4393
4394         if (!list_empty(&vpath->vpath_handles)) {
4395                 list_add(&vp->item, &vpath->vpath_handles);
4396                 is_empty = FALSE;
4397         }
4398
4399         if (!is_empty) {
4400                 status = VXGE_HW_FAIL;
4401                 goto vpath_close_exit;
4402         }
4403
4404         devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
4405
4406         if (vpath->ringh != NULL)
4407                 __vxge_hw_ring_delete(vp);
4408
4409         if (vpath->fifoh != NULL)
4410                 __vxge_hw_fifo_delete(vp);
4411
4412         if (vpath->stats_block != NULL)
4413                 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
4414
4415         vfree(vp);
4416
4417         __vxge_hw_vp_terminate(devh, vp_id);
4418
4419         vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4420
4421 vpath_close_exit:
4422         return status;
4423 }
4424
4425 /*
4426  * vxge_hw_vpath_reset - Resets vpath
4427  * This function is used to request a reset of vpath
4428  */
4429 enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
4430 {
4431         enum vxge_hw_status status;
4432         u32 vp_id;
4433         struct __vxge_hw_virtualpath *vpath = vp->vpath;
4434
4435         vp_id = vpath->vp_id;
4436
4437         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4438                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4439                 goto exit;
4440         }
4441
4442         status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
4443         if (status == VXGE_HW_OK)
4444                 vpath->sw_stats->soft_reset_cnt++;
4445 exit:
4446         return status;
4447 }
4448
/*
 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
 * This function polls for the vpath reset completion and re-initializes
 * the vpath.
 */
4454 enum vxge_hw_status
4455 vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
4456 {
4457         struct __vxge_hw_virtualpath *vpath = NULL;
4458         enum vxge_hw_status status;
4459         struct __vxge_hw_device *hldev;
4460         u32 vp_id;
4461
4462         vp_id = vp->vpath->vp_id;
4463         vpath = vp->vpath;
4464         hldev = vpath->hldev;
4465
4466         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4467                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4468                 goto exit;
4469         }
4470
4471         status = __vxge_hw_vpath_reset_check(vpath);
4472         if (status != VXGE_HW_OK)
4473                 goto exit;
4474
4475         status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
4476         if (status != VXGE_HW_OK)
4477                 goto exit;
4478
4479         status = __vxge_hw_vpath_initialize(hldev, vp_id);
4480         if (status != VXGE_HW_OK)
4481                 goto exit;
4482
4483         if (vpath->ringh != NULL)
4484                 __vxge_hw_vpath_prc_configure(hldev, vp_id);
4485
4486         memset(vpath->hw_stats, 0,
4487                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4488
4489         memset(vpath->hw_stats_sav, 0,
4490                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4491
4492         writeq(vpath->stats_block->dma_addr,
4493                 &vpath->vp_reg->stats_cfg);
4494
4495         status = vxge_hw_vpath_stats_enable(vp);
4496
4497 exit:
4498         return status;
4499 }
4500
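/*
 * Usage sketch (illustrative only): request a vpath reset and bring
 * the path back up.  In the driver proper, recovery is typically
 * driven from the reset-complete/alarm handling rather than inline;
 * the helper name is hypothetical.
 */
static enum vxge_hw_status example_vpath_reset_cycle(
        struct __vxge_hw_vpath_handle *vp)
{
        enum vxge_hw_status status;

        status = vxge_hw_vpath_reset(vp);
        if (status != VXGE_HW_OK)
                return status;

        /* Polls for completion, re-runs the sw reset and vpath init,
         * and re-enables hardware stats DMA. */
        status = vxge_hw_vpath_recover_from_reset(vp);
        if (status == VXGE_HW_OK)
                vxge_hw_vpath_enable(vp);       /* clear the vpath reset */

        return status;
}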
4501 /*
4502  * vxge_hw_vpath_enable - Enable vpath.
4503  * This routine clears the vpath reset thereby enabling a vpath
4504  * to start forwarding frames and generating interrupts.
4505  */
4506 void
4507 vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4508 {
4509         struct __vxge_hw_device *hldev;
4510         u64 val64;
4511
4512         hldev = vp->vpath->hldev;
4513
4514         val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
4515                 1 << (16 - vp->vpath->vp_id));
4516
4517         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4518                 &hldev->common_reg->cmn_rsthdlr_cfg1);
4519 }
4520
/*
 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
 * Enable the DMA vpath statistics. Called to (re-)enable the adapter
 * to update the statistics in host memory.
 */
4526 static enum vxge_hw_status
4527 vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4528 {
4529         enum vxge_hw_status status = VXGE_HW_OK;
4530         struct __vxge_hw_virtualpath *vpath;
4531
4532         vpath = vp->vpath;
4533
4534         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4535                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4536                 goto exit;
4537         }
4538
4539         memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4540                         sizeof(struct vxge_hw_vpath_stats_hw_info));
4541
4542         status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4543 exit:
4544         return status;
4545 }
4546
/*
 * __vxge_hw_vpath_stats_access - Perform the given operation (e.g. read)
 *                           on the statistics at the given offset
 */
4551 static enum vxge_hw_status
4552 __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4553                              u32 operation, u32 offset, u64 *stat)
4554 {
4555         u64 val64;
4556         enum vxge_hw_status status = VXGE_HW_OK;
4557         struct vxge_hw_vpath_reg __iomem *vp_reg;
4558
4559         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4560                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4561                 goto vpath_stats_access_exit;
4562         }
4563
4564         vp_reg = vpath->vp_reg;
4565
4566         val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4567                  VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4568                  VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4569
4570         status = __vxge_hw_pio_mem_write64(val64,
4571                                 &vp_reg->xmac_stats_access_cmd,
4572                                 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4573                                 vpath->hldev->config.device_poll_millis);
4574
4575         if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4576                 *stat = readq(&vp_reg->xmac_stats_access_data);
4577         else
4578                 *stat = 0;
4579
4580 vpath_stats_access_exit:
4581         return status;
4582 }
4583
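/*
 * Illustrative wrapper (hypothetical, not in the driver): read a
 * single 64-bit XMAC counter through the access helper above.
 */
static enum vxge_hw_status example_read_xmac_stat(
        struct __vxge_hw_virtualpath *vpath, u32 offset, u64 *stat)
{
        /* On a successful VXGE_HW_STATS_OP_READ the helper returns the
         * latched value from xmac_stats_access_data in *stat. */
        return __vxge_hw_vpath_stats_access(vpath, VXGE_HW_STATS_OP_READ,
                                        offset, stat);
}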
4584 /*
4585  * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4586  */
4587 static enum vxge_hw_status
4588 __vxge_hw_vpath_xmac_tx_stats_get(
4589         struct __vxge_hw_virtualpath *vpath,
4590         struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4591 {
4592         u64 *val64;
4593         int i;
4594         u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4595         enum vxge_hw_status status = VXGE_HW_OK;
4596
4597         val64 = (u64 *) vpath_tx_stats;
4598
4599         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4600                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4601                 goto exit;
4602         }
4603
        /* TX stats offsets are indexed in 64-bit words, so step by one */
        for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4605                 status = __vxge_hw_vpath_stats_access(vpath,
4606                                         VXGE_HW_STATS_OP_READ,
4607                                         offset, val64);
4608                 if (status != VXGE_HW_OK)
4609                         goto exit;
4610                 offset++;
4611                 val64++;
4612         }
4613 exit:
4614         return status;
4615 }
4616
4617 /*
4618  * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4619  */
4620 static enum vxge_hw_status
4621 __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4622                                   struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4623 {
4624         u64 *val64;
4625         enum vxge_hw_status status = VXGE_HW_OK;
4626         int i;
4627         u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
4628         val64 = (u64 *) vpath_rx_stats;
4629
4630         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4631                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4632                 goto exit;
4633         }
        /* RX stats offsets are in bytes; convert to a qword index */
        for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4635                 status = __vxge_hw_vpath_stats_access(vpath,
4636                                         VXGE_HW_STATS_OP_READ,
4637                                         offset >> 3, val64);
4638                 if (status != VXGE_HW_OK)
4639                         goto exit;
4640
4641                 offset += 8;
4642                 val64++;
4643         }
4644 exit:
4645         return status;
4646 }
4647
4648 /*
4649  * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4650  */
4651 static enum vxge_hw_status
4652 __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
4653                           struct vxge_hw_vpath_stats_hw_info *hw_stats)
4654 {
4655         u64 val64;
4656         enum vxge_hw_status status = VXGE_HW_OK;
4657         struct vxge_hw_vpath_reg __iomem *vp_reg;
4658
4659         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4660                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4661                 goto exit;
4662         }
4663         vp_reg = vpath->vp_reg;
4664
4665         val64 = readq(&vp_reg->vpath_debug_stats0);
4666         hw_stats->ini_num_mwr_sent =
4667                 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4668
4669         val64 = readq(&vp_reg->vpath_debug_stats1);
4670         hw_stats->ini_num_mrd_sent =
4671                 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4672
4673         val64 = readq(&vp_reg->vpath_debug_stats2);
4674         hw_stats->ini_num_cpl_rcvd =
4675                 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4676
4677         val64 = readq(&vp_reg->vpath_debug_stats3);
4678         hw_stats->ini_num_mwr_byte_sent =
4679                 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4680
4681         val64 = readq(&vp_reg->vpath_debug_stats4);
4682         hw_stats->ini_num_cpl_byte_rcvd =
4683                 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4684
4685         val64 = readq(&vp_reg->vpath_debug_stats5);
4686         hw_stats->wrcrdtarb_xoff =
4687                 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4688
4689         val64 = readq(&vp_reg->vpath_debug_stats6);
4690         hw_stats->rdcrdtarb_xoff =
4691                 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4692
4693         val64 = readq(&vp_reg->vpath_genstats_count01);
4694         hw_stats->vpath_genstats_count0 =
4695         (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4696                 val64);
4697
4698         val64 = readq(&vp_reg->vpath_genstats_count01);
4699         hw_stats->vpath_genstats_count1 =
4700         (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4701                 val64);
4702
4703         val64 = readq(&vp_reg->vpath_genstats_count23);
4704         hw_stats->vpath_genstats_count2 =
4705         (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4706                 val64);
4707
        val64 = readq(&vp_reg->vpath_genstats_count23);
4709         hw_stats->vpath_genstats_count3 =
4710         (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4711                 val64);
4712
4713         val64 = readq(&vp_reg->vpath_genstats_count4);
4714         hw_stats->vpath_genstats_count4 =
4715         (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4716                 val64);
4717
4718         val64 = readq(&vp_reg->vpath_genstats_count5);
4719         hw_stats->vpath_genstats_count5 =
4720         (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4721                 val64);
4722
4723         status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4724         if (status != VXGE_HW_OK)
4725                 goto exit;
4726
4727         status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4728         if (status != VXGE_HW_OK)
4729                 goto exit;
4730
        /* VXGE_HW_VPATH_STATS_PIO_READ() reads the stat at the given
         * offset into val64, branching to the exit label on failure */
        VXGE_HW_VPATH_STATS_PIO_READ(
                VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4733
4734         hw_stats->prog_event_vnum0 =
4735                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4736
4737         hw_stats->prog_event_vnum1 =
4738                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4739
4740         VXGE_HW_VPATH_STATS_PIO_READ(
4741                 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4742
4743         hw_stats->prog_event_vnum2 =
4744                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4745
4746         hw_stats->prog_event_vnum3 =
4747                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4748
4749         val64 = readq(&vp_reg->rx_multi_cast_stats);
4750         hw_stats->rx_multi_cast_frame_discard =
4751                 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4752
4753         val64 = readq(&vp_reg->rx_frm_transferred);
4754         hw_stats->rx_frm_transferred =
4755                 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4756
4757         val64 = readq(&vp_reg->rxd_returned);
4758         hw_stats->rxd_returned =
4759                 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4760
4761         val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4762         hw_stats->rx_mpa_len_fail_frms =
4763                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4764         hw_stats->rx_mpa_mrk_fail_frms =
4765                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4766         hw_stats->rx_mpa_crc_fail_frms =
4767                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4768
4769         val64 = readq(&vp_reg->dbg_stats_rx_fau);
4770         hw_stats->rx_permitted_frms =
4771                 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4772         hw_stats->rx_vp_reset_discarded_frms =
4773         (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4774         hw_stats->rx_wol_frms =
4775                 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4776
4777         val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4778         hw_stats->tx_vp_reset_discarded_frms =
4779         (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4780                 val64);
4781 exit:
4782         return status;
4783 }
4784
4786 static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
4787                                         unsigned long size)
4788 {
4789         gfp_t flags;
4790         void *vaddr;
4791
4792         if (in_interrupt())
4793                 flags = GFP_ATOMIC | GFP_DMA;
4794         else
4795                 flags = GFP_KERNEL | GFP_DMA;
4796
        vaddr = kmalloc(size, flags);

        /* A NULL vaddr is handled by the callback, which drops req_out */
        vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
4800 }
4801
static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
                             struct pci_dev **p_dma_acch)
{
        /* The "access handle" slot stores the misalignment offset that
         * vxge_os_dma_malloc() applied; undo it to recover the pointer
         * originally returned by kmalloc(). */
        unsigned long misaligned = *(unsigned long *)p_dma_acch;
        u8 *tmp = (u8 *)vaddr;

        tmp -= misaligned;
        kfree((void *)tmp);
}
4810
/*
 * __vxge_hw_blockpool_create - Create block pool
 */
static enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4817                            struct __vxge_hw_blockpool *blockpool,
4818                            u32 pool_size,
4819                            u32 pool_max)
4820 {
4821         u32 i;
4822         struct __vxge_hw_blockpool_entry *entry = NULL;
4823         void *memblock;
4824         dma_addr_t dma_addr;
4825         struct pci_dev *dma_handle;
4826         struct pci_dev *acc_handle;
4827         enum vxge_hw_status status = VXGE_HW_OK;
4828
4829         if (blockpool == NULL) {
4830                 status = VXGE_HW_FAIL;
4831                 goto blockpool_create_exit;
4832         }
4833
4834         blockpool->hldev = hldev;
4835         blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4836         blockpool->pool_size = 0;
4837         blockpool->pool_max = pool_max;
4838         blockpool->req_out = 0;
4839
4840         INIT_LIST_HEAD(&blockpool->free_block_list);
4841         INIT_LIST_HEAD(&blockpool->free_entry_list);
4842
4843         for (i = 0; i < pool_size + pool_max; i++) {
4844                 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4845                                 GFP_KERNEL);
4846                 if (entry == NULL) {
4847                         __vxge_hw_blockpool_destroy(blockpool);
4848                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4849                         goto blockpool_create_exit;
4850                 }
4851                 list_add(&entry->item, &blockpool->free_entry_list);
4852         }
4853
4854         for (i = 0; i < pool_size; i++) {
4855
4856                 memblock = vxge_os_dma_malloc(
4857                                 hldev->pdev,
4858                                 VXGE_HW_BLOCK_SIZE,
4859                                 &dma_handle,
4860                                 &acc_handle);
4861
4862                 if (memblock == NULL) {
4863                         __vxge_hw_blockpool_destroy(blockpool);
4864                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4865                         goto blockpool_create_exit;
4866                 }
4867
4868                 dma_addr = pci_map_single(hldev->pdev, memblock,
4869                                 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4870
4871                 if (unlikely(pci_dma_mapping_error(hldev->pdev,
4872                                 dma_addr))) {
4873
4874                         vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4875                         __vxge_hw_blockpool_destroy(blockpool);
4876                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4877                         goto blockpool_create_exit;
4878                 }
4879
                /* The preallocation loop above guarantees a free entry
                 * exists here for each of the pool_size blocks */
                if (!list_empty(&blockpool->free_entry_list))
                        entry = list_first_entry(&blockpool->free_entry_list,
                                        struct __vxge_hw_blockpool_entry,
                                        item);
4885
4886                 if (entry == NULL)
4887                         entry =
4888                             kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4889                                         GFP_KERNEL);
4890                 if (entry != NULL) {
4891                         list_del(&entry->item);
4892                         entry->length = VXGE_HW_BLOCK_SIZE;
4893                         entry->memblock = memblock;
4894                         entry->dma_addr = dma_addr;
4895                         entry->acc_handle = acc_handle;
4896                         entry->dma_handle = dma_handle;
4897                         list_add(&entry->item,
4898                                           &blockpool->free_block_list);
4899                         blockpool->pool_size++;
4900                 } else {
4901                         __vxge_hw_blockpool_destroy(blockpool);
4902                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4903                         goto blockpool_create_exit;
4904                 }
4905         }
4906
4907 blockpool_create_exit:
4908         return status;
4909 }
4910
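/*
 * Usage sketch (illustrative only): build a pool of DMA-able blocks
 * and tear it down again.  The pool sizes are hypothetical stand-ins
 * for the driver's configured values, and the helper name is made up.
 */
static enum vxge_hw_status example_pool_setup(struct __vxge_hw_device *hldev)
{
        enum vxge_hw_status status;

        /* pool_size blocks are mapped up front; pool_max bounds how
         * many blocks the pool may hold before extras are released. */
        status = __vxge_hw_blockpool_create(hldev, &hldev->block_pool,
                                        16 /* pool_size */, 32 /* pool_max */);
        if (status != VXGE_HW_OK)
                return status;

        /* ... pool in use ... */

        __vxge_hw_blockpool_destroy(&hldev->block_pool);
        return VXGE_HW_OK;
}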
4911 /*
4912  * __vxge_hw_blockpool_destroy - Deallocates the block pool
4913  */
4914
static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
        struct __vxge_hw_device *hldev;
        struct list_head *p, *n;

        if (blockpool == NULL)
                return;
4926
4927         hldev = blockpool->hldev;
4928
4929         list_for_each_safe(p, n, &blockpool->free_block_list) {
4930
4931                 pci_unmap_single(hldev->pdev,
4932                         ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4933                         ((struct __vxge_hw_blockpool_entry *)p)->length,
4934                         PCI_DMA_BIDIRECTIONAL);
4935
4936                 vxge_os_dma_free(hldev->pdev,
4937                         ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4938                         &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
4939
4940                 list_del(
4941                         &((struct __vxge_hw_blockpool_entry *)p)->item);
4942                 kfree(p);
4943                 blockpool->pool_size--;
4944         }
4945
4946         list_for_each_safe(p, n, &blockpool->free_entry_list) {
4947                 list_del(
4948                         &((struct __vxge_hw_blockpool_entry *)p)->item);
4949                 kfree((void *)p);
4950         }
4954 }
4955
4956 /*
4957  * __vxge_hw_blockpool_blocks_add - Request additional blocks
4958  */
4959 static
4960 void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
4961 {
4962         u32 nreq = 0, i;
4963
4964         if ((blockpool->pool_size  +  blockpool->req_out) <
4965                 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
4966                 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
4967                 blockpool->req_out += nreq;
4968         }
4969
4970         for (i = 0; i < nreq; i++)
4971                 vxge_os_dma_malloc_async(
4972                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4973                         blockpool->hldev, VXGE_HW_BLOCK_SIZE);
4974 }
4975
4976 /*
4977  * __vxge_hw_blockpool_blocks_remove - Free additional blocks
4978  */
4979 static
4980 void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4981 {
4982         struct list_head *p, *n;
4983
4984         list_for_each_safe(p, n, &blockpool->free_block_list) {
4985
4986                 if (blockpool->pool_size < blockpool->pool_max)
4987                         break;
4988
4989                 pci_unmap_single(
4990                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4991                         ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4992                         ((struct __vxge_hw_blockpool_entry *)p)->length,
4993                         PCI_DMA_BIDIRECTIONAL);
4994
4995                 vxge_os_dma_free(
4996                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4997                         ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4998                         &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
4999
5000                 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
5001
5002                 list_add(p, &blockpool->free_entry_list);
5003
5004                 blockpool->pool_size--;
5005
5006         }
5007 }
5008
5009 /*
5010  * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
5011  * Adds a block to block pool
5012  */
5013 static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
5014                                         void *block_addr,
5015                                         u32 length,
5016                                         struct pci_dev *dma_h,
5017                                         struct pci_dev *acc_handle)
5018 {
5019         struct __vxge_hw_blockpool  *blockpool;
5020         struct __vxge_hw_blockpool_entry  *entry = NULL;
5021         dma_addr_t dma_addr;
5022         enum vxge_hw_status status = VXGE_HW_OK;
5024
5025         blockpool = &devh->block_pool;
5026
5027         if (block_addr == NULL) {
5028                 blockpool->req_out--;
5029                 status = VXGE_HW_FAIL;
5030                 goto exit;
5031         }
5032
5033         dma_addr = pci_map_single(devh->pdev, block_addr, length,
5034                                 PCI_DMA_BIDIRECTIONAL);
5035
5036         if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
5037
5038                 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
5039                 blockpool->req_out--;
5040                 status = VXGE_HW_FAIL;
5041                 goto exit;
5042         }
5043
5044
5045         if (!list_empty(&blockpool->free_entry_list))
5046                 entry = (struct __vxge_hw_blockpool_entry *)
5047                         list_first_entry(&blockpool->free_entry_list,
5048                                 struct __vxge_hw_blockpool_entry,
5049                                 item);
5050
5051         if (entry == NULL)
5052                 entry = (struct __vxge_hw_blockpool_entry *)
5053                         vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
5054         else
5055                 list_del(&entry->item);
5056
5057         if (entry != NULL) {
5058                 entry->length = length;
5059                 entry->memblock = block_addr;
5060                 entry->dma_addr = dma_addr;
5061                 entry->acc_handle = acc_handle;
5062                 entry->dma_handle = dma_h;
5063                 list_add(&entry->item, &blockpool->free_block_list);
5064                 blockpool->pool_size++;
5065                 status = VXGE_HW_OK;
5066         } else
5067                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5068
5069         blockpool->req_out--;
5072 exit:
5073         return;
5074 }
5075
5076 /*
5077  * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
5078  * Allocates a block of memory of given size, either from block pool
5079  * or by calling vxge_os_dma_malloc()
5080  */
static void *
__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
5083                                 struct vxge_hw_mempool_dma *dma_object)
5084 {
5085         struct __vxge_hw_blockpool_entry *entry = NULL;
5086         struct __vxge_hw_blockpool  *blockpool;
5087         void *memblock = NULL;
5088         enum vxge_hw_status status = VXGE_HW_OK;
5089
5090         blockpool = &devh->block_pool;
5091
5092         if (size != blockpool->block_size) {
5093
5094                 memblock = vxge_os_dma_malloc(devh->pdev, size,
5095                                                 &dma_object->handle,
5096                                                 &dma_object->acc_handle);
5097
5098                 if (memblock == NULL) {
5099                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5100                         goto exit;
5101                 }
5102
5103                 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
5104                                         PCI_DMA_BIDIRECTIONAL);
5105
5106                 if (unlikely(pci_dma_mapping_error(devh->pdev,
5107                                 dma_object->addr))) {
5108                         vxge_os_dma_free(devh->pdev, memblock,
5109                                 &dma_object->acc_handle);
5110                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5111                         goto exit;
5112                 }
5113
5114         } else {
5115
5116                 if (!list_empty(&blockpool->free_block_list))
5117                         entry = (struct __vxge_hw_blockpool_entry *)
5118                                 list_first_entry(&blockpool->free_block_list,
5119                                         struct __vxge_hw_blockpool_entry,
5120                                         item);
5121
5122                 if (entry != NULL) {
5123                         list_del(&entry->item);
5124                         dma_object->addr = entry->dma_addr;
5125                         dma_object->handle = entry->dma_handle;
5126                         dma_object->acc_handle = entry->acc_handle;
5127                         memblock = entry->memblock;
5128
5129                         list_add(&entry->item,
5130                                 &blockpool->free_entry_list);
5131                         blockpool->pool_size--;
5132                 }
5133
5134                 if (memblock != NULL)
5135                         __vxge_hw_blockpool_blocks_add(blockpool);
5136         }
5137 exit:
5138         return memblock;
5139 }
5140
/*
 * __vxge_hw_blockpool_free - Frees the memory allocated with
 *                            __vxge_hw_blockpool_malloc
 */
static void
__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5147                         void *memblock, u32 size,
5148                         struct vxge_hw_mempool_dma *dma_object)
5149 {
5150         struct __vxge_hw_blockpool_entry *entry = NULL;
5151         struct __vxge_hw_blockpool  *blockpool;
5152         enum vxge_hw_status status = VXGE_HW_OK;
5153
5154         blockpool = &devh->block_pool;
5155
5156         if (size != blockpool->block_size) {
5157                 pci_unmap_single(devh->pdev, dma_object->addr, size,
5158                         PCI_DMA_BIDIRECTIONAL);
5159                 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
5160         } else {
5161
5162                 if (!list_empty(&blockpool->free_entry_list))
5163                         entry = (struct __vxge_hw_blockpool_entry *)
5164                                 list_first_entry(&blockpool->free_entry_list,
5165                                         struct __vxge_hw_blockpool_entry,
5166                                         item);
5167
5168                 if (entry == NULL)
5169                         entry = (struct __vxge_hw_blockpool_entry *)
5170                                 vmalloc(sizeof(
5171                                         struct __vxge_hw_blockpool_entry));
5172                 else
5173                         list_del(&entry->item);
5174
5175                 if (entry != NULL) {
5176                         entry->length = size;
5177                         entry->memblock = memblock;
5178                         entry->dma_addr = dma_object->addr;
5179                         entry->acc_handle = dma_object->acc_handle;
5180                         entry->dma_handle = dma_object->handle;
5181                         list_add(&entry->item,
5182                                         &blockpool->free_block_list);
5183                         blockpool->pool_size++;
5184                         status = VXGE_HW_OK;
5185                 } else
5186                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5187
5188                 if (status == VXGE_HW_OK)
5189                         __vxge_hw_blockpool_blocks_remove(blockpool);
5190         }
5191 }
5192
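/*
 * Usage sketch (illustrative only): allocate and release one
 * block-sized buffer through the pool.  Block-sized requests come
 * from free_block_list; other sizes fall through to a fresh DMA
 * mapping, so the same size must be passed back when freeing.
 * The helper name is hypothetical.
 */
static void example_pool_alloc_free(struct __vxge_hw_device *devh)
{
        struct vxge_hw_mempool_dma dma_object;
        void *buf;

        buf = __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE,
                                        &dma_object);
        if (buf == NULL)
                return;

        /* dma_object.addr now holds the bus address for the device */
        __vxge_hw_blockpool_free(devh, buf, VXGE_HW_BLOCK_SIZE,
                                        &dma_object);
}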
5193 /*
5194  * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
5195  * This function allocates a block from block pool or from the system
5196  */
static struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5199 {
5200         struct __vxge_hw_blockpool_entry *entry = NULL;
5201         struct __vxge_hw_blockpool  *blockpool;
5202
5203         blockpool = &devh->block_pool;
5204
5205         if (size == blockpool->block_size) {
5206
5207                 if (!list_empty(&blockpool->free_block_list))
5208                         entry = (struct __vxge_hw_blockpool_entry *)
5209                                 list_first_entry(&blockpool->free_block_list,
5210                                         struct __vxge_hw_blockpool_entry,
5211                                         item);
5212
5213                 if (entry != NULL) {
5214                         list_del(&entry->item);
5215                         blockpool->pool_size--;
5216                 }
5217         }
5218
5219         if (entry != NULL)
5220                 __vxge_hw_blockpool_blocks_add(blockpool);
5221
5222         return entry;
5223 }
5224
5225 /*
5226  * __vxge_hw_blockpool_block_free - Frees a block from block pool
5227  * @devh: Hal device
5228  * @entry: Entry of block to be freed
5229  *
5230  * This function frees a block from block pool
5231  */
static void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5234                         struct __vxge_hw_blockpool_entry *entry)
5235 {
5236         struct __vxge_hw_blockpool  *blockpool;
5237
5238         blockpool = &devh->block_pool;
5239
5240         if (entry->length == blockpool->block_size) {
5241                 list_add(&entry->item, &blockpool->free_block_list);
5242                 blockpool->pool_size++;
5243         }
5244
5245         __vxge_hw_blockpool_blocks_remove(blockpool);
5246 }
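
/*
 * Usage sketch (illustrative only): this is the same pattern used for
 * the stats block in vxge_hw_vpath_open()/vxge_hw_vpath_close() above.
 * The helper name is hypothetical.
 */
static enum vxge_hw_status example_stats_block(struct __vxge_hw_device *devh)
{
        struct __vxge_hw_blockpool_entry *blk;

        blk = __vxge_hw_blockpool_block_allocate(devh, VXGE_HW_BLOCK_SIZE);
        if (blk == NULL)
                return VXGE_HW_ERR_OUT_OF_MEMORY;

        /* blk->memblock is the CPU address; blk->dma_addr is the bus
         * address handed to hardware (see the stats_cfg write above). */
        __vxge_hw_blockpool_block_free(devh, blk);
        return VXGE_HW_OK;
}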