/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#ifndef VXGE_CONFIG_H
#define VXGE_CONFIG_H
#include <linux/list.h>
#include <linux/slab.h>

#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 128
#endif

#define vxge_os_vaprintf(level, mask, fmt, ...) { \
	char buff[255]; \
	snprintf(buff, sizeof(buff), fmt, __VA_ARGS__); \
	printk("%s\n", buff); \
}

#ifndef VXGE_ALIGN
#define VXGE_ALIGN(adrs, size) \
	(((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
#endif
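
/*
 * Note: VXGE_ALIGN() returns the number of padding bytes needed to bring
 * @adrs up to the next @size boundary (0 if already aligned), not the
 * aligned address itself. For a power-of-two size, e.g.:
 *
 *	VXGE_ALIGN(0x1003, 8) == 5	(0x1003 + 5 == 0x1008)
 *	VXGE_ALIGN(0x1000, 8) == 0	(already aligned)
 */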

#define VXGE_HW_MIN_MTU 68
#define VXGE_HW_MAX_MTU 9600
#define VXGE_HW_DEFAULT_MTU 1500

#ifdef VXGE_DEBUG_ASSERT

/**
 * vxge_assert
 * @test: C-condition to check
 *
 * This macro implements a traditional assert. By default assertions are
 * enabled. They can be disabled by undefining the VXGE_DEBUG_ASSERT macro
 * at compile time.
 */
#define vxge_assert(test) { \
	if (!(test)) \
		vxge_os_bug("bad cond: "#test" at %s:%d\n", \
				__FILE__, __LINE__); }
#else
#define vxge_assert(test)
#endif /* end of VXGE_DEBUG_ASSERT */

/**
 * enum vxge_debug_level - Debug verbosity levels.
 * @VXGE_NONE: debug disabled
 * @VXGE_ERR: all errors are logged
 * @VXGE_TRACE: all errors plus all kinds of verbose tracing printouts
 * are logged. Very noisy.
 *
 * This enumeration is used to switch between debug levels at runtime,
 * provided the DEBUG macro was defined at compilation time. If DEBUG is
 * not defined, the debug code is compiled out.
 */
enum vxge_debug_level {
	VXGE_NONE = 0,
	VXGE_TRACE = 1,
	VXGE_ERR = 2
};

#define NULL_VPID 0xFFFFFFFF
#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
#define VXGE_DEBUG_MODULE_MASK 0xffffffff
#define VXGE_DEBUG_TRACE_MASK 0xffffffff
#define VXGE_DEBUG_ERR_MASK 0xffffffff
#define VXGE_DEBUG_MASK 0x000001ff
#else
#define VXGE_DEBUG_MODULE_MASK 0x20000000
#define VXGE_DEBUG_TRACE_MASK 0x20000000
#define VXGE_DEBUG_ERR_MASK 0x20000000
#define VXGE_DEBUG_MASK 0x00000001
#endif

/*
 * @VXGE_COMPONENT_LL: do debug for vxge link layer module
 * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
 *
 * This enumeration is used to distinguish modules or libraries during
 * compilation and at runtime. The Makefile must declare the
 * VXGE_DEBUG_MODULE_MASK macro and set it to a proper value.
 */
#define VXGE_COMPONENT_LL 0x20000000
#define VXGE_COMPONENT_ALL 0xffffffff

#define VXGE_HW_BASE_INF 100
#define VXGE_HW_BASE_ERR 200
#define VXGE_HW_BASE_BADCFG 300

enum vxge_hw_status {
	VXGE_HW_OK = 0,
	VXGE_HW_FAIL = 1,
	VXGE_HW_PENDING = 2,
	VXGE_HW_COMPLETIONS_REMAIN = 3,

	VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
	VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,

	VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
	VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
	VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
	VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
	VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
	VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
	VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
	VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
	VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
	VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
	VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
	VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
	VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
	VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
	VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
	VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
	VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17,
	VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
	VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
	VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
	VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
	VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,

	VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
	VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
	VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
	VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
	VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
	VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6,
	VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7,

	VXGE_HW_EOF_TRACE_BUF = -1
};

/**
 * enum vxge_hw_device_link_state - Link state enumeration.
 * @VXGE_HW_LINK_NONE: Invalid link state.
 * @VXGE_HW_LINK_DOWN: Link is down.
 * @VXGE_HW_LINK_UP: Link is up.
 *
 */
enum vxge_hw_device_link_state {
	VXGE_HW_LINK_NONE,
	VXGE_HW_LINK_DOWN,
	VXGE_HW_LINK_UP
};

/**
 * struct vxge_hw_device_date - Date Format
 * @day: Day
 * @month: Month
 * @year: Year
 * @date: Date in string format
 *
 * Structure for returning date
 */

#define VXGE_HW_FW_STRLEN 32
struct vxge_hw_device_date {
	u32 day;
	u32 month;
	u32 year;
	char date[VXGE_HW_FW_STRLEN];
};

struct vxge_hw_device_version {
	u32 major;
	u32 minor;
	u32 build;
	char version[VXGE_HW_FW_STRLEN];
};

/**
 * struct vxge_hw_fifo_config - Configuration of fifo.
 * @enable: Is this fifo to be commissioned
 * @fifo_blocks: Numbers of TxDL (that is, lists of Tx descriptors)
 * blocks per queue.
 * @max_frags: Max number of Tx buffers per TxDL (that is, per single
 * transmit operation).
 * No more than 256 transmit buffers can be specified.
 * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
 * bytes. Setting @memblock_size to page size ensures
 * by-page allocation of descriptors. 128K bytes is the
 * maximum supported block size.
 * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
 * (e.g., to align on a cache line).
 * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
 * Use 0 otherwise.
 * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
 * which generally improves latency of the host bridge operation
 * (see PCI specification). For valid values please refer
 * to struct vxge_hw_fifo_config{} in the driver sources.
 *
 * Configuration of all Titan fifos.
 * Note: Valid (min, max) range for each attribute is specified in the body of
 * the struct vxge_hw_fifo_config{} structure.
 */
struct vxge_hw_fifo_config {
	u32 enable;
#define VXGE_HW_FIFO_ENABLE 1
#define VXGE_HW_FIFO_DISABLE 0

	u32 fifo_blocks;
#define VXGE_HW_MIN_FIFO_BLOCKS 2
#define VXGE_HW_MAX_FIFO_BLOCKS 128

	u32 max_frags;
#define VXGE_HW_MIN_FIFO_FRAGS 1
#define VXGE_HW_MAX_FIFO_FRAGS 256

	u32 memblock_size;
#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE
#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072
#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096

	u32 alignment_size;
#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0
#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536
#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE

	u32 intr;
#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1
#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0
#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0

	u32 no_snoop_bits;
#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0
#define VXGE_HW_FIFO_NO_SNOOP_TXD 1
#define VXGE_HW_FIFO_NO_SNOOP_FRM 2
#define VXGE_HW_FIFO_NO_SNOOP_ALL 3
#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0

};
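
/*
 * Illustrative sketch only (the helper below is hypothetical, not part of
 * this driver's API): filling in a fifo configuration using the bounds and
 * defaults defined above.
 *
 *	static void example_fifo_cfg(struct vxge_hw_fifo_config *cfg)
 *	{
 *		cfg->enable         = VXGE_HW_FIFO_ENABLE;
 *		cfg->fifo_blocks    = VXGE_HW_MIN_FIFO_BLOCKS;
 *		cfg->max_frags      = VXGE_HW_MAX_FIFO_FRAGS;
 *		cfg->memblock_size  = VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
 *		cfg->alignment_size = VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
 *		cfg->intr           = VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
 *		cfg->no_snoop_bits  = VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
 *	}
 */
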
/**
 * struct vxge_hw_ring_config - Ring configurations.
 * @enable: Is this ring to be commissioned
 * @ring_blocks: Numbers of RxD blocks in the ring
 * @buffer_mode: Receive buffer mode (1, 2, 3, or 5); for details please refer
 * to Titan User Guide.
 * @scatter_mode: Titan supports two receive scatter modes: A and B.
 * For details please refer to Titan User Guide.
 * @rx_timer_val: The number of 32ns periods that would be counted between two
 * timer interrupts.
 * @greedy_return: If Set, it forces the device to return absolutely all RxD
 * that are consumed and still on board when a timer interrupt
 * triggers. If Clear, then if the device has already returned
 * RxD before the current timer interrupt triggered and after the
 * previous timer interrupt triggered, then the device is not
 * forced to return the rest of the consumed RxD that it has
 * on board that account for a byte count less than the one
 * programmed into the PRC_CFG6.RXD_CRXDT field
 * @rx_timer_ci: TBD
 * @backoff_interval_us: Time (in microseconds), after which Titan
 * tries to download RxDs posted by the host.
 * Note that the "backoff" does not happen if the host posts receive
 * descriptors in a timely fashion.
 *
 * Ring configuration.
 */
struct vxge_hw_ring_config {
	u32 enable;
#define VXGE_HW_RING_ENABLE 1
#define VXGE_HW_RING_DISABLE 0
#define VXGE_HW_RING_DEFAULT 1

	u32 ring_blocks;
#define VXGE_HW_MIN_RING_BLOCKS 1
#define VXGE_HW_MAX_RING_BLOCKS 128
#define VXGE_HW_DEF_RING_BLOCKS 2

	u32 buffer_mode;
#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3
#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5
#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1

	u32 scatter_mode;
#define VXGE_HW_RING_SCATTER_MODE_A 0
#define VXGE_HW_RING_SCATTER_MODE_B 1
#define VXGE_HW_RING_SCATTER_MODE_C 2
#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff

	u64 rxds_limit;
#define VXGE_HW_DEF_RING_RXDS_LIMIT 44
};
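
/*
 * Illustrative sketch only (hypothetical values): a ring commissioned in
 * 1-buffer mode with the defaults defined above.
 *
 *	cfg->enable       = VXGE_HW_RING_ENABLE;
 *	cfg->ring_blocks  = VXGE_HW_DEF_RING_BLOCKS;
 *	cfg->buffer_mode  = VXGE_HW_RING_RXD_BUFFER_MODE_1;
 *	cfg->scatter_mode = VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
 *	cfg->rxds_limit   = VXGE_HW_DEF_RING_RXDS_LIMIT;
 */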

/**
 * struct vxge_hw_vp_config - Configuration of virtual path
 * @vp_id: Virtual Path Id
 * @min_bandwidth: Minimum Guaranteed bandwidth
 * @ring: See struct vxge_hw_ring_config{}.
 * @fifo: See struct vxge_hw_fifo_config{}.
 * @tti: Configuration of interrupt associated with Transmit.
 * see struct vxge_hw_tim_intr_config();
 * @rti: Configuration of interrupt associated with Receive.
 * see struct vxge_hw_tim_intr_config();
 * @mtu: mtu size used on this port.
 * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
 * remove the VLAN tag from all received tagged frames that are not
 * replicated at the internal L2 switch.
 * 0 - Do not strip the VLAN tag.
 * 1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are
 * always placed into the RxDMA descriptor.
 *
 * This structure is used by the driver to pass the configuration parameters
 * used to configure a Virtual Path.
 */
struct vxge_hw_vp_config {
	u32 vp_id;

#define VXGE_HW_VPATH_PRIORITY_MIN 0
#define VXGE_HW_VPATH_PRIORITY_MAX 16
#define VXGE_HW_VPATH_PRIORITY_DEFAULT 0

	u32 min_bandwidth;
#define VXGE_HW_VPATH_BANDWIDTH_MIN 0
#define VXGE_HW_VPATH_BANDWIDTH_MAX 100
#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0

	struct vxge_hw_ring_config ring;
	struct vxge_hw_fifo_config fifo;
	struct vxge_hw_tim_intr_config tti;
	struct vxge_hw_tim_intr_config rti;

	u32 mtu;
#define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU
#define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU
#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff

	u32 rpa_strip_vlan_tag;
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff

};
/**
 * struct vxge_hw_device_config - Device configuration.
 * @dma_blockpool_initial: Initial size of DMA Pool
 * @dma_blockpool_max: Maximum blocks in DMA pool
 * @intr_mode: Line, or MSI-X interrupt.
 *
 * @rth_en: Enable Receive Traffic Hashing (RTH) using IT (Indirection Table).
 * @rth_it_type: RTH IT table programming type
 * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
 * @vp_config: Configuration for virtual paths
 * @device_poll_millis: Specify the interval (in milliseconds)
 * to wait for register reads
 *
 * Titan configuration.
 * Contains per-device configuration parameters, including:
 * - stats sampling interval, etc.
 *
 * In addition, struct vxge_hw_device_config{} includes "subordinate"
 * configurations, including:
 * - fifos and rings;
 * - MAC (done at firmware level).
 *
 * See Titan User Guide for more details.
 * Note: Valid (min, max) range for each attribute is specified in the body of
 * the struct vxge_hw_device_config{} structure. Please refer to the
 * corresponding include file.
 * See also: struct vxge_hw_tim_intr_config{}.
 */
struct vxge_hw_device_config {
	u32 dma_blockpool_initial;
	u32 dma_blockpool_max;
#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096

#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2

	u32 intr_mode;
#define VXGE_HW_INTR_MODE_IRQLINE 0
#define VXGE_HW_INTR_MODE_MSIX 1
#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2

#define VXGE_HW_INTR_MODE_DEF 0

	u32 rth_en;
#define VXGE_HW_RTH_DISABLE 0
#define VXGE_HW_RTH_ENABLE 1
#define VXGE_HW_RTH_DEFAULT 0

	u32 rth_it_type;
#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0

	u32 rts_mac_en;
#define VXGE_HW_RTS_MAC_DISABLE 0
#define VXGE_HW_RTS_MAC_ENABLE 1
#define VXGE_HW_RTS_MAC_DEFAULT 0

	struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];

	u32 device_poll_millis;
#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000

};
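
/*
 * Illustrative sketch only (hypothetical values): selecting MSI-X
 * interrupts, enabling RTH and using the default register-poll interval,
 * via the fields defined above.
 *
 *	dev_cfg->intr_mode          = VXGE_HW_INTR_MODE_MSIX;
 *	dev_cfg->rth_en             = VXGE_HW_RTH_ENABLE;
 *	dev_cfg->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
 */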

/**
 * function vxge_uld_link_up_f - Link-Up callback provided by driver.
 * @devh: HW device handle.
 * Link-up notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
 * vxge_hw_driver_initialize().
 */

/**
 * function vxge_uld_link_down_f - Link-Down callback provided by
 * driver.
 * @devh: HW device handle.
 *
 * Link-Down notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
 * vxge_hw_driver_initialize().
 */

/**
 * function vxge_uld_crit_err_f - Critical Error notification callback.
 * @devh: HW device handle.
 * (typically - at HW device initialization time).
 * @type: Enumerated hw error, e.g.: double ECC.
 * @ext_data: Extended data. The contents depend on the @type.
 *
 * Critical-error notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
 * vxge_hw_driver_initialize().
 */

/**
 * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
 * @link_up: See vxge_uld_link_up_f{}.
 * @link_down: See vxge_uld_link_down_f{}.
 * @crit_err: See vxge_uld_crit_err_f{}.
 *
 * Driver slow-path (per-driver) callbacks.
 * Implemented by driver and provided to HW via
 * vxge_hw_driver_initialize().
 * Note that these callbacks are not mandatory: HW will not invoke
 * a callback if NULL is specified.
 *
 * See also: vxge_hw_driver_initialize().
 */
struct vxge_hw_uld_cbs {

	void (*link_up)(struct __vxge_hw_device *devh);
	void (*link_down)(struct __vxge_hw_device *devh);
	void (*crit_err)(struct __vxge_hw_device *devh,
			enum vxge_hw_event type, u64 ext_data);
};
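
/*
 * Illustrative sketch (my_link_up/my_link_down/my_crit_err are hypothetical
 * driver functions): wiring up the slow-path callbacks. Any member left
 * NULL is simply never invoked.
 *
 *	static struct vxge_hw_uld_cbs my_cbs = {
 *		.link_up   = my_link_up,
 *		.link_down = my_link_down,
 *		.crit_err  = my_crit_err,
 *	};
 */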

/*
 * struct __vxge_hw_blockpool_entry - Block private data structure
 * @item: List header used to link.
 * @length: Length of the block
 * @memblock: Virtual address block
 * @dma_addr: DMA Address of the block.
 * @dma_handle: DMA handle of the block.
 * @acc_handle: DMA acc handle
 *
 * Each block is allocated with a header so that the blocks can be linked
 * into a list.
 */
struct __vxge_hw_blockpool_entry {
	struct list_head item;
	u32 length;
	void *memblock;
	dma_addr_t dma_addr;
	struct pci_dev *dma_handle;
	struct pci_dev *acc_handle;
};

/*
 * struct __vxge_hw_blockpool - Block Pool
 * @hldev: HW device
 * @block_size: size of each block.
 * @pool_size: Number of blocks in the pool
 * @pool_max: Maximum number of blocks above which to free additional blocks
 * @req_out: Number of block requests outstanding with the OS
 * @free_block_list: List of free blocks
 * @free_entry_list: List of free blockpool entries
 *
 * Block pool contains the preallocated DMA blocks.
 */
struct __vxge_hw_blockpool {
	struct __vxge_hw_device *hldev;
	u32 block_size;
	u32 pool_size;
	u32 pool_max;
	u32 req_out;
	struct list_head free_block_list;
	struct list_head free_entry_list;
};

/*
 * enum __vxge_hw_channel_type - Enumerated channel types.
 * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
 * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
 * @VXGE_HW_CHANNEL_TYPE_RING: ring.
 * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
 * (and recognized) channel types. Currently: 2.
 *
 * Enumerated channel types. Currently there are only two link-layer
 * channels - Titan fifo and Titan ring. In the future the list will grow.
 */
enum __vxge_hw_channel_type {
	VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0,
	VXGE_HW_CHANNEL_TYPE_FIFO = 1,
	VXGE_HW_CHANNEL_TYPE_RING = 2,
	VXGE_HW_CHANNEL_TYPE_MAX = 3
};

/*
 * struct __vxge_hw_channel
 * @item: List item; used to maintain a list of open channels.
 * @type: Channel type. See enum vxge_hw_channel_type{}.
 * @devh: Device handle. HW device object that contains _this_ channel.
 * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
 * @length: Channel length. Currently allocated number of descriptors.
 * The channel length "grows" when more descriptors get allocated.
 * See _hw_mempool_grow.
 * @reserve_arr: Reserve array. Contains descriptors that can be reserved
 * by driver for the subsequent send or receive operation.
 * See vxge_hw_fifo_txdl_reserve(),
 * vxge_hw_ring_rxd_reserve().
 * @reserve_ptr: Current pointer in the reserve array
 * @reserve_top: Reserve top gives the maximum number of dtrs available in
 * reserve array.
 * @work_arr: Work array. Contains descriptors posted to the channel.
 * Note that at any point in time @work_arr contains 3 types of
 * descriptors:
 * 1) posted but not yet consumed by Titan device;
 * 2) consumed but not yet completed;
 * 3) completed but not yet freed
 * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
 * @post_index: Post index. At any point in time points on the
 * position in the channel, which will contain the next to-be-posted
 * descriptor.
 * @compl_index: Completion index. At any point in time points on the
 * position in the channel, which will contain the next
 * to-be-completed descriptor.
 * @free_arr: Free array. Contains completed descriptors that were freed
 * (i.e., handed over back to HW) by driver.
 * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
 * @free_ptr: current pointer in free array
 * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
 * to store per-operation control information.
 * @stats: Pointer to common statistics
 * @userdata: Per-channel opaque (void*) user-defined context, which may be
 * driver object, ULP connection, etc.
 * Once channel is open, @userdata is passed back to user via
 * vxge_hw_channel_callback_f.
 *
 * HW channel object.
 *
 * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag
 */
struct __vxge_hw_channel {
	struct list_head item;
	enum __vxge_hw_channel_type type;
	struct __vxge_hw_device *devh;
	struct __vxge_hw_vpath_handle *vph;
	u32 length;
	u32 vp_id;
	void **reserve_arr;
	u32 reserve_ptr;
	u32 reserve_top;
	void **work_arr;
	u32 post_index ____cacheline_aligned;
	u32 compl_index ____cacheline_aligned;
	void **free_arr;
	u32 free_ptr;
	void **orig_arr;
	u32 per_dtr_space;
	void *userdata;
	struct vxge_hw_common_reg __iomem *common_reg;
	u32 first_vp_id;
	struct vxge_hw_vpath_stats_sw_common_info *stats;

} ____cacheline_aligned;

/*
 * struct __vxge_hw_virtualpath - Virtual Path
 *
 * @vp_id: Virtual path id
 * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver
 * @hldev: Hal device
 * @vp_config: Virtual Path Config
 * @vp_reg: VPATH Register map address in BAR0
 * @vpmgmt_reg: VPATH_MGMT register map address
 * @max_mtu: Max mtu that can be supported
 * @vsport_number: vsport attached to this vpath
 * @max_kdfc_db: Maximum kernel mode doorbells
 * @max_nofl_db: Maximum non offload doorbells
 * @tx_intr_num: Interrupt Number associated with the TX
 * @ringh: Ring Queue
 * @fifoh: FIFO Queue
 * @vpath_handles: Virtual Path handles list
 * @stats_block: Memory for DMAing stats
 * @stats: Vpath statistics
 *
 * Virtual path structure to encapsulate the data related to a virtual path.
 * Virtual paths are allocated by the HW upon getting configuration from the
 * driver and inserted into the list of virtual paths.
 */
struct __vxge_hw_virtualpath {
	u32 vp_id;

	u32 vp_open;
#define VXGE_HW_VP_NOT_OPEN 0
#define VXGE_HW_VP_OPEN 1

	struct __vxge_hw_device *hldev;
	struct vxge_hw_vp_config *vp_config;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;

	u32 max_mtu;
	u32 vsport_number;
	u32 max_kdfc_db;
	u32 max_nofl_db;

	struct __vxge_hw_ring *____cacheline_aligned ringh;
	struct __vxge_hw_fifo *____cacheline_aligned fifoh;
	struct list_head vpath_handles;
	struct __vxge_hw_blockpool_entry *stats_block;
	struct vxge_hw_vpath_stats_hw_info *hw_stats;
	struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
};

/*
 * struct __vxge_hw_vpath_handle - List item to store callback information
 * @item: List head to keep the item in linked list
 * @vpath: Virtual path to which this item belongs
 *
 * This structure is used to store the callback information.
 */
struct __vxge_hw_vpath_handle {
	struct list_head item;
	struct __vxge_hw_virtualpath *vpath;
};

/*
 * struct __vxge_hw_device
 *
 * HW device object.
 */
/**
 * struct __vxge_hw_device - Hal device object
 * @magic: Magic Number
 * @device_id: PCI Device Id of the adapter
 * @major_revision: PCI Device major revision
 * @minor_revision: PCI Device minor revision
 * @bar0: BAR0 virtual address.
 * @pdev: Physical device handle
 * @config: Configuration passed by the LL driver at initialization
 * @link_state: Link state
 *
 * HW device object. Represents Titan adapter
 */
struct __vxge_hw_device {
	u32 magic;
#define VXGE_HW_DEVICE_MAGIC 0x12345678
#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
	u16 device_id;
	u8 major_revision;
	u8 minor_revision;
	void __iomem *bar0;
	struct pci_dev *pdev;
	struct net_device *ndev;
	struct vxge_hw_device_config config;
	enum vxge_hw_device_link_state link_state;

	struct vxge_hw_uld_cbs uld_callbacks;

	u32 host_type;
	u32 func_id;
	u32 access_rights;
#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1
#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4
	struct vxge_hw_legacy_reg __iomem *legacy_reg;
	struct vxge_hw_toc_reg __iomem *toc_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_srpcim_reg __iomem *srpcim_reg \
					[VXGE_HW_TITAN_SRPCIM_REG_SPACES];
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg \
					[VXGE_HW_TITAN_VPMGMT_REG_SPACES];
	struct vxge_hw_vpath_reg __iomem *vpath_reg \
					[VXGE_HW_TITAN_VPATH_REG_SPACES];
	u8 __iomem *kdfc;
	u8 __iomem *usdc;
	struct __vxge_hw_virtualpath virtual_paths \
					[VXGE_HW_MAX_VIRTUAL_PATHS];
	u64 vpath_assignments;
	u64 vpaths_deployed;
	u32 first_vp_id;
	u64 tim_int_mask0[4];
	u32 tim_int_mask1[4];

	struct __vxge_hw_blockpool block_pool;
	struct vxge_hw_device_stats stats;
	u32 debug_module_mask;
	u32 debug_level;
	u32 level_err;
	u32 level_trace;
};

#define VXGE_HW_INFO_LEN 64
/**
 * struct vxge_hw_device_hw_info - Device information
 * @host_type: Host Type
 * @func_id: Function Id
 * @vpath_mask: vpath bit mask
 * @fw_version: Firmware version
 * @fw_date: Firmware Date
 * @flash_version: Flash version
 * @flash_date: Flash Date
 * @mac_addrs: Mac addresses for each vpath
 * @mac_addr_masks: Mac address masks for each vpath
 *
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver and the first mac address for each vpath
 */
struct vxge_hw_device_hw_info {
	u32 host_type;
#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0
#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1
#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2
#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4
#define VXGE_HW_SR_VH_FUNCTION0 5
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
#define VXGE_HW_VH_NORMAL_FUNCTION 7
	u64 function_mode;
#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
#define VXGE_HW_FUNCTION_MODE_SRIOV 2
#define VXGE_HW_FUNCTION_MODE_MRIOV 3
#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10

	u32 func_id;
	u64 vpath_mask;
	struct vxge_hw_device_version fw_version;
	struct vxge_hw_device_date fw_date;
	struct vxge_hw_device_version flash_version;
	struct vxge_hw_device_date flash_date;
	u8 serial_number[VXGE_HW_INFO_LEN];
	u8 part_number[VXGE_HW_INFO_LEN];
	u8 product_desc[VXGE_HW_INFO_LEN];
	u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
	u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
};

/**
 * struct vxge_hw_device_attr - Device memory spaces.
 * @bar0: BAR0 virtual address.
 * @pdev: PCI device object.
 *
 * Device memory spaces. Includes configuration, BAR0 etc. per device
 * mapped memories. Also, includes a pointer to OS-specific PCI device object.
 */
struct vxge_hw_device_attr {
	void __iomem *bar0;
	struct pci_dev *pdev;
	struct vxge_hw_uld_cbs uld_callbacks;
};

#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)

#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \
	if (i < 16) { \
		m0[0] |= vxge_vBIT(0x8, (i*4), 4); \
		m0[1] |= vxge_vBIT(0x4, (i*4), 4); \
	} \
	else { \
		m1[0] = 0x80000000; \
		m1[1] = 0x40000000; \
	} \
}

#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \
	if (i < 16) { \
		m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \
		m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \
	} \
	else { \
		m1[0] = 0; \
		m1[1] = 0; \
	} \
}
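
/*
 * Worked example, assuming vxge_vBIT(val, loc, sz) places @val into the
 * @sz-bit field starting at big-endian bit @loc of a u64, i.e.
 * ((u64)val << (64 - loc - sz)). For vpath i == 0,
 * VXGE_HW_DEVICE_TIM_INT_MASK_SET then sets:
 *
 *	m0[0] |= vxge_vBIT(0x8, 0, 4);	(== 0x8000000000000000ULL)
 *	m0[1] |= vxge_vBIT(0x4, 0, 4);	(== 0x4000000000000000ULL)
 */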

#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \
	status = vxge_hw_mrpcim_stats_access(hldev, \
				VXGE_HW_STATS_OP_READ, \
				loc, \
				offset, \
				&val64); \
 \
	if (status != VXGE_HW_OK) \
		return status; \
}

#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
	status = __vxge_hw_vpath_stats_access(vpath, \
			VXGE_HW_STATS_OP_READ, \
			offset, \
			&val64); \
	if (status != VXGE_HW_OK) \
		return status; \
}

/*
 * struct __vxge_hw_ring - Ring channel.
 * @channel: Channel "base" of this ring, the common part of all HW
 * channels.
 * @mempool: Memory pool, the pool from which descriptors get allocated.
 * (See vxge_hw_mm.h).
 * @config: Ring configuration, part of device configuration
 * (see struct vxge_hw_device_config{}).
 * @ring_length: Length of the ring
 * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
 * as per Titan User Guide.
 * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec,
 * a 1-buffer mode descriptor is 32 bytes long, etc.
 * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
 * per-descriptor data (e.g., DMA handle for Solaris)
 * @per_rxd_space: Per rxd space requested by driver
 * @rxds_per_block: Number of descriptors per hardware-defined RxD
 * block. Depends on the (1-, 3-, 5-) buffer mode.
 * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
 * usage. Not to be confused with @rxd_priv_size.
 * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
 * @callback: Channel completion callback. HW invokes the callback when there
 * are new completions on that channel. In many implementations
 * the @callback executes in the hw interrupt context.
 * @rxd_init: Channel's descriptor-initialize callback.
 * See vxge_hw_ring_rxd_init_f{}.
 * If not NULL, HW invokes the callback when opening
 * the ring.
 * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
 * HW invokes the callback when closing the corresponding channel.
 * See also vxge_hw_channel_rxd_term_f{}.
 * @stats: Statistics for ring
 *
 * Ring channel.
 *
 * Note: The structure is cache line aligned to better utilize
 * CPU cache performance.
 */
struct __vxge_hw_ring {
	struct __vxge_hw_channel channel;
	struct vxge_hw_mempool *mempool;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	u32 ring_length;
	u32 buffer_mode;
	u32 rxd_size;
	u32 rxd_priv_size;
	u32 per_rxd_space;
	u32 rxds_per_block;
	u32 rxdblock_priv_size;
	u32 cmpl_cnt;
	u32 vp_id;
	u32 doorbell_cnt;
	u32 total_db_cnt;
	u64 rxds_limit;

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_ring *ringh,
			void *rxdh,
			u8 t_code,
			void *userdata);

	enum vxge_hw_status (*rxd_init)(
			void *rxdh,
			void *userdata);

	void (*rxd_term)(
			void *rxdh,
			enum vxge_hw_rxd_state state,
			void *userdata);

	struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned;
	struct vxge_hw_ring_config *config;
} ____cacheline_aligned;

/**
 * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
 * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
 * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
 * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
 * device.
 * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
 * filling-in and posting later.
 *
 * Titan/HW descriptor states.
 *
 */
enum vxge_hw_txdl_state {
	VXGE_HW_TXDL_STATE_NONE = 0,
	VXGE_HW_TXDL_STATE_AVAIL = 1,
	VXGE_HW_TXDL_STATE_POSTED = 2,
	VXGE_HW_TXDL_STATE_FREED = 3
};
/*
 * struct __vxge_hw_fifo - Fifo.
 * @channel: Channel "base" of this fifo, the common part of all HW
 * channels.
 * @mempool: Memory pool, from which descriptors get allocated.
 * @config: Fifo configuration, part of device configuration
 * (see struct vxge_hw_device_config{}).
 * @interrupt_type: Interrupt type to be used
 * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
 * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock.
 * For details on TxDL please refer to the Titan UG.
 * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
 * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
 * @priv_size: Per-Tx descriptor space reserved for driver
 * usage.
 * @per_txdl_space: Per txdl private space for the driver
 * @callback: Fifo completion callback. HW invokes the callback when there
 * are new completions on that fifo. In many implementations
 * the @callback executes in the hw interrupt context.
 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
 * HW invokes the callback when closing the corresponding fifo.
 * See also vxge_hw_fifo_txdl_term_f{}.
 * @stats: Statistics of this fifo
 *
 * Fifo channel.
 * Note: The structure is cache line aligned.
 */
struct __vxge_hw_fifo {
	struct __vxge_hw_channel channel;
	struct vxge_hw_mempool *mempool;
	struct vxge_hw_fifo_config *config;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
	u64 interrupt_type;
	u32 no_snoop_bits;
	u32 txdl_per_memblock;
	u32 txdl_size;
	u32 priv_size;
	u32 per_txdl_space;
	u32 vp_id;
	u32 tx_intr_num;

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_fifo *fifo_handle,
			void *txdlh,
			enum vxge_hw_fifo_tcode t_code,
			void *userdata,
			struct sk_buff ***skb_ptr,
			int nr_skb,
			int *more);

	void (*txdl_term)(
			void *txdlh,
			enum vxge_hw_txdl_state state,
			void *userdata);

	struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
} ____cacheline_aligned;

/*
 * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
 * @dma_addr: DMA (mapped) address of _this_ descriptor.
 * @dma_handle: DMA handle used to map the descriptor onto device.
 * @dma_offset: Descriptor's offset in the memory block. HW allocates
 * descriptors in memory blocks (see struct vxge_hw_fifo_config{})
 * Each memblock is a contiguous block of DMA-able memory.
 * @frags: Total number of fragments (that is, contiguous data buffers)
 * carried by this TxDL.
 * @align_vaddr_start: Aligned virtual address start
 * @align_vaddr: Virtual address of the per-TxDL area in memory used for
 * alignment. Used to place one or more mis-aligned fragments
 * @align_dma_addr: DMA address translated from the @align_vaddr.
 * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
 * @align_dma_acch: DMA access handle that corresponds to @align_dma_addr.
 * @align_dma_offset: The current offset into the @align_vaddr area.
 * Grows while filling the descriptor, gets reset.
 * @align_used_frags: Number of fragments used.
 * @alloc_frags: Total number of fragments allocated.
 * @unused: TODO
 * @next_txdl_priv: (TODO).
 * @first_txdp: (TODO).
 * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
 * TxDL list.
 * @txdlh: Corresponding txdlh to this TxDL.
 * @memblock: Pointer to the TxDL memory block or memory page.
 * on the next send operation.
 * @dma_object: DMA address and handle of the memory block that contains
 * the descriptor. This member is used only in the "checked"
 * version of the HW (to enforce certain assertions);
 * otherwise it gets compiled out.
 * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
 *
 * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
 * information associated with the descriptor. Note that driver can ask HW
 * to allocate additional per-descriptor space for its own (driver-specific)
 * purposes.
 *
 * See also: struct vxge_hw_ring_rxd_priv{}.
 */
struct __vxge_hw_fifo_txdl_priv {
	dma_addr_t dma_addr;
	struct pci_dev *dma_handle;
	ptrdiff_t dma_offset;
	u32 frags;
	u8 *align_vaddr_start;
	u8 *align_vaddr;
	dma_addr_t align_dma_addr;
	struct pci_dev *align_dma_handle;
	struct pci_dev *align_dma_acch;
	ptrdiff_t align_dma_offset;
	u32 align_used_frags;
	u32 alloc_frags;
	u32 unused;
	struct __vxge_hw_fifo_txdl_priv *next_txdl_priv;
	struct vxge_hw_fifo_txd *first_txdp;
	void *memblock;
};

/*
 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
 * @control_0: Bits 0 to 7 - Doorbell type.
 * Bits 8 to 31 - Reserved.
 * Bits 32 to 39 - The highest TxD in this TxDL.
 * Bits 40 to 47 - Reserved.
 * Bits 48 to 55 - Reserved.
 * Bits 56 to 63 - No snoop flags.
 * @txdl_ptr: The starting location of the TxDL in host memory.
 *
 * Created by the host and written to the adapter via PIO to a Kernel Doorbell
 * FIFO. All non-offload doorbell wrapper fields must be written by the host as
 * part of a doorbell write. Consumed by the adapter but is not written by the
 * adapter.
 */
struct __vxge_hw_non_offload_db_wrapper {
	u64 control_0;
#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8)
#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_NODBW_TYPE_NODBW 0

#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8)
#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)

#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1

	u64 txdl_ptr;
};

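/*
 * Illustrative sketch (last_txd and txdl_dma_addr are hypothetical
 * variables): composing a non-offload doorbell with the macros above.
 *
 *	dbw->control_0 = VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
 *			 VXGE_HW_NODBW_LAST_TXD_NUMBER(last_txd) |
 *			 VXGE_HW_NODBW_LIST_NO_SNOOP(
 *				VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE);
 *	dbw->txdl_ptr = txdl_dma_addr;
 */
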
/*
 * TX Descriptor
 */

/**
 * struct vxge_hw_fifo_txd - Transmit Descriptor
 * @control_0: Bits 0 to 6 - Reserved.
 * Bit 7 - List Ownership. This field should be initialized
 * to '1' by the driver before the transmit list pointer is
 * written to the adapter. This field will be set to '0' by the
 * adapter once it has completed transmitting the frame or frames in
 * the list. Note - This field is only valid in TxD0. Additionally,
 * for multi-list sequences, the driver should not release any
 * buffers until the ownership of the last list in the multi-list
 * sequence has been returned to the host.
 * Bits 8 to 11 - Reserved
 * Bits 12 to 15 - Transfer_Code. This field is only valid in
 * TxD0. It is used to describe the status of the transmit data
 * buffer transfer. This field is always overwritten by the
 * adapter, so this field may be initialized to any value.
 * Bits 16 to 17 - Host steering. This field allows the host to
 * override the selection of the physical transmit port.
 * Attention:
 * Normal sounds as if learned from the switch rather than from
 * the aggregation algorithms.
 * 00: Normal. Use Destination/MAC Address
 * lookup to determine the transmit port.
 * 01: Send on physical Port1.
 * 10: Send on physical Port0.
 * 11: Send on both ports.
 * Bits 18 to 21 - Reserved
 * Bits 22 to 23 - Gather_Code. This field is set by the host and
 * is used to describe how individual buffers comprise a frame.
 * 10: First descriptor of a frame.
 * 00: Middle of a multi-descriptor frame.
 * 01: Last descriptor of a frame.
 * 11: First and last descriptor of a frame (the entire frame
 * resides in a single buffer).
 * For multi-descriptor frames, the only valid gather code sequence
 * is {10, [00], 01}. In other words, the descriptors must be placed
 * in the list in the correct order.
 * Bits 24 to 27 - Reserved
 * Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation
 * definition. Only valid in TxD0. This field allows the host to
 * indicate the Ethernet encapsulation of an outbound LSO packet.
 * 00 - classic mode (best guess)
 * 01 - LLC
 * 10 - SNAP
 * 11 - DIX
 * If "classic mode" is selected, the adapter will attempt to
 * decode the frame's Ethernet encapsulation by examining the L/T
 * field as follows:
 * <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine
 * if packet is IPv4 or IPv6.
 * 0x8870 Jumbo-SNAP encoding.
 * 0x0800 IPv4 DIX encoding
 * 0x86DD IPv6 DIX encoding
 * others illegal encapsulation
 * Bit 30 - LSO_Flag. Large Send Offload (LSO) flag.
 * Set to 1 to perform segmentation offload for TCP/UDP.
 * This field is valid only in TxD0.
 * Bits 31 to 33 - Reserved.
 * Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size
 * This field is meaningful only when LSO_Control is non-zero.
 * When LSO_Control is set to TCP_LSO, the single (possibly large)
 * TCP segment described by this TxDL will be sent as a series of
 * TCP segments each of which contains no more than LSO_MSS
 * payload bytes.
 * When LSO_Control is set to UDP_LSO, the single (possibly large)
 * UDP datagram described by this TxDL will be sent as a series of
 * UDP datagrams each of which contains no more than LSO_MSS
 * payload bytes.
 * All outgoing frames from this TxDL will have LSO_MSS bytes of UDP
 * or TCP payload, with the exception of the last, which will have
 * <= LSO_MSS bytes of payload.
 * Bits 48 to 63 - Buffer_Size. Number of valid bytes in the
 * buffer to be read by the adapter. This field is written by the
 * host. A value of 0 is illegal.
 * Bits 32 to 63 - This value is written by the adapter upon
 * completion of a UDP or TCP LSO operation and indicates the number
 * of UDP or TCP payload bytes that were transmitted. 0x0000 will be
 * returned for any non-LSO operation.
 * @control_1: Bits 0 to 4 - Reserved.
 * Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum
 * offload. This field is only valid in the first TxD of a frame.
 * Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload.
 * This field is only valid in the first TxD of a frame (the TxD's
 * gather code must be 10 or 11). The driver should only set this
 * bit if it can guarantee that TCP is present.
 * Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload.
 * This field is only valid in the first TxD of a frame (the TxD's
 * gather code must be 10 or 11). The driver should only set this
 * bit if it can guarantee that UDP is present.
 * Bits 8 to 14 - Reserved.
 * Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to
 * instruct the adapter to insert the VLAN tag specified by the
 * Tx_VLAN_Tag field. This field is only valid in the first TxD of
 * a frame.
 * Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag
 * to be inserted into the frame by the adapter (the first two bytes
 * of a VLAN tag are always 0x8100). This field is only valid if the
 * Tx_VLAN_Enable field is set to '1'.
 * Bits 32 to 33 - Reserved.
 * Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt
 * number the frame is associated with. This field is written by the
 * host. It is only valid in the first TxD of a frame.
 * Bits 40 to 42 - Reserved.
 * Bit 43 - Set to 1 to exclude the frame from bandwidth metering
 * functions. This field is valid only in the first TxD
 * of a frame.
 * Bits 44 to 45 - Reserved.
 * Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to
 * generate an interrupt as soon as all of the frames in the list
 * have been transmitted. In order to have per-frame interrupts,
 * the driver should place a maximum of one frame per list. This
 * field is only valid in the first TxD of a frame.
 * Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter
 * to count the frame toward the utilization interrupt specified in
 * the Tx_Int_Number field. This field is only valid in the first
 * TxD of a frame.
 * Bits 48 to 63 - Reserved.
 * @buffer_pointer: Buffer start address.
 * @host_control: Host_Control. Opaque 64bit data stored by driver inside the
 * Titan descriptor prior to posting the latter on the fifo
 * via vxge_hw_fifo_txdl_post(). The %host_control is returned as is
 * to the driver with each completed descriptor.
 *
 * Transmit descriptor (TxD). Fifo descriptor contains configured number
 * (list) of TxDs. For more details please refer to Titan User Guide,
 * Section 5.4.2 "Transmit Descriptor (TxD) Format".
 */
struct vxge_hw_fifo_txd {
	u64 control_0;
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7)

#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED


#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2)
#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST
#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST


#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30)

#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14)

#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16)

	u64 control_1;
#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5)
#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6)
#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15)

#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16)

#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6)

#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46)
#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47)

	u64 buffer_pointer;

	u64 host_control;
};

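/*
 * Illustrative sketch (txdp, dma_addr and len are hypothetical variables):
 * a single-buffer frame occupies one TxD that is both first and last in
 * its TxDL, per the gather-code description above.
 *
 *	txdp->buffer_pointer = dma_addr;
 *	txdp->control_0 = VXGE_HW_FIFO_TXD_GATHER_CODE(
 *				VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST |
 *				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST) |
 *			  VXGE_HW_FIFO_TXD_BUFFER_SIZE(len) |
 *			  VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
 *	txdp->control_1 = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
 */
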
/**
 * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
 * @host_control: This field is exclusively for host use and is "readonly"
 * from the adapter's perspective.
 * @control_0: Bits 0 to 6 - RTH_Bucket get
 * Bit 7 - Own Descriptor ownership bit. This bit is set to 1
 * by the host, and is set to 0 by the adapter.
 * 0 - Host owns RxD and buffer.
 * 1 - The adapter owns RxD and buffer.
 * Bit 8 - Fast_Path_Eligible When set, indicates that the
 * received frame meets all of the criteria for fast path processing.
 * The required criteria are as follows:
 * !SYN &
 * (Transfer_Code == "Transfer OK") &
 * (!Is_IP_Fragment) &
 * ((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
 * (Is_IPv6)) &
 * ((Is_TCP & computed_L4_checksum == 0xFFFF) |
 * (Is_UDP & (computed_L4_checksum == 0xFFFF |
 * computed_L4_checksum == 0x0000)))
 * (same meaning for all RxD buffer modes)
 * Bit 9 - L3 Checksum Correct
 * Bit 10 - L4 Checksum Correct
 * Bit 11 - Reserved
 * Bits 12 to 15 - This field is written by the adapter. It is
 * used to report the status of the frame transfer to the host.
 * 0x0 - Transfer OK
 * 0x4 - RDA Failure During Transfer
 * 0x5 - Unparseable Packet, such as unknown IPv6 header.
 * 0x6 - Frame integrity error (FCS or ECC).
 * 0x7 - Buffer Size Error. The provided buffer(s) were not
 * appropriately sized and data loss occurred.
 * 0x8 - Internal ECC Error. RxD corrupted.
 * 0x9 - IPv4 Checksum error
 * 0xA - TCP/UDP Checksum error
 * 0xF - Unknown Error or Multiple Error. Indicates an
 * unknown problem or that more than one of transfer codes is set.
 * Bit 16 - SYN The adapter sets this field to indicate that
 * the incoming frame contained a TCP segment with its SYN bit
 * set and its ACK bit NOT set. (same meaning for all RxD buffer
 * modes)
 * Bit 17 - Is ICMP
 * Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
 * Socket Pair Direct Match Table and the frame was steered based
 * on SPDM.
 * Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
 * Indirection Table and the frame was steered based on hash
 * indirection.
 * Bits 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
 * type) that was used to calculate the hash.
 * Bit 24 - IS_VLAN Set to '1' if the frame was/is VLAN
 * tagged.
 * Bits 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
 * of the received frame.
 * 0x0 - Ethernet DIX
 * 0x1 - LLC
 * 0x2 - SNAP (includes Jumbo-SNAP)
 * 0x3 - IPX
 * Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet.
 * Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet.
 * Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented
 * IP packet.
 * Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
 * Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
 * Bits 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
 * arrived with the frame. If the resulting computed IPv4 header
 * checksum for the frame did not produce the expected 0xFFFF value,
 * then the transfer code would be set to 0x9.
 * Bits 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that
 * arrived with the frame. If the resulting computed TCP/UDP checksum
 * for the frame did not produce the expected 0xFFFF value, then the
 * transfer code would be set to 0xA.
 * @control_1: Bits 0 to 1 - Reserved
 * Bits 2 to 15 - Buffer0_Size. This field is set by the host and
 * eventually overwritten by the adapter. The host writes the
 * available buffer size in bytes when it passes the descriptor to
 * the adapter. When a frame is delivered to the host, the adapter
 * populates this field with the number of bytes written into the
 * buffer. The largest supported buffer is 16,383 bytes.
 * Bits 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid if
 * RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero.
 * Bits 48 to 63 - VLAN_Tag[0:15] The contents of the variable portion
 * of the VLAN tag, if one was detected by the adapter. This field is
 * populated even if VLAN-tag stripping is enabled.
 * @buffer0_ptr: Pointer to buffer. This field is populated by the driver.
 *
 * One buffer mode RxD for ring structure
 */
struct vxge_hw_ring_rxd_1 {
	u64 host_control;
	u64 control_0;
#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7)

#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7)

#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1)

#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1)

#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1)

#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4)

#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED

#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1)

#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1)

#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1)

#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1)

#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4)

#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1)

#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2)

#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5)

#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16)

#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16)

	u64 control_1;

#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14)

#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32)

#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16)

	u64 buffer0_ptr;
};

enum vxge_hw_rth_algoritms {
	RTH_ALG_JENKINS = 0,
	RTH_ALG_MS_RSS = 1,
	RTH_ALG_CRC32C = 2
};

/**
 * struct vxge_hw_rth_hash_types - RTH hash types.
 * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4
 * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4
 * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6
 * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6
 * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex
 * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex
 *
 * Used to pass RTH hash types to rts_rts_set.
 *
 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
 */
struct vxge_hw_rth_hash_types {
	u8 hash_type_tcpipv4_en;
	u8 hash_type_ipv4_en;
	u8 hash_type_tcpipv6_en;
	u8 hash_type_ipv6_en;
	u8 hash_type_tcpipv6ex_en;
	u8 hash_type_ipv6ex_en;
};

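/*
 * Illustrative sketch: enabling only IPv4 and TCP/IPv4 hashing, for use
 * with vxge_hw_vpath_rts_rth_set() (declared elsewhere in this driver).
 *
 *	struct vxge_hw_rth_hash_types hash_types = {
 *		.hash_type_tcpipv4_en = 1,
 *		.hash_type_ipv4_en    = 1,
 *	};
 */
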
void vxge_hw_device_debug_set(
	struct __vxge_hw_device *devh,
	enum vxge_debug_level level,
	u32 mask);

u32
vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);

u32
vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);

/**
 * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
 * @buf_mode: Buffer mode (1, 3 or 5)
 *
 * This function returns the size of RxD for given buffer mode
 */
static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
{
	return sizeof(struct vxge_hw_ring_rxd_1);
}

/**
 * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
 * @buf_mode: Buffer mode (1 buffer mode only)
 *
 * This function returns the number of RxDs per RxD block for the given
 * buffer mode
 */
static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
{
	return (u32)((VXGE_HW_BLOCK_SIZE-16) /
		sizeof(struct vxge_hw_ring_rxd_1));
}

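/*
 * Worked example, assuming VXGE_HW_BLOCK_SIZE is 4096 and a 1-buffer-mode
 * RxD (struct vxge_hw_ring_rxd_1, four u64 words) is 32 bytes:
 * (4096 - 16) / 32 = 127 RxDs per block, the 16 bytes at the end of each
 * block being reserved for hardware use.
 */
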
1458/**
1459 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
1460 * @rxdh: Descriptor handle.
1461 * @dma_pointer: DMA address of a single receive buffer this descriptor
1462 * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
1463 * the receive buffer should be already mapped to the device
1464 * @size: Size of the receive @dma_pointer buffer.
1465 *
1466 * Prepare 1-buffer-mode Rx descriptor for posting
1467 * (via vxge_hw_ring_rxd_post()).
1468 *
1469 * This inline helper-function does not return any parameters and always
1470 * succeeds.
1471 *
1472 */
1473static inline
1474void vxge_hw_ring_rxd_1b_set(
1475 void *rxdh,
1476 dma_addr_t dma_pointer,
1477 u32 size)
1478{
1479 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1480 rxdp->buffer0_ptr = dma_pointer;
1481 rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
1482 rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
1483}
1484
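/*
 * A minimal RX-prepare sketch (illustrative only: ndev, pdev, buf_size,
 * rxdh and ringh are assumed to come from the caller, and error handling
 * is omitted). The buffer is DMA-mapped first, as required above, then
 * posted via vxge_hw_ring_rxd_post():
 *
 *	struct sk_buff *skb = netdev_alloc_skb(ndev, buf_size);
 *	dma_addr_t dma = pci_map_single(pdev, skb->data, buf_size,
 *					PCI_DMA_FROMDEVICE);
 *
 *	vxge_hw_ring_rxd_1b_set(rxdh, dma, buf_size);
 *	vxge_hw_ring_rxd_post(ringh, rxdh);
 */
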
1485/**
1486 * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
1487 * descriptor.
1488 * @ring_handle: Ring handle.
1489 * @rxdh: Descriptor handle.
1490 * @pkt_length: Length (in bytes) of the data in the buffer pointed to
1491 * by this descriptor (buffer0). Returned by HW.
1492 *
1494 * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
1495 * This inline helper-function uses the completed descriptor to populate
1496 * the "out" parameters above. The function always succeeds.
1497 *
1498 */
1499static inline
1500void vxge_hw_ring_rxd_1b_get(
1501 struct __vxge_hw_ring *ring_handle,
1502 void *rxdh,
1503 u32 *pkt_length)
1504{
1505 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1506
1507 *pkt_length =
1508 (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
1509}
1510
1511/**
1512 * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
1513 * a completed receive descriptor for 1b mode.
1514 * @ring_handle: Ring handle.
1515 * @rxdh: Descriptor handle.
1516 * @rxd_info: Descriptor information
1517 *
1518 * Retrieve extended information associated with a completed receive descriptor.
1519 *
1520 */
1521static inline
1522void vxge_hw_ring_rxd_1b_info_get(
1523 struct __vxge_hw_ring *ring_handle,
1524 void *rxdh,
1525 struct vxge_hw_ring_rxd_info *rxd_info)
1526{
1527
1528 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1529 rxd_info->syn_flag =
1530 (u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
1531 rxd_info->is_icmp =
1532 (u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
1533 rxd_info->fast_path_eligible =
1534 (u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
1535 rxd_info->l3_cksum_valid =
1536 (u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
1537 rxd_info->l3_cksum =
1538 (u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
1539 rxd_info->l4_cksum_valid =
1540 (u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
1541 rxd_info->l4_cksum =
1542 (u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
1543 rxd_info->frame =
1544 (u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
1545 rxd_info->proto =
1546 (u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
1547 rxd_info->is_vlan =
1548 (u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
1549 rxd_info->vlan =
1550 (u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
1551 rxd_info->rth_bucket =
1552 (u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
1553 rxd_info->rth_it_hit =
1554 (u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
1555 rxd_info->rth_spdm_hit =
1556 (u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
1557 rxd_info->rth_hash_type =
1558 (u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
1559 rxd_info->rth_value =
1560 (u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
1561}
1562
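/*
 * Completion-side sketch (illustrative; recovering the skb, e.g. via the
 * per-RxD private space and vxge_hw_ring_rxd_private_get() below, is the
 * driver's business):
 *
 *	u32 pkt_length;
 *	struct vxge_hw_ring_rxd_info info;
 *
 *	vxge_hw_ring_rxd_1b_get(ringh, rxdh, &pkt_length);
 *	vxge_hw_ring_rxd_1b_info_get(ringh, rxdh, &info);
 *	skb_put(skb, pkt_length);
 *	if (info.l3_cksum_valid && info.l4_cksum_valid)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 */
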
1563/**
1564 * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data
1565 * of a 1b-mode or 3b-mode ring.
1566 * @rxdh: Descriptor handle.
1567 *
1568 * Returns: private driver info associated with the descriptor.
1569 * The driver requests per-descriptor space via struct vxge_hw_ring_attr.
1570 *
1571 */
1572static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
1573{
1574 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1575 return (void *)(size_t)rxdp->host_control;
1576}
1577
1578/**
1579 * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
1580 * @txdlh: Descriptor handle.
1581 * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
1582 * and/or TCP and/or UDP.
1583 *
1584 * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
1585 * descriptor.
1586 * This API is part of the preparation of the transmit descriptor for posting
1587 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1588 * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
1589 * and vxge_hw_fifo_txdl_buffer_set().
1590 * All these APIs fill in the fields of the fifo descriptor,
1591 * in accordance with the Titan specification.
1592 *
1593 */
1594static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
1595{
1596 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1597 txdp->control_1 |= cksum_bits;
1598}
1599
1600/**
1601 * vxge_hw_fifo_txdl_mss_set - Set MSS.
1602 * @txdlh: Descriptor handle.
1603 * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
1604 * driver, which in turn inserts the MSS into the @txdlh.
1605 *
1606 * This API is part of the preparation of the transmit descriptor for posting
1607 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1608 * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
1609 * and vxge_hw_fifo_txdl_cksum_set_bits().
1610 * All these APIs fill in the fields of the fifo descriptor,
1611 * in accordance with the Titan specification.
1612 *
1613 */
1614static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
1615{
1616 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1617
1618 txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
1619 txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
1620}
1621
1622/**
1623 * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
1624 * @txdlh: Descriptor handle.
1625 * @vlan_tag: 16bit VLAN tag.
1626 *
1627 * Insert VLAN tag into specified transmit descriptor.
1628 * The actual insertion of the tag into outgoing frame is done by the hardware.
1629 */
1630static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
1631{
1632 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1633
1634 txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
1635 txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
1636}
1637
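/*
 * Putting the TxDL "preparation" helpers together (a sketch; the
 * VXGE_HW_FIFO_TXD_TX_CKO_* enable bits are defined elsewhere in this
 * driver, and skb/fifoh/txdlh are assumed to come from the transmit path):
 *
 *	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
 *			VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
 *			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
 *	if (skb_shinfo(skb)->gso_size)
 *		vxge_hw_fifo_txdl_mss_set(txdlh, skb_shinfo(skb)->gso_size);
 *	if (vlan_tx_tag_present(skb))
 *		vxge_hw_fifo_txdl_vlan_set(txdlh, vlan_tx_tag_get(skb));
 *	vxge_hw_fifo_txdl_post(fifoh, txdlh);
 */
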
1638/**
1639 * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
1640 * @txdlh: Descriptor handle.
1641 *
1642 * Retrieve per-descriptor private data.
1643 * Note that driver requests per-descriptor space via
1644 * struct vxge_hw_fifo_attr passed to
1645 * vxge_hw_vpath_open().
1646 *
1647 * Returns: private driver data associated with the descriptor.
1648 */
1649static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
1650{
1651 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1652
1653 return (void *)(size_t)txdp->host_control;
1654}
1655
1656/**
1657 * struct vxge_hw_ring_attr - Ring open "template".
1658 * @callback: Ring completion callback. HW invokes the callback when there
1659 * are new completions on that ring. In many implementations
1660 * the @callback executes in the hw interrupt context.
1661 * @rxd_init: Ring's descriptor-initialize callback.
1662 * See vxge_hw_ring_rxd_init_f{}.
1663 * If not NULL, HW invokes the callback when opening
1664 * the ring.
1665 * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
1666 * HW invokes the callback when closing the corresponding ring.
1667 * See also vxge_hw_ring_rxd_term_f{}.
1668 * @userdata: User-defined "context" of _that_ ring. Passed back to the
1669 * user as one of the @callback, @rxd_init, and @rxd_term arguments.
1670 * @per_rxd_space: If specified (i.e., greater than zero): extra space
1671 * reserved by HW for each receive descriptor.
1672 * Can be used to store,
1673 * and retrieve on completion, information specific
1674 * to the driver.
1675 *
1676 * Ring open "template". User fills the structure with ring
1677 * attributes and passes it to vxge_hw_vpath_open().
1678 */
1679struct vxge_hw_ring_attr {
1680 enum vxge_hw_status (*callback)(
1681 struct __vxge_hw_ring *ringh,
1682 void *rxdh,
1683 u8 t_code,
1684 void *userdata);
1685
1686 enum vxge_hw_status (*rxd_init)(
1687 void *rxdh,
1688 void *userdata);
1689
1690 void (*rxd_term)(
1691 void *rxdh,
1692 enum vxge_hw_rxd_state state,
1693 void *userdata);
1694
1695 void *userdata;
1696 u32 per_rxd_space;
1697};
1698
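/*
 * Filling the template (a sketch; my_rx_callback, my_rxd_init, my_rxd_term,
 * my_ring_ctx and struct my_rx_priv are hypothetical driver-side names, not
 * part of this API):
 *
 *	struct vxge_hw_ring_attr ring_attr = {
 *		.callback	= my_rx_callback,
 *		.rxd_init	= my_rxd_init,
 *		.rxd_term	= my_rxd_term,
 *		.userdata	= my_ring_ctx,
 *		.per_rxd_space	= sizeof(struct my_rx_priv),
 *	};
 */
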
1699/**
1700 * function vxge_hw_fifo_callback_f - FIFO callback.
1701 * @vpath_handle: Virtual path whose fifo contains one or more completed
1702 * descriptors.
1703 * @txdlh: First completed descriptor.
1704 * @txdl_priv: Pointer to per txdl space allocated
1705 * @t_code: Transfer code, as per Titan User Guide.
1706 * Returned by HW.
1707 * @host_control: Opaque 64bit data stored by driver inside the Titan
1708 * descriptor prior to posting the latter on the fifo
1709 * via vxge_hw_fifo_txdl_post(). The @host_control is returned
1710 * as is to the driver with each completed descriptor.
1711 * @userdata: Opaque per-fifo data specified at fifo open
1712 * time, via vxge_hw_vpath_open().
1713 *
1714 * Fifo completion callback (type declaration). A single per-fifo
1715 * callback is specified at fifo open time, via
1716 * vxge_hw_vpath_open(). Typically gets called as part of the processing
1717 * of the Interrupt Service Routine.
1718 *
1719 * Fifo callback gets called by HW if, and only if, there is at least
1720 * one new completion on a given fifo. Upon processing the first @txdlh the
1721 * driver is expected to continue consuming completions using:
1722 * - vxge_hw_fifo_txdl_next_completed()
1723 *
1724 * Note that failure to process new completions in a timely fashion
1725 * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
1726 *
1727 * A non-zero @t_code means failure to process the transmit descriptor.
1728 *
1729 * In the "transmit" case the failure could happen, for instance, when the
1730 * link is down, in which case Titan completes the descriptor because it
1731 * is not able to send the data out.
1732 *
1733 * For details please refer to Titan User Guide.
1734 *
1735 * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
1736 */
1737/**
1738 * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
1739 * @txdlh: First completed descriptor.
1740 * @txdl_priv: Pointer to per txdl space allocated
1741 * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
1742 * @userdata: Per-fifo user data (a.k.a. context) specified at
1743 * fifo open time, via vxge_hw_vpath_open().
1744 *
1745 * Terminate descriptor callback. Unless NULL is specified in the
1746 * struct vxge_hw_fifo_attr{} structure passed to vxge_hw_vpath_open(),
1747 * HW invokes the callback as part of closing fifo, prior to
1748 * de-allocating the ring and associated data structures
1749 * (including descriptors).
1750 * The driver should use the callback to (for instance) unmap
1751 * and free DMA data buffers associated with the posted (state =
1752 * VXGE_HW_TXDL_STATE_POSTED) descriptors,
1753 * as well as to perform any other relevant cleanup.
1754 *
1755 * See also: struct vxge_hw_fifo_attr{}
1756 */
1757/**
1758 * struct vxge_hw_fifo_attr - Fifo open "template".
1759 * @callback: Fifo completion callback. HW invokes the callback when there
1760 * are new completions on that fifo. In many implementations
1761 * the @callback executes in the hw interrupt context.
1762 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
1763 * HW invokes the callback when closing the corresponding fifo.
1764 * See also vxge_hw_fifo_txdl_term_f{}.
1765 * @userdata: User-defined "context" of _that_ fifo. Passed back to the
1766 * user as one of the @callback, and @txdl_term arguments.
1767 * @per_txdl_space: If specified (i.e., greater than zero): extra space
1768 * reserved by HW for each transmit descriptor. Can be used to
1769 * store, and retrieve on completion, information specific
1770 * to the driver.
1771 *
1772 * Fifo open "template". User fills the structure with fifo
1773 * attributes and passes it to vxge_hw_vpath_open().
1774 */
1775struct vxge_hw_fifo_attr {
1776
1777 enum vxge_hw_status (*callback)(
1778 struct __vxge_hw_fifo *fifo_handle,
1779 void *txdlh,
1780 enum vxge_hw_fifo_tcode t_code,
1781 void *userdata,
1782 struct sk_buff ***skb_ptr,
1783 int nr_skb, int *more);
1784
1785 void (*txdl_term)(
1786 void *txdlh,
1787 enum vxge_hw_txdl_state state,
1788 void *userdata);
1789
1790 void *userdata;
1791 u32 per_txdl_space;
1792};
1793
1794/**
1795 * struct vxge_hw_vpath_attr - Attributes of virtual path
1796 * @vp_id: Identifier of Virtual Path
1797 * @ring_attr: Attributes of ring for non-offload receive
1798 * @fifo_attr: Attributes of fifo for non-offload transmit
1799 *
1800 * Attributes of virtual path. This structure is passed as parameter
1801 * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo.
1802 */
1803struct vxge_hw_vpath_attr {
1804 u32 vp_id;
1805 struct vxge_hw_ring_attr ring_attr;
1806 struct vxge_hw_fifo_attr fifo_attr;
1807};
1808
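/*
 * Typical open sequence (a sketch; assumes ring_attr and fifo_attr were
 * filled in as in the templates above and devh came from
 * vxge_hw_device_initialize()):
 *
 *	struct __vxge_hw_vpath_handle *vp = NULL;
 *	struct vxge_hw_vpath_attr attr = {
 *		.vp_id		= vp_id,
 *		.ring_attr	= ring_attr,
 *		.fifo_attr	= fifo_attr,
 *	};
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_open(devh, &attr, &vp);
 */
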
1809enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
1810 void __iomem *bar0,
1811 struct vxge_hw_device_hw_info *hw_info);
1812
1813enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
1814 struct vxge_hw_device_config *device_config);
1815
1816/**
1817 * vxge_hw_device_link_state_get - Get link state.
1818 * @devh: HW device handle.
1819 *
1820 * Get link state.
1821 * Returns: link state.
1822 */
1823static inline
1824enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
1825 struct __vxge_hw_device *devh)
1826{
1827 return devh->link_state;
1828}
1829
1830void vxge_hw_device_terminate(struct __vxge_hw_device *devh);
1831
1832const u8 *
1833vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);
1834
1835u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);
1836
1837const u8 *
1838vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);
1839
1840enum vxge_hw_status __devinit vxge_hw_device_initialize(
1841 struct __vxge_hw_device **devh,
1842 struct vxge_hw_device_attr *attr,
1843 struct vxge_hw_device_config *device_config);
1844
1845enum vxge_hw_status vxge_hw_device_getpause_data(
1846 struct __vxge_hw_device *devh,
1847 u32 port,
1848 u32 *tx,
1849 u32 *rx);
1850
1851enum vxge_hw_status vxge_hw_device_setpause_data(
1852 struct __vxge_hw_device *devh,
1853 u32 port,
1854 u32 tx,
1855 u32 rx);
1856
1857static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
1858 unsigned long size,
1859 struct pci_dev **p_dmah,
1860 struct pci_dev **p_dma_acch)
1861{
1862 gfp_t flags;
1863 void *vaddr;
1864 unsigned long misaligned = 0;
1865 int realloc_flag = 0;
1866 *p_dma_acch = *p_dmah = NULL;
1867
1868 if (in_interrupt())
1869 flags = GFP_ATOMIC | GFP_DMA;
1870 else
1871 flags = GFP_KERNEL | GFP_DMA;
1872realloc:
1873 vaddr = kmalloc((size), flags);
1874 if (vaddr == NULL)
1875 return vaddr;
1876 misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
1877 VXGE_CACHE_LINE_SIZE);
1878 if (realloc_flag)
1879 goto out;
1880
1881 if (misaligned) {
1882 /* misaligned, free current one and try allocating
1883 * size + VXGE_CACHE_LINE_SIZE memory
1884 */
1885 kfree((void *) vaddr);
1886 size += VXGE_CACHE_LINE_SIZE;
1887 realloc_flag = 1;
1888 goto realloc;
1889 }
1890out:
1891 *(unsigned long *)p_dma_acch = misaligned;
1892 vaddr = (void *)((u8 *)vaddr + misaligned);
1893 return vaddr;
1894}
1895
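/*
 * Worked example of the realloc trick above (assuming the 128-byte
 * VXGE_CACHE_LINE_SIZE): if kmalloc() returns 0x...1040, VXGE_ALIGN()
 * yields misaligned = 64, so the buffer is freed and re-allocated with
 * 128 extra bytes; on the second pass the (recomputed) misalignment is
 * simply added to the pointer and also stashed in *p_dma_acch so the
 * matching free routine can recover the original kmalloc() address.
 */
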
1896/*
1897 * __vxge_hw_mempool_item_priv - will return pointer on per item private space
1898 */
1899static inline void*
1900__vxge_hw_mempool_item_priv(
1901 struct vxge_hw_mempool *mempool,
1902 u32 memblock_idx,
1903 void *item,
1904 u32 *memblock_item_idx)
1905{
1906 ptrdiff_t offset;
1907 void *memblock = mempool->memblocks_arr[memblock_idx];
1908
1909
1910 offset = (u32)((u8 *)item - (u8 *)memblock);
1911 vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);
1912
1913 (*memblock_item_idx) = (u32) offset / mempool->item_size;
1914 vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);
1915
1916 return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
1917 (*memblock_item_idx) * mempool->items_priv_size;
1918}
1919
1920/*
1921 * __vxge_hw_fifo_txdl_priv - Return a pointer to the per-TxDL private
1922 * data maintained for the fifo.
1923 * @fifo: Fifo
1924 * @txdp: Pointer to a TxD
1925 */
1926static inline struct __vxge_hw_fifo_txdl_priv *
1927__vxge_hw_fifo_txdl_priv(
1928 struct __vxge_hw_fifo *fifo,
1929 struct vxge_hw_fifo_txd *txdp)
1930{
1931 return (struct __vxge_hw_fifo_txdl_priv *)
1932 (((char *)((ulong)txdp->host_control)) +
1933 fifo->per_txdl_space);
1934}
1935
1936enum vxge_hw_status vxge_hw_vpath_open(
1937 struct __vxge_hw_device *devh,
1938 struct vxge_hw_vpath_attr *attr,
1939 struct __vxge_hw_vpath_handle **vpath_handle);
1940
1941enum vxge_hw_status vxge_hw_vpath_close(
1942 struct __vxge_hw_vpath_handle *vpath_handle);
1943
1944enum vxge_hw_status
1945vxge_hw_vpath_reset(
1946 struct __vxge_hw_vpath_handle *vpath_handle);
1947
1948enum vxge_hw_status
1949vxge_hw_vpath_recover_from_reset(
1950 struct __vxge_hw_vpath_handle *vpath_handle);
1951
1952void
1953vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);
1954
1955enum vxge_hw_status
1956vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);
1957
1958enum vxge_hw_status vxge_hw_vpath_mtu_set(
1959 struct __vxge_hw_vpath_handle *vpath_handle,
1960 u32 new_mtu);
1961
1962void
1963vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
1964
1965
1966#ifndef readq
1967static inline u64 readq(void __iomem *addr)
1968{
1969 u64 ret = 0;
1970 ret = readl(addr + 4);
1971 ret <<= 32;
1972 ret |= readl(addr);
1973
1974 return ret;
1975}
1976#endif
1977
1978#ifndef writeq
1979static inline void writeq(u64 val, void __iomem *addr)
1980{
1981 writel((u32) (val), addr);
1982 writel((u32) (val >> 32), (addr + 4));
1983}
1984#endif
1985
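/*
 * Note: these fallbacks (used only where the architecture lacks native
 * readq/writeq) split the 64-bit access into two 32-bit PIO accesses and
 * are therefore not atomic; writeq stores the lower dword first, readq
 * loads the upper dword first. The __vxge_hw_pio_mem_write32_upper/_lower
 * helpers below expose the two halves individually for cases where the
 * halves must be written in a specific order.
 */
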
1986static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
1987{
1988 writel(val, addr + 4);
1989}
1990
1991static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
1992{
1993 writel(val, addr);
1994}
1995
1996enum vxge_hw_status
1997vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
1998
1999enum vxge_hw_status
2000vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2001
2002/**
2003 * vxge_debug
2004 * @level: level of debug verbosity.
2005 * @mask: mask for the debug
2006 * @buf: Circular buffer for tracing
2007 * @fmt: printf like format string
2008 *
2009 * Provides logging facilities. Can be customized on a per-module
2010 * basis and/or with debug levels. Input parameters, except
2011 * module and level, are the same as for POSIX printf. This macro
2012 * may be compiled out if the DEBUG macro is not defined.
2013 * See also: enum vxge_debug_level{}.
2014 */
2015
2016#define vxge_trace_aux(level, mask, fmt, ...) \
2017{\
2018 vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\
2019}
2020
2021#define vxge_debug(module, level, mask, fmt, ...) { \
2022if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \
2023 (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\
2024 if ((mask & VXGE_DEBUG_MASK) == mask)\
2025 vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \
2026} \
2027}
2028
2029#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
2030#define vxge_debug_ll(level, mask, fmt, ...) \
2031{\
2032 vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\
2033}
2034
2035#else
2036#define vxge_debug_ll(level, mask, fmt, ...)
2037#endif
2038
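/*
 * Usage sketch (illustrative). Since vxge_os_vaprintf() expands
 * __VA_ARGS__ without the ## extension, pass at least one argument
 * after the format string:
 *
 *	vxge_debug_ll(VXGE_TRACE, VXGE_DEBUG_MASK,
 *		"%s: vpath %d opened", __func__, vp_id);
 */
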
2039enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
2040 struct __vxge_hw_vpath_handle **vpath_handles,
2041 u32 vpath_count,
2042 u8 *mtable,
2043 u8 *itable,
2044 u32 itable_size);
2045
2046enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
2047 struct __vxge_hw_vpath_handle *vpath_handle,
2048 enum vxge_hw_rth_algoritms algorithm,
2049 struct vxge_hw_rth_hash_types *hash_type,
2050 u16 bucket_size);
2051
2052enum vxge_hw_status
2053__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
2054#endif