]>
Commit | Line | Data |
---|---|---|
92915f71 GR |
1 | /******************************************************************************* |
2 | ||
3 | Intel 82599 Virtual Function driver | |
66c87bd5 | 4 | Copyright(c) 1999 - 2010 Intel Corporation. |
92915f71 GR |
5 | |
6 | This program is free software; you can redistribute it and/or modify it | |
7 | under the terms and conditions of the GNU General Public License, | |
8 | version 2, as published by the Free Software Foundation. | |
9 | ||
10 | This program is distributed in the hope it will be useful, but WITHOUT | |
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | more details. | |
14 | ||
15 | You should have received a copy of the GNU General Public License along with | |
16 | this program; if not, write to the Free Software Foundation, Inc., | |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | |
18 | ||
19 | The full GNU General Public License is included in this distribution in | |
20 | the file called "COPYING". | |
21 | ||
22 | Contact Information: | |
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | |
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
25 | ||
26 | *******************************************************************************/ | |
27 | ||
28 | #ifndef _IXGBEVF_H_ | |
29 | #define _IXGBEVF_H_ | |
30 | ||
31 | #include <linux/types.h> | |
32 | #include <linux/timer.h> | |
33 | #include <linux/io.h> | |
34 | #include <linux/netdevice.h> | |
35 | ||
36 | #include "vf.h" | |
37 | ||
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbevf_tx_buffer {
	struct sk_buff *skb;		/* skb being transmitted via this descriptor */
	dma_addr_t dma;			/* DMA handle of the mapped buffer */
	unsigned long time_stamp;	/* jiffies at queue time — presumably used with
					 * adapter->detect_tx_hung; confirm in clean path */
	u16 length;			/* length of the mapped region */
	u16 next_to_watch;		/* descriptor index whose completion finishes this skb */
	u16 mapped_as_page;		/* nonzero if dma refers to a page mapping rather
					 * than a single-buffer mapping */
};
48 | ||
/* software state for one Rx descriptor: an skb mapping plus an optional
 * page mapping — the page fields look packet-split related (see the
 * IXGBE_FLAG_RX_PS_* flags below); confirm against the Rx alloc code */
struct ixgbevf_rx_buffer {
	struct sk_buff *skb;		/* receive buffer skb */
	dma_addr_t dma;			/* DMA handle for skb->data */
	struct page *page;		/* half-page buffer for split payloads */
	dma_addr_t page_dma;		/* DMA handle for the page buffer */
	unsigned int page_offset;	/* offset of the in-use half of the page */
};
56 | ||
/* descriptor ring state shared by the Tx and Rx paths */
struct ixgbevf_ring {
	struct ixgbevf_adapter *adapter;  /* backlink */
	void *desc;			/* descriptor ring memory */
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */
	unsigned int count;		/* number of descriptors */
	unsigned int next_to_use;	/* producer index (see IXGBE_DESC_UNUSED) */
	unsigned int next_to_clean;	/* consumer index (see IXGBE_DESC_UNUSED) */

	int queue_index; /* needed for multiqueue queue management */
	/* per-descriptor software state; a ring is either all Tx or all Rx,
	 * so the two arrays can share storage */
	union {
		struct ixgbevf_tx_buffer *tx_buffer_info;
		struct ixgbevf_rx_buffer *rx_buffer_info;
	};

	u16 head;	/* hardware head/tail register offsets for this ring */
	u16 tail;

	/* running byte/packet totals, presumably accumulated by the
	 * clean routines for ITR tuning — confirm in the interrupt path */
	unsigned int total_bytes;
	unsigned int total_packets;

	u16 reg_idx; /* holds the special value that gets the hardware register
		      * offset associated with this ring, which is different
		      * for DCB and RSS modes */

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
	/* cpu for tx queue */
	int cpu;
#endif

	u64 v_idx; /* maps directly to the index for this ring in the hardware
		    * vector array, can also be used for finding the bit in EICR
		    * and friends that represents the vector for this ring */

	u16 work_limit; /* max work per interrupt */
	u16 rx_buf_len; /* Rx buffer length in bytes */
};
94 | ||
/* indices into the adapter's ring_feature[] array; the VF defines no
 * optional ring features, so only the array-size sentinel is present */
enum ixgbevf_ring_f_enum {
	RING_F_NONE = 0,
	RING_F_ARRAY_SIZE      /* must be last in enum set */
};
99 | ||
/* per-feature ring bookkeeping; indexed by ixgbevf_ring_f_enum */
struct ixgbevf_ring_feature {
	int indices;	/* number of queues used by this feature — confirm
			 * against the queue-setup code */
	int mask;	/* index mask for the feature — confirm usage */
};
104 | ||
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */

/* the VF exposes a single Rx and a single Tx queue */
#define MAX_RX_QUEUES 1
#define MAX_TX_QUEUES 1

/* default and absolute limits for the descriptor ring lengths */
#define IXGBEVF_DEFAULT_TXD   1024
#define IXGBEVF_DEFAULT_RXD   512
#define IXGBEVF_MAX_TXD       4096
#define IXGBEVF_MIN_TXD       64
#define IXGBEVF_MAX_RXD       4096
#define IXGBEVF_MIN_RXD       64

/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_64    64     /* Used for packet split */
#define IXGBEVF_RXBUFFER_128   128    /* Used for packet split */
#define IXGBEVF_RXBUFFER_256   256    /* Used for packet split */
#define IXGBEVF_RXBUFFER_2048  2048
#define IXGBEVF_MAX_RXBUFFER   16384  /* largest size for single descriptor */

/* header buffer size used for packet split */
#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256

#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)

/* bit flags carried in tx_flags during transmit processing */
#define IXGBE_TX_FLAGS_CSUM		(u32)(1)
#define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
#define IXGBE_TX_FLAGS_FCOE		(u32)(1 << 4)
#define IXGBE_TX_FLAGS_FSO		(u32)(1 << 5)
/* the VLAN tag is carried in the upper 16 bits of tx_flags */
#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
/* NOTE(review): this mask selects bits 13-15, but with VLAN_MASK/SHIFT
 * above the tag occupies bits 16-31, which would put the 802.1p priority
 * bits at 29-31 (0xe0000000, as in the PF ixgbe driver).  Verify against
 * the code that applies this mask before relying on it. */
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT	16
138 | ||
/* MAX_MSIX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbevf_q_vector {
	struct ixgbevf_adapter *adapter;	/* backlink to owning adapter */
	struct napi_struct napi;		/* NAPI context for this vector */
	DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
	u8 rxr_count;	/* Rx ring count assigned to this vector */
	u8 txr_count;	/* Tx ring count assigned to this vector */
	u8 tx_itr;	/* per-direction ITR state — confirm semantics in
			 * the interrupt-throttle update code */
	u8 rx_itr;
	u32 eitr;	/* interrupts/sec value programmed for this vector */
	int v_idx;	/* vector index in list */
};
154 | ||
/* Convert between an interrupts/second rate and the EITR register
 * encoding.  The arithmetic is its own inverse, so one macro serves
 * both directions.  8 is the lowest value supported by all ixgbe
 * hardware and is substituted when a rate of 0 is requested.
 */
#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
	((_eitr) == 0 ? 8 : (1000000000 / (256 * (_eitr))))
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG

/* Number of free descriptors in ring R.  One slot is always kept in
 * reserve so that next_to_use can never wrap around onto
 * next_to_clean.
 */
#define IXGBE_DESC_UNUSED(R) \
	(((R)->next_to_clean <= (R)->next_to_use ? (R)->count : 0) + \
	 (R)->next_to_clean - (R)->next_to_use - 1)
166 | ||
/* typed accessors for descriptor i of ring R (advanced descriptor
 * formats; types come from the shared ixgbe hardware headers) */
#define IXGBE_RX_DESC_ADV(R, i)	    \
	(&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
#define IXGBE_TX_DESC_ADV(R, i)	    \
	(&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
#define IXGBE_TX_CTXTDESC_ADV(R, i)	    \
	(&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))

#define IXGBE_MAX_JUMBO_FRAME_SIZE        16128

/* MSI-X vector accounting: one extra (non-queue) vector beyond the
 * queue vectors, presumably for mailbox/other causes — confirm in the
 * interrupt-setup code */
#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)

#define MAX_MSIX_Q_VECTORS 2
/* NOTE(review): MAX_MSIX_COUNT (2) is smaller than
 * MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS = 3), which looks
 * inconsistent — verify against the MSI-X allocation code */
#define MAX_MSIX_COUNT 2

#define MIN_MSIX_Q_VECTORS 2
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
184 | ||
/* board specific private data structure */
struct ixgbevf_adapter {
	struct timer_list watchdog_timer;	/* periodic link/stat watchdog */
#ifdef NETIF_F_HW_VLAN_TX
	struct vlan_group *vlgrp;		/* registered VLAN group, if any */
#endif
	u16 bd_number;				/* board instance number */
	struct work_struct reset_task;		/* deferred adapter reset */
	struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
	char name[MAX_MSIX_COUNT][IFNAMSIZ + 9]; /* per-vector IRQ names */

	/* Interrupt Throttle Rate */
	u32 itr_setting;
	u16 eitr_low;		/* low/high thresholds for dynamic ITR —
				 * confirm units in the ITR update code */
	u16 eitr_high;

	/* TX */
	struct ixgbevf_ring *tx_ring;	/* One per active queue */
	int num_tx_queues;
	u64 restart_queue;		/* times the Tx queue was restarted */
	u64 hw_csum_tx_good;
	u64 lsc_int;			/* link status change interrupts */
	u64 hw_tso_ctxt;
	u64 hw_tso6_ctxt;
	u32 tx_timeout_count;
	bool detect_tx_hung;		/* arm Tx hang detection on next watchdog */

	/* RX */
	struct ixgbevf_ring *rx_ring;	/* One per active queue */
	int num_rx_queues;
	/* pool comments appear inherited from the PF driver; the VF only
	 * drives 82599 virtual functions — verify before relying on them */
	int num_rx_pools;               /* == num_rx_queues in 82598 */
	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	u64 hw_csum_rx_good;
	u64 non_eop_descs;		/* packets spanning multiple descriptors */
	int num_msix_vectors;
	int max_msix_q_vectors;         /* true count of q_vectors for device */
	struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
	struct msix_entry *msix_entries;

	u64 rx_hdr_split;		/* packets split into header + payload */
	u32 alloc_rx_page_failed;	/* Rx page allocation failures */
	u32 alloc_rx_buff_failed;	/* Rx skb allocation failures */

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1)
#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 1)
#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 2)
#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 3)
#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 4)
#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 5)
#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 6)
#define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 7)
#define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1 << 8)
	/* OS defined structs */
	struct net_device *netdev;
	struct pci_dev *pdev;

	/* structs defined in ixgbe_vf.h */
	struct ixgbe_hw hw;
	u16 msg_enable;			/* netif message level bitmask */
	struct ixgbevf_hw_stats stats;
	u64 zero_base;
	/* Interrupt Throttle Rate */
	u32 eitr_param;

	unsigned long state;		/* bit flags, see enum ixbgevf_state_t */
	u32 *config_space;		/* saved PCI config space — confirm
					 * usage in suspend/resume paths */
	u64 tx_busy;
	unsigned int tx_ring_count;	/* descriptors per Tx ring */
	unsigned int rx_ring_count;	/* descriptors per Rx ring */

	u32 link_speed;
	bool link_up;
	unsigned long link_check_timeout;

	struct work_struct watchdog_task;
	bool netdev_registered;
	bool dev_closed;
};
269 | ||
/* bit positions used in adapter->state (note: the "ixbgevf"
 * transposition typo in the tag name is kept as-is — renaming it would
 * break any code referring to the tag) */
enum ixbgevf_state_t {
	__IXGBEVF_TESTING,	/* ethtool self-test in progress */
	__IXGBEVF_RESETTING,	/* adapter reset in progress */
	__IXGBEVF_DOWN		/* interface is down */
};
275 | ||
/* index into the driver's board-info table; only the 82599 VF exists */
enum ixgbevf_boards {
	board_82599_vf,
};
279 | ||
/* board/MAC-operations descriptors defined in the VF core code */
extern struct ixgbevf_info ixgbevf_vf_info;
extern struct ixgbe_mac_operations ixgbevf_mbx_ops;

/* needed by ethtool.c */
extern char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];

/* bring-up / tear-down entry points implemented in the main module */
extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
/* per-ring resource (descriptor memory) allocation and release */
extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
				      struct ixgbevf_ring *);
extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
				      struct ixgbevf_ring *);
extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
				      struct ixgbevf_ring *);
extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
				      struct ixgbevf_ring *);
extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);

#ifdef ETHTOOL_OPS_COMPAT
extern int ethtool_ioctl(struct ifreq *ifr);

#endif
extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);

/* hw_dbg() prints via printk when built with DEBUG, otherwise expands
 * to a no-op statement */
#ifdef DEBUG
extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
#define hw_dbg(hw, format, arg...) \
	printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
#else
#define hw_dbg(hw, format, arg...) do {} while (0)
#endif
316 | ||
317 | #endif /* _IXGBEVF_H_ */ |