1 | /* |
2 | * Copyright (C) 2005 - 2009 ServerEngines | |
3 | * All rights reserved. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or | |
6 | * modify it under the terms of the GNU General Public License version 2 | |
7 | * as published by the Free Software Foundation. The full GNU General | |
8 | * Public License is included in this distribution in the file called COPYING. | |
9 | * | |
10 | * Contact Information: | |
11 | * linux-drivers@serverengines.com | |
12 | * | |
13 | * ServerEngines | |
14 | * 209 N. Fair Oaks Ave | |
15 | * Sunnyvale, CA 94085 | |
16 | */ | |
17 | ||
18 | #include "be.h" | |
19 | ||
20 | MODULE_VERSION(DRV_VER); | |
21 | MODULE_DEVICE_TABLE(pci, be_dev_ids); | |
22 | MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); | |
23 | MODULE_AUTHOR("ServerEngines Corporation"); | |
24 | MODULE_LICENSE("GPL"); | |
25 | ||
26 | static unsigned int rx_frag_size = 2048; | |
27 | module_param(rx_frag_size, uint, S_IRUGO); | |
28 | MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); | |
29 | ||
30 | #define BE_VENDOR_ID 0x19a2 | |
31 | #define BE2_DEVICE_ID_1 0x0211 | |
32 | static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { | |
33 | { PCI_DEVICE(BE_VENDOR_ID, BE2_DEVICE_ID_1) }, | |
34 | { 0 } | |
35 | }; | |
36 | MODULE_DEVICE_TABLE(pci, be_dev_ids); | |
37 | ||
38 | static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) | |
39 | { | |
40 | struct be_dma_mem *mem = &q->dma_mem; | |
41 | if (mem->va) | |
42 | pci_free_consistent(adapter->pdev, mem->size, | |
43 | mem->va, mem->dma); | |
44 | } | |
45 | ||
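| /* Allocate and zero DMA-coherent memory for a queue of 'len' entries, each 'entry_size' bytes */ |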
46 | static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, | |
47 | u16 len, u16 entry_size) | |
48 | { | |
49 | struct be_dma_mem *mem = &q->dma_mem; | |
50 | ||
51 | memset(q, 0, sizeof(*q)); | |
52 | q->len = len; | |
53 | q->entry_size = entry_size; | |
54 | mem->size = len * entry_size; | |
55 | mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma); | |
56 | if (!mem->va) | |
57 | return -1; | |
58 | memset(mem->va, 0, mem->size); | |
59 | return 0; | |
60 | } | |
61 | ||
62 | static inline void *queue_head_node(struct be_queue_info *q) | |
63 | { | |
64 | return q->dma_mem.va + q->head * q->entry_size; | |
65 | } | |
66 | ||
67 | static inline void *queue_tail_node(struct be_queue_info *q) | |
68 | { | |
69 | return q->dma_mem.va + q->tail * q->entry_size; | |
70 | } | |
71 | ||
72 | static inline void queue_head_inc(struct be_queue_info *q) | |
73 | { | |
74 | index_inc(&q->head, q->len); | |
75 | } | |
76 | ||
77 | static inline void queue_tail_inc(struct be_queue_info *q) | |
78 | { | |
79 | index_inc(&q->tail, q->len); | |
80 | } | |
81 | ||
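| /* Enable or disable host interrupts by flipping the hostintr bit in the membar control register */ |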
82 | static void be_intr_set(struct be_ctrl_info *ctrl, bool enable) | |
83 | { | |
84 | u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; | |
85 | u32 reg = ioread32(addr); | |
86 | u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; | |
87 | if (!enabled && enable) { | |
88 | reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; | |
89 | } else if (enabled && !enable) { | |
90 | reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; | |
91 | } else { | |
92 | printk(KERN_WARNING DRV_NAME | |
93 | ": bad value in membar_int_ctrl reg=0x%x\n", reg); | |
94 | return; | |
95 | } | |
96 | iowrite32(reg, addr); | |
97 | } | |
98 | ||
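| /* Ring the RX queue doorbell with the number of receive buffers just posted */ |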
99 | static void be_rxq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted) | |
100 | { | |
101 | u32 val = 0; | |
102 | val |= qid & DB_RQ_RING_ID_MASK; | |
103 | val |= posted << DB_RQ_NUM_POSTED_SHIFT; | |
104 | iowrite32(val, ctrl->db + DB_RQ_OFFSET); | |
105 | } | |
106 | ||
107 | static void be_txq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted) | |
108 | { | |
109 | u32 val = 0; | |
110 | val |= qid & DB_TXULP_RING_ID_MASK; | |
111 | val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT; | |
112 | iowrite32(val, ctrl->db + DB_TXULP1_OFFSET); | |
113 | } | |
114 | ||
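| /* Ring the EQ doorbell: ack num_popped entries and optionally re-arm the EQ and clear the interrupt */ |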
115 | static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid, | |
116 | bool arm, bool clear_int, u16 num_popped) | |
117 | { | |
118 | u32 val = 0; | |
119 | val |= qid & DB_EQ_RING_ID_MASK; | |
120 | if (arm) | |
121 | val |= 1 << DB_EQ_REARM_SHIFT; | |
122 | if (clear_int) | |
123 | val |= 1 << DB_EQ_CLR_SHIFT; | |
124 | val |= 1 << DB_EQ_EVNT_SHIFT; | |
125 | val |= num_popped << DB_EQ_NUM_POPPED_SHIFT; | |
126 | iowrite32(val, ctrl->db + DB_EQ_OFFSET); | |
127 | } | |
128 | ||
129 | static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, | |
130 | bool arm, u16 num_popped) | |
131 | { | |
132 | u32 val = 0; | |
133 | val |= qid & DB_CQ_RING_ID_MASK; | |
134 | if (arm) | |
135 | val |= 1 << DB_CQ_REARM_SHIFT; | |
136 | val |= num_popped << DB_CQ_NUM_POPPED_SHIFT; | |
137 | iowrite32(val, ctrl->db + DB_CQ_OFFSET); | |
138 | } | |
139 | ||
140 | ||
141 | static int be_mac_addr_set(struct net_device *netdev, void *p) | |
142 | { | |
143 | struct be_adapter *adapter = netdev_priv(netdev); | |
144 | struct sockaddr *addr = p; | |
145 | int status = 0; | |
146 | ||
147 | if (netif_running(netdev)) { | |
148 | status = be_cmd_pmac_del(&adapter->ctrl, adapter->if_handle, | |
149 | adapter->pmac_id); | |
150 | if (status) | |
151 | return status; | |
152 | ||
153 | status = be_cmd_pmac_add(&adapter->ctrl, (u8 *)addr->sa_data, | |
154 | adapter->if_handle, &adapter->pmac_id); | |
155 | } | |
156 | ||
157 | if (!status) | |
158 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | |
159 | ||
160 | return status; | |
161 | } | |
162 | ||
163 | static void netdev_stats_update(struct be_adapter *adapter) | |
164 | { | |
165 | struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va); | |
166 | struct be_rxf_stats *rxf_stats = &hw_stats->rxf; | |
167 | struct be_port_rxf_stats *port_stats = | |
168 | &rxf_stats->port[adapter->port_num]; | |
169 | struct net_device_stats *dev_stats = &adapter->stats.net_stats; | |
170 | ||
171 | dev_stats->rx_packets = port_stats->rx_total_frames; | |
172 | dev_stats->tx_packets = port_stats->tx_unicastframes + | |
173 | port_stats->tx_multicastframes + port_stats->tx_broadcastframes; | |
174 | dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 | | |
175 | (u64) port_stats->rx_bytes_lsd; | |
176 | dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 | | |
177 | (u64) port_stats->tx_bytes_lsd; | |
178 | ||
179 | /* bad pkts received */ | |
180 | dev_stats->rx_errors = port_stats->rx_crc_errors + | |
181 | port_stats->rx_alignment_symbol_errors + | |
182 | port_stats->rx_in_range_errors + | |
183 | port_stats->rx_out_range_errors + port_stats->rx_frame_too_long; | |
184 | ||
185 | /* packet transmit problems */ | |
186 | dev_stats->tx_errors = 0; | |
187 | ||
188 | /* no space in linux buffers */ | |
189 | dev_stats->rx_dropped = 0; | |
190 | ||
191 | /* no space available in linux */ | |
192 | dev_stats->tx_dropped = 0; | |
193 | ||
194 | dev_stats->multicast = port_stats->tx_multicastframes; | |
195 | dev_stats->collisions = 0; | |
196 | ||
197 | /* detailed rx errors */ | |
198 | dev_stats->rx_length_errors = port_stats->rx_in_range_errors + | |
199 | port_stats->rx_out_range_errors + port_stats->rx_frame_too_long; | |
200 | /* receive ring buffer overflow */ | |
201 | dev_stats->rx_over_errors = 0; | |
202 | dev_stats->rx_crc_errors = port_stats->rx_crc_errors; | |
203 | ||
204 | /* frame alignment errors */ | |
205 | dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors; | |
206 | /* receiver fifo overrun */ | |
207 | /* drops_no_pbuf is not per i/f, it's per BE card */ |
208 | dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow + | |
209 | port_stats->rx_input_fifo_overflow + | |
210 | rxf_stats->rx_drops_no_pbuf; | |
211 | /* receiver missed packets */ |
212 | dev_stats->rx_missed_errors = 0; | |
213 | /* detailed tx_errors */ | |
214 | dev_stats->tx_aborted_errors = 0; | |
215 | dev_stats->tx_carrier_errors = 0; | |
216 | dev_stats->tx_fifo_errors = 0; | |
217 | dev_stats->tx_heartbeat_errors = 0; | |
218 | dev_stats->tx_window_errors = 0; | |
219 | } | |
220 | ||
221 | static void be_link_status_update(struct be_adapter *adapter) | |
222 | { | |
223 | struct be_link_info *prev = &adapter->link; | |
224 | struct be_link_info now = { 0 }; | |
225 | struct net_device *netdev = adapter->netdev; | |
226 | ||
227 | be_cmd_link_status_query(&adapter->ctrl, &now); | |
228 | ||
229 | /* If link came up or went down */ | |
230 | if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO || | |
231 | prev->speed == PHY_LINK_SPEED_ZERO)) { | |
232 | if (now.speed == PHY_LINK_SPEED_ZERO) { | |
233 | netif_stop_queue(netdev); | |
234 | netif_carrier_off(netdev); | |
235 | printk(KERN_INFO "%s: Link down\n", netdev->name); | |
236 | } else { | |
237 | netif_start_queue(netdev); | |
238 | netif_carrier_on(netdev); | |
239 | printk(KERN_INFO "%s: Link up\n", netdev->name); | |
240 | } | |
241 | } | |
242 | *prev = now; | |
243 | } | |
244 | ||
245 | /* Update the EQ delay in BE based on the RX frags consumed / sec */ |
246 | static void be_rx_eqd_update(struct be_adapter *adapter) | |
247 | { | |
248 | struct be_ctrl_info *ctrl = &adapter->ctrl; |
249 | struct be_eq_obj *rx_eq = &adapter->rx_eq; | |
250 | struct be_drvr_stats *stats = &adapter->stats.drvr_stats; | |
251 | ulong now = jiffies; |
252 | u32 eqd; | |
253 | ||
254 | if (!rx_eq->enable_aic) | |
255 | return; | |
256 | ||
257 | /* Wrapped around */ | |
258 | if (time_before(now, stats->rx_fps_jiffies)) { | |
259 | stats->rx_fps_jiffies = now; | |
260 | return; | |
261 | } | |
262 | ||
263 | /* Update once a second */ |
264 | if ((now - stats->rx_fps_jiffies) < HZ) |
265 | return; |
266 | ||
267 | stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) / |
268 | ((now - stats->rx_fps_jiffies) / HZ); |
269 | ||
270 | stats->rx_fps_jiffies = now; |
271 | stats->be_prev_rx_frags = stats->be_rx_frags; |
272 | eqd = stats->be_rx_fps / 110000; | |
273 | eqd = eqd << 3; | |
274 | if (eqd > rx_eq->max_eqd) | |
275 | eqd = rx_eq->max_eqd; | |
276 | if (eqd < rx_eq->min_eqd) | |
277 | eqd = rx_eq->min_eqd; | |
278 | if (eqd < 10) | |
279 | eqd = 0; | |
280 | if (eqd != rx_eq->cur_eqd) | |
281 | be_cmd_modify_eqd(ctrl, rx_eq->q.id, eqd); | |
282 | ||
283 | rx_eq->cur_eqd = eqd; | |
284 | } | |
285 | ||
286 | static struct net_device_stats *be_get_stats(struct net_device *dev) |
287 | { | |
288 | struct be_adapter *adapter = netdev_priv(dev); | |
289 | ||
290 | return &adapter->stats.net_stats; | |
291 | } | |
292 | ||
293 | static void be_tx_rate_update(struct be_adapter *adapter) |
294 | { | |
295 | struct be_drvr_stats *stats = drvr_stats(adapter); | |
296 | ulong now = jiffies; | |
297 | ||
298 | /* Wrapped around? */ | |
299 | if (time_before(now, stats->be_tx_jiffies)) { | |
300 | stats->be_tx_jiffies = now; | |
301 | return; | |
302 | } | |
303 | ||
304 | /* Update tx rate once in two seconds */ | |
305 | if ((now - stats->be_tx_jiffies) > 2 * HZ) { | |
306 | u32 r; | |
307 | r = (stats->be_tx_bytes - stats->be_tx_bytes_prev) / | |
308 | ((now - stats->be_tx_jiffies) / HZ); | |
309 | r = r / 1000000; /* M bytes/s */ | |
310 | stats->be_tx_rate = r * 8; /* M bits/s */ | |
311 | stats->be_tx_jiffies = now; | |
312 | stats->be_tx_bytes_prev = stats->be_tx_bytes; | |
313 | } | |
314 | } | |
315 | ||
316 | static void be_tx_stats_update(struct be_adapter *adapter, |
317 | u32 wrb_cnt, u32 copied, bool stopped) | |
318 | { | |
319 | struct be_drvr_stats *stats = drvr_stats(adapter); |
320 | stats->be_tx_reqs++; |
321 | stats->be_tx_wrbs += wrb_cnt; | |
322 | stats->be_tx_bytes += copied; | |
323 | if (stopped) | |
324 | stats->be_tx_stops++; | |
325 | } |
326 | ||
327 | /* Determine number of WRB entries needed to xmit data in an skb */ | |
328 | static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy) | |
329 | { | |
330 | int cnt = 0; | |
331 | while (skb) { | |
332 | if (skb->len > skb->data_len) | |
333 | cnt++; | |
334 | cnt += skb_shinfo(skb)->nr_frags; | |
335 | skb = skb_shinfo(skb)->frag_list; | |
336 | } | |
337 | /* to account for hdr wrb */ | |
338 | cnt++; | |
339 | if (cnt & 1) { | |
340 | /* add a dummy to make it an even num */ | |
341 | cnt++; | |
342 | *dummy = true; | |
343 | } else | |
344 | *dummy = false; | |
345 | BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT); | |
346 | return cnt; | |
347 | } | |
348 | ||
349 | static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len) | |
350 | { | |
351 | wrb->frag_pa_hi = upper_32_bits(addr); | |
352 | wrb->frag_pa_lo = addr & 0xFFFFFFFF; | |
353 | wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK; | |
354 | } | |
355 | ||
356 | static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb, | |
357 | bool vlan, u32 wrb_cnt, u32 len) | |
358 | { | |
359 | memset(hdr, 0, sizeof(*hdr)); | |
360 | ||
361 | AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1); | |
362 | ||
363 | if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) { | |
364 | AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1); | |
365 | AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss, | |
366 | hdr, skb_shinfo(skb)->gso_size); | |
367 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
368 | if (is_tcp_pkt(skb)) | |
369 | AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); | |
370 | else if (is_udp_pkt(skb)) | |
371 | AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1); | |
372 | } | |
373 | ||
374 | if (vlan && vlan_tx_tag_present(skb)) { | |
375 | AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1); | |
376 | AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, | |
377 | hdr, vlan_tx_tag_get(skb)); | |
378 | } | |
379 | ||
380 | AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1); | |
381 | AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1); | |
382 | AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt); | |
383 | AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len); | |
384 | } | |
385 | ||
386 | ||
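| /* DMA-map the skb head and each fragment, filling one WRB per mapping; the first slot is reserved for the header WRB */ |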
387 | static int make_tx_wrbs(struct be_adapter *adapter, | |
388 | struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb) | |
389 | { | |
390 | u64 busaddr; | |
391 | u32 i, copied = 0; | |
392 | struct pci_dev *pdev = adapter->pdev; | |
393 | struct sk_buff *first_skb = skb; | |
394 | struct be_queue_info *txq = &adapter->tx_obj.q; | |
395 | struct be_eth_wrb *wrb; | |
396 | struct be_eth_hdr_wrb *hdr; | |
397 | ||
398 | atomic_add(wrb_cnt, &txq->used); | |
399 | hdr = queue_head_node(txq); | |
400 | queue_head_inc(txq); | |
401 | ||
402 | while (skb) { | |
403 | if (skb->len > skb->data_len) { | |
404 | int len = skb->len - skb->data_len; | |
405 | busaddr = pci_map_single(pdev, skb->data, len, | |
406 | PCI_DMA_TODEVICE); | |
407 | wrb = queue_head_node(txq); | |
408 | wrb_fill(wrb, busaddr, len); | |
409 | be_dws_cpu_to_le(wrb, sizeof(*wrb)); | |
410 | queue_head_inc(txq); | |
411 | copied += len; | |
412 | } | |
413 | ||
414 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
415 | struct skb_frag_struct *frag = | |
416 | &skb_shinfo(skb)->frags[i]; | |
417 | busaddr = pci_map_page(pdev, frag->page, | |
418 | frag->page_offset, | |
419 | frag->size, PCI_DMA_TODEVICE); | |
420 | wrb = queue_head_node(txq); | |
421 | wrb_fill(wrb, busaddr, frag->size); | |
422 | be_dws_cpu_to_le(wrb, sizeof(*wrb)); | |
423 | queue_head_inc(txq); | |
424 | copied += frag->size; | |
425 | } | |
426 | skb = skb_shinfo(skb)->frag_list; | |
427 | } | |
428 | ||
429 | if (dummy_wrb) { | |
430 | wrb = queue_head_node(txq); | |
431 | wrb_fill(wrb, 0, 0); | |
432 | be_dws_cpu_to_le(wrb, sizeof(*wrb)); | |
433 | queue_head_inc(txq); | |
434 | } | |
435 | ||
436 | wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false, | |
437 | wrb_cnt, copied); | |
438 | be_dws_cpu_to_le(hdr, sizeof(*hdr)); | |
439 | ||
440 | return copied; | |
441 | } | |
442 | ||
443 | static int be_xmit(struct sk_buff *skb, struct net_device *netdev) | |
444 | { | |
445 | struct be_adapter *adapter = netdev_priv(netdev); | |
446 | struct be_tx_obj *tx_obj = &adapter->tx_obj; | |
447 | struct be_queue_info *txq = &tx_obj->q; | |
448 | u32 wrb_cnt = 0, copied = 0; | |
449 | u32 start = txq->head; | |
450 | bool dummy_wrb, stopped = false; | |
451 | ||
452 | wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb); | |
453 | ||
454 | copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb); | |
455 | ||
456 | /* record the sent skb in the sent_skb table */ | |
457 | BUG_ON(tx_obj->sent_skb_list[start]); | |
458 | tx_obj->sent_skb_list[start] = skb; | |
459 | ||
460 | /* Ensure that txq has space for the next skb; else stop the queue |
461 | * *BEFORE* ringing the tx doorbell, so that we serialize the |
462 | * tx compls of the current transmit which will wake up the queue |
463 | */ | |
464 | if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) { | |
465 | netif_stop_queue(netdev); | |
466 | stopped = true; | |
467 | } | |
468 | ||
469 | be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt); | |
470 | ||
471 | netdev->trans_start = jiffies; | |
472 | ||
473 | be_tx_stats_update(adapter, wrb_cnt, copied, stopped); | |
474 | return NETDEV_TX_OK; | |
475 | } | |
476 | ||
477 | static int be_change_mtu(struct net_device *netdev, int new_mtu) | |
478 | { | |
479 | struct be_adapter *adapter = netdev_priv(netdev); | |
480 | if (new_mtu < BE_MIN_MTU || | |
481 | new_mtu > BE_MAX_JUMBO_FRAME_SIZE) { | |
482 | dev_info(&adapter->pdev->dev, | |
483 | "MTU must be between %d and %d bytes\n", | |
484 | BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE); | |
485 | return -EINVAL; | |
486 | } | |
487 | dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n", | |
488 | netdev->mtu, new_mtu); | |
489 | netdev->mtu = new_mtu; | |
490 | return 0; | |
491 | } | |
492 | ||
493 | /* | |
494 | * if there are BE_NUM_VLANS_SUPPORTED or fewer VLANs configured, |
495 | * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured, | |
496 | * set the BE in promiscuous VLAN mode. | |
497 | */ | |
498 | static void be_vid_config(struct net_device *netdev) |
499 | { |
500 | struct be_adapter *adapter = netdev_priv(netdev); | |
501 | u16 vtag[BE_NUM_VLANS_SUPPORTED]; | |
502 | u16 ntags = 0, i; | |
503 | ||
504 | if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) { | |
505 | /* Construct VLAN Table to give to HW */ | |
506 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { | |
507 | if (adapter->vlan_tag[i]) { | |
508 | vtag[ntags] = cpu_to_le16(i); | |
509 | ntags++; | |
510 | } | |
511 | } | |
512 | be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle, | |
513 | vtag, ntags, 1, 0); | |
514 | } else { | |
515 | be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle, | |
516 | NULL, 0, 1, 1); | |
517 | } | |
518 | } | |
519 | ||
520 | static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp) | |
521 | { | |
522 | struct be_adapter *adapter = netdev_priv(netdev); | |
523 | struct be_eq_obj *rx_eq = &adapter->rx_eq; | |
524 | struct be_eq_obj *tx_eq = &adapter->tx_eq; | |
525 | struct be_ctrl_info *ctrl = &adapter->ctrl; | |
526 | ||
527 | be_eq_notify(ctrl, rx_eq->q.id, false, false, 0); | |
528 | be_eq_notify(ctrl, tx_eq->q.id, false, false, 0); | |
529 | adapter->vlan_grp = grp; | |
530 | be_eq_notify(ctrl, rx_eq->q.id, true, false, 0); | |
531 | be_eq_notify(ctrl, tx_eq->q.id, true, false, 0); | |
532 | } | |
533 | ||
534 | static void be_vlan_add_vid(struct net_device *netdev, u16 vid) | |
535 | { | |
536 | struct be_adapter *adapter = netdev_priv(netdev); | |
537 | ||
538 | adapter->num_vlans++; | |
539 | adapter->vlan_tag[vid] = 1; | |
540 | ||
541 | be_vid_config(netdev); |
542 | } |
543 | ||
544 | static void be_vlan_rem_vid(struct net_device *netdev, u16 vid) | |
545 | { | |
546 | struct be_adapter *adapter = netdev_priv(netdev); | |
547 | ||
548 | adapter->num_vlans--; | |
549 | adapter->vlan_tag[vid] = 0; | |
550 | ||
551 | vlan_group_set_device(adapter->vlan_grp, vid, NULL); | |
552 | be_vid_config(netdev); |
553 | } |
554 | ||
555 | static void be_set_multicast_filter(struct net_device *netdev) | |
556 | { | |
557 | struct be_adapter *adapter = netdev_priv(netdev); | |
558 | struct dev_mc_list *mc_ptr; | |
559 | u8 mac_addr[32][ETH_ALEN]; | |
560 | int i = 0; | |
561 | ||
562 | if (netdev->flags & IFF_ALLMULTI) { | |
563 | /* set BE in Multicast promiscuous */ | |
564 | be_cmd_mcast_mac_set(&adapter->ctrl, | |
565 | adapter->if_handle, NULL, 0, true); | |
566 | return; | |
567 | } | |
568 | ||
569 | for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { | |
570 | memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN); | |
571 | if (++i >= 32) { | |
572 | be_cmd_mcast_mac_set(&adapter->ctrl, | |
573 | adapter->if_handle, &mac_addr[0][0], i, false); | |
574 | i = 0; | |
575 | } | |
576 | ||
577 | } | |
578 | ||
579 | if (i) { | |
580 | /* reset the promiscuous mode also. */ | |
581 | be_cmd_mcast_mac_set(&adapter->ctrl, | |
582 | adapter->if_handle, &mac_addr[0][0], i, false); | |
583 | } | |
584 | } | |
585 | ||
586 | static void be_set_multicast_list(struct net_device *netdev) | |
587 | { | |
588 | struct be_adapter *adapter = netdev_priv(netdev); | |
589 | ||
590 | if (netdev->flags & IFF_PROMISC) { | |
591 | be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1); | |
592 | } else { | |
593 | be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0); | |
594 | be_set_multicast_filter(netdev); | |
595 | } | |
596 | } | |
597 | ||
598 | static void be_rx_rate_update(struct be_adapter *adapter) |
599 | { |
600 | struct be_drvr_stats *stats = drvr_stats(adapter); |
601 | ulong now = jiffies; |
602 | u32 rate; |
603 | ||
604 | /* Wrapped around */ |
605 | if (time_before(now, stats->be_rx_jiffies)) { | |
606 | stats->be_rx_jiffies = now; | |
607 | return; | |
608 | } | |
609 | ||
610 | /* Update the rate once in two seconds */ |
611 | if ((now - stats->be_rx_jiffies) < 2 * HZ) |
612 | return; |
613 | ||
614 | rate = (stats->be_rx_bytes - stats->be_rx_bytes_prev) / |
615 | ((now - stats->be_rx_jiffies) / HZ); |
616 | rate = rate / 1000000; /* MB/Sec */ | |
617 | stats->be_rx_rate = rate * 8; /* Mega Bits/Sec */ | |
618 | stats->be_rx_jiffies = now; | |
619 | stats->be_rx_bytes_prev = stats->be_rx_bytes; |
620 | } | |
621 | ||
622 | static void be_rx_stats_update(struct be_adapter *adapter, |
623 | u32 pktsize, u16 numfrags) | |
624 | { | |
625 | struct be_drvr_stats *stats = drvr_stats(adapter); | |
626 | ||
627 | stats->be_rx_compl++; | |
628 | stats->be_rx_frags += numfrags; | |
629 | stats->be_rx_bytes += pktsize; | |
630 | } | |
631 | ||
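| /* Look up the page_info for a posted RX frag; the backing page is unmapped when this frag is its last user */ |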
632 | static struct be_rx_page_info * |
633 | get_rx_page_info(struct be_adapter *adapter, u16 frag_idx) | |
634 | { | |
635 | struct be_rx_page_info *rx_page_info; | |
636 | struct be_queue_info *rxq = &adapter->rx_obj.q; | |
637 | ||
638 | rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx]; | |
639 | BUG_ON(!rx_page_info->page); | |
640 | ||
641 | if (rx_page_info->last_page_user) | |
642 | pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus), | |
643 | adapter->big_page_size, PCI_DMA_FROMDEVICE); | |
644 | ||
645 | atomic_dec(&rxq->used); | |
646 | return rx_page_info; | |
647 | } | |
648 | ||
649 | /* Throw away the data in the Rx completion */ |
650 | static void be_rx_compl_discard(struct be_adapter *adapter, | |
651 | struct be_eth_rx_compl *rxcp) | |
652 | { | |
653 | struct be_queue_info *rxq = &adapter->rx_obj.q; | |
654 | struct be_rx_page_info *page_info; | |
655 | u16 rxq_idx, i, num_rcvd; | |
656 | ||
657 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); | |
658 | num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); | |
659 | ||
660 | for (i = 0; i < num_rcvd; i++) { | |
661 | page_info = get_rx_page_info(adapter, rxq_idx); | |
662 | put_page(page_info->page); | |
663 | memset(page_info, 0, sizeof(*page_info)); | |
664 | index_inc(&rxq_idx, rxq->len); | |
665 | } | |
666 | } | |
667 | ||
668 | /* | |
669 | * skb_fill_rx_data forms a complete skb for an ether frame | |
670 | * indicated by rxcp. | |
671 | */ | |
672 | static void skb_fill_rx_data(struct be_adapter *adapter, | |
673 | struct sk_buff *skb, struct be_eth_rx_compl *rxcp) | |
674 | { | |
675 | struct be_queue_info *rxq = &adapter->rx_obj.q; | |
676 | struct be_rx_page_info *page_info; | |
677 | u16 rxq_idx, i, num_rcvd; | |
678 | u32 pktsize, hdr_len, curr_frag_len; | |
679 | u8 *start; | |
680 | ||
681 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); | |
682 | pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); | |
683 | num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); | |
684 | ||
685 | page_info = get_rx_page_info(adapter, rxq_idx); | |
686 | ||
687 | start = page_address(page_info->page) + page_info->page_offset; | |
688 | prefetch(start); | |
689 | ||
690 | /* Copy data in the first descriptor of this completion */ | |
691 | curr_frag_len = min(pktsize, rx_frag_size); | |
692 | ||
693 | /* Copy the header portion into skb_data */ | |
694 | hdr_len = min((u32)BE_HDR_LEN, curr_frag_len); | |
695 | memcpy(skb->data, start, hdr_len); | |
696 | skb->len = curr_frag_len; | |
697 | if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */ | |
698 | /* Complete packet has now been moved to data */ | |
699 | put_page(page_info->page); | |
700 | skb->data_len = 0; | |
701 | skb->tail += curr_frag_len; | |
702 | } else { | |
703 | skb_shinfo(skb)->nr_frags = 1; | |
704 | skb_shinfo(skb)->frags[0].page = page_info->page; | |
705 | skb_shinfo(skb)->frags[0].page_offset = | |
706 | page_info->page_offset + hdr_len; | |
707 | skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len; | |
708 | skb->data_len = curr_frag_len - hdr_len; | |
709 | skb->tail += hdr_len; | |
710 | } | |
711 | memset(page_info, 0, sizeof(*page_info)); | |
712 | ||
713 | if (pktsize <= rx_frag_size) { | |
714 | BUG_ON(num_rcvd != 1); | |
715 | return; | |
716 | } | |
717 | ||
718 | /* More frags present for this completion */ | |
719 | pktsize -= curr_frag_len; /* account for above copied frag */ | |
720 | for (i = 1; i < num_rcvd; i++) { | |
721 | index_inc(&rxq_idx, rxq->len); | |
722 | page_info = get_rx_page_info(adapter, rxq_idx); | |
723 | ||
724 | curr_frag_len = min(pktsize, rx_frag_size); | |
725 | ||
726 | skb_shinfo(skb)->frags[i].page = page_info->page; | |
727 | skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset; | |
728 | skb_shinfo(skb)->frags[i].size = curr_frag_len; | |
729 | skb->len += curr_frag_len; | |
730 | skb->data_len += curr_frag_len; | |
731 | skb_shinfo(skb)->nr_frags++; | |
732 | pktsize -= curr_frag_len; | |
733 | ||
734 | memset(page_info, 0, sizeof(*page_info)); | |
735 | } | |
736 | ||
737 | be_rx_stats_update(adapter, pktsize, num_rcvd); |
738 | return; |
739 | } | |
740 | ||
741 | /* Process the RX completion indicated by rxcp when LRO is disabled */ | |
742 | static void be_rx_compl_process(struct be_adapter *adapter, | |
743 | struct be_eth_rx_compl *rxcp) | |
744 | { | |
745 | struct sk_buff *skb; | |
746 | u32 vtp, vid; | |
747 | int l4_cksm; | |
748 | ||
749 | l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp); | |
750 | vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); | |
751 | ||
752 | skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN); | |
753 | if (!skb) { | |
754 | if (net_ratelimit()) | |
755 | dev_warn(&adapter->pdev->dev, "skb alloc failed\n"); | |
756 | be_rx_compl_discard(adapter, rxcp); | |
757 | return; | |
758 | } | |
759 | ||
760 | skb_reserve(skb, NET_IP_ALIGN); | |
761 | ||
762 | skb_fill_rx_data(adapter, skb, rxcp); | |
763 | ||
764 | if (l4_cksm && adapter->rx_csum) | |
765 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
766 | else | |
767 | skb->ip_summed = CHECKSUM_NONE; | |
768 | ||
769 | skb->truesize = skb->len + sizeof(struct sk_buff); | |
770 | skb->protocol = eth_type_trans(skb, adapter->netdev); | |
771 | skb->dev = adapter->netdev; | |
772 | ||
773 | if (vtp) { | |
774 | if (!adapter->vlan_grp || adapter->num_vlans == 0) { | |
775 | kfree_skb(skb); | |
776 | return; | |
777 | } | |
778 | vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); | |
779 | vid = be16_to_cpu(vid); | |
780 | vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); | |
781 | } else { | |
782 | netif_receive_skb(skb); | |
783 | } | |
784 | ||
785 | adapter->netdev->last_rx = jiffies; | |
786 | ||
787 | return; | |
788 | } | |
789 | ||
790 | /* Process the RX completion indicated by rxcp when LRO is enabled */ | |
791 | static void be_rx_compl_process_lro(struct be_adapter *adapter, | |
792 | struct be_eth_rx_compl *rxcp) | |
793 | { | |
794 | struct be_rx_page_info *page_info; | |
795 | struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME]; | |
796 | struct be_queue_info *rxq = &adapter->rx_obj.q; | |
797 | u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; | |
798 | u16 i, rxq_idx = 0, vid; | |
799 | ||
800 | num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); | |
801 | pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); | |
802 | vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); | |
803 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); | |
804 | ||
805 | remaining = pkt_size; | |
806 | for (i = 0; i < num_rcvd; i++) { | |
807 | page_info = get_rx_page_info(adapter, rxq_idx); | |
808 | ||
809 | curr_frag_len = min(remaining, rx_frag_size); | |
810 | ||
811 | rx_frags[i].page = page_info->page; | |
812 | rx_frags[i].page_offset = page_info->page_offset; | |
813 | rx_frags[i].size = curr_frag_len; | |
814 | remaining -= curr_frag_len; | |
815 | ||
816 | index_inc(&rxq_idx, rxq->len); | |
817 | ||
818 | memset(page_info, 0, sizeof(*page_info)); | |
819 | } | |
820 | ||
821 | if (likely(!vlanf)) { | |
822 | lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size, | |
823 | pkt_size, NULL, 0); | |
824 | } else { | |
825 | vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); | |
826 | vid = be16_to_cpu(vid); | |
827 | ||
828 | if (!adapter->vlan_grp || adapter->num_vlans == 0) | |
829 | return; | |
830 | ||
831 | lro_vlan_hwaccel_receive_frags(&adapter->rx_obj.lro_mgr, | |
832 | rx_frags, pkt_size, pkt_size, adapter->vlan_grp, | |
833 | vid, NULL, 0); | |
834 | } | |
835 | ||
836 | be_rx_stats_update(adapter, pkt_size, num_rcvd); |
837 | return; |
838 | } | |
839 | ||
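| /* Return the next valid RX completion (byte-swapped to CPU order) or NULL; the entry is marked consumed */ |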
840 | static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) | |
841 | { | |
842 | struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq); | |
843 | ||
844 | if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0) | |
845 | return NULL; | |
846 | ||
847 | be_dws_le_to_cpu(rxcp, sizeof(*rxcp)); | |
848 | ||
849 | rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0; | |
850 | ||
851 | queue_tail_inc(&adapter->rx_obj.cq); | |
852 | return rxcp; | |
853 | } | |
854 | ||
855 | static inline struct page *be_alloc_pages(u32 size) | |
856 | { | |
857 | gfp_t alloc_flags = GFP_ATOMIC; | |
858 | u32 order = get_order(size); | |
859 | if (order > 0) | |
860 | alloc_flags |= __GFP_COMP; | |
861 | return alloc_pages(alloc_flags, order); | |
862 | } | |
863 | ||
864 | /* | |
865 | * Allocate a page, split it to fragments of size rx_frag_size and post as | |
866 | * receive buffers to BE | |
867 | */ | |
868 | static void be_post_rx_frags(struct be_adapter *adapter) | |
869 | { | |
870 | struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl; | |
871 | struct be_rx_page_info *page_info = NULL; | |
872 | struct be_queue_info *rxq = &adapter->rx_obj.q; | |
873 | struct page *pagep = NULL; | |
874 | struct be_eth_rx_d *rxd; | |
875 | u64 page_dmaaddr = 0, frag_dmaaddr; | |
876 | u32 posted, page_offset = 0; | |
877 | ||
878 | page_info = &page_info_tbl[rxq->head]; |
879 | for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { | |
880 | if (!pagep) { | |
881 | pagep = be_alloc_pages(adapter->big_page_size); | |
882 | if (unlikely(!pagep)) { | |
883 | drvr_stats(adapter)->be_ethrx_post_fail++; | |
884 | break; | |
885 | } | |
886 | page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0, | |
887 | adapter->big_page_size, | |
888 | PCI_DMA_FROMDEVICE); | |
889 | page_info->page_offset = 0; | |
890 | } else { | |
891 | get_page(pagep); | |
892 | page_info->page_offset = page_offset + rx_frag_size; | |
893 | } | |
894 | page_offset = page_info->page_offset; | |
895 | page_info->page = pagep; | |
896 | pci_unmap_addr_set(page_info, bus, page_dmaaddr); | |
897 | frag_dmaaddr = page_dmaaddr + page_info->page_offset; | |
898 | ||
899 | rxd = queue_head_node(rxq); | |
900 | rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); | |
901 | rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); | |
902 | queue_head_inc(rxq); | |
903 | ||
904 | /* Any space left in the current big page for another frag? */ | |
905 | if ((page_offset + rx_frag_size + rx_frag_size) > | |
906 | adapter->big_page_size) { | |
907 | pagep = NULL; | |
908 | page_info->last_page_user = true; | |
909 | } | |
910 | page_info = &page_info_tbl[rxq->head]; | |
911 | } | |
912 | if (pagep) | |
913 | page_info->last_page_user = true; | |
914 | ||
915 | if (posted) { | |
916 | atomic_add(posted, &rxq->used); |
917 | be_rxq_notify(&adapter->ctrl, rxq->id, posted); |
918 | } else if (atomic_read(&rxq->used) == 0) { | |
919 | /* Let be_worker replenish when memory is available */ | |
920 | adapter->rx_post_starved = true; | |
921 | } |
922 | ||
923 | return; | |
924 | } | |
925 | ||
926 | static struct be_eth_tx_compl * | |
927 | be_tx_compl_get(struct be_adapter *adapter) | |
928 | { | |
929 | struct be_queue_info *tx_cq = &adapter->tx_obj.cq; | |
930 | struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq); | |
931 | ||
932 | if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) | |
933 | return NULL; | |
934 | ||
935 | be_dws_le_to_cpu(txcp, sizeof(*txcp)); | |
936 | ||
937 | txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; | |
938 | ||
939 | queue_tail_inc(tx_cq); | |
940 | return txcp; | |
941 | } | |
942 | ||
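| /* Reclaim the WRBs of the skb ending at last_index: unmap each buffer, credit the queue and free the skb */ |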
943 | static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index) | |
944 | { | |
945 | struct be_queue_info *txq = &adapter->tx_obj.q; | |
946 | struct be_eth_wrb *wrb; | |
947 | struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list; | |
948 | struct sk_buff *sent_skb; | |
949 | u64 busaddr; | |
950 | u16 cur_index, num_wrbs = 0; | |
951 | ||
952 | cur_index = txq->tail; | |
953 | sent_skb = sent_skbs[cur_index]; | |
954 | BUG_ON(!sent_skb); | |
955 | sent_skbs[cur_index] = NULL; | |
956 | ||
957 | do { | |
958 | cur_index = txq->tail; | |
959 | wrb = queue_tail_node(txq); | |
960 | be_dws_le_to_cpu(wrb, sizeof(*wrb)); | |
961 | busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo; | |
962 | if (busaddr != 0) { | |
963 | pci_unmap_single(adapter->pdev, busaddr, | |
964 | wrb->frag_len, PCI_DMA_TODEVICE); | |
965 | } | |
966 | num_wrbs++; | |
967 | queue_tail_inc(txq); | |
968 | } while (cur_index != last_index); | |
969 | ||
970 | atomic_sub(num_wrbs, &txq->used); | |
971 | ||
972 | kfree_skb(sent_skb); | |
973 | } | |
974 | ||
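| /* Drain outstanding RX completions and free any posted buffers that were never consumed */ |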
975 | static void be_rx_q_clean(struct be_adapter *adapter) | |
976 | { | |
977 | struct be_rx_page_info *page_info; | |
978 | struct be_queue_info *rxq = &adapter->rx_obj.q; | |
979 | struct be_queue_info *rx_cq = &adapter->rx_obj.cq; | |
980 | struct be_eth_rx_compl *rxcp; | |
981 | u16 tail; | |
982 | ||
983 | /* First clean up pending rx completions */ |
984 | while ((rxcp = be_rx_compl_get(adapter)) != NULL) { | |
985 | be_rx_compl_discard(adapter, rxcp); | |
986 | be_cq_notify(&adapter->ctrl, rx_cq->id, true, 1); | |
987 | } | |
988 | ||
989 | /* Then free posted rx buffers that were not used */ |
990 | tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len; | |
991 | for (; tail != rxq->head; index_inc(&tail, rxq->len)) { | |
992 | page_info = get_rx_page_info(adapter, tail); | |
993 | put_page(page_info->page); | |
994 | memset(page_info, 0, sizeof(*page_info)); | |
995 | } | |
996 | BUG_ON(atomic_read(&rxq->used)); | |
997 | } | |
998 | ||
999 | static void be_tx_q_clean(struct be_adapter *adapter) | |
1000 | { | |
1001 | struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list; | |
1002 | struct sk_buff *sent_skb; | |
1003 | struct be_queue_info *txq = &adapter->tx_obj.q; | |
1004 | u16 last_index; | |
1005 | bool dummy_wrb; | |
1006 | ||
1007 | while (atomic_read(&txq->used)) { | |
1008 | sent_skb = sent_skbs[txq->tail]; | |
1009 | last_index = txq->tail; | |
1010 | index_adv(&last_index, | |
1011 | wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len); | |
1012 | be_tx_compl_process(adapter, last_index); | |
1013 | } | |
1014 | } | |
1015 | ||
1016 | static void be_tx_queues_destroy(struct be_adapter *adapter) | |
1017 | { | |
1018 | struct be_queue_info *q; | |
1019 | ||
1020 | q = &adapter->tx_obj.q; | |
1021 | if (q->created) | |
1022 | be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_TXQ); | |
1023 | be_queue_free(adapter, q); | |
1024 | ||
1025 | q = &adapter->tx_obj.cq; | |
1026 | if (q->created) | |
1027 | be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ); | |
1028 | be_queue_free(adapter, q); | |
1029 | ||
1030 | /* No more tx completions can be rcvd now; clean up if there are | |
1031 | * any pending completions or pending tx requests */ | |
1032 | be_tx_q_clean(adapter); | |
1033 | ||
1034 | q = &adapter->tx_eq.q; | |
1035 | if (q->created) | |
1036 | be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ); | |
1037 | be_queue_free(adapter, q); | |
1038 | } | |
1039 | ||
1040 | static int be_tx_queues_create(struct be_adapter *adapter) | |
1041 | { | |
1042 | struct be_queue_info *eq, *q, *cq; | |
1043 | ||
1044 | adapter->tx_eq.max_eqd = 0; | |
1045 | adapter->tx_eq.min_eqd = 0; | |
1046 | adapter->tx_eq.cur_eqd = 96; | |
1047 | adapter->tx_eq.enable_aic = false; | |
1048 | /* Alloc Tx Event queue */ | |
1049 | eq = &adapter->tx_eq.q; | |
1050 | if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry))) | |
1051 | return -1; | |
1052 | ||
1053 | /* Ask BE to create Tx Event queue */ | |
1054 | if (be_cmd_eq_create(&adapter->ctrl, eq, adapter->tx_eq.cur_eqd)) | |
1055 | goto tx_eq_free; | |
1056 | /* Alloc TX eth compl queue */ | |
1057 | cq = &adapter->tx_obj.cq; | |
1058 | if (be_queue_alloc(adapter, cq, TX_CQ_LEN, | |
1059 | sizeof(struct be_eth_tx_compl))) | |
1060 | goto tx_eq_destroy; | |
1061 | ||
1062 | /* Ask BE to create Tx eth compl queue */ | |
1063 | if (be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3)) | |
1064 | goto tx_cq_free; | |
1065 | ||
1066 | /* Alloc TX eth queue */ | |
1067 | q = &adapter->tx_obj.q; | |
1068 | if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb))) | |
1069 | goto tx_cq_destroy; | |
1070 | ||
1071 | /* Ask BE to create Tx eth queue */ | |
1072 | if (be_cmd_txq_create(&adapter->ctrl, q, cq)) | |
1073 | goto tx_q_free; | |
1074 | return 0; | |
1075 | ||
1076 | tx_q_free: | |
1077 | be_queue_free(adapter, q); | |
1078 | tx_cq_destroy: | |
1079 | be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ); | |
1080 | tx_cq_free: | |
1081 | be_queue_free(adapter, cq); | |
1082 | tx_eq_destroy: | |
1083 | be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ); | |
1084 | tx_eq_free: | |
1085 | be_queue_free(adapter, eq); | |
1086 | return -1; | |
1087 | } | |
1088 | ||
1089 | static void be_rx_queues_destroy(struct be_adapter *adapter) | |
1090 | { | |
1091 | struct be_queue_info *q; | |
1092 | ||
1093 | q = &adapter->rx_obj.q; | |
1094 | if (q->created) { | |
1095 | be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_RXQ); | |
1096 | be_rx_q_clean(adapter); | |
1097 | } | |
1098 | be_queue_free(adapter, q); | |
1099 | ||
1100 | q = &adapter->rx_obj.cq; | |
1101 | if (q->created) | |
1102 | be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ); | |
1103 | be_queue_free(adapter, q); | |
1104 | ||
1105 | q = &adapter->rx_eq.q; | |
1106 | if (q->created) | |
1107 | be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ); | |
1108 | be_queue_free(adapter, q); | |
1109 | } | |
1110 | ||
1111 | static int be_rx_queues_create(struct be_adapter *adapter) | |
1112 | { | |
1113 | struct be_queue_info *eq, *q, *cq; | |
1114 | int rc; | |
1115 | ||
1116 | adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME; | |
1117 | adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; | |
1118 | adapter->rx_eq.max_eqd = BE_MAX_EQD; | |
1119 | adapter->rx_eq.min_eqd = 0; | |
1120 | adapter->rx_eq.cur_eqd = 0; | |
1121 | adapter->rx_eq.enable_aic = true; | |
1122 | ||
1123 | /* Alloc Rx Event queue */ | |
1124 | eq = &adapter->rx_eq.q; | |
1125 | rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, | |
1126 | sizeof(struct be_eq_entry)); | |
1127 | if (rc) | |
1128 | return rc; | |
1129 | ||
1130 | /* Ask BE to create Rx Event queue */ | |
1131 | rc = be_cmd_eq_create(&adapter->ctrl, eq, adapter->rx_eq.cur_eqd); | |
1132 | if (rc) | |
1133 | goto rx_eq_free; | |
1134 | ||
1135 | /* Alloc RX eth compl queue */ | |
1136 | cq = &adapter->rx_obj.cq; | |
1137 | rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, | |
1138 | sizeof(struct be_eth_rx_compl)); | |
1139 | if (rc) | |
1140 | goto rx_eq_destroy; | |
1141 | ||
1142 | /* Ask BE to create Rx eth compl queue */ | |
1143 | rc = be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3); | |
1144 | if (rc) | |
1145 | goto rx_cq_free; | |
1146 | ||
1147 | /* Alloc RX eth queue */ | |
1148 | q = &adapter->rx_obj.q; | |
1149 | rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d)); | |
1150 | if (rc) | |
1151 | goto rx_cq_destroy; | |
1152 | ||
1153 | /* Ask BE to create Rx eth queue */ | |
1154 | rc = be_cmd_rxq_create(&adapter->ctrl, q, cq->id, rx_frag_size, | |
1155 | BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false); | |
1156 | if (rc) | |
1157 | goto rx_q_free; | |
1158 | ||
1159 | return 0; | |
1160 | rx_q_free: | |
1161 | be_queue_free(adapter, q); | |
1162 | rx_cq_destroy: | |
1163 | be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ); | |
1164 | rx_cq_free: | |
1165 | be_queue_free(adapter, cq); | |
1166 | rx_eq_destroy: | |
1167 | be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ); | |
1168 | rx_eq_free: | |
1169 | be_queue_free(adapter, eq); | |
1170 | return rc; | |
1171 | } | |
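| /* Pop the next valid event queue entry and report the resource id it carries; returns false if none is pending */ |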
1172 | static bool event_get(struct be_eq_obj *eq_obj, u16 *rid) | |
1173 | { | |
1174 | struct be_eq_entry *entry = queue_tail_node(&eq_obj->q); | |
1175 | u32 evt = entry->evt; | |
1176 | ||
1177 | if (!evt) | |
1178 | return false; | |
1179 | ||
1180 | evt = le32_to_cpu(evt); | |
1181 | *rid = (evt >> EQ_ENTRY_RES_ID_SHIFT) & EQ_ENTRY_RES_ID_MASK; | |
1182 | entry->evt = 0; | |
1183 | queue_tail_inc(&eq_obj->q); | |
1184 | return true; | |
1185 | } | |
1186 | ||
1187 | static int event_handle(struct be_ctrl_info *ctrl, | |
1188 | struct be_eq_obj *eq_obj) | |
1189 | { | |
1190 | u16 rid = 0, num = 0; | |
1191 | ||
1192 | while (event_get(eq_obj, &rid)) | |
1193 | num++; | |
1194 | ||
1195 | /* We can see an interrupt and no event */ | |
1196 | be_eq_notify(ctrl, eq_obj->q.id, true, true, num); | |
1197 | if (num) | |
1198 | napi_schedule(&eq_obj->napi); | |
1199 | ||
1200 | return num; | |
1201 | } | |
1202 | ||
1203 | static irqreturn_t be_intx(int irq, void *dev) | |
1204 | { | |
1205 | struct be_adapter *adapter = dev; | |
1206 | struct be_ctrl_info *ctrl = &adapter->ctrl; | |
1207 | int rx, tx; | |
1208 | ||
1209 | tx = event_handle(ctrl, &adapter->tx_eq); | |
1210 | rx = event_handle(ctrl, &adapter->rx_eq); | |
1211 | ||
1212 | if (rx || tx) | |
1213 | return IRQ_HANDLED; | |
1214 | else | |
1215 | return IRQ_NONE; | |
1216 | } | |
1217 | ||
1218 | static irqreturn_t be_msix_rx(int irq, void *dev) | |
1219 | { | |
1220 | struct be_adapter *adapter = dev; | |
1221 | ||
1222 | event_handle(&adapter->ctrl, &adapter->rx_eq); | |
1223 | ||
1224 | return IRQ_HANDLED; | |
1225 | } | |
1226 | ||
1227 | static irqreturn_t be_msix_tx(int irq, void *dev) | |
1228 | { | |
1229 | struct be_adapter *adapter = dev; | |
1230 | ||
1231 | event_handle(&adapter->ctrl, &adapter->tx_eq); | |
1232 | ||
1233 | return IRQ_HANDLED; | |
1234 | } | |
1235 | ||
1236 | static inline bool do_lro(struct be_adapter *adapter, | |
1237 | struct be_eth_rx_compl *rxcp) | |
1238 | { | |
1239 | int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp); | |
1240 | int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); | |
1241 | ||
1242 | if (err) | |
1243 | drvr_stats(adapter)->be_rxcp_err++; | |
1244 | ||
1245 | return (!tcp_frame || err || (adapter->max_rx_coal <= 1)) ? | |
1246 | false : true; | |
1247 | } | |
1248 | ||
1249 | int be_poll_rx(struct napi_struct *napi, int budget) | |
1250 | { | |
1251 | struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi); | |
1252 | struct be_adapter *adapter = | |
1253 | container_of(rx_eq, struct be_adapter, rx_eq); | |
1254 | struct be_queue_info *rx_cq = &adapter->rx_obj.cq; | |
1255 | struct be_eth_rx_compl *rxcp; | |
1256 | u32 work_done; | |
1257 | ||
1258 | for (work_done = 0; work_done < budget; work_done++) { | |
1259 | rxcp = be_rx_compl_get(adapter); | |
1260 | if (!rxcp) | |
1261 | break; | |
1262 | ||
1263 | if (do_lro(adapter, rxcp)) | |
1264 | be_rx_compl_process_lro(adapter, rxcp); | |
1265 | else | |
1266 | be_rx_compl_process(adapter, rxcp); | |
1267 | } | |
1268 | ||
1269 | lro_flush_all(&adapter->rx_obj.lro_mgr); | |
1270 | ||
1271 | /* Refill the queue */ | |
1272 | if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM) | |
1273 | be_post_rx_frags(adapter); | |
1274 | ||
1275 | /* All consumed */ | |
1276 | if (work_done < budget) { | |
1277 | napi_complete(napi); | |
1278 | be_cq_notify(&adapter->ctrl, rx_cq->id, true, work_done); | |
1279 | } else { | |
1280 | /* More to be consumed; continue with interrupts disabled */ | |
1281 | be_cq_notify(&adapter->ctrl, rx_cq->id, false, work_done); | |
1282 | } | |
1283 | return work_done; | |
1284 | } | |
1285 | ||
1286 | /* For TX we don't honour budget; consume everything */ | |
1287 | int be_poll_tx(struct napi_struct *napi, int budget) | |
1288 | { | |
1289 | struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi); | |
1290 | struct be_adapter *adapter = | |
1291 | container_of(tx_eq, struct be_adapter, tx_eq); | |
1292 | struct be_tx_obj *tx_obj = &adapter->tx_obj; | |
1293 | struct be_queue_info *tx_cq = &tx_obj->cq; | |
1294 | struct be_queue_info *txq = &tx_obj->q; | |
1295 | struct be_eth_tx_compl *txcp; | |
1296 | u32 num_cmpl = 0; | |
1297 | u16 end_idx; | |
1298 | ||
1299 | while ((txcp = be_tx_compl_get(adapter))) { | |
1300 | end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, | |
1301 | wrb_index, txcp); | |
1302 | be_tx_compl_process(adapter, end_idx); | |
1303 | num_cmpl++; | |
1304 | } | |
1305 | ||
1306 | /* As Tx wrbs have been freed up, wake up netdev queue if | |
1307 | * it was stopped due to lack of tx wrbs. | |
1308 | */ | |
1309 | if (netif_queue_stopped(adapter->netdev) && | |
1310 | atomic_read(&txq->used) < txq->len / 2) { | |
1311 | netif_wake_queue(adapter->netdev); | |
1312 | } | |
1313 | ||
1314 | napi_complete(napi); | |
1315 | ||
1316 | be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl); | |
1317 | ||
1318 | drvr_stats(adapter)->be_tx_events++; | |
1319 | drvr_stats(adapter)->be_tx_compl += num_cmpl; | |
1320 | ||
1321 | return 1; | |
1322 | } | |
1323 | ||
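| /* Worker run every second: refresh link state and HW stats, adapt the RX EQ delay and replenish a starved RX queue */ |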
1324 | static void be_worker(struct work_struct *work) |
1325 | { | |
1326 | struct be_adapter *adapter = | |
1327 | container_of(work, struct be_adapter, work.work); | |
1328 | int status; | |
1329 | ||
1330 | /* Check link */ | |
1331 | be_link_status_update(adapter); | |
1332 | ||
1333 | /* Get Stats */ | |
1334 | status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd); | |
1335 | if (!status) | |
1336 | netdev_stats_update(adapter); | |
1337 | ||
1338 | /* Set EQ delay */ | |
1339 | be_rx_eqd_update(adapter); | |
1340 | ||
1341 | be_tx_rate_update(adapter); |
1342 | be_rx_rate_update(adapter); | |
1343 | ||
1344 | if (adapter->rx_post_starved) { |
1345 | adapter->rx_post_starved = false; | |
1346 | be_post_rx_frags(adapter); | |
1347 | } | |
1348 | ||
1349 | schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); | |
1350 | } | |
1351 | ||
1352 | static void be_msix_enable(struct be_adapter *adapter) |
1353 | { | |
1354 | int i, status; | |
1355 | ||
1356 | for (i = 0; i < BE_NUM_MSIX_VECTORS; i++) | |
1357 | adapter->msix_entries[i].entry = i; | |
1358 | ||
1359 | status = pci_enable_msix(adapter->pdev, adapter->msix_entries, | |
1360 | BE_NUM_MSIX_VECTORS); | |
1361 | if (status == 0) | |
1362 | adapter->msix_enabled = true; | |
1363 | return; | |
1364 | } | |
1365 | ||
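| /* Map an EQ id to its MSI-X vector; EQ ids appear to be allotted 8 per PCI function */ |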
1366 | static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id) | |
1367 | { | |
1368 | return adapter->msix_entries[eq_id - | |
1369 | 8 * adapter->ctrl.pci_func].vector; | |
1370 | } | |
1371 | ||
1372 | static int be_msix_register(struct be_adapter *adapter) | |
1373 | { | |
1374 | struct net_device *netdev = adapter->netdev; | |
1375 | struct be_eq_obj *tx_eq = &adapter->tx_eq; | |
1376 | struct be_eq_obj *rx_eq = &adapter->rx_eq; | |
1377 | int status, vec; | |
1378 | ||
1379 | sprintf(tx_eq->desc, "%s-tx", netdev->name); | |
1380 | vec = be_msix_vec_get(adapter, tx_eq->q.id); | |
1381 | status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter); | |
1382 | if (status) | |
1383 | goto err; | |
1384 | ||
1385 | sprintf(rx_eq->desc, "%s-rx", netdev->name); | |
1386 | vec = be_msix_vec_get(adapter, rx_eq->q.id); | |
1387 | status = request_irq(vec, be_msix_rx, 0, rx_eq->desc, adapter); | |
1388 | if (status) { /* Free TX IRQ */ | |
1389 | vec = be_msix_vec_get(adapter, tx_eq->q.id); | |
1390 | free_irq(vec, adapter); | |
1391 | goto err; | |
1392 | } | |
1393 | return 0; | |
1394 | err: | |
1395 | dev_warn(&adapter->pdev->dev, | |
1396 | "MSIX Request IRQ failed - err %d\n", status); | |
1397 | pci_disable_msix(adapter->pdev); | |
1398 | adapter->msix_enabled = false; | |
1399 | return status; | |
1400 | } | |
1401 | ||
1402 | static int be_irq_register(struct be_adapter *adapter) | |
1403 | { | |
1404 | struct net_device *netdev = adapter->netdev; | |
1405 | int status; | |
1406 | ||
1407 | if (adapter->msix_enabled) { | |
1408 | status = be_msix_register(adapter); | |
1409 | if (status == 0) | |
1410 | goto done; | |
1411 | } | |
1412 | ||
1413 | /* INTx */ | |
1414 | netdev->irq = adapter->pdev->irq; | |
1415 | status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name, | |
1416 | adapter); | |
1417 | if (status) { | |
1418 | dev_err(&adapter->pdev->dev, | |
1419 | "INTx request IRQ failed - err %d\n", status); | |
1420 | return status; | |
1421 | } | |
1422 | done: | |
1423 | adapter->isr_registered = true; | |
1424 | return 0; | |
1425 | } | |
1426 | ||
1427 | static void be_irq_unregister(struct be_adapter *adapter) | |
1428 | { | |
1429 | struct net_device *netdev = adapter->netdev; | |
1430 | int vec; | |
1431 | ||
1432 | if (!adapter->isr_registered) | |
1433 | return; | |
1434 | ||
1435 | /* INTx */ | |
1436 | if (!adapter->msix_enabled) { | |
1437 | free_irq(netdev->irq, adapter); | |
1438 | goto done; | |
1439 | } | |
1440 | ||
1441 | /* MSIx */ | |
1442 | vec = be_msix_vec_get(adapter, adapter->tx_eq.q.id); | |
1443 | free_irq(vec, adapter); | |
1444 | vec = be_msix_vec_get(adapter, adapter->rx_eq.q.id); | |
1445 | free_irq(vec, adapter); | |
1446 | done: | |
1447 | adapter->isr_registered = false; | |
1448 | return; | |
1449 | } | |
1450 | ||
1451 | static int be_open(struct net_device *netdev) | |
1452 | { | |
1453 | struct be_adapter *adapter = netdev_priv(netdev); | |
1454 | struct be_ctrl_info *ctrl = &adapter->ctrl; | |
1455 | struct be_eq_obj *rx_eq = &adapter->rx_eq; | |
1456 | struct be_eq_obj *tx_eq = &adapter->tx_eq; | |
1457 | u32 if_flags; | |
1458 | int status; | |
1459 | ||
1460 | if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS | | |
1461 | BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED | | |
1462 | BE_IF_FLAGS_PASS_L3L4_ERRORS; | |
1463 | status = be_cmd_if_create(ctrl, if_flags, netdev->dev_addr, | |
1464 | false/* pmac_invalid */, &adapter->if_handle, | |
1465 | &adapter->pmac_id); | |
1466 | if (status != 0) | |
1467 | goto do_none; | |
1468 | ||
1468 | ||
1469 | be_vid_config(netdev); |
1470 | ||
1471 | status = be_cmd_set_flow_control(ctrl, true, true); |
1472 | if (status != 0) | |
1473 | goto if_destroy; | |
1474 | ||
1475 | status = be_tx_queues_create(adapter); | |
1476 | if (status != 0) | |
1477 | goto if_destroy; | |
1478 | ||
1479 | status = be_rx_queues_create(adapter); | |
1480 | if (status != 0) | |
1481 | goto tx_qs_destroy; | |
1482 | ||
1483 | /* First time posting */ | |
1484 | be_post_rx_frags(adapter); | |
1485 | ||
1486 | napi_enable(&rx_eq->napi); | |
1487 | napi_enable(&tx_eq->napi); | |
1488 | ||
1489 | be_irq_register(adapter); | |
1490 | ||
1491 | be_intr_set(ctrl, true); | |
1492 | ||
1493 | /* The evt queues are created in the unarmed state; arm them */ | |
1494 | be_eq_notify(ctrl, rx_eq->q.id, true, false, 0); | |
1495 | be_eq_notify(ctrl, tx_eq->q.id, true, false, 0); | |
1496 | ||
1497 | /* The compl queues are created in the unarmed state; arm them */ | |
1498 | be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0); | |
1499 | be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0); | |
1500 | ||
1501 | be_link_status_update(adapter); | |
1502 | ||
1503 | schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); | |
1504 | return 0; | |
1505 | ||
1506 | tx_qs_destroy: | |
1507 | be_tx_queues_destroy(adapter); | |
1508 | if_destroy: | |
1509 | be_cmd_if_destroy(ctrl, adapter->if_handle); | |
1510 | do_none: | |
1511 | return status; | |
1512 | } | |
1513 | ||
1514 | static int be_close(struct net_device *netdev) | |
1515 | { | |
1516 | struct be_adapter *adapter = netdev_priv(netdev); | |
1517 | struct be_ctrl_info *ctrl = &adapter->ctrl; | |
1518 | struct be_eq_obj *rx_eq = &adapter->rx_eq; | |
1519 | struct be_eq_obj *tx_eq = &adapter->tx_eq; | |
1520 | int vec; | |
1521 | ||
1522 | cancel_delayed_work(&adapter->work); | |
1523 | ||
1524 | netif_stop_queue(netdev); | |
1525 | netif_carrier_off(netdev); | |
1526 | adapter->link.speed = PHY_LINK_SPEED_ZERO; | |
1527 | ||
1528 | be_intr_set(ctrl, false); | |
1529 | ||
1530 | if (adapter->msix_enabled) { | |
1531 | vec = be_msix_vec_get(adapter, tx_eq->q.id); | |
1532 | synchronize_irq(vec); | |
1533 | vec = be_msix_vec_get(adapter, rx_eq->q.id); | |
1534 | synchronize_irq(vec); | |
1535 | } else { | |
1536 | synchronize_irq(netdev->irq); | |
1537 | } | |
1538 | be_irq_unregister(adapter); | |
1539 | ||
1540 | napi_disable(&rx_eq->napi); | |
1541 | napi_disable(&tx_eq->napi); | |
1542 | ||
1543 | be_rx_queues_destroy(adapter); | |
1544 | be_tx_queues_destroy(adapter); | |
1545 | ||
1546 | be_cmd_if_destroy(ctrl, adapter->if_handle); | |
1547 | return 0; | |
1548 | } | |
1549 | ||
1550 | static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr, | |
1551 | void **ip_hdr, void **tcpudp_hdr, | |
1552 | u64 *hdr_flags, void *priv) | |
1553 | { | |
1554 | struct ethhdr *eh; | |
1555 | struct vlan_ethhdr *veh; | |
1556 | struct iphdr *iph; | |
1557 | u8 *va = page_address(frag->page) + frag->page_offset; | |
1558 | unsigned long ll_hlen; | |
1559 | ||
1560 | prefetch(va); | |
1561 | eh = (struct ethhdr *)va; | |
1562 | *mac_hdr = eh; | |
1563 | ll_hlen = ETH_HLEN; | |
1564 | if (eh->h_proto != htons(ETH_P_IP)) { | |
1565 | if (eh->h_proto == htons(ETH_P_8021Q)) { | |
1566 | veh = (struct vlan_ethhdr *)va; | |
1567 | if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP)) | |
1568 | return -1; | |
1569 | ||
1570 | ll_hlen += VLAN_HLEN; | |
1571 | } else { | |
1572 | return -1; | |
1573 | } | |
1574 | } | |
1575 | *hdr_flags = LRO_IPV4; | |
1576 | iph = (struct iphdr *)(va + ll_hlen); | |
1577 | *ip_hdr = iph; | |
1578 | if (iph->protocol != IPPROTO_TCP) | |
1579 | return -1; | |
1580 | *hdr_flags |= LRO_TCP; | |
1581 | *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2); | |
1582 | ||
1583 | return 0; | |
1584 | } | |
1585 | ||
1586 | static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev) | |
1587 | { | |
1588 | struct net_lro_mgr *lro_mgr; | |
1589 | ||
1590 | lro_mgr = &adapter->rx_obj.lro_mgr; | |
1591 | lro_mgr->dev = netdev; | |
1592 | lro_mgr->features = LRO_F_NAPI; | |
1593 | lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; | |
1594 | lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; | |
1595 | lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS; | |
1596 | lro_mgr->lro_arr = adapter->rx_obj.lro_desc; | |
1597 | lro_mgr->get_frag_header = be_get_frag_header; | |
1598 | lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME; | |
1599 | } | |
1600 | ||
1601 | static struct net_device_ops be_netdev_ops = { | |
1602 | .ndo_open = be_open, | |
1603 | .ndo_stop = be_close, | |
1604 | .ndo_start_xmit = be_xmit, | |
1605 | .ndo_get_stats = be_get_stats, | |
1606 | .ndo_set_rx_mode = be_set_multicast_list, | |
1607 | .ndo_set_mac_address = be_mac_addr_set, | |
1608 | .ndo_change_mtu = be_change_mtu, | |
1609 | .ndo_validate_addr = eth_validate_addr, | |
1610 | .ndo_vlan_rx_register = be_vlan_register, | |
1611 | .ndo_vlan_rx_add_vid = be_vlan_add_vid, | |
1612 | .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, | |
1613 | }; | |
1614 | ||
1615 | static void be_netdev_init(struct net_device *netdev) | |
1616 | { | |
1617 | struct be_adapter *adapter = netdev_priv(netdev); | |
1618 | ||
1619 | netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | | |
1620 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM | | |
1621 | NETIF_F_IPV6_CSUM | NETIF_F_TSO6; | |
1622 | ||
1623 | netdev->flags |= IFF_MULTICAST; | |
1624 | ||
1625 | BE_SET_NETDEV_OPS(netdev, &be_netdev_ops); | |
1626 | ||
1627 | SET_ETHTOOL_OPS(netdev, &be_ethtool_ops); | |
1628 | ||
1629 | be_lro_init(adapter, netdev); | |
1630 | ||
1631 | netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx, | |
1632 | BE_NAPI_WEIGHT); | |
1633 | netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx, | |
1634 | BE_NAPI_WEIGHT); | |
1635 | ||
1636 | netif_carrier_off(netdev); | |
1637 | netif_stop_queue(netdev); | |
1638 | } | |
1639 | ||
1640 | static void be_unmap_pci_bars(struct be_adapter *adapter) | |
1641 | { | |
1642 | struct be_ctrl_info *ctrl = &adapter->ctrl; | |
1643 | if (ctrl->csr) | |
1644 | iounmap(ctrl->csr); | |
1645 | if (ctrl->db) | |
1646 | iounmap(ctrl->db); | |
1647 | if (ctrl->pcicfg) | |
1648 | iounmap(ctrl->pcicfg); | |
1649 | } | |
1650 | ||
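| /* BAR 2 maps the CSR registers, the first 128KB of BAR 4 the doorbells, | |
| * and BAR 1 the PCI config window used for interrupt control. */ | |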
1651 | static int be_map_pci_bars(struct be_adapter *adapter) | |
1652 | { | |
1653 | u8 __iomem *addr; | |
1654 | ||
1655 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), | |
1656 | pci_resource_len(adapter->pdev, 2)); | |
1657 | if (addr == NULL) | |
1658 | return -ENOMEM; | |
1659 | adapter->ctrl.csr = addr; | |
1660 | ||
1661 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4), | |
1662 | 128 * 1024); | |
1663 | if (addr == NULL) | |
1664 | goto pci_map_err; | |
1665 | adapter->ctrl.db = addr; | |
1666 | ||
1667 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1), | |
1668 | pci_resource_len(adapter->pdev, 1)); | |
1669 | if (addr == NULL) | |
1670 | goto pci_map_err; | |
1671 | adapter->ctrl.pcicfg = addr; | |
1672 | ||
1673 | return 0; | |
1674 | pci_map_err: | |
1675 | be_unmap_pci_bars(adapter); | |
1676 | return -ENOMEM; | |
1677 | } | |
1678 | ||
1679 | ||
1680 | static void be_ctrl_cleanup(struct be_adapter *adapter) | |
1681 | { | |
1682 | struct be_dma_mem *mem = &adapter->ctrl.mbox_mem_alloced; | |
1683 | ||
1684 | be_unmap_pci_bars(adapter); | |
1685 | ||
1686 | if (mem->va) | |
1687 | pci_free_consistent(adapter->pdev, mem->size, | |
1688 | mem->va, mem->dma); | |
1689 | } | |
1690 | ||
1691 | /* Initialize the mbox required to send cmds to BE */ | |
1692 | static int be_ctrl_init(struct be_adapter *adapter) | |
1693 | { | |
1694 | struct be_ctrl_info *ctrl = &adapter->ctrl; | |
1695 | struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced; | |
1696 | struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem; | |
1697 | int status; | |
1698 | u32 val; | |
1699 | ||
1700 | status = be_map_pci_bars(adapter); | |
1701 | if (status) | |
1702 | return status; | |
1703 | ||
1704 | mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; | |
1705 | mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev, | |
1706 | mbox_mem_alloc->size, &mbox_mem_alloc->dma); | |
1707 | if (!mbox_mem_alloc->va) { | |
1708 | be_unmap_pci_bars(adapter); | |
1709 | return -ENOMEM; | |
1710 | } | |
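| /* Carve a 16-byte aligned mailbox out of the 16-byte padded allocation */ | |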
1711 | mbox_mem_align->size = sizeof(struct be_mcc_mailbox); | |
1712 | mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); | |
1713 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); | |
1714 | memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); | |
1715 | spin_lock_init(&ctrl->cmd_lock); | |
1716 | ||
1717 | val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); | |
1718 | ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) & | |
1719 | MEMBAR_CTRL_INT_CTRL_PFUNC_MASK; | |
1720 | return 0; | |
1721 | } | |
1722 | ||
1723 | static void be_stats_cleanup(struct be_adapter *adapter) | |
1724 | { | |
1725 | struct be_stats_obj *stats = &adapter->stats; | |
1726 | struct be_dma_mem *cmd = &stats->cmd; | |
1727 | ||
1728 | if (cmd->va) | |
1729 | pci_free_consistent(adapter->pdev, cmd->size, | |
1730 | cmd->va, cmd->dma); | |
1731 | } | |
1732 | ||
1733 | static int be_stats_init(struct be_adapter *adapter) | |
1734 | { | |
1735 | struct be_stats_obj *stats = &adapter->stats; | |
1736 | struct be_dma_mem *cmd = &stats->cmd; | |
1737 | ||
1738 | cmd->size = sizeof(struct be_cmd_req_get_stats); | |
1739 | cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); | |
1740 | if (cmd->va == NULL) | |
1741 | return -ENOMEM; | |
1742 | return 0; | |
1743 | } | |
1744 | ||
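| /* Teardown mirrors probe: unregister the netdev, free the stats and | |
| * mailbox DMA memory, disable MSI-X and release the PCI resources. */ | |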
1745 | static void __devexit be_remove(struct pci_dev *pdev) | |
1746 | { | |
1747 | struct be_adapter *adapter = pci_get_drvdata(pdev); | |
1748 | if (!adapter) | |
1749 | return; | |
1750 | ||
1751 | unregister_netdev(adapter->netdev); | |
1752 | ||
1753 | be_stats_cleanup(adapter); | |
1754 | ||
1755 | be_ctrl_cleanup(adapter); | |
1756 | ||
1757 | if (adapter->msix_enabled) { | |
1758 | pci_disable_msix(adapter->pdev); | |
1759 | adapter->msix_enabled = false; | |
1760 | } | |
1761 | ||
1762 | pci_set_drvdata(pdev, NULL); | |
1763 | pci_release_regions(pdev); | |
1764 | pci_disable_device(pdev); | |
1765 | ||
1766 | free_netdev(adapter->netdev); | |
1767 | } | |
1768 | ||
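| /* Bring the firmware up: issue POST, then read back the firmware | |
| * version and the port number from the firmware config. */ | |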
1769 | static int be_hw_up(struct be_adapter *adapter) | |
1770 | { | |
1771 | struct be_ctrl_info *ctrl = &adapter->ctrl; | |
1772 | int status; | |
1773 | ||
1774 | status = be_cmd_POST(ctrl); | |
1775 | if (status) | |
1776 | return status; | |
1777 | ||
1778 | status = be_cmd_get_fw_ver(ctrl, adapter->fw_ver); | |
1779 | if (status) | |
1780 | return status; | |
1781 | ||
1782 | status = be_cmd_query_fw_cfg(ctrl, &adapter->port_num); | |
1783 | return status; | |
1784 | } | |
1785 | ||
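| /* Probe: enable the device and regions, set up MSI-X and the DMA mask | |
| * (64-bit, falling back to 32-bit), init the mailbox and stats buffers, | |
| * POST the firmware, query the permanent MAC and register the netdev. */ | |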
1786 | static int __devinit be_probe(struct pci_dev *pdev, | |
1787 | const struct pci_device_id *pdev_id) | |
1788 | { | |
1789 | int status = 0; | |
1790 | struct be_adapter *adapter; | |
1791 | struct net_device *netdev; | |
1792 | struct be_ctrl_info *ctrl; | |
1793 | u8 mac[ETH_ALEN]; | |
1794 | ||
1795 | status = pci_enable_device(pdev); | |
1796 | if (status) | |
1797 | goto do_none; | |
1798 | ||
1799 | status = pci_request_regions(pdev, DRV_NAME); | |
1800 | if (status) | |
1801 | goto disable_dev; | |
1802 | pci_set_master(pdev); | |
1803 | ||
1804 | netdev = alloc_etherdev(sizeof(struct be_adapter)); | |
1805 | if (netdev == NULL) { | |
1806 | status = -ENOMEM; | |
1807 | goto rel_reg; | |
1808 | } | |
1809 | adapter = netdev_priv(netdev); | |
1810 | adapter->pdev = pdev; | |
1811 | pci_set_drvdata(pdev, adapter); | |
1812 | adapter->netdev = netdev; | |
1813 | ||
1814 | be_msix_enable(adapter); | |
1815 | ||
1816 | status = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | |
1817 | if (!status) { | |
1818 | netdev->features |= NETIF_F_HIGHDMA; | |
1819 | } else { | |
1820 | status = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | |
1821 | if (status) { | |
1822 | dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); | |
1823 | goto free_netdev; | |
1824 | } | |
1825 | } | |
1826 | ||
1827 | ctrl = &adapter->ctrl; | |
1828 | status = be_ctrl_init(adapter); | |
1829 | if (status) | |
1830 | goto free_netdev; | |
1831 | ||
1832 | status = be_stats_init(adapter); | |
1833 | if (status) | |
1834 | goto ctrl_clean; | |
1835 | ||
1836 | status = be_hw_up(adapter); | |
1837 | if (status) | |
1838 | goto stats_clean; | |
1839 | ||
1840 | status = be_cmd_mac_addr_query(ctrl, mac, MAC_ADDRESS_TYPE_NETWORK, | |
1841 | true /* permanent */, 0); | |
1842 | if (status) | |
1843 | goto stats_clean; | |
1844 | memcpy(netdev->dev_addr, mac, ETH_ALEN); | |
1845 | ||
1846 | INIT_DELAYED_WORK(&adapter->work, be_worker); | |
1847 | be_netdev_init(netdev); | |
1848 | SET_NETDEV_DEV(netdev, &adapter->pdev->dev); | |
1849 | ||
1850 | status = register_netdev(netdev); | |
1851 | if (status != 0) | |
1852 | goto stats_clean; | |
1853 | ||
1854 | dev_info(&pdev->dev, BE_NAME " port %d\n", adapter->port_num); | |
1855 | return 0; | |
1856 | ||
1857 | stats_clean: | |
1858 | be_stats_cleanup(adapter); | |
1859 | ctrl_clean: | |
1860 | be_ctrl_cleanup(adapter); | |
1861 | free_netdev: | |
1862 | free_netdev(adapter->netdev); | |
1863 | rel_reg: | |
1864 | pci_release_regions(pdev); | |
1865 | disable_dev: | |
1866 | pci_disable_device(pdev); | |
1867 | do_none: | |
1868 | dev_warn(&pdev->dev, BE_NAME " initialization failed\n"); | |
1869 | return status; | |
1870 | } | |
1871 | ||
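| /* Suspend/resume close and reopen a running interface under rtnl | |
| * around the PCI power-state transitions. */ | |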
1872 | static int be_suspend(struct pci_dev *pdev, pm_message_t state) | |
1873 | { | |
1874 | struct be_adapter *adapter = pci_get_drvdata(pdev); | |
1875 | struct net_device *netdev = adapter->netdev; | |
1876 | ||
1877 | netif_device_detach(netdev); | |
1878 | if (netif_running(netdev)) { | |
1879 | rtnl_lock(); | |
1880 | be_close(netdev); | |
1881 | rtnl_unlock(); | |
1882 | } | |
1883 | ||
1884 | pci_save_state(pdev); | |
1885 | pci_disable_device(pdev); | |
1886 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | |
1887 | return 0; | |
1888 | } | |
1889 | ||
1890 | static int be_resume(struct pci_dev *pdev) | |
1891 | { | |
1892 | int status = 0; | |
1893 | struct be_adapter *adapter = pci_get_drvdata(pdev); | |
1894 | struct net_device *netdev = adapter->netdev; | |
1895 | ||
1896 | netif_device_detach(netdev); | |
1897 | ||
1898 | status = pci_enable_device(pdev); | |
1899 | if (status) | |
1900 | return status; | |
1901 | ||
1902 | pci_set_power_state(pdev, 0); | |
1903 | pci_restore_state(pdev); | |
1904 | ||
1905 | if (netif_running(netdev)) { |
1906 | rtnl_lock(); | |
1907 | be_open(netdev); | |
1908 | rtnl_unlock(); | |
1909 | } | |
1910 | netif_device_attach(netdev); | |
1911 | return 0; | |
1912 | } | |
1913 | ||
1914 | static struct pci_driver be_driver = { | |
1915 | .name = DRV_NAME, | |
1916 | .id_table = be_dev_ids, | |
1917 | .probe = be_probe, | |
1918 | .remove = be_remove, | |
1919 | .suspend = be_suspend, | |
1920 | .resume = be_resume | |
1921 | }; | |
1922 | ||
1923 | static int __init be_init_module(void) | |
1924 | { | |
1925 | if (rx_frag_size != 8192 && rx_frag_size != 4096 | |
1926 | && rx_frag_size != 2048) { | |
1927 | printk(KERN_WARNING DRV_NAME | |
1928 | " : Module param rx_frag_size must be 2048/4096/8192." | |
1929 | " Using 2048\n"); | |
1930 | rx_frag_size = 2048; | |
1931 | } | |
1932 | /* Ensure rx_frag_size is aligned to a cache line */ | |
1933 | if (SKB_DATA_ALIGN(rx_frag_size) != rx_frag_size) { | |
1934 | printk(KERN_WARNING DRV_NAME | |
1935 | " : Bad module param rx_frag_size. Using 2048\n"); | |
1936 | rx_frag_size = 2048; | |
1937 | } | |
1938 | ||
1939 | return pci_register_driver(&be_driver); | |
1940 | } | |
1941 | module_init(be_init_module); | |
1942 | ||
1943 | static void __exit be_exit_module(void) | |
1944 | { | |
1945 | pci_unregister_driver(&be_driver); | |
1946 | } | |
1947 | module_exit(be_exit_module); |