/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When LRO is in use, the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win of using page-based allocation for LRO is less
 *     than the performance hit of using page-based allocation for non-LRO
 *     traffic, so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
 *                      RX_ALLOC_FACTOR_SKB)
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_LRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_LRO 1
#define RX_ALLOC_FACTOR_SKB (-2)

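/* Worked example of the hysteresis (a sketch derived from the values
 * above): starting from rx_alloc_level 0, roughly 0x2000 (8192)
 * consecutive LRO-merged packets (+1 each) are needed before the level
 * crosses RX_ALLOC_LEVEL_LRO and the channel switches to page
 * allocation; from the 0x3000 ceiling, about 2048 non-merged packets
 * (-2 each) bring it back below the threshold and revert to skbs.
 */
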
/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
{
	/* Offset is always within one page, so we don't need to consider
	 * the page order.
	 */
	return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
	return PAGE_SIZE << efx->rx_buffer_order;
}

/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one. Returns a negative error code or 0
 * on success. May fail having inserted fewer than EFX_RX_BATCH buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	struct efx_rx_buffer *rx_buf;
	int skb_len = efx->rx_buffer_len;
	unsigned index, count;

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		index = rx_queue->added_count & EFX_RXQ_MASK;
		rx_buf = efx_rx_buffer(rx_queue, index);

		rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
		if (unlikely(!rx_buf->skb))
			return -ENOMEM;
		rx_buf->page = NULL;

		/* Adjust the SKB for padding and checksum */
		skb_reserve(rx_buf->skb, NET_IP_ALIGN);
		rx_buf->len = skb_len - NET_IP_ALIGN;
		rx_buf->data = (char *)rx_buf->skb->data;
		rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;

		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
						  rx_buf->data, rx_buf->len,
						  PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
						   rx_buf->dma_addr))) {
			dev_kfree_skb_any(rx_buf->skb);
			rx_buf->skb = NULL;
			return -EIO;
		}

		++rx_queue->added_count;
		++rx_queue->alloc_skb_count;
	}

	return 0;
}

/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates a struct efx_rx_buffer for each one. Returns a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	void *page_addr;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = pci_map_page(efx->pci_dev, page, 0,
					efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
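		/* Each mapped page begins with a struct efx_rx_page_state
		 * header holding the page's DMA address and a count of the
		 * buffers still using it; the receive buffers follow it.
		 */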
		page_addr = page_address(page);
		state = page_addr;
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		page_addr += sizeof(struct efx_rx_page_state);
		dma_addr += sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & EFX_RXQ_MASK;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->skb = NULL;
		rx_buf->page = page;
		rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;
		++state->refcnt;

		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_addr += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}

static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		struct efx_rx_page_state *state;

		EFX_BUG_ON_PARANOID(rx_buf->skb);

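		/* The page is unmapped only when the last buffer sharing it
		 * is unmapped; state->refcnt counts the buffers still
		 * outstanding against the single DMA mapping.
		 */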
		state = page_address(rx_buf->page);
		if (--state->refcnt == 0) {
			pci_unmap_page(efx->pci_dev,
				       state->dma_addr,
				       efx_rx_buf_size(efx),
				       PCI_DMA_FROMDEVICE);
		}
	} else if (likely(rx_buf->skb)) {
		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
				 rx_buf->len, PCI_DMA_FROMDEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_nic *efx,
			       struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
	} else if (likely(rx_buf->skb)) {
		dev_kfree_skb_any(rx_buf->skb);
		rx_buf->skb = NULL;
	}
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
				    struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_page_state *state = page_address(rx_buf->page);
	struct efx_rx_buffer *new_buf;
	unsigned fill_level, index;

	/* +1 because efx_rx_packet() incremented removed_count. +1 because
	 * we'd like to insert an additional descriptor whilst leaving
	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
	if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) {
		/* We could place "state" on a list, and drain the list in
		 * efx_fast_push_rx_descriptors(). For now, this will do. */
		return;
	}

	++state->refcnt;
	get_page(rx_buf->page);

	index = rx_queue->added_count & EFX_RXQ_MASK;
	new_buf = efx_rx_buffer(rx_queue, index);
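	/* The two buffers sharing a page occupy opposite halves, so XORing
	 * the half-page bit of the DMA address and data pointer yields the
	 * partner buffer's addresses.
	 */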
	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
	new_buf->skb = NULL;
	new_buf->page = rx_buf->page;
	new_buf->data = (void *)
		((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
	new_buf->len = rx_buf->len;
	++rx_queue->added_count;
}

/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
	struct efx_rx_buffer *new_buf;
	unsigned index;

	if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
	    page_count(rx_buf->page) == 1)
		efx_resurrect_rx_buffer(rx_queue, rx_buf);

	index = rx_queue->added_count & EFX_RXQ_MASK;
	new_buf = efx_rx_buffer(rx_queue, index);

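	/* Transfer ownership wholesale: copy the descriptor into the new
	 * slot, then clear the old buffer's pointers so that
	 * efx_fini_rx_buffer() cannot free the page or skb a second time.
	 */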
	memcpy(new_buf, rx_buf, sizeof(*new_buf));
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
	++rx_queue->added_count;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->fast_fill_limit. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = rx_queue->channel;
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	space = rx_queue->fast_fill_limit - fill_level;
	if (space < EFX_RX_BATCH)
		goto out;

	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
		  " level %d to level %d using %s allocation\n",
		  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
		  channel->rx_alloc_push_pages ? "page" : "skb");

	do {
		if (channel->rx_alloc_push_pages)
			rc = efx_init_rx_buffers_page(rx_queue);
		else
			rc = efx_init_rx_buffers_skb(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
		  "to level %d\n", rx_queue->queue,
		  rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
	struct efx_channel *channel = rx_queue->channel;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(channel);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len, bool *discard,
				     bool *leak_packet)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	*discard = true;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		EFX_ERR_RL(efx, " RX queue %d seriously overlength "
			   "RX event (0x%x > 0x%x+0x%x). Leaking\n",
			   rx_queue->queue, len, max_len,
			   efx->type->rx_buffer_padding);
		/* If this buffer was skb-allocated, then the meta
		 * data at the end of the skb will be trashed. So
		 * we have no choice but to leak the fragment.
		 */
		*leak_packet = (rx_buf->skb != NULL);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		EFX_ERR_RL(efx, " RX queue %d overlength RX event "
			   "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
	}

	rx_queue->channel->n_rx_overlength++;
}

/* Pass a received packet up through the generic LRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate LRO method
 */
static void efx_rx_packet_lro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      bool checksummed)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	/* Pass the skb/page into the LRO engine */
	if (rx_buf->page) {
		struct page *page = rx_buf->page;
		struct sk_buff *skb;

		EFX_BUG_ON_PARANOID(rx_buf->skb);
		rx_buf->page = NULL;

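		/* Hand the page to GRO as an skb fragment: napi_get_frags()
		 * supplies a recycled skb and the page is attached to it
		 * without copying the payload.
		 */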
		skb = napi_get_frags(napi);
		if (!skb) {
			put_page(page);
			return;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->frags[0].page_offset =
			efx_rx_buf_offset(rx_buf);
		skb_shinfo(skb)->frags[0].size = rx_buf->len;
		skb_shinfo(skb)->nr_frags = 1;

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed =
			checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

		skb_record_rx_queue(skb, channel->channel);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->skb;

		EFX_BUG_ON_PARANOID(!skb);
		EFX_BUG_ON_PARANOID(!checksummed);
		rx_buf->skb = NULL;

		gro_result = napi_gro_receive(napi, skb);
	}

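	/* Feed the outcome back into the allocation heuristic: GRO_NORMAL
	 * means the frame was delivered as an ordinary skb (no merge
	 * benefit), while any other result short of GRO_DROP means GRO
	 * consumed the fragment usefully.
	 */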
	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
		channel->irq_mod_score += 2;
	}
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, bool checksummed, bool discard)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = rx_queue->channel;
	struct efx_rx_buffer *rx_buf;
	bool leak_packet = false;

	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_BUG_ON_PARANOID(!rx_buf->data);
	EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
	EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len,
				 &discard, &leak_packet);

	EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
		  rx_queue->queue, index,
		  (unsigned long long)rx_buf->dma_addr, len,
		  (checksummed ? " [SUMMED]" : ""),
		  (discard ? " [DISCARD]" : ""));

	/* Discard packet, if instructed to do so */
	if (unlikely(discard)) {
		if (unlikely(leak_packet))
			channel->n_skbuff_leaks++;
		else
			efx_recycle_rx_buffer(channel, rx_buf);

		/* Don't hold off the previous receive */
		rx_buf = NULL;
		goto out;
	}

	/* Release card resources - assumes all RX buffers consumed in-order
	 * per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(rx_buf->data);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len;
 out:
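	/* Deliver the packet pipelined on the previous call, then stash this
	 * one in its place (rx_buf is NULL on the discard path, which just
	 * flushes the pipeline).
	 */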
	if (rx_queue->channel->rx_pkt)
		__efx_rx_packet(rx_queue->channel,
				rx_queue->channel->rx_pkt,
				rx_queue->channel->rx_pkt_csummed);
	rx_queue->channel->rx_pkt = rx_buf;
	rx_queue->channel->rx_pkt_csummed = checksummed;
}

/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
		     struct efx_rx_buffer *rx_buf, bool checksummed)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (rx_buf->skb) {
		prefetch(skb_shinfo(rx_buf->skb));

		skb_put(rx_buf->skb, rx_buf->len);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
						       efx->net_dev);

		skb_record_rx_queue(rx_buf->skb, channel->channel);
	}

	if (likely(checksummed || rx_buf->page)) {
		efx_rx_packet_lro(channel, rx_buf, checksummed);
		return;
	}

	/* We now own the SKB */
	skb = rx_buf->skb;
	rx_buf->skb = NULL;
	EFX_BUG_ON_PARANOID(!skb);

	/* Set the SKB flags */
	skb->ip_summed = CHECKSUM_NONE;

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}

void efx_rx_strategy(struct efx_channel *channel)
{
	enum efx_rx_alloc_method method = rx_alloc_method;

	/* Only makes sense to use page based allocation if GRO is enabled */
	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
		method = RX_ALLOC_METHOD_SKB;
	} else if (method == RX_ALLOC_METHOD_AUTO) {
		/* Constrain the rx_alloc_level */
		if (channel->rx_alloc_level < 0)
			channel->rx_alloc_level = 0;
		else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

		/* Decide on the allocation method */
		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
	}

	/* Push the option */
	channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int rxq_size;
	int rc;

	EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);

	/* Allocate RX buffers */
	rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
	rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}
	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, limit;

	EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	rx_queue->min_overfill = -1U;

	/* Initialise limit fields */
	max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
	limit = max_fill * min(rx_refill_limit, 100U) / 100U;

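	/* With the default module parameters a refill is triggered once the
	 * ring drains below 90% of max_fill and tops it back up to 95%; the
	 * 5% gap keeps refills batched rather than descriptor-by-descriptor.
	 */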
	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->fast_fill_limit = limit;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers. NB: start at index 0, not the current HW ptr. */
	if (rx_queue->buffer) {
		for (i = 0; i <= EFX_RXQ_MASK; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring fast/slow fill threshold (%)");
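
/* Usage sketch (assuming the driver is built as the usual "sfc" module):
 * force skb-based allocation at load time with
 *	modprobe sfc rx_alloc_method=1
 * or change it at runtime via
 *	echo 2 > /sys/module/sfc/parameters/rx_alloc_method
 * rx_alloc_method is registered with mode 0644 and so is writable;
 * rx_refill_threshold is read-only (0444) and can only be set at load time.
 */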