/* A simple network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN 128

#define VIRTNET_SEND_COMMAND_SG_MAX 2

struct virtnet_info
{
        struct virtio_device *vdev;
        struct virtqueue *rvq, *svq, *cvq;
        struct net_device *dev;
        struct napi_struct napi;
        unsigned int status;

        /* The skb we couldn't send because buffers were full. */
        struct sk_buff *last_xmit_skb;

        /* If we need to free in a timer, this is it. */
        struct timer_list xmit_free_timer;

        /* Number of input buffers, and max we've ever had. */
        unsigned int num, max;

        /* For cleaning up after transmission. */
        struct tasklet_struct tasklet;
        bool free_in_tasklet;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Receive & send queues. */
        struct sk_buff_head recv;
        struct sk_buff_head send;

        /* Chain pages by the private ptr. */
        struct page *pages;
};

static inline void *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct virtio_net_hdr *)skb->cb;
}

static void give_a_page(struct virtnet_info *vi, struct page *page)
{
        page->private = (unsigned long)vi->pages;
        vi->pages = page;
}

static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
        unsigned int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                give_a_page(vi, skb_shinfo(skb)->frags[i].page);
        skb_shinfo(skb)->nr_frags = 0;
        skb->data_len = 0;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
        struct page *p = vi->pages;

        if (p)
                vi->pages = (struct page *)p->private;
        else
                p = alloc_page(gfp_mask);
        return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
        struct virtnet_info *vi = svq->vdev->priv;

        /* Suppress further interrupts. */
        svq->vq_ops->disable_cb(svq);

        /* We were probably waiting for more output buffers. */
        netif_wake_queue(vi->dev);

        /* Make sure we re-xmit last_xmit_skb: if there are no more packets
         * queued, start_xmit won't be called. */
        tasklet_schedule(&vi->tasklet);
}

/* Process a completed receive buffer: set checksum/GSO metadata and pass
 * the skb up the stack. */
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
                        unsigned len)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
        int err;
        int i;

        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                goto drop;
        }

        if (vi->mergeable_rx_bufs) {
                struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
                unsigned int copy;
                char *p = page_address(skb_shinfo(skb)->frags[0].page);

                if (len > PAGE_SIZE)
                        len = PAGE_SIZE;
                len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);

                memcpy(hdr, p, sizeof(*mhdr));
                p += sizeof(*mhdr);

                copy = len;
                if (copy > skb_tailroom(skb))
                        copy = skb_tailroom(skb);

                memcpy(skb_put(skb, copy), p, copy);

                len -= copy;

                if (!len) {
                        give_a_page(vi, skb_shinfo(skb)->frags[0].page);
                        skb_shinfo(skb)->nr_frags--;
                } else {
                        skb_shinfo(skb)->frags[0].page_offset +=
                                sizeof(*mhdr) + copy;
                        skb_shinfo(skb)->frags[0].size = len;
                        skb->data_len += len;
                        skb->len += len;
                }

                while (--mhdr->num_buffers) {
                        struct sk_buff *nskb;

                        i = skb_shinfo(skb)->nr_frags;
                        if (i >= MAX_SKB_FRAGS) {
                                pr_debug("%s: packet too long %d\n", dev->name,
                                         len);
                                dev->stats.rx_length_errors++;
                                goto drop;
                        }

                        nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
                        if (!nskb) {
                                pr_debug("%s: rx error: %d buffers missing\n",
                                         dev->name, mhdr->num_buffers);
                                dev->stats.rx_length_errors++;
                                goto drop;
                        }

                        __skb_unlink(nskb, &vi->recv);
                        vi->num--;

                        skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
                        skb_shinfo(nskb)->nr_frags = 0;
                        kfree_skb(nskb);

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;

                        skb_shinfo(skb)->frags[i].size = len;
                        skb_shinfo(skb)->nr_frags++;
                        skb->data_len += len;
                        skb->len += len;
                }
        } else {
                len -= sizeof(struct virtio_net_hdr);

                if (len <= MAX_PACKET_LEN)
                        trim_pages(vi, skb);

                err = pskb_trim(skb, len);
                if (err) {
                        pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
                                 len, err);
                        dev->stats.rx_dropped++;
                        goto drop;
                }
        }

        skb->truesize += skb->data_len;
        dev->stats.rx_bytes += skb->len;
        dev->stats.rx_packets++;

        if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
                if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset))
                        goto frame_err;
        }

        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                default:
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: bad gso type %u.\n",
                                       dev->name, hdr->gso_type);
                        goto frame_err;
                }

                if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

                skb_shinfo(skb)->gso_size = hdr->gso_size;
                if (skb_shinfo(skb)->gso_size == 0) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: zero gso size.\n",
                                       dev->name);
                        goto frame_err;
                }

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }

        netif_receive_skb(skb);
        return;

frame_err:
        dev->stats.rx_frame_errors++;
drop:
        dev_kfree_skb(skb);
}

static void try_fill_recv_maxbufs(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        int num, err, i;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);
        for (;;) {
                struct virtio_net_hdr *hdr;

                skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
                if (unlikely(!skb))
                        break;

                skb_reserve(skb, NET_IP_ALIGN);
                skb_put(skb, MAX_PACKET_LEN);

                hdr = skb_vnet_hdr(skb);
                sg_set_buf(sg, hdr, sizeof(*hdr));

                if (vi->big_packets) {
                        for (i = 0; i < MAX_SKB_FRAGS; i++) {
                                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                                f->page = get_a_page(vi, GFP_ATOMIC);
                                if (!f->page)
                                        break;

                                f->page_offset = 0;
                                f->size = PAGE_SIZE;

                                skb->data_len += PAGE_SIZE;
                                skb->len += PAGE_SIZE;

                                skb_shinfo(skb)->nr_frags++;
                        }
                }

                num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
                if (err) {
                        skb_unlink(skb, &vi->recv);
                        trim_pages(vi, skb);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        }
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
}

static void try_fill_recv(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        struct scatterlist sg[1];
        int err;

        if (!vi->mergeable_rx_bufs) {
                try_fill_recv_maxbufs(vi);
                return;
        }

        for (;;) {
                skb_frag_t *f;

                skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
                if (unlikely(!skb))
                        break;

                skb_reserve(skb, NET_IP_ALIGN);

                f = &skb_shinfo(skb)->frags[0];
                f->page = get_a_page(vi, GFP_ATOMIC);
                if (!f->page) {
                        kfree_skb(skb);
                        break;
                }

                f->page_offset = 0;
                f->size = PAGE_SIZE;

                skb_shinfo(skb)->nr_frags++;

                sg_init_one(sg, page_address(f->page), PAGE_SIZE);
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
                if (err) {
                        skb_unlink(skb, &vi->recv);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        }
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
}

static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;
        /* Schedule NAPI; suppress further interrupts if successful. */
        if (napi_schedule_prep(&vi->napi)) {
                rvq->vq_ops->disable_cb(rvq);
                __napi_schedule(&vi->napi);
        }
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
        struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
        struct sk_buff *skb = NULL;
        unsigned int len, received = 0;

again:
        while (received < budget &&
               (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
                __skb_unlink(skb, &vi->recv);
                receive_skb(vi->dev, skb, len);
                vi->num--;
                received++;
        }

        /* FIXME: If we oom and completely run out of inbufs, we need
         * to start a timer trying to fill more. */
        if (vi->num < vi->max / 2)
                try_fill_recv(vi);

        /* Out of packets? */
        if (received < budget) {
                napi_complete(napi);
                if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
                    && napi_schedule_prep(napi)) {
                        vi->rvq->vq_ops->disable_cb(vi->rvq);
                        __napi_schedule(napi);
                        goto again;
                }
        }

        return received;
}

static void free_old_xmit_skbs(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        unsigned int len;

        while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);
                __skb_unlink(skb, &vi->send);
                vi->dev->stats.tx_bytes += skb->len;
                vi->dev->stats.tx_packets++;
                kfree_skb(skb);
        }
}

/* If the virtio transport doesn't always notify us when all in-flight packets
 * are consumed, we fall back to using this function on a timer to free them. */
static void xmit_free(unsigned long data)
{
        struct virtnet_info *vi = (void *)data;

        netif_tx_lock(vi->dev);

        free_old_xmit_skbs(vi);

        if (!skb_queue_empty(&vi->send))
                mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

        netif_tx_unlock(vi->dev);
}

/* Fill in the virtio header for this skb and add it to the send virtqueue. */
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
        int num, err;
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
        struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);

        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                hdr->csum_start = skb->csum_start - skb_headroom(skb);
                hdr->csum_offset = skb->csum_offset;
        } else {
                hdr->flags = 0;
                hdr->csum_offset = hdr->csum_start = 0;
        }

        if (skb_is_gso(skb)) {
                hdr->hdr_len = skb_headlen(skb);
                hdr->gso_size = skb_shinfo(skb)->gso_size;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
                        hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else {
                hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
                hdr->gso_size = hdr->hdr_len = 0;
        }

        mhdr->num_buffers = 0;

        /* Encode metadata header at front. */
        if (vi->mergeable_rx_bufs)
                sg_set_buf(sg, mhdr, sizeof(*mhdr));
        else
                sg_set_buf(sg, hdr, sizeof(*hdr));

        num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;

        err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
        if (!err && !vi->free_in_tasklet)
                mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

        return err;
}

static void xmit_tasklet(unsigned long data)
{
        struct virtnet_info *vi = (void *)data;

        netif_tx_lock_bh(vi->dev);
        if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) {
                vi->svq->vq_ops->kick(vi->svq);
                vi->last_xmit_skb = NULL;
        }
        if (vi->free_in_tasklet)
                free_old_xmit_skbs(vi);
        netif_tx_unlock_bh(vi->dev);
}

static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

again:
        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(vi);

        /* If we have a buffer left over from last time, send it now. */
        if (unlikely(vi->last_xmit_skb) &&
            xmit_skb(vi, vi->last_xmit_skb) != 0)
                goto stop_queue;

        vi->last_xmit_skb = NULL;

        /* Put new one in send queue and do transmit */
        if (likely(skb)) {
                __skb_queue_head(&vi->send, skb);
                if (xmit_skb(vi, skb) != 0) {
                        vi->last_xmit_skb = skb;
                        skb = NULL;
                        goto stop_queue;
                }
        }
done:
        vi->svq->vq_ops->kick(vi->svq);
        return NETDEV_TX_OK;

stop_queue:
        pr_debug("%s: virtio not prepared to send\n", dev->name);
        netif_stop_queue(dev);

        /* Activate callback for using skbs: if this returns false it
         * means some were used in the meantime. */
        if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                vi->svq->vq_ops->disable_cb(vi->svq);
                netif_start_queue(dev);
                goto again;
        }
        if (skb) {
                /* Drop this skb: we only queue one. */
                vi->dev->stats.tx_dropped++;
                kfree_skb(skb);
        }
        goto done;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;
        int ret;

        ret = eth_mac_addr(dev, p);
        if (ret)
                return ret;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
                vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_enable(&vi->napi);

        /* If all buffers were filled by other side before we napi_enabled, we
         * won't get another interrupt, so process any outstanding packets
         * now.  virtnet_poll wants to re-enable the queue, so we disable here.
         * We synchronize against interrupts via NAPI_STATE_SCHED */
        if (napi_schedule_prep(&vi->napi)) {
                vi->rvq->vq_ops->disable_cb(vi->rvq);
                __napi_schedule(&vi->napi);
        }
        return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                 struct scatterlist *data, int out, int in)
{
        struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
        struct virtio_net_ctrl_hdr ctrl;
        virtio_net_ctrl_ack status = ~0;
        unsigned int tmp;
        int i;

        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
               (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

        out++; /* Add header */
        in++; /* Add return status */

        ctrl.class = class;
        ctrl.cmd = cmd;

        sg_init_table(sg, out + in);

        sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
        for_each_sg(data, s, out + in - 2, i)
                sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
        sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

        BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi));

        vi->cvq->vq_ops->kick(vi->cvq);

        /*
         * Spin for a response, the kick causes an ioport write, trapping
         * into the hypervisor, so the request should be handled immediately.
         */
        while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
                cpu_relax();

        return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_disable(&vi->napi);

        return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;

        if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
                return -ENOSYS;

        return ethtool_op_set_tx_hw_csum(dev, data);
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg[2];
        u8 promisc, allmulti;
        struct virtio_net_ctrl_mac *mac_data;
        struct dev_addr_list *addr;
        struct netdev_hw_addr *ha;
        void *buf;
        int i;

        /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;

        promisc = ((dev->flags & IFF_PROMISC) != 0);
        allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

        sg_init_one(sg, &promisc, sizeof(promisc));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_PROMISC,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
                         promisc ? "en" : "dis");

        sg_init_one(sg, &allmulti, sizeof(allmulti));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_ALLMULTI,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         allmulti ? "en" : "dis");

        /* MAC filter - use one buffer for both lists */
        mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
                                 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
        if (!buf) {
                dev_warn(&dev->dev, "No memory for MAC address buffer\n");
                return;
        }

        sg_init_table(sg, 2);

        /* Store the unicast list and count in the front of the buffer */
        mac_data->entries = dev->uc.count;
        i = 0;
        list_for_each_entry(ha, &dev->uc.list, list)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[0], mac_data,
                   sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));

        /* multicast list and count fill the end */
        mac_data = (void *)&mac_data->macs[dev->uc.count][0];

        mac_data->entries = dev->mc_count;
        addr = dev->mc_list;
        for (i = 0; i < dev->mc_count; i++, addr = addr->next)
                memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);

        sg_set_buf(&sg[1], mac_data,
                   sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                  sg, 2, 0))
                dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

        kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static struct ethtool_ops virtnet_ethtool_ops = {
        .set_tx_csum = virtnet_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .set_tso = ethtool_op_set_tso,
        .set_ufo = ethtool_op_set_ufo,
        .get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

static const struct net_device_ops virtnet_netdev = {
        .ndo_open             = virtnet_open,
        .ndo_stop             = virtnet_close,
        .ndo_start_xmit       = start_xmit,
        .ndo_validate_addr    = eth_validate_addr,
        .ndo_set_mac_address  = virtnet_set_mac_address,
        .ndo_set_rx_mode      = virtnet_set_rx_mode,
        .ndo_change_mtu       = virtnet_change_mtu,
        .ndo_vlan_rx_add_vid  = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller  = virtnet_netpoll,
#endif
};

static void virtnet_update_status(struct virtnet_info *vi)
{
        u16 v;

        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
                return;

        vi->vdev->config->get(vi->vdev,
                              offsetof(struct virtio_net_config, status),
                              &v, sizeof(v));

        /* Ignore unknown (future) status bits */
        v &= VIRTIO_NET_S_LINK_UP;

        if (vi->status == v)
                return;

        vi->status = v;

        if (vi->status & VIRTIO_NET_S_LINK_UP) {
                netif_carrier_on(vi->dev);
                netif_wake_queue(vi->dev);
        } else {
                netif_carrier_off(vi->dev);
                netif_stop_queue(vi->dev);
        }
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        virtnet_update_status(vi);
}

static int virtnet_probe(struct virtio_device *vdev)
{
        int err;
        struct net_device *dev;
        struct virtnet_info *vi;
        struct virtqueue *vqs[3];
        vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
        const char *names[] = { "input", "output", "control" };
        int nvqs;

        /* Allocate ourselves a network device with room for our info */
        dev = alloc_etherdev(sizeof(struct virtnet_info));
        if (!dev)
                return -ENOMEM;

        /* Set up network device as normal. */
        dev->netdev_ops = &virtnet_netdev;
        dev->features = NETIF_F_HIGHDMA;
        SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
        SET_NETDEV_DEV(dev, &vdev->dev);

        /* Do we support "hardware" checksums? */
        if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
                dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
                        dev->features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
                        dev->features |= NETIF_F_TSO;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
                        dev->features |= NETIF_F_TSO6;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->features |= NETIF_F_TSO_ECN;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
                        dev->features |= NETIF_F_UFO;
        }

        /* Configuration may specify what MAC to use.  Otherwise random. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
                vdev->config->get(vdev,
                                  offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);
        } else
                random_ether_addr(dev->dev_addr);

        /* Set up our device-specific information */
        vi = netdev_priv(dev);
        netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
        vi->dev = dev;
        vi->vdev = vdev;
        vdev->priv = vi;
        vi->pages = NULL;

        /* If they give us a callback when all buffers are done, we don't need
         * the timer. */
        vi->free_in_tasklet = virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);

        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
            || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
            || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
                vi->big_packets = true;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;

        /* We expect two virtqueues, receive then send,
         * and optionally control. */
        nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

        err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
        if (err)
                goto free;

        vi->rvq = vqs[0];
        vi->svq = vqs[1];

        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
                vi->cvq = vqs[2];

                if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
                        dev->features |= NETIF_F_HW_VLAN_FILTER;
        }

        /* Initialize our empty receive and send queues. */
        skb_queue_head_init(&vi->recv);
        skb_queue_head_init(&vi->send);

        tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);

        if (!vi->free_in_tasklet)
                setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);

        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
                goto free_vqs;
        }

        /* Last of all, set up some receive buffers. */
        try_fill_recv(vi);

        /* If we didn't even get one input buffer, we're useless. */
        if (vi->num == 0) {
                err = -ENOMEM;
                goto unregister;
        }

        vi->status = VIRTIO_NET_S_LINK_UP;
        virtnet_update_status(vi);
        netif_carrier_on(dev);

        pr_debug("virtnet: registered device %s\n", dev->name);
        return 0;

unregister:
        unregister_netdev(dev);
free_vqs:
        vdev->config->del_vqs(vdev);
free:
        free_netdev(dev);
        return err;
}

static void virtnet_remove(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;
        struct sk_buff *skb;

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        if (!vi->free_in_tasklet)
                del_timer_sync(&vi->xmit_free_timer);

        /* Free our skbs in send and recv queues, if any. */
        while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
                kfree_skb(skb);
                vi->num--;
        }
        __skb_queue_purge(&vi->send);

        BUG_ON(vi->num != 0);

        unregister_netdev(vi->dev);

        vdev->config->del_vqs(vi->vdev);

        while (vi->pages)
                __free_pages(get_a_page(vi, GFP_KERNEL), 0);

        free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
        VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
        VIRTIO_F_NOTIFY_ON_EMPTY,
};

static struct virtio_driver virtio_net = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = virtnet_probe,
        .remove = __devexit_p(virtnet_remove),
        .config_changed = virtnet_config_changed,
};

static int __init init(void)
{
        return register_virtio_driver(&virtio_net);
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");