/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX    2

struct virtnet_info
{
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq, *cvq;
	struct net_device *dev;
	struct napi_struct napi;
	unsigned int status;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Receive & send queues. */
	struct sk_buff_head recv;
	struct sk_buff_head send;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Chain pages by the private ptr. */
	struct page *pages;
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
	unsigned int num_sg;
};

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

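/* Pages for big receive packets are kept on a simple free list,
 * chained through page->private with vi->pages as the head. */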
static void give_a_page(struct virtnet_info *vi, struct page *page)
{
	page->private = (unsigned long)vi->pages;
	vi->pages = page;
}

static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		give_a_page(vi, skb_shinfo(skb)->frags[i].page);
	skb_shinfo(skb)->nr_frags = 0;
	skb->data_len = 0;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p)
		vi->pages = (struct page *)p->private;
	else
		p = alloc_page(gfp_mask);
	return p;
}

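/* Callback for the send virtqueue: the host has used at least one
 * transmitted buffer, so the queue may have room again. */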
static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	svq->vq_ops->disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);
}

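/* Handle one buffer returned by the receive virtqueue: rebuild the skb
 * (pulling in follow-on buffers when mergeable rx buffers are in use),
 * then apply the checksum and GSO hints from the virtio_net header
 * before handing the packet to the stack. */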
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
			unsigned len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	int err;
	int i;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		goto drop;
	}

	if (vi->mergeable_rx_bufs) {
		unsigned int copy;
		char *p = page_address(skb_shinfo(skb)->frags[0].page);

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
		len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);

		memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
		p += sizeof(hdr->mhdr);

		copy = len;
		if (copy > skb_tailroom(skb))
			copy = skb_tailroom(skb);

		memcpy(skb_put(skb, copy), p, copy);

		len -= copy;

		if (!len) {
			give_a_page(vi, skb_shinfo(skb)->frags[0].page);
			skb_shinfo(skb)->nr_frags--;
		} else {
			skb_shinfo(skb)->frags[0].page_offset +=
				sizeof(hdr->mhdr) + copy;
			skb_shinfo(skb)->frags[0].size = len;
			skb->data_len += len;
			skb->len += len;
		}

		while (--hdr->mhdr.num_buffers) {
			struct sk_buff *nskb;

			i = skb_shinfo(skb)->nr_frags;
			if (i >= MAX_SKB_FRAGS) {
				pr_debug("%s: packet too long %d\n", dev->name,
					 len);
				dev->stats.rx_length_errors++;
				goto drop;
			}

			nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
			if (!nskb) {
				pr_debug("%s: rx error: %d buffers missing\n",
					 dev->name, hdr->mhdr.num_buffers);
				dev->stats.rx_length_errors++;
				goto drop;
			}

			__skb_unlink(nskb, &vi->recv);
			vi->num--;

			skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
			skb_shinfo(nskb)->nr_frags = 0;
			kfree_skb(nskb);

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;

			skb_shinfo(skb)->frags[i].size = len;
			skb_shinfo(skb)->nr_frags++;
			skb->data_len += len;
			skb->len += len;
		}
	} else {
		len -= sizeof(hdr->hdr);

		if (len <= MAX_PACKET_LEN)
			trim_pages(vi, skb);

		err = pskb_trim(skb, len);
		if (err) {
			pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
				 len, err);
			dev->stats.rx_dropped++;
			goto drop;
		}
	}

	skb->truesize += skb->data_len;
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
drop:
	dev_kfree_skb(skb);
}

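/* Fill the receive queue with full-sized skbs (plus page fragments when
 * big_packets is set).  Returns false if an allocation failed (OOM). */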
static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	int num, err, i;
	bool oom = false;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);
	do {
		struct skb_vnet_hdr *hdr;

		skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
		if (unlikely(!skb)) {
			oom = true;
			break;
		}

		skb_put(skb, MAX_PACKET_LEN);

		hdr = skb_vnet_hdr(skb);
		sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));

		if (vi->big_packets) {
			for (i = 0; i < MAX_SKB_FRAGS; i++) {
				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
				f->page = get_a_page(vi, gfp);
				if (!f->page)
					break;

				f->page_offset = 0;
				f->size = PAGE_SIZE;

				skb->data_len += PAGE_SIZE;
				skb->len += PAGE_SIZE;

				skb_shinfo(skb)->nr_frags++;
			}
		}

		num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
		if (err < 0) {
			skb_unlink(skb, &vi->recv);
			trim_pages(vi, skb);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	} while (err >= num);
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
	return !oom;
}

/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct scatterlist sg[1];
	int err;
	bool oom = false;

	if (!vi->mergeable_rx_bufs)
		return try_fill_recv_maxbufs(vi, gfp);

	do {
		skb_frag_t *f;

		skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
		if (unlikely(!skb)) {
			oom = true;
			break;
		}

		f = &skb_shinfo(skb)->frags[0];
		f->page = get_a_page(vi, gfp);
		if (!f->page) {
			oom = true;
			kfree_skb(skb);
			break;
		}

		f->page_offset = 0;
		f->size = PAGE_SIZE;

		skb_shinfo(skb)->nr_frags++;

		sg_init_one(sg, page_address(f->page), PAGE_SIZE);
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
		if (err < 0) {
			skb_unlink(skb, &vi->recv);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	} while (err > 0);
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
	return !oom;
}

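/* Callback for the receive virtqueue: hand processing over to NAPI. */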
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI; suppress further interrupts if successful. */
	if (napi_schedule_prep(&vi->napi)) {
		rvq->vq_ops->disable_cb(rvq);
		__napi_schedule(&vi->napi);
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi;
	bool still_empty;

	vi = container_of(work, struct virtnet_info, refill.work);
	napi_disable(&vi->napi);
	try_fill_recv(vi, GFP_KERNEL);
	still_empty = (vi->num == 0);
	napi_enable(&vi->napi);

	/* In theory, this can happen: if we don't get any buffers in,
	 * we will *never* try to fill again. */
	if (still_empty)
		schedule_delayed_work(&vi->refill, HZ/2);
}

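/* NAPI poll: drain received buffers, refill the ring if it is running
 * low, and re-enable receive interrupts once we are under budget. */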
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	struct sk_buff *skb = NULL;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		__skb_unlink(skb, &vi->recv);
		receive_skb(vi->dev, skb, len);
		vi->num--;
		received++;
	}

	if (vi->num < vi->max / 2) {
		if (!try_fill_recv(vi, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
		    napi_schedule_prep(napi)) {
			vi->rvq->vq_ops->disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

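/* Reclaim skbs the host has finished transmitting; returns the total
 * number of scatter-gather entries freed, i.e. capacity regained. */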
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len, tot_sgs = 0;

	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);
		__skb_unlink(skb, &vi->send);
		vi->dev->stats.tx_bytes += skb->len;
		vi->dev->stats.tx_packets++;
		tot_sgs += skb_vnet_hdr(skb)->num_sg;
		dev_kfree_skb_any(skb);
	}
	return tot_sgs;
}

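/* Build the virtio_net header for this skb and add it to the send
 * virtqueue.  Returns the remaining ring capacity, or a negative errno
 * if add_buf() failed. */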
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	hdr->mhdr.num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
	else
		sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));

	hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
	return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int capacity;

again:
	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* Try to transmit */
	capacity = xmit_skb(vi, skb);

	/* This can happen with OOM and indirect buffers. */
	if (unlikely(capacity < 0)) {
		netif_stop_queue(dev);
		dev_warn(&dev->dev, "Unexpected full queue\n");
		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
			vi->svq->vq_ops->disable_cb(vi->svq);
			netif_start_queue(dev);
			goto again;
		}
		return NETDEV_TX_BUSY;
	}
	vi->svq->vq_ops->kick(vi->svq);

	/*
	 * Put new one in send queue.  You'd expect we'd need this before
	 * xmit_skb calls add_buf(), since the callback can be triggered
	 * immediately after that.  But since the callback just triggers
	 * another call back here, normal network xmit locking prevents the
	 * race.
	 */
	__skb_queue_head(&vi->send, skb);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (capacity < 2+MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
			/* More just got used, free them then recheck. */
			capacity += free_old_xmit_skbs(vi);
			if (capacity >= 2+MAX_SKB_FRAGS) {
				netif_start_queue(dev);
				vi->svq->vq_ops->disable_cb(vi->svq);
			}
		}
	}

	return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_enable(&vi->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED */
	if (napi_schedule_prep(&vi->napi)) {
		vi->rvq->vq_ops->disable_cb(vi->rvq);
		__napi_schedule(&vi->napi);
	}
	return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
	       (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);

	vi->cvq->vq_ops->kick(vi->cvq);

	/*
	 * Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_disable(&vi->napi);

	return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
		return -ENOSYS;

	return ethtool_op_set_tx_hw_csum(dev, data);
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct dev_addr_list *addr;
	struct netdev_hw_addr *ha;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	/* MAC filter - use one buffer for both lists */
	mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
				 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	if (!buf) {
		dev_warn(&dev->dev, "No memory for MAC address buffer\n");
		return;
	}

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = dev->uc.count;
	i = 0;
	list_for_each_entry(ha, &dev->uc.list, list)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[dev->uc.count][0];

	mac_data->entries = dev->mc_count;
	addr = dev->mc_list;
	for (i = 0; i < dev->mc_count; i++, addr = addr->next)
		memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, 2, 0))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.set_tx_csum = virtnet_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.set_tso = ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu      = virtnet_change_mtu,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

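/* Re-read the link status from config space and bring the carrier and
 * transmit queue up or down to match. */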
static void virtnet_update_status(struct virtnet_info *vi)
{
	u16 v;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
		return;

	vi->vdev->config->get(vi->vdev,
			      offsetof(struct virtio_net_config, status),
			      &v, sizeof(v));

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		return;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_wake_queue(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_stop_queue(vi->dev);
	}
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_update_status(vi);
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;
	struct virtqueue *vqs[3];
	vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
	const char *names[] = { "input", "output", "control" };
	int nvqs;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;
	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->features |= NETIF_F_TSO;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->features |= NETIF_F_TSO6;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->features |= NETIF_F_TSO_ECN;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->features |= NETIF_F_UFO;
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->get(vdev,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);
	} else
		random_ether_addr(dev->dev_addr);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;
	INIT_DELAYED_WORK(&vi->refill, refill_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	/* We expect two virtqueues, receive then send,
	 * and optionally control. */
	nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

	err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
	if (err)
		goto free;

	vi->rvq = vqs[0];
	vi->svq = vqs[1];

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
		vi->cvq = vqs[2];

		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			dev->features |= NETIF_F_HW_VLAN_FILTER;
	}

	/* Initialize our empty receive and send queues. */
	skb_queue_head_init(&vi->recv);
	skb_queue_head_init(&vi->send);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi, GFP_KERNEL);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	vi->status = VIRTIO_NET_S_LINK_UP;
	virtnet_update_status(vi);
	netif_carrier_on(dev);

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

unregister:
	unregister_netdev(dev);
	cancel_delayed_work_sync(&vi->refill);
free_vqs:
	vdev->config->del_vqs(vdev);
free:
	free_netdev(dev);
	return err;
}

static void __devexit virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	struct sk_buff *skb;

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	/* Free our skbs in send and recv queues, if any. */
	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
		kfree_skb(skb);
		vi->num--;
	}
	__skb_queue_purge(&vi->send);

	BUG_ON(vi->num != 0);

	unregister_netdev(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	vdev->config->del_vqs(vi->vdev);

	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);

	free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtnet_probe,
	.remove = __devexit_p(virtnet_remove),
	.config_changed = virtnet_config_changed,
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_net_driver);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");