1 | /* A simple network driver using virtio. |
2 | * | |
3 | * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License as published by | |
7 | * the Free Software Foundation; either version 2 of the License, or | |
8 | * (at your option) any later version. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, | |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | * GNU General Public License for more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License | |
16 | * along with this program; if not, write to the Free Software | |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
18 | */ | |
19 | //#define DEBUG | |
20 | #include <linux/netdevice.h> | |
21 | #include <linux/etherdevice.h> | |
22 | #include <linux/module.h> | |
23 | #include <linux/virtio.h> | |
24 | #include <linux/virtio_net.h> | |
25 | #include <linux/scatterlist.h> | |
26 | ||
/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN+ETH_DATA_LEN)

/* Per-device driver state, stored in the net_device's private area. */
struct virtnet_info
{
	struct virtio_device *vdev;
	/* rvq: receive virtqueue, svq: send virtqueue. */
	struct virtqueue *rvq, *svq;
	struct net_device *dev;
	struct napi_struct napi;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* Receive & send queues.  Every skb posted to a virtqueue is also
	 * kept on one of these lists so it can be freed on close. */
	struct sk_buff_head recv;
	struct sk_buff_head send;
};
44 | ||
45 | static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb) | |
46 | { | |
47 | return (struct virtio_net_hdr *)skb->cb; | |
48 | } | |
49 | ||
50 | static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb) | |
51 | { | |
52 | sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr)); | |
53 | } | |
54 | ||
55 | static bool skb_xmit_done(struct virtqueue *rvq) | |
56 | { | |
57 | struct virtnet_info *vi = rvq->vdev->priv; | |
58 | ||
59 | /* In case we were waiting for output buffers. */ | |
60 | netif_wake_queue(vi->dev); | |
61 | return true; | |
62 | } | |
63 | ||
/* Process one received packet: validate it, strip the virtio_net_hdr,
 * apply the checksum/GSO metadata from the header, and hand the skb to
 * the stack.  Always consumes the skb (frees it on any error path). */
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
			unsigned len)
{
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);

	/* 'len' as reported by the host must cover the virtio header plus
	 * at least an Ethernet header. */
	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		goto drop;
	}
	len -= sizeof(struct virtio_net_hdr);
	/* Receive buffers are posted with exactly MAX_PACKET_LEN bytes of
	 * packet data, so the host cannot legitimately exceed this. */
	BUG_ON(len > MAX_PACKET_LEN);

	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		/* Host supplied partial-checksum offsets; reject the frame
		 * if they don't validate against the skb. */
		if (!skb_partial_csum_set(skb,hdr->csum_start,hdr->csum_offset))
			goto frame_err;
	}

	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		/* Translate the virtio GSO type into the stack's
		 * SKB_GSO_* equivalent. */
		switch (hdr->gso_type) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV4_ECN:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCP_ECN;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->gso_type);
			goto frame_err;
		}

		skb_shinfo(skb)->gso_size = hdr->gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
drop:
	dev_kfree_skb(skb);
}
133 | ||
/* Keep the receive virtqueue stocked: allocate max-size skbs and post
 * each one (virtio header as sg entry 0, skb data after) until the
 * queue refuses more buffers or allocation fails.  Updates vi->num and
 * vi->max, and kicks the host once at the end. */
static void try_fill_recv(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	struct scatterlist sg[1+MAX_SKB_FRAGS];
	int num, err;

	sg_init_table(sg, 1+MAX_SKB_FRAGS);
	for (;;) {
		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
		if (unlikely(!skb))
			break;

		skb_put(skb, MAX_PACKET_LEN);
		vnet_hdr_to_sg(sg, skb);
		/* +1 for the header entry at sg[0]. */
		num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
		/* Track the skb ourselves before handing it to the host. */
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
		if (err) {
			/* Queue is full: back the skb out and stop. */
			skb_unlink(skb, &vi->recv);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	/* Notify the host of the newly-posted buffers. */
	vi->rvq->vq_ops->kick(vi->rvq);
}
163 | ||
164 | static bool skb_recv_done(struct virtqueue *rvq) | |
165 | { | |
166 | struct virtnet_info *vi = rvq->vdev->priv; | |
167 | netif_rx_schedule(vi->dev, &vi->napi); | |
168 | /* Suppress further interrupts. */ | |
169 | return false; | |
170 | } | |
171 | ||
/* NAPI poll routine: drain up to 'budget' received buffers, refill the
 * receive queue when it runs low, and re-enable queue callbacks once
 * caught up.  Returns the number of packets processed. */
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	struct sk_buff *skb = NULL;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		__skb_unlink(skb, &vi->recv);
		receive_skb(vi->dev, skb, len);
		vi->num--;
		received++;
	}

	/* FIXME: If we oom and completely run out of inbufs, we need
	 * to start a timer trying to fill more. */
	if (vi->num < vi->max / 2)
		try_fill_recv(vi);

	/* Out of packets? */
	if (received < budget) {
		netif_rx_complete(vi->dev, napi);
		/* restart() re-enables callbacks; if it reports a buffer
		 * arrived in the window since the last get_buf, reschedule
		 * and keep polling to avoid losing it. */
		if (unlikely(!vi->rvq->vq_ops->restart(vi->rvq))
		    && netif_rx_reschedule(vi->dev, napi))
			goto again;
	}

	return received;
}
202 | ||
203 | static void free_old_xmit_skbs(struct virtnet_info *vi) | |
204 | { | |
205 | struct sk_buff *skb; | |
206 | unsigned int len; | |
207 | ||
208 | while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) { | |
209 | pr_debug("Sent skb %p\n", skb); | |
210 | __skb_unlink(skb, &vi->send); | |
211 | vi->dev->stats.tx_bytes += len; | |
212 | vi->dev->stats.tx_packets++; | |
213 | kfree_skb(skb); | |
214 | } | |
215 | } | |
216 | ||
/* Transmit one skb: build a virtio_net_hdr describing its checksum and
 * GSO state in the skb's cb area, then post header+data to the send
 * virtqueue.  Returns NETDEV_TX_BUSY (and stops the queue) if the
 * virtqueue has no room. */
static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int num, err;
	struct scatterlist sg[1+MAX_SKB_FRAGS];
	struct virtio_net_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	DECLARE_MAC_BUF(mac);

	sg_init_table(sg, 1+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p %s\n", dev->name, skb, print_mac(mac, dest));

	/* Reclaim buffers the host has finished with before queuing more. */
	free_old_xmit_skbs(vi);

	/* Encode metadata header at front. */
	hdr = skb_vnet_hdr(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		/* csum_start is relative to the start of packet data. */
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_offset = hdr->csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		/* The ECN bit is tested before the plain TCP bits. */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4_ECN;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
	} else {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->gso_size = 0;
	}

	vnet_hdr_to_sg(sg, skb);
	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
	/* Track the skb on our send list before handing it to the host. */
	__skb_queue_head(&vi->send, skb);
	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (err) {
		/* No descriptors free: stop the queue until skb_xmit_done
		 * wakes it, and ask the core to requeue this skb. */
		pr_debug("%s: virtio not prepared to send\n", dev->name);
		skb_unlink(skb, &vi->send);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	vi->svq->vq_ops->kick(vi->svq);

	return 0;
}
274 | ||
275 | static int virtnet_open(struct net_device *dev) | |
276 | { | |
277 | struct virtnet_info *vi = netdev_priv(dev); | |
278 | ||
279 | try_fill_recv(vi); | |
280 | ||
281 | /* If we didn't even get one input buffer, we're useless. */ | |
282 | if (vi->num == 0) | |
283 | return -ENOMEM; | |
284 | ||
285 | napi_enable(&vi->napi); | |
286 | return 0; | |
287 | } | |
288 | ||
/* net_device stop: disable NAPI, shut down both virtqueues, and free
 * every skb still posted on the receive and send lists. */
static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct sk_buff *skb;

	napi_disable(&vi->napi);

	/* networking core has neutered skb_xmit_done/skb_recv_done, so don't
	 * worry about races vs. get(). */
	vi->rvq->vq_ops->shutdown(vi->rvq);
	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
		kfree_skb(skb);
		vi->num--;
	}
	vi->svq->vq_ops->shutdown(vi->svq);
	while ((skb = __skb_dequeue(&vi->send)) != NULL)
		kfree_skb(skb);

	/* Every posted receive buffer must now be accounted for. */
	BUG_ON(vi->num != 0);
	return 0;
}
310 | ||
/* Probe: allocate a net_device, read feature bits and MAC from virtio
 * config space, find the receive and send virtqueues, and register the
 * device.  On failure, unwinds via the goto chain at the bottom. */
static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	unsigned int len;
	struct net_device *dev;
	struct virtnet_info *vi;
	void *token;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	/* NOTE(review): alloc_etherdev() normally performs ether_setup()
	 * itself, which would make this call redundant — confirm. */
	ether_setup(dev);
	dev->open = virtnet_open;
	dev->stop = virtnet_close;
	dev->hard_start_xmit = start_xmit;
	dev->features = NETIF_F_HIGHDMA;
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	token = vdev->config->find(vdev, VIRTIO_CONFIG_NET_F, &len);
	if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_NO_CSUM)) {
		/* This opens up the world of extra features. */
		dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_TSO4))
			dev->features |= NETIF_F_TSO;
		if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_UFO))
			dev->features |= NETIF_F_UFO;
		if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_TSO4_ECN))
			dev->features |= NETIF_F_TSO_ECN;
		if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_TSO6))
			dev->features |= NETIF_F_TSO6;
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	token = vdev->config->find(vdev, VIRTIO_CONFIG_NET_MAC_F, &len);
	if (token) {
		dev->addr_len = len;
		vdev->config->get(vdev, token, dev->dev_addr, len);
	} else
		random_ether_addr(dev->dev_addr);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	/* NAPI weight of 16 packets per poll. */
	netif_napi_add(dev, &vi->napi, virtnet_poll, 16);
	vi->dev = dev;
	vi->vdev = vdev;

	/* We expect two virtqueues, receive then send. */
	vi->rvq = vdev->config->find_vq(vdev, skb_recv_done);
	if (IS_ERR(vi->rvq)) {
		err = PTR_ERR(vi->rvq);
		goto free;
	}

	vi->svq = vdev->config->find_vq(vdev, skb_xmit_done);
	if (IS_ERR(vi->svq)) {
		err = PTR_ERR(vi->svq);
		goto free_recv;
	}

	/* Initialize our empty receive and send queues. */
	skb_queue_head_init(&vi->recv);
	skb_queue_head_init(&vi->send);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_send;
	}
	pr_debug("virtnet: registered device %s\n", dev->name);
	vdev->priv = vi;
	return 0;

free_send:
	vdev->config->del_vq(vi->svq);
free_recv:
	vdev->config->del_vq(vi->rvq);
free:
	free_netdev(dev);
	return err;
}
395 | ||
396 | static void virtnet_remove(struct virtio_device *vdev) | |
397 | { | |
74b2553f RR |
398 | struct virtnet_info *vi = vdev->priv; |
399 | ||
400 | vdev->config->del_vq(vi->svq); | |
401 | vdev->config->del_vq(vi->rvq); | |
402 | unregister_netdev(vi->dev); | |
403 | free_netdev(vi->dev); | |
296f96fc RR |
404 | } |
405 | ||
/* Devices this driver binds: any virtio network device. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
410 | ||
/* Virtio driver glue: probe/remove entry points and the ID table. */
static struct virtio_driver virtio_net = {
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtnet_probe,
	.remove =	__devexit_p(virtnet_remove),
};
418 | ||
/* Module load: register with the virtio bus. */
static int __init init(void)
{
	return register_virtio_driver(&virtio_net);
}

/* Module unload: unregister from the virtio bus. */
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");