/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL   50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
                (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
                                sizeof(struct iphdr) + sizeof(struct ethhdr))

static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

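/*
 * Drain the deferred transmit queue from process context. Packets that
 * could not be sent directly land here; if the device is still busy,
 * the skb is put back and the work is rescheduled.
 */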
static void queue_process(struct work_struct *work)
{
        struct netpoll_info *npinfo =
                container_of(work, struct netpoll_info, tx_work.work);
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
                const struct net_device_ops *ops = dev->netdev_ops;
                struct netdev_queue *txq;

                if (!netif_device_present(dev) || !netif_running(dev)) {
                        __kfree_skb(skb);
                        continue;
                }

                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

                local_irq_save(flags);
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) ||
                    netif_tx_queue_frozen(txq) ||
                    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
                        __netif_tx_unlock(txq);
                        local_irq_restore(flags);

                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
                __netif_tx_unlock(txq);
                local_irq_restore(flags);
        }
}

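/*
 * Verify the UDP checksum of a received packet. Returns 0 when the
 * checksum is absent, already validated, or correct; non-zero when the
 * packet should be dropped.
 */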
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
                            unsigned short ulen, __be32 saddr, __be32 daddr)
{
        __wsum psum;

        if (uh->check == 0 || skb_csum_unnecessary(skb))
                return 0;

        psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

        if (skb->ip_summed == CHECKSUM_COMPLETE &&
            !csum_fold(csum_add(psum, skb->csum)))
                return 0;

        skb->csum = psum;

        return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
                         struct napi_struct *napi, int budget)
{
        int work;

        /* net_rx_action's ->poll() invocations and ours are
         * synchronized by this test, which is only made while
         * holding the napi->poll_lock.
         */
        if (!test_bit(NAPI_STATE_SCHED, &napi->state))
                return budget;

        npinfo->rx_flags |= NETPOLL_RX_DROP;
        atomic_inc(&trapped);
        set_bit(NAPI_STATE_NPSVC, &napi->state);

        work = napi->poll(napi, budget);
        trace_napi_poll(napi);

        clear_bit(NAPI_STATE_NPSVC, &napi->state);
        atomic_dec(&trapped);
        npinfo->rx_flags &= ~NETPOLL_RX_DROP;

        return budget - work;
}

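/*
 * Poll every NAPI context registered on the device whose lock we can
 * take, spending at most the shared budget across all of them.
 */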
static void poll_napi(struct net_device *dev)
{
        struct napi_struct *napi;
        int budget = 16;

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner != smp_processor_id() &&
                    spin_trylock(&napi->poll_lock)) {
                        budget = poll_one_napi(dev->npinfo, napi, budget);
                        spin_unlock(&napi->poll_lock);

                        if (!budget)
                                break;
                }
        }
}

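/* Answer ARP requests that were queued while the device was trapped. */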
static void service_arp_queue(struct netpoll_info *npi)
{
        if (npi) {
                struct sk_buff *skb;

                while ((skb = skb_dequeue(&npi->arp_tx)))
                        arp_reply(skb);
        }
}

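/*
 * Pump the device by hand: run its poll controller to process pending
 * work, poll its NAPI contexts, and answer any queued ARP requests.
 */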
void netpoll_poll_dev(struct net_device *dev)
{
        const struct net_device_ops *ops;

        if (!dev || !netif_running(dev))
                return;

        ops = dev->netdev_ops;
        if (!ops->ndo_poll_controller)
                return;

        /* Process pending work on NIC */
        ops->ndo_poll_controller(dev);

        poll_napi(dev);

        service_arp_queue(dev->npinfo);
}

void netpoll_poll(struct netpoll *np)
{
        netpoll_poll_dev(np->dev);
}

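/* Top up the preallocated skb pool to MAX_SKBS entries, best effort. */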
static void refill_skbs(void)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&skb_pool.lock, flags);
        while (skb_pool.qlen < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;

                __skb_queue_tail(&skb_pool, skb);
        }
        spin_unlock_irqrestore(&skb_pool.lock, flags);
}

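/*
 * Allocate an skb for transmission, falling back to the preallocated
 * pool. If both fail, poll the device a few times so completed
 * transmits can be reclaimed, then give up.
 */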
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
        int count = 0;
        struct sk_buff *skb;

        refill_skbs();
repeat:
        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                skb = skb_dequeue(&skb_pool);

        if (!skb) {
                if (++count < 10) {
                        netpoll_poll(np);
                        goto repeat;
                }
                return NULL;
        }

        atomic_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}

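/* Return non-zero if this CPU currently owns a ->poll() on the device. */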
static int netpoll_owner_active(struct net_device *dev)
{
        struct napi_struct *napi;

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner == smp_processor_id())
                        return 1;
        }
        return 0;
}

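/*
 * Try to transmit the skb immediately, spinning on the tx lock for up
 * to one clock tick and polling the device between attempts. If the
 * device stays busy, defer the skb to the tx workqueue to preserve
 * ordering.
 */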
void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
        struct net_device *dev = np->dev;
        const struct net_device_ops *ops = dev->netdev_ops;
        /* It is up to the caller to keep npinfo alive. */
        struct netpoll_info *npinfo = np->dev->npinfo;

        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                __kfree_skb(skb);
                return;
        }

        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                struct netdev_queue *txq;
                unsigned long flags;

                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

                local_irq_save(flags);
                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
                                if (!netif_tx_queue_stopped(txq)) {
                                        dev->priv_flags |= IFF_IN_NETPOLL;
                                        status = ops->ndo_start_xmit(skb, dev);
                                        dev->priv_flags &= ~IFF_IN_NETPOLL;
                                        if (status == NETDEV_TX_OK)
                                                txq_trans_update(txq);
                                }
                                __netif_tx_unlock(txq);

                                if (status == NETDEV_TX_OK)
                                        break;
                        }

                        /* tickle the device: there may be pending cleanup */
                        netpoll_poll(np);

                        udelay(USEC_PER_POLL);
                }

                WARN_ONCE(!irqs_disabled(),
                        "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
                        dev->name, ops->ndo_start_xmit);

                local_irq_restore(flags);
        }

        if (status != NETDEV_TX_OK) {
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work, 0);
        }
}

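/*
 * Hand-build a complete Ethernet/IP/UDP frame around @msg from the
 * netpoll parameters and transmit it, bypassing the normal stack.
 */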
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
        int total_len, eth_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;

        udp_len = len + sizeof(*udph);
        ip_len = eth_len = udp_len + sizeof(*iph);
        total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

        skb = find_skb(np, total_len, total_len - len);
        if (!skb)
                return;

        skb_copy_to_linear_data(skb, msg, len);
        skb->len += len;

        skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
        udph = udp_hdr(skb);
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);
        udph->check = 0;
        udph->check = csum_tcpudp_magic(np->local_ip,
                                        np->remote_ip,
                                        udp_len, IPPROTO_UDP,
                                        csum_partial(udph, udp_len, 0));
        if (udph->check == 0)
                udph->check = CSUM_MANGLED_0;

        skb_push(skb, sizeof(*iph));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);

        /* iph->version = 4; iph->ihl = 5; */
        put_unaligned(0x45, (unsigned char *)iph);
        iph->tos      = 0;
        put_unaligned(htons(ip_len), &(iph->tot_len));
        iph->id       = 0;
        iph->frag_off = 0;
        iph->ttl      = 64;
        iph->protocol = IPPROTO_UDP;
        iph->check    = 0;
        put_unaligned(np->local_ip, &(iph->saddr));
        put_unaligned(np->remote_ip, &(iph->daddr));
        iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

        eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb->protocol = eth->h_proto = htons(ETH_P_IP);
        memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
        memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

        skb->dev = np->dev;

        netpoll_send_skb(np, skb);
}

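/*
 * Reply to an ARP request for one of our netpoll clients' addresses
 * directly from netpoll context, without the neighbour subsystem.
 */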
static void arp_reply(struct sk_buff *skb)
{
        struct netpoll_info *npinfo = skb->dev->npinfo;
        struct arphdr *arp;
        unsigned char *arp_ptr;
        int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
        __be32 sip, tip;
        unsigned char *sha;
        struct sk_buff *send_skb;
        struct netpoll *np, *tmp;
        unsigned long flags;
        int hits = 0;

        if (list_empty(&npinfo->rx_np))
                return;

        /* Before checking the packet, we do some early
           inspection of whether this is interesting at all */
        spin_lock_irqsave(&npinfo->rx_lock, flags);
        list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
                if (np->dev == skb->dev)
                        hits++;
        }
        spin_unlock_irqrestore(&npinfo->rx_lock, flags);

        /* No netpoll struct is using this dev */
        if (!hits)
                return;

        /* No arp on this interface */
        if (skb->dev->flags & IFF_NOARP)
                return;

        if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
                return;

        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        arp = arp_hdr(skb);

        if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
             arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
            arp->ar_pro != htons(ETH_P_IP) ||
            arp->ar_op != htons(ARPOP_REQUEST))
                return;

        arp_ptr = (unsigned char *)(arp+1);
        /* save the location of the src hw addr */
        sha = arp_ptr;
        arp_ptr += skb->dev->addr_len;
        memcpy(&sip, arp_ptr, 4);
        arp_ptr += 4;
        /* If we actually cared about dst hw addr,
           it would get copied here */
        arp_ptr += skb->dev->addr_len;
        memcpy(&tip, arp_ptr, 4);

        /* Should we ignore arp? */
        if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
                return;

        size = arp_hdr_len(skb->dev);

        spin_lock_irqsave(&npinfo->rx_lock, flags);
        list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
                if (tip != np->local_ip)
                        continue;

                send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
                                    LL_RESERVED_SPACE(np->dev));
                if (!send_skb)
                        continue;

                skb_reset_network_header(send_skb);
                arp = (struct arphdr *) skb_put(send_skb, size);
                send_skb->dev = skb->dev;
                send_skb->protocol = htons(ETH_P_ARP);

                /* Fill the device header for the ARP frame */
                if (dev_hard_header(send_skb, skb->dev, ptype,
                                    sha, np->dev->dev_addr,
                                    send_skb->len) < 0) {
                        kfree_skb(send_skb);
                        continue;
                }

                /*
                 * Fill out the arp protocol part.
                 *
                 * we only support ethernet device type,
                 * which (according to RFC 1390) should
                 * always equal 1 (Ethernet).
                 */

                arp->ar_hrd = htons(np->dev->type);
                arp->ar_pro = htons(ETH_P_IP);
                arp->ar_hln = np->dev->addr_len;
                arp->ar_pln = 4;
                arp->ar_op = htons(type);

                arp_ptr = (unsigned char *)(arp + 1);
                memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
                arp_ptr += np->dev->addr_len;
                memcpy(arp_ptr, &tip, 4);
                arp_ptr += 4;
                memcpy(arp_ptr, sha, np->dev->addr_len);
                arp_ptr += np->dev->addr_len;
                memcpy(arp_ptr, &sip, 4);

                netpoll_send_skb(np, send_skb);

                /* If there are several rx_hooks for the same address,
                   sending a single reply is enough */
                break;
        }
        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

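/*
 * Netpoll receive path. Returns 1 if the packet was consumed (handed
 * to a matching rx_hook, queued for arp_reply(), or dropped while
 * trapped), 0 if the normal network stack should see it.
 */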
int __netpoll_rx(struct sk_buff *skb)
{
        int proto, len, ulen;
        int hits = 0;
        struct iphdr *iph;
        struct udphdr *uh;
        struct netpoll_info *npinfo = skb->dev->npinfo;
        struct netpoll *np, *tmp;

        if (list_empty(&npinfo->rx_np))
                goto out;

        if (skb->dev->type != ARPHRD_ETHER)
                goto out;

        /* check if netpoll clients need ARP */
        if (skb->protocol == htons(ETH_P_ARP) &&
            atomic_read(&trapped)) {
                skb_queue_tail(&npinfo->arp_tx, skb);
                return 1;
        }

        proto = ntohs(eth_hdr(skb)->h_proto);
        if (proto != ETH_P_IP)
                goto out;
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto out;
        if (skb_shared(skb))
                goto out;

        iph = (struct iphdr *)skb->data;
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto out;
        if (iph->ihl < 5 || iph->version != 4)
                goto out;
        if (!pskb_may_pull(skb, iph->ihl*4))
                goto out;
        if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
                goto out;

        len = ntohs(iph->tot_len);
        if (skb->len < len || len < iph->ihl*4)
                goto out;

        /*
         * Our transport medium may have padded the buffer out.
         * Now we trim to the true length of the frame.
         */
        if (pskb_trim_rcsum(skb, len))
                goto out;

        if (iph->protocol != IPPROTO_UDP)
                goto out;

        len -= iph->ihl*4;
        uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
        ulen = ntohs(uh->len);

        if (ulen != len)
                goto out;
        if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
                goto out;

        list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
                if (np->local_ip && np->local_ip != iph->daddr)
                        continue;
                if (np->remote_ip && np->remote_ip != iph->saddr)
                        continue;
                if (np->local_port && np->local_port != ntohs(uh->dest))
                        continue;

                np->rx_hook(np, ntohs(uh->source),
                               (char *)(uh+1),
                               ulen - sizeof(struct udphdr));
                hits++;
        }

        if (!hits)
                goto out;

        kfree_skb(skb);
        return 1;

out:
        if (atomic_read(&trapped)) {
                kfree_skb(skb);
                return 1;
        }

        return 0;
}

void netpoll_print_options(struct netpoll *np)
{
        printk(KERN_INFO "%s: local port %d\n",
                         np->name, np->local_port);
        printk(KERN_INFO "%s: local IP %pI4\n",
                         np->name, &np->local_ip);
        printk(KERN_INFO "%s: interface '%s'\n",
                         np->name, np->dev_name);
        printk(KERN_INFO "%s: remote port %d\n",
                         np->name, np->remote_port);
        printk(KERN_INFO "%s: remote IP %pI4\n",
                         np->name, &np->remote_ip);
        printk(KERN_INFO "%s: remote ethernet address %pM\n",
                         np->name, np->remote_mac);
}

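/*
 * Parse a configuration string of the form
 * [src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr];
 * fields left empty keep their current values.
 */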
int netpoll_parse_options(struct netpoll *np, char *opt)
{
        char *cur = opt, *delim;

        if (*cur != '@') {
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;

        if (*cur != '/') {
                if ((delim = strchr(cur, '/')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_ip = in_aton(cur);
                cur = delim;
        }
        cur++;

        if (*cur != ',') {
                /* parse out dev name */
                if ((delim = strchr(cur, ',')) == NULL)
                        goto parse_failed;
                *delim = 0;
                strlcpy(np->dev_name, cur, sizeof(np->dev_name));
                cur = delim;
        }
        cur++;

        if (*cur != '@') {
                /* dst port */
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                if (*cur == ' ' || *cur == '\t')
                        printk(KERN_INFO "%s: warning: whitespace"
                                        " is not allowed\n", np->name);
                np->remote_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;

        /* dst ip */
        if ((delim = strchr(cur, '/')) == NULL)
                goto parse_failed;
        *delim = 0;
        np->remote_ip = in_aton(cur);
        cur = delim + 1;

        if (*cur != 0) {
                /* MAC address */
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[0] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[1] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[2] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[3] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[4] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                np->remote_mac[5] = simple_strtol(cur, NULL, 16);
        }

        netpoll_print_options(np);

        return 0;

 parse_failed:
        printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
               np->name, cur);
        return -1;
}

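/*
 * Attach netpoll state to an already-resolved device: allocate or
 * reference the per-device netpoll_info and register the rx hook, if
 * any. The caller must hold the rtnl lock.
 */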
int __netpoll_setup(struct netpoll *np)
{
        struct net_device *ndev = np->dev;
        struct netpoll_info *npinfo;
        const struct net_device_ops *ops;
        unsigned long flags;
        int err;

        if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
            !ndev->netdev_ops->ndo_poll_controller) {
                printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
                       np->name, np->dev_name);
                err = -ENOTSUPP;
                goto out;
        }

        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo) {
                        err = -ENOMEM;
                        goto out;
                }

                npinfo->rx_flags = 0;
                INIT_LIST_HEAD(&npinfo->rx_np);

                spin_lock_init(&npinfo->rx_lock);
                skb_queue_head_init(&npinfo->arp_tx);
                skb_queue_head_init(&npinfo->txq);
                INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

                atomic_set(&npinfo->refcnt, 1);

                ops = np->dev->netdev_ops;
                if (ops->ndo_netpoll_setup) {
                        err = ops->ndo_netpoll_setup(ndev, npinfo);
                        if (err)
                                goto free_npinfo;
                }
        } else {
                npinfo = ndev->npinfo;
                atomic_inc(&npinfo->refcnt);
        }

        npinfo->netpoll = np;

        if (np->rx_hook) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                npinfo->rx_flags |= NETPOLL_RX_ENABLED;
                list_add_tail(&np->rx, &npinfo->rx_np);
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }

        /* last thing to do is link it to the net device structure */
        rcu_assign_pointer(ndev->npinfo, npinfo);

        return 0;

free_npinfo:
        kfree(npinfo);
out:
        return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

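/*
 * Resolve the configured device, bring it up and wait for carrier if
 * necessary, choose a default local IP, and attach netpoll state via
 * __netpoll_setup().
 */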
int netpoll_setup(struct netpoll *np)
{
        struct net_device *ndev = NULL;
        struct in_device *in_dev;
        int err;

        if (np->dev_name)
                ndev = dev_get_by_name(&init_net, np->dev_name);
        if (!ndev) {
                printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
                       np->name, np->dev_name);
                return -ENODEV;
        }

        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;

                printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
                       np->name, np->dev_name);

                rtnl_lock();
                err = dev_open(ndev);
                rtnl_unlock();

                if (err) {
                        printk(KERN_ERR "%s: failed to open %s\n",
                               np->name, ndev->name);
                        goto put;
                }

                atleast = jiffies + HZ/10;
                atmost = jiffies + carrier_timeout * HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
                                printk(KERN_NOTICE
                                       "%s: timeout waiting for carrier\n",
                                       np->name);
                                break;
                        }
                        msleep(1);
                }

                /* If carrier appears to come up instantly, we don't
                 * trust it and pause so that we don't pump all our
                 * queued console messages into the bitbucket.
                 */
                if (time_before(jiffies, atleast)) {
                        printk(KERN_NOTICE "%s: carrier detect appears"
                               " untrustworthy, waiting 4 seconds\n",
                               np->name);
                        msleep(4000);
                }
        }

        if (!np->local_ip) {
                rcu_read_lock();
                in_dev = __in_dev_get_rcu(ndev);

                if (!in_dev || !in_dev->ifa_list) {
                        rcu_read_unlock();
                        printk(KERN_ERR "%s: no IP address for %s, aborting\n",
                               np->name, np->dev_name);
                        err = -EDESTADDRREQ;
                        goto put;
                }

                np->local_ip = in_dev->ifa_list->ifa_local;
                rcu_read_unlock();
                printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
        }

        np->dev = ndev;

        /* fill up the skb queue */
        refill_skbs();

        rtnl_lock();
        err = __netpoll_setup(np);
        rtnl_unlock();

        if (err)
                goto put;

        return 0;

put:
        dev_put(ndev);
        return err;
}

static int __init netpoll_init(void)
{
        skb_queue_head_init(&skb_pool);
        return 0;
}
core_initcall(netpoll_init);

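/*
 * Detach a netpoll client from its device: drop the rx hook and, on
 * the last reference, tear down netpoll_info after synchronizing with
 * any NAPI users of it.
 */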
void __netpoll_cleanup(struct netpoll *np)
{
        struct netpoll_info *npinfo;
        unsigned long flags;

        npinfo = np->dev->npinfo;
        if (!npinfo)
                return;

        if (!list_empty(&npinfo->rx_np)) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                list_del(&np->rx);
                if (list_empty(&npinfo->rx_np))
                        npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }

        if (atomic_dec_and_test(&npinfo->refcnt)) {
                const struct net_device_ops *ops;

                ops = np->dev->netdev_ops;
                if (ops->ndo_netpoll_cleanup)
                        ops->ndo_netpoll_cleanup(np->dev);

                rcu_assign_pointer(np->dev->npinfo, NULL);

                /* avoid racing with NAPI reading npinfo */
                synchronize_rcu_bh();

                skb_queue_purge(&npinfo->arp_tx);
                skb_queue_purge(&npinfo->txq);
                cancel_rearming_delayed_work(&npinfo->tx_work);

                /* clean after last, unfinished work */
                __skb_queue_purge(&npinfo->txq);
                kfree(npinfo);
        }
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void netpoll_cleanup(struct netpoll *np)
{
        if (!np->dev)
                return;

        rtnl_lock();
        __netpoll_cleanup(np);
        rtnl_unlock();

        dev_put(np->dev);
        np->dev = NULL;
}

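/*
 * While "trapped" is non-zero, the netpoll rx path consumes every
 * packet itself instead of passing it to the network stack, for use
 * by dump and debugger clients.
 */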
int netpoll_trap(void)
{
        return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
        if (trap)
                atomic_inc(&trapped);
        else
                atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_send_skb);
EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll_dev);
EXPORT_SYMBOL(netpoll_poll);