/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
	 sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) ||
		    netif_tx_queue_frozen(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);

void netpoll_poll(struct netpoll *np)
{
	netpoll_poll_dev(np->dev);
}
EXPORT_SYMBOL(netpoll_poll);

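/*
 * Driver-side sketch (illustrative, not part of this file): the path
 * above only works if the NIC driver supplies ->ndo_poll_controller.
 * The usual pattern is to run the driver's normal interrupt handler
 * with the line interrupt masked. Both identifiers below are
 * hypothetical stand-ins for a real driver's ISR and callback.
 */
static irqreturn_t __maybe_unused example_interrupt(int irq, void *dev_id)
{
	/* stand-in for the driver's real ISR, which would reap RX/TX */
	return IRQ_HANDLED;
}

static void __maybe_unused example_poll_controller(struct net_device *dev)
{
	/* simulate the missing interrupt so pending work gets processed */
	disable_irq(dev->irq);
	example_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
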
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_tx_queue_stopped(txq)) {
					dev->priv_flags |= IFF_IN_NETPOLL;
					status = ops->ndo_start_xmit(skb, dev);
					dev->priv_flags &= ~IFF_IN_NETPOLL;
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle the device: maybe there is some cleanup */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);

		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
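
/*
 * Usage sketch (illustrative, not part of this file): a client such as
 * netconsole would emit one message with netpoll_send_udp(), splitting
 * anything larger than MAX_UDP_CHUNK across several datagrams. The
 * function below is a hypothetical example, not an exported API.
 */
static void __maybe_unused example_emit(struct netpoll *np,
					const char *msg, int len)
{
	while (len > 0) {
		int frag = min(len, MAX_UDP_CHUNK);

		netpoll_send_udp(np, msg, frag);	/* may drop on OOM */
		msg += frag;
		len -= frag;
	}
}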

static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection of whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
				    LL_RESERVED_SPACE(np->dev));
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	int hits = 0;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh+1),
			    ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}
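
/*
 * Receive-hook sketch (illustrative, not part of this file): the loop
 * above calls np->rx_hook() with the raw UDP payload while packets are
 * trapped, so a hook must be non-blocking and must copy the data out
 * if it needs it after returning. This handler is hypothetical.
 */
static void __maybe_unused example_rx_hook(struct netpoll *np, int source_port,
					   char *msg, int len)
{
	/* 'msg' points into the skb's linear data; 'len' is the UDP
	 * payload length after the header checks above. */
	printk(KERN_DEBUG "%s: %d bytes from remote port %d\n",
	       np->name, len, source_port);
}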

void netpoll_print_options(struct netpoll *np)
{
	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %pI4\n",
	       np->name, &np->local_ip);
	printk(KERN_INFO "%s: interface '%s'\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %pI4\n",
	       np->name, &np->remote_ip);
	printk(KERN_INFO "%s: remote ethernet address %pM\n",
	       np->name, np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			printk(KERN_INFO "%s: warning: whitespace "
			       "is not allowed\n", np->name);
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
	       np->name, cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
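
/*
 * Parsing sketch (illustrative, not part of this file): the format
 * accepted above is "local_port@local_ip/dev,remote_port@remote_ip/mac".
 * Every field except the remote IP may be left empty to take defaults.
 * The config string and wrapper below are hypothetical examples.
 */
static int __maybe_unused example_parse(struct netpoll *np)
{
	/* must be writable: parsing NUL-terminates fields in place */
	char opt[] = "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55";

	np->name = "example";
	return netpoll_parse_options(np, opt);	/* 0 on success, -1 on error */
}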

int __netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = np->dev;
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
	}

	np->dev = ndev;

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
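
/*
 * Setup-flow sketch (illustrative, not part of this file): a typical
 * client parses its options, attaches with netpoll_setup(), and only
 * then starts sending; netpoll_cleanup() undoes the attach. All names
 * and the option string here are hypothetical.
 */
static int __maybe_unused example_client_start(void)
{
	static struct netpoll np = {	/* must outlive the attach */
		.name = "example",
	};
	char opt[] = "@/eth0,6666@10.0.0.2/00:11:22:33:44:55";
	int err;

	err = netpoll_parse_options(&np, opt);
	if (err)
		return err;

	err = netpoll_setup(&np);	/* opens the device, waits for carrier */
	if (err)
		return err;

	netpoll_send_udp(&np, "hello\n", 6);
	return 0;
}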

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		rcu_assign_pointer(np->dev->npinfo, NULL);

		/* avoid racing with NAPI reading npinfo */
		synchronize_rcu_bh();

		skb_queue_purge(&npinfo->arp_tx);
		skb_queue_purge(&npinfo->txq);
		cancel_rearming_delayed_work(&npinfo->tx_work);

		/* clean after last, unfinished work */
		__skb_queue_purge(&npinfo->txq);
		kfree(npinfo);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);