/* include/linux/netpoll.h — reconstructed from a git-blame view (net-next-2.6) */
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */

#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

struct netpoll;

17struct netpoll {
18 struct net_device *dev;
19 char dev_name[16], *name;
1da177e4
LT
20 void (*rx_hook)(struct netpoll *, int, char *, int);
21 void (*drop)(struct sk_buff *skb);
22 u32 local_ip, remote_ip;
23 u16 local_port, remote_port;
24 unsigned char local_mac[6], remote_mac[6];
115c1d6e
JM
25};
26
27struct netpoll_info {
93ec2c72 28 atomic_t refcnt;
1da177e4
LT
29 spinlock_t poll_lock;
30 int poll_owner;
115c1d6e 31 int rx_flags;
fbeec2e1
JM
32 spinlock_t rx_lock;
33 struct netpoll *rx_np; /* netpoll that registered an rx_hook */
068c6e98 34 struct sk_buff_head arp_tx; /* list of arp requests to reply to */
b6cd27ed
SH
35 struct sk_buff_head txq;
36 struct work_struct tx_work;
1da177e4
LT
37};
38
/* Core netpoll entry points, implemented in net/core/netpoll.c. */
void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
int netpoll_parse_options(struct netpoll *np, char *opt);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
void netpoll_queue(struct sk_buff *skb);

49#ifdef CONFIG_NETPOLL
50static inline int netpoll_rx(struct sk_buff *skb)
51{
115c1d6e 52 struct netpoll_info *npinfo = skb->dev->npinfo;
fbeec2e1
JM
53 unsigned long flags;
54 int ret = 0;
115c1d6e 55
fbeec2e1 56 if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
115c1d6e
JM
57 return 0;
58
fbeec2e1
JM
59 spin_lock_irqsave(&npinfo->rx_lock, flags);
60 /* check rx_flags again with the lock held */
61 if (npinfo->rx_flags && __netpoll_rx(skb))
62 ret = 1;
63 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
64
65 return ret;
1da177e4
LT
66}
67
53fb95d3 68static inline void *netpoll_poll_lock(struct net_device *dev)
1da177e4 69{
53fb95d3 70 rcu_read_lock(); /* deal with race on ->npinfo */
115c1d6e
JM
71 if (dev->npinfo) {
72 spin_lock(&dev->npinfo->poll_lock);
73 dev->npinfo->poll_owner = smp_processor_id();
53fb95d3 74 return dev->npinfo;
1da177e4 75 }
53fb95d3 76 return NULL;
1da177e4
LT
77}
78
53fb95d3 79static inline void netpoll_poll_unlock(void *have)
1da177e4 80{
53fb95d3
MM
81 struct netpoll_info *npi = have;
82
83 if (npi) {
84 npi->poll_owner = -1;
85 spin_unlock(&npi->poll_lock);
1da177e4 86 }
53fb95d3 87 rcu_read_unlock();
1da177e4
LT
88}
89
90#else
91#define netpoll_rx(a) 0
afb997c6 92#define netpoll_poll_lock(a) NULL
1da177e4
LT
93#define netpoll_poll_unlock(a)
94#endif
95
96#endif