/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnels); others are silly
     (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header. PPP does this, which is wrong, because it introduces
		 asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!
   data       -> data

In summary:
   If dev->hard_header == NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */

/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
			   int closing, int tx_ring);

struct packet_ring_buffer {
	char			**pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	atomic_t		pending;
};

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void packet_flush_mclist(struct sock *sk);

struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct tpacket_stats	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
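/*
 * Note: PACKET_SKB_CB() overlays struct packet_skb_cb on the skb->cb
 * scratch area so the receive path can stash the original length and the
 * link-level address alongside the skb; packet_rcv() guards the overlay
 * size with a BUILD_BUG_ON.
 */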

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(virt_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(virt_to_page(&h.h2->tp_status));
		break;
	default:
		pr_err("TPACKET version not supported\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(virt_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(virt_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	default:
		pr_err("TPACKET version not supported\n");
		BUG();
		return 0;
	}
}
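
/*
 * Note: the smp_wmb() in __packet_set_status() pairs with the smp_rmb()
 * in __packet_get_status(): frame contents become visible before the
 * tp_status word flips, and tp_status is read before the contents, since
 * tp_status is the ownership flag shared with the mmapped ring.
 */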

static void *packet_lookup_frame(struct packet_sock *po,
				 struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static inline void *packet_current_frame(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static inline void *packet_previous_frame(struct packet_sock *po,
					  struct packet_ring_buffer *rb,
					  int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static inline void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head + 1 : 0;
}
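
/*
 * Note on ring geometry: pg_vec is an array of contiguous blocks, each
 * holding frames_per_block fixed-size frames, so a frame index splits
 * into a block number and an offset within it; head wraps from frame_max
 * back to 0.
 */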

static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

static void packet_sock_destruct(struct sock *sk)
{
	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}


static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}


/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		if (err)
			goto out_free;
		goto retry;
	}


	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}
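
/*
 * Note: the retry label above exists because the device is resolved under
 * rcu_read_lock() while sock_wmalloc() may sleep; the first pass drops the
 * lock to allocate and copy the data, then looks the device up again by
 * name before transmitting.
 */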

static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
				      unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock_bh();
	filter = rcu_dereference_bh(sk->sk_filter);
	if (filter != NULL)
		res = sk_run_filter(skb, filter->insns, filter->len);
	rcu_read_unlock_bh();

	return res;
}

/*
   This function makes lazy skb cloning in the hope that most packets
   are discarded by BPF.

   Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
   and skb->cb are mangled. It works because (and until) packets
   falling here are owned by the current CPU. Output packets are cloned
   by dev_queue_xmit_nit(), input packets are processed by net_bh
   sequentially, so that if we return the skb to its original state on exit,
   we will not harm anyone.
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		   exported to higher levels.

		   Otherwise, the device hides the details of its frame
		   structure, so that the corresponding packet head is
		   never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		kfree_skb(skb);
		skb = nskb;
	}

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
		     sizeof(skb->cb));

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	consume_skb(skb);
	return 0;
}

static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct timeval tv;
	struct timespec ts;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
			 po->tp_reserve;
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->rx_ring.frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->rx_ring.frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}

	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
	if (!h.raw)
		goto ring_is_full;
	packet_increment_head(&po->rx_ring);
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		if (skb->tstamp.tv64)
			tv = ktime_to_timeval(skb->tstamp);
		else
			do_gettimeofday(&tv);
		h.h1->tp_sec = tv.tv_sec;
		h.h1->tp_usec = tv.tv_usec;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
		else
			getnstimeofday(&ts);
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
		hdrlen = sizeof(*h.h2);
		break;
	default:
		BUG();
	}

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	__packet_set_status(po, h.raw, status);
	smp_mb();
	{
		struct page *p_start, *p_end;
		u8 *h_end = h.raw + macoff + snaplen - 1;

		p_start = virt_to_page(h.raw);
		p_end = virt_to_page(h_end);
		while (p_start <= p_end) {
			flush_dcache_page(p_start);
			p_start++;
		}
	}

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	kfree_skb(copy_skb);
	goto drop_n_restore;
}

static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);
	void *ph;

	BUG_ON(skb == NULL);

	if (likely(po->tx_ring.pg_vec)) {
		ph = skb_shinfo(skb)->destructor_arg;
		BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
		atomic_dec(&po->tx_ring.pending);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
	}

	sock_wfree(skb);
}
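
/*
 * Note: a TX ring frame moves through TP_STATUS_SEND_REQUEST (set by
 * userspace) -> TP_STATUS_SENDING (set in tpacket_snd()) ->
 * TP_STATUS_AVAILABLE (set here once the driver releases the skb);
 * tx_ring.pending counts frames still owned by the stack.
 */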

static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, int size_max,
		__be16 proto, unsigned char *addr)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} ph;
	int to_write, offset, len, tp_len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	void *data;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = ph.raw;

	switch (po->tp_version) {
	case TPACKET_V2:
		tp_len = ph.h2->tp_len;
		break;
	default:
		tp_len = ph.h1->tp_len;
		break;
	}
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
		return -EMSGSIZE;
	}

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (dev->hard_header_len) {
		/* net device doesn't like empty head */
		if (unlikely(tp_len <= dev->hard_header_len)) {
			pr_err("packet size is too short (%d < %d)\n",
			       tp_len, dev->hard_header_len);
			return -EINVAL;
		}

		skb_push(skb, dev->hard_header_len);
		err = skb_store_bits(skb, 0, data,
				dev->hard_header_len);
		if (unlikely(err))
			return err;

		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;
	}

	err = -EFAULT;
	page = virt_to_page(data);
	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceed the number of skb frags(%lu)\n",
			       MAX_SKB_FRAGS);
			return -EFAULT;
		}

		flush_dcache_page(page);
		get_page(page);
		skb_fill_page_desc(skb,
				nr_frags,
				page++, offset, len);
		to_write -= len;
		offset = 0;
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
	}

	return tp_len;
}
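
/*
 * Note: apart from the link-level header copied into the linear area, the
 * frame payload is not copied; the loop above pins the ring pages with
 * get_page() and attaches them to the skb as page fragments, so the TX
 * ring transmits directly out of the mmapped buffer.
 */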

static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
	struct socket *sock;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	int ifindex, err, reserve = 0;
	void *ph;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	int tp_len, size_max;
	unsigned char *addr;
	int len_sum = 0;
	int status = 0;

	sock = po->sk.sk_socket;

	mutex_lock(&po->pg_vec_lock);

	err = -EBUSY;
	if (saddr == NULL) {
		ifindex	= po->ifindex;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
						sll_addr)))
			goto out;
		ifindex	= saddr->sll_ifindex;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
	}

	dev = dev_get_by_index(sock_net(&po->sk), ifindex);
	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out;

	reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_put;

	size_max = po->tx_ring.frame_size
		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));

	if (size_max > dev->mtu + reserve)
		size_max = dev->mtu + reserve;

	do {
		ph = packet_current_frame(po, &po->tx_ring,
					  TP_STATUS_SEND_REQUEST);

		if (unlikely(ph == NULL)) {
			schedule();
			continue;
		}

		status = TP_STATUS_SEND_REQUEST;
		skb = sock_alloc_send_skb(&po->sk,
				LL_ALLOCATED_SPACE(dev)
				+ sizeof(struct sockaddr_ll),
				0, &err);

		if (unlikely(skb == NULL))
			goto out_status;

		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
				addr);

		if (unlikely(tp_len < 0)) {
			if (po->tp_loss) {
				__packet_set_status(po, ph,
						TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);
				kfree_skb(skb);
				continue;
			} else {
				status = TP_STATUS_WRONG_FORMAT;
				err = tp_len;
				goto out_status;
			}
		}

		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		atomic_inc(&po->tx_ring.pending);

		status = TP_STATUS_SEND_REQUEST;
		err = dev_queue_xmit(skb);
		if (unlikely(err > 0)) {
			err = net_xmit_errno(err);
			if (err && __packet_get_status(po, ph) ==
				   TP_STATUS_AVAILABLE) {
				/* skb was destructed already */
				skb = NULL;
				goto out_status;
			}
			/*
			 * skb was dropped but not destructed yet;
			 * let's treat it like congestion or err < 0
			 */
			err = 0;
		}
		packet_increment_head(&po->tx_ring);
		len_sum += tp_len;
	} while (likely((ph != NULL) ||
			((!(msg->msg_flags & MSG_DONTWAIT)) &&
			 (atomic_read(&po->tx_ring.pending))))
		);

	err = len_sum;
	goto out_put;

out_status:
	__packet_set_status(po, ph, status);
	kfree_skb(skb);
out_put:
	dev_put(dev);
out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}

static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					       size_t reserve, size_t len,
					       size_t linear, int noblock,
					       int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
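
/*
 * Note: "linear" is the part that must land in the skb head (packet_snd()
 * passes the virtio_net hdr_len hint here); the remainder becomes paged
 * data, except that requests under a page are kept fully linear.
 */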

static int packet_snd(struct socket *sock,
		      struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	unsigned char *addr;
	int ifindex, err, reserve = 0;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int offset = 0;
	int vnet_hdr_len;
	struct packet_sock *po = pkt_sk(sk);
	unsigned short gso_type = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr == NULL) {
		ifindex	= po->ifindex;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		ifindex	= saddr->sll_ifindex;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
	}


	dev = dev_get_by_index(sock_net(sk), ifindex);
	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	if (po->has_vnet_hdr) {
		vnet_hdr_len = sizeof(vnet_hdr);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto out_unlock;

		len -= vnet_hdr_len;

		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
				       vnet_hdr_len);
		if (err < 0)
			goto out_unlock;

		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
		      vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						 vnet_hdr.csum_offset + 2;

		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto out_unlock;

		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
				gso_type = SKB_GSO_TCPV4;
				break;
			case VIRTIO_NET_HDR_GSO_TCPV6:
				gso_type = SKB_GSO_TCPV6;
				break;
			case VIRTIO_NET_HDR_GSO_UDP:
				gso_type = SKB_GSO_UDP;
				break;
			default:
				goto out_unlock;
			}

			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
				gso_type |= SKB_GSO_TCP_ECN;

			if (vnet_hdr.gso_size == 0)
				goto out_unlock;

		}
	}

	err = -EMSGSIZE;
	if (!gso_type && (len > dev->mtu + reserve))
		goto out_unlock;

	err = -ENOBUFS;
	skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
			       LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
			       msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_set_network_header(skb, reserve);

	err = -EINVAL;
	if (sock->type == SOCK_DGRAM &&
	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
		goto out_free;

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
	if (err)
		goto out_free;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	if (po->has_vnet_hdr) {
		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
						  vnet_hdr.csum_offset)) {
				err = -EINVAL;
				goto out_free;
			}
		}

		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;

		len += vnet_hdr_len;
	}

	/*
	 *	Now send it
	 */

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	dev_put(dev);

	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
out:
	return err;
}

static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
	else
		return packet_snd(sock, msg, len);
}
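
/*
 * Note: sendmsg() dispatches on whether a TX ring is configured. An
 * illustrative userspace sketch of the ring path (not part of this file):
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	... fill a frame, set its tp_status to TP_STATUS_SEND_REQUEST ...
 *	send(fd, NULL, 0, 0);	// drives tpacket_snd()
 */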

/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct net *net;
	struct tpacket_req req;

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	spin_lock_bh(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	spin_unlock_bh(&net->packet.sklist_lock);

	spin_lock(&po->bind_lock);
	if (po->running) {
		/*
		 *	Remove from protocol table
		 */
		po->running = 0;
		po->num = 0;
		__dev_remove_pack(&po->prot_hook);
		__sock_put(sk);
	}
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	memset(&req, 0, sizeof(req));

	if (po->rx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 0);

	if (po->tx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 1);

	synchronize_net();
	/*
	 *	Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);
	sk_refcnt_debug_release(sk);

	sock_put(sk);
	return 0;
}

/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
	struct packet_sock *po = pkt_sk(sk);
	/*
	 *	Detach an existing hook if present.
	 */

	lock_sock(sk);

	spin_lock(&po->bind_lock);
	if (po->running) {
		__sock_put(sk);
		po->running = 0;
		po->num = 0;
		spin_unlock(&po->bind_lock);
		dev_remove_pack(&po->prot_hook);
		spin_lock(&po->bind_lock);
	}

	po->num = protocol;
	po->prot_hook.type = protocol;
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (protocol == 0)
		goto out_unlock;

	if (!dev || (dev->flags & IFF_UP)) {
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	} else {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return 0;
}

/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
			    int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	/*
	 *	Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(sock_net(sk), name);
	if (dev) {
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
		dev_put(dev);
	}
	return err;
}

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;


	/*
	 *	Check legality
	 */

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
	if (dev)
		dev_put(dev);

out:
	return err;
}

static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};

/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

	spin_lock_bh(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	sock_prot_inuse_add(net, &packet_proto, 1);
	spin_unlock_bh(&net->packet.sklist_lock);

	return 0;
out:
	return err;
}

/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	struct sockaddr_ll *sll;
	int vnet_hdr_len = 0;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN if the device has just gone down,
	 *	but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't see and worry about blocking
	 *	retries.
	 */

	if (skb == NULL)
		goto out;

	if (pkt_sk(sk)->has_vnet_hdr) {
		struct virtio_net_hdr vnet_hdr = { 0 };

		err = -EINVAL;
		vnet_hdr_len = sizeof(vnet_hdr);
		if ((len -= vnet_hdr_len) < 0)
			goto out_free;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			vnet_hdr.hdr_len = skb_headlen(skb);
			vnet_hdr.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else if (sinfo->gso_type & SKB_GSO_FCOE)
				goto out_free;
			else
				BUG();
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			vnet_hdr.csum_start = skb->csum_start -
							skb_headroom(skb);
			vnet_hdr.csum_offset = skb->csum_offset;
		} /* else everything is zero */

		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
				     vnet_hdr_len);
		if (err < 0)
			goto out_free;
	}

	/*
	 *	If the address length field is there to be filled in, we fill
	 *	it in now.
	 */

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	/*
	 *	You lose any data beyond the buffer you gave. If it worries a
	 *	user program they can ask the device for its MTU anyway.
	 */

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		aux.tp_vlan_tci = vlan_tx_tag_get(skb);

		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = vnet_hdr_len + ((flags & MSG_TRUNC) ? skb->len : copied);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}

static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
	if (dev)
		strlcpy(uaddr->sa_data, dev->name, 15);
	else
		memset(uaddr->sa_data, 0, 14);
	rcu_read_unlock();
	*uaddr_len = sizeof(*uaddr);

	return 0;
}

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	rcu_read_unlock();
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}

static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_mc_add(dev, i->addr, i->alen, 0);
		else
			return dev_mc_delete(dev, i->addr, i->alen, 0);
		break;
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
		break;
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
		break;
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_unicast_add(dev, i->addr);
		else
			return dev_unicast_delete(dev, i->addr);
		break;
	default:
		break;
	}
	return 0;
}

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}

static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	err = packet_dev_mc(dev, i, 1);
	if (err) {
		po->mclist = i->next;
		kfree(i);
	}

done:
	rtnl_unlock();
	return err;
}

static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
				if (dev)
					packet_dev_mc(dev, ml, -1);
				kfree(ml);
			}
			rtnl_unlock();
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}

static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
		if (dev != NULL)
			packet_dev_mc(dev, ml, -1);
		kfree(ml);
	}
	rtnl_unlock();
}
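
/*
 * Note: membership entries are reference-counted per (ifindex, type, alen,
 * address) tuple, so a duplicate PACKET_ADD_MEMBERSHIP only bumps
 * ml->count and the device filter is programmed once.
 */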

static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;
		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

	case PACKET_RX_RING:
	case PACKET_TX_RING:
	{
		struct tpacket_req req;

		if (optlen < sizeof(req))
			return -EINVAL;
		if (pkt_sk(sk)->has_vnet_hdr)
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
	case PACKET_VERSION:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
		case TPACKET_V2:
			po->tp_version = val;
			return 0;
		default:
			return -EINVAL;
		}
	}
	case PACKET_RESERVE:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_reserve = val;
		return 0;
	}
	case PACKET_LOSS:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_loss = !!val;
		return 0;
	}
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->origdev = !!val;
		return 0;
	}
	case PACKET_VNET_HDR:
	{
		int val;

		if (sock->type != SOCK_RAW)
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->has_vnet_hdr = !!val;
		return 0;
	}
	default:
		return -ENOPROTOOPT;
	}
}
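
/*
 * Note: PACKET_VERSION, PACKET_RESERVE, PACKET_LOSS and PACKET_VNET_HDR
 * all return -EBUSY once either ring is set up, because they change the
 * frame layout the mmapped ring was sized against.
 */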

static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data;
	struct tpacket_stats st;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		st = po->stats;
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;

		data = &st;
		break;
	case PACKET_AUXDATA:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->auxdata;

		data = &val;
		break;
	case PACKET_ORIGDEV:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->origdev;

		data = &val;
		break;
	case PACKET_VNET_HDR:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->has_vnet_hdr;

		data = &val;
		break;
	case PACKET_VERSION:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->tp_version;
		data = &val;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		default:
			return -EINVAL;
		}
		data = &val;
		break;
	case PACKET_RESERVE:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_reserve;
		data = &val;
		break;
	case PACKET_LOSS:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_loss;
		data = &val;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}
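
/*
 * Illustrative userspace sketch: PACKET_HDRLEN is a two-way option; the
 * caller passes a TPACKET version in and receives the matching frame
 * header length back through the same buffer.
 *
 *	int hdrlen = TPACKET_V2;
 *	socklen_t len = sizeof(hdrlen);
 *	getsockopt(fd, SOL_PACKET, PACKET_HDRLEN, &hdrlen, &len);
 *
 * On return, hdrlen holds sizeof(struct tpacket2_hdr).
 */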


static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
	struct sock *sk;
	struct hlist_node *node;
	struct net_device *dev = data;
	struct net *net = dev_net(dev);

	rcu_read_lock();
	sk_for_each_rcu(sk, node, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist(dev, po->mclist, -1);
			/* fallthrough */

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__dev_remove_pack(&po->prot_hook);
					__sock_put(sk);
					po->running = 0;
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					po->ifindex = -1;
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num && !po->running) {
					dev_add_pack(&po->prot_hook);
					sock_hold(sk);
					po->running = 1;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		}
	}
	rcu_read_unlock();
	return NOTIFY_DONE;
}


static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

#ifdef CONFIG_INET
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		if (!net_eq(sock_net(sk), &init_net))
			return -ENOIOCTLCMD;
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}
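
/*
 * Illustrative userspace sketch: per the skb_peek() above, SIOCINQ on a
 * packet socket reports the length of the *next* queued packet, not the
 * total queue size, so it can be used to size a receive buffer.
 *
 *	int next_len = 0;
 *	ioctl(fd, SIOCINQ, &next_len);
 */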

static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}
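
/*
 * Illustrative userspace sketch: with an RX ring mapped, POLLIN from
 * poll() means at least one frame has been handed to user space, so
 * the ring can be drained without one syscall per packet.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 */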

/* Dirty? Well, I still have not found a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open	= packet_mm_open,
	.close	= packet_mm_close,
};

static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i]))
			free_pages((unsigned long) pg_vec[i], order);
	}
	kfree(pg_vec);
}

static inline char *alloc_one_pg_vec_page(unsigned long order)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;

	return (char *) __get_free_pages(gfp_flags, order);
}

static char **alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	char **pg_vec;
	int i;

	pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i]))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}
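
/*
 * Worked example (assuming 4 KiB pages): for tp_block_size == 16384,
 * get_order() returns 2, so each ring block is a single physically
 * contiguous __get_free_pages() allocation of four pages, and pg_vec
 * stores one such pointer per block.
 */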

static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
			   int closing, int tx_ring)
{
	char **pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (atomic_read(&rb->pending))
			goto out;
	}

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
			goto out;
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
					po->tp_reserve))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
		if (unlikely(rb->frames_per_block <= 0))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
	}
	/* Done */
	else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	lock_sock(sk);

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		__dev_remove_pack(&po->prot_hook);
		po->num = 0;
		po->running = 0;
		__sock_put(sk);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
		spin_lock_bh(&rb_queue->lock);
		pg_vec = XC(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		order = XC(rb->pg_vec_order, order);
		req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
#undef XC
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running && !po->running) {
		sock_hold(sk);
		po->running = 1;
		po->num = num;
		dev_add_pack(&po->prot_hook);
	}
	spin_unlock(&po->bind_lock);

	release_sock(sk);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
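
/*
 * Illustrative userspace sketch of the setup order this function
 * implies: choose a version, create the ring, then mmap() it (a single
 * mapping must cover the RX and TX rings back to back).  req is the
 * struct tpacket_req from the earlier example.
 *
 *	int v = TPACKET_V2;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &v, sizeof(v));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */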

static int packet_mmap(struct file *file, struct socket *sock,
		       struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page = virt_to_page(rb->pg_vec[i]);
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages;
					pg_num++, page++) {
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
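
/*
 * Illustrative userspace sketch: walking RX frames in the mapping set
 * up above, assuming TPACKET_V2 and a tp_block_size that is an exact
 * multiple of tp_frame_size (so frame i sits at i * tp_frame_size).
 * consume() stands in for application code.
 *
 *	for (i = 0; i < req.tp_frame_nr; i++) {
 *		struct tpacket2_hdr *hdr =
 *			(void *)(ring + i * req.tp_frame_size);
 *		if ((hdr->tp_status & TP_STATUS_USER) == 0)
 *			continue;
 *		consume(ring + i * req.tp_frame_size + hdr->tp_mac,
 *			hdr->tp_snaplen);
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *	}
 */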

static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner =	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%p %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   sock_i_uid(s),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static int __net_init packet_net_init(struct net *net)
{
	spin_lock_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	proc_net_remove(net, "packet");
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);