/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *	Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *	- The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
/* No hurry in this branch */
static void *__load_pointer(struct sk_buff *skb, int k)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr < skb_tail_pointer(skb))
		return ptr;
	return NULL;
}
static inline void *load_pointer(struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	else {
		if (k >= SKF_AD_OFF)
			return NULL;
		return __load_pointer(skb, k);
	}
}
/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned by
 * sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock_bh();
	filter = rcu_dereference_bh(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(sk_filter);
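
/*
 * Usage sketch (illustrative, not part of this file): a protocol's
 * receive path typically calls sk_filter() just before queueing a
 * packet and frees the skb when the filter rejects it, roughly:
 *
 *	err = sk_filter(sk, skb);
 *	if (err)
 *		goto drop;	// filter said -EPERM: free the skb
 *	// otherwise skb has been trimmed to the length the filter
 *	// returned and can be queued as usual
 */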
/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@filter: filter to apply
 *	@flen: length of filter
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. skb is the data we are
 * filtering, filter is the array of filter instructions, and
 * flen is the number of filter blocks in the array.
 */
unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	unsigned long memvalid = 0;	/* Bitmap of initialized mem[] slots */
	u32 tmp;
	int k;
	int pc;

	BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
	/*
	 * Process array of filter instructions.
	 */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *fentry = &filter[pc];
		u32 f_k = fentry->k;

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += f_k;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= f_k;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= f_k;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			A /= f_k;
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= f_k;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= f_k;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= f_k;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= f_k;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			pc += f_k;
			continue;
		case BPF_S_JMP_JGT_K:
			pc += (A > f_k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			pc += (A >= f_k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			pc += (A == f_k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			pc += (A & f_k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			pc += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			pc += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			pc += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			pc += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = f_k;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			break;
		case BPF_S_LD_H_ABS:
			k = f_k;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			break;
		case BPF_S_LD_B_ABS:
			k = f_k;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			break;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + f_k;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + f_k;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + f_k;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, f_k, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = f_k;
			continue;
		case BPF_S_LDX_IMM:
			X = f_k;
			continue;
		case BPF_S_LD_MEM:
			A = (memvalid & (1UL << f_k)) ?
				mem[f_k] : 0;
			continue;
		case BPF_S_LDX_MEM:
			X = (memvalid & (1UL << f_k)) ?
				mem[f_k] : 0;
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return f_k;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			memvalid |= 1UL << f_k;
			mem[f_k] = A;
			continue;
		case BPF_S_STX:
			memvalid |= 1UL << f_k;
			mem[f_k] = X;
			continue;
		default:
			WARN_ON(1);
			return 0;
		}

		/*
		 * Handle ancillary data, which are impossible
		 * (or very difficult) to get parsing packet contents.
		 */
		switch (k-SKF_AD_OFF) {
		case SKF_AD_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case SKF_AD_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case SKF_AD_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case SKF_AD_MARK:
			A = skb->mark;
			continue;
		case SKF_AD_QUEUE:
			A = skb->queue_mapping;
			continue;
		case SKF_AD_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case SKF_AD_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case SKF_AD_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
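
/*
 * Example (illustrative sketch): the kind of program sk_run_filter()
 * interprets. The negative offset SKF_AD_OFF + SKF_AD_PROTOCOL makes the
 * load fall through to the ancillary switch above, so A ends up holding
 * the host-order protocol; the program then keeps ARP frames only:
 *
 *	{ BPF_LD  | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_PROTOCOL },
 *	{ BPF_JMP | BPF_JEQ | BPF_K, 0, 1, ETH_P_ARP },
 *	{ BPF_RET | BPF_K, 0, 0, 0xffff },	// accept, keep up to 0xffff bytes
 *	{ BPF_RET | BPF_K, 0, 0, 0 },		// return 0: toss the packet
 *
 * Note that sk_chk_filter() below rewrites these raw BPF_* opcodes to
 * the BPF_S_* values this interpreter actually switches on.
 */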
/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K + 1,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X + 1,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K + 1,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X + 1,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K + 1,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X + 1,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X + 1,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K + 1,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X + 1,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K + 1,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X + 1,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K + 1,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X + 1,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K + 1,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X + 1,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG + 1,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS + 1,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS + 1,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS + 1,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN + 1,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND + 1,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND + 1,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND + 1,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM + 1,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN + 1,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH + 1,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM + 1,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX + 1,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA + 1,
		[BPF_RET|BPF_K]          = BPF_S_RET_K + 1,
		[BPF_RET|BPF_A]          = BPF_S_RET_A + 1,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K + 1,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM + 1,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM + 1,
		[BPF_ST]                 = BPF_S_ST + 1,
		[BPF_STX]                = BPF_S_STX + 1,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA + 1,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K + 1,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X + 1,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K + 1,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X + 1,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K + 1,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X + 1,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K + 1,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X + 1,
	};
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		/* Undo the '+ 1' in codes[] after validation. */
		if (!code--)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned)(flen-pc-1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		}
		ftest->code = code;
	}

	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
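
/*
 * Example (illustrative sketch): programs sk_chk_filter() rejects with
 * -EINVAL. The first because the false branch of the conditional jump
 * lands beyond the last instruction, the second because the program
 * does not end in a RET instruction:
 *
 *	{ BPF_JMP | BPF_JEQ | BPF_K, 0, 5, 42 },	// pc + jf + 1 >= flen
 *	{ BPF_RET | BPF_K, 0, 0, 0 },
 *
 *	{ BPF_LD | BPF_W | BPF_LEN, 0, 0, 0 },		// valid opcode, bad ending
 */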
/**
 *	sk_filter_rcu_release - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_rcu_release(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	sk_filter_release(fp);
}
static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	/* Uncharge the memory now, free the filter after an RCU grace period */
	atomic_sub(size, &sk->sk_omem_alloc);
	call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
}
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_delayed_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
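
/*
 * Usage sketch (illustrative, not part of this file): from user space
 * this function is reached through setsockopt(SOL_SOCKET, SO_ATTACH_FILTER).
 * A minimal example, assuming fd is a PF_PACKET socket and the program
 * keeps only IPv4 frames (ethertype at offset 12):
 *
 *	struct sock_filter insns[] = {
 *		{ BPF_LD  | BPF_H | BPF_ABS, 0, 0, 12 },
 *		{ BPF_JMP | BPF_JEQ | BPF_K, 0, 1, ETH_P_IP },
 *		{ BPF_RET | BPF_K, 0, 0, 0xffff },
 *		{ BPF_RET | BPF_K, 0, 0, 0 },
 *	};
 *	struct sock_fprog prog = {
 *		.len	= sizeof(insns) / sizeof(insns[0]),
 *		.filter	= insns,
 *	};
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *		       &prog, sizeof(prog)) < 0)
 *		perror("SO_ATTACH_FILTER");
 *
 * SO_DETACH_FILTER undoes this again via sk_detach_filter() below.
 */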
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		rcu_assign_pointer(sk->sk_filter, NULL);
		sk_filter_delayed_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);