/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *	Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *	- The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/filter.h>
/* No hurry in this branch */
static void *__load_pointer(struct sk_buff *skb, int k)
{
        u8 *ptr = NULL;

        if (k >= SKF_NET_OFF)
                ptr = skb->nh.raw + k - SKF_NET_OFF;
        else if (k >= SKF_LL_OFF)
                ptr = skb->mac.raw + k - SKF_LL_OFF;

        if (ptr >= skb->head && ptr < skb->tail)
                return ptr;
        return NULL;
}

static inline void *load_pointer(struct sk_buff *skb, int k,
                                 unsigned int size, void *buffer)
{
        if (k >= 0)
                return skb_header_pointer(skb, k, size, buffer);
        else {
                if (k >= SKF_AD_OFF)
                        return NULL;
                return __load_pointer(skb, k);
        }
}

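/*
 * Illustrative sketch (not part of the original file): negative offsets let
 * a filter read relative to a header base instead of skb->data.  A load at
 * SKF_NET_OFF + n is resolved by __load_pointer() above to byte n of the
 * network header; SKF_LL_OFF works the same way for the link-layer header.
 * The array name below is hypothetical; BPF_STMT()/BPF_JUMP() come from
 * <linux/filter.h>.
 */
#if 0	/* example only, never compiled */
static struct sock_filter net_header_example[] = {
        BPF_STMT(BPF_LD  | BPF_B   | BPF_ABS, SKF_NET_OFF),    /* A = first byte of the network header */
        BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 4),                 /* A = IP version nibble */
        BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 4, 0, 1),           /* IPv4? */
        BPF_STMT(BPF_RET | BPF_K, 0xffffffff),                  /* yes: keep the whole packet */
        BPF_STMT(BPF_RET | BPF_K, 0),                           /* no: drop it */
};
#endif
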
/**
 * sk_run_filter - run a filter on a socket
 * @skb: buffer to run the filter on
 * @filter: filter to apply
 * @flen: length of filter
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. skb is the data we are
 * filtering, filter is the array of filter instructions, and
 * flen is the number of filter blocks in the array.
 */

int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
{
        struct sock_filter *fentry;     /* We walk down these */
        void *ptr;
        u32 A = 0;                      /* Accumulator */
        u32 X = 0;                      /* Index Register */
        u32 mem[BPF_MEMWORDS];          /* Scratch Memory Store */
        u32 tmp;
        int k;
        int pc;

        /*
         * Process array of filter instructions.
         */
        for (pc = 0; pc < flen; pc++) {
                fentry = &filter[pc];

                switch (fentry->code) {
                case BPF_ALU|BPF_ADD|BPF_X:
                        A += X;
                        continue;
                case BPF_ALU|BPF_ADD|BPF_K:
                        A += fentry->k;
                        continue;
                case BPF_ALU|BPF_SUB|BPF_X:
                        A -= X;
                        continue;
                case BPF_ALU|BPF_SUB|BPF_K:
                        A -= fentry->k;
                        continue;
                case BPF_ALU|BPF_MUL|BPF_X:
                        A *= X;
                        continue;
                case BPF_ALU|BPF_MUL|BPF_K:
                        A *= fentry->k;
                        continue;
                case BPF_ALU|BPF_DIV|BPF_X:
                        if (X == 0)
                                return 0;
                        A /= X;
                        continue;
                case BPF_ALU|BPF_DIV|BPF_K:
                        A /= fentry->k;
                        continue;
                case BPF_ALU|BPF_AND|BPF_X:
                        A &= X;
                        continue;
                case BPF_ALU|BPF_AND|BPF_K:
                        A &= fentry->k;
                        continue;
                case BPF_ALU|BPF_OR|BPF_X:
                        A |= X;
                        continue;
                case BPF_ALU|BPF_OR|BPF_K:
                        A |= fentry->k;
                        continue;
                case BPF_ALU|BPF_LSH|BPF_X:
                        A <<= X;
                        continue;
                case BPF_ALU|BPF_LSH|BPF_K:
                        A <<= fentry->k;
                        continue;
                case BPF_ALU|BPF_RSH|BPF_X:
                        A >>= X;
                        continue;
                case BPF_ALU|BPF_RSH|BPF_K:
                        A >>= fentry->k;
                        continue;
                case BPF_ALU|BPF_NEG:
                        A = -A;
                        continue;
                case BPF_JMP|BPF_JA:
                        pc += fentry->k;
                        continue;
                case BPF_JMP|BPF_JGT|BPF_K:
                        pc += (A > fentry->k) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_JMP|BPF_JGE|BPF_K:
                        pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_JMP|BPF_JEQ|BPF_K:
                        pc += (A == fentry->k) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_JMP|BPF_JSET|BPF_K:
                        pc += (A & fentry->k) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_JMP|BPF_JGT|BPF_X:
                        pc += (A > X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_JMP|BPF_JGE|BPF_X:
                        pc += (A >= X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_JMP|BPF_JEQ|BPF_X:
                        pc += (A == X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_JMP|BPF_JSET|BPF_X:
                        pc += (A & X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_LD|BPF_W|BPF_ABS:
                        k = fentry->k;
load_w:
                        ptr = load_pointer(skb, k, 4, &tmp);
                        if (ptr != NULL) {
                                A = ntohl(*(u32 *)ptr);
                                continue;
                        }
                        break;
                case BPF_LD|BPF_H|BPF_ABS:
                        k = fentry->k;
load_h:
                        ptr = load_pointer(skb, k, 2, &tmp);
                        if (ptr != NULL) {
                                A = ntohs(*(u16 *)ptr);
                                continue;
                        }
                        break;
                case BPF_LD|BPF_B|BPF_ABS:
                        k = fentry->k;
load_b:
                        ptr = load_pointer(skb, k, 1, &tmp);
                        if (ptr != NULL) {
                                A = *(u8 *)ptr;
                                continue;
                        }
                        break;
                case BPF_LD|BPF_W|BPF_LEN:
                        A = skb->len;
                        continue;
                case BPF_LDX|BPF_W|BPF_LEN:
                        X = skb->len;
                        continue;
                case BPF_LD|BPF_W|BPF_IND:
                        k = X + fentry->k;
                        goto load_w;
                case BPF_LD|BPF_H|BPF_IND:
                        k = X + fentry->k;
                        goto load_h;
                case BPF_LD|BPF_B|BPF_IND:
                        k = X + fentry->k;
                        goto load_b;
                case BPF_LDX|BPF_B|BPF_MSH:
                        ptr = load_pointer(skb, fentry->k, 1, &tmp);
                        if (ptr != NULL) {
                                X = (*(u8 *)ptr & 0xf) << 2;
                                continue;
                        }
                        return 0;
                case BPF_LD|BPF_IMM:
                        A = fentry->k;
                        continue;
                case BPF_LDX|BPF_IMM:
                        X = fentry->k;
                        continue;
                case BPF_LD|BPF_MEM:
                        A = mem[fentry->k];
                        continue;
                case BPF_LDX|BPF_MEM:
                        X = mem[fentry->k];
                        continue;
                case BPF_MISC|BPF_TAX:
                        X = A;
                        continue;
                case BPF_MISC|BPF_TXA:
                        A = X;
                        continue;
                case BPF_RET|BPF_K:
                        return ((unsigned int)fentry->k);
                case BPF_RET|BPF_A:
                        return ((unsigned int)A);
                case BPF_ST:
                        mem[fentry->k] = A;
                        continue;
                case BPF_STX:
                        mem[fentry->k] = X;
                        continue;
                default:
                        /* Invalid instruction counts as RET */
                        return 0;
                }

                /*
                 * Handle ancillary data, which are impossible
                 * (or very difficult) to get by parsing the packet contents.
                 */
                switch (k-SKF_AD_OFF) {
                case SKF_AD_PROTOCOL:
                        A = htons(skb->protocol);
                        continue;
                case SKF_AD_PKTTYPE:
                        A = skb->pkt_type;
                        continue;
                case SKF_AD_IFINDEX:
                        A = skb->dev->ifindex;
                        continue;
                default:
                        return 0;
                }
        }

        return 0;
}

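/*
 * Illustrative sketch (not part of the original file): the kind of program
 * the interpreter above executes.  Assuming offset 0 is the Ethernet header
 * (as it is for a SOCK_RAW packet socket), it loads the EtherType halfword
 * at offset 12 and keeps the whole packet only for IPv4 frames; everything
 * else is dropped by returning 0.  The array name is hypothetical;
 * BPF_STMT()/BPF_JUMP() come from <linux/filter.h>.
 */
#if 0	/* example only, never compiled */
static struct sock_filter ipv4_only[] = {
        BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),              /* A = EtherType */
        BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1),      /* IPv4? */
        BPF_STMT(BPF_RET | BPF_K, 0xffffffff),                  /* keep the whole packet */
        BPF_STMT(BPF_RET | BPF_K, 0),                           /* drop it */
};
/* int keep = sk_run_filter(skb, ipv4_only, ARRAY_SIZE(ipv4_only)); */
#endif
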
/**
 * sk_chk_filter - verify socket filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal instructions,
 * and no backward jumps. It must end with a RET instruction.
 *
 * Returns 0 if the rule set is legal or a negative errno code if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
        struct sock_filter *ftest;
        int pc;

        if (((unsigned int)flen >= (~0U / sizeof(struct sock_filter))) || flen == 0)
                return -EINVAL;

        /* check the filter code now */
        for (pc = 0; pc < flen; pc++) {
                /* all jumps are forward as they are not signed */
                ftest = &filter[pc];
                if (BPF_CLASS(ftest->code) == BPF_JMP) {
                        /* but they mustn't jump off the end */
                        if (BPF_OP(ftest->code) == BPF_JA) {
                                /*
                                 * Note, the large ftest->k might cause loops.
                                 * Compare this with conditional jumps below,
                                 * where offsets are limited. --ANK (981016)
                                 */
                                if (ftest->k >= (unsigned)(flen-pc-1))
                                        return -EINVAL;
                        } else {
                                /* for conditionals both must be safe */
                                if (pc + ftest->jt + 1 >= flen ||
                                    pc + ftest->jf + 1 >= flen)
                                        return -EINVAL;
                        }
                }

                /* check for division by zero -Kris Katterjohn 2005-10-30 */
                if (ftest->code == (BPF_ALU|BPF_DIV|BPF_K) && ftest->k == 0)
                        return -EINVAL;

                /* check that memory operations use valid addresses. */
                if (ftest->k >= BPF_MEMWORDS) {
                        /* but it might not be a memory operation... */
                        switch (ftest->code) {
                        case BPF_ST:
                        case BPF_STX:
                        case BPF_LD|BPF_MEM:
                        case BPF_LDX|BPF_MEM:
                                return -EINVAL;
                        }
                }
        }

        /*
         * The program must end with a return. We don't care where they
         * jumped within the script (it's always forwards) but in the end
         * they _will_ hit this.
         */
        return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
}

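/*
 * Illustrative sketch (not part of the original file): a program that the
 * checks above reject.  The conditional jump's false branch points one
 * instruction past the end of the two-instruction program, so the
 * "for conditionals both must be safe" test returns -EINVAL.  The array
 * name is hypothetical.
 */
#if 0	/* example only, never compiled */
static struct sock_filter bad_prog[] = {
        BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, 2),   /* jf target is past the end */
        BPF_STMT(BPF_RET | BPF_K, 0),
};
/* sk_chk_filter(bad_prog, 2) == -EINVAL */
#endif
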
/**
 * sk_attach_filter - attach a socket filter
 * @fprog: the filter program
 * @sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
        struct sk_filter *fp;
        unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
        int err;

        /* Make sure new filter is there and in the right amounts. */
        if (fprog->filter == NULL || fprog->len > BPF_MAXINSNS)
                return -EINVAL;

        fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
        if (copy_from_user(fp->insns, fprog->filter, fsize)) {
                sock_kfree_s(sk, fp, fsize+sizeof(*fp));
                return -EFAULT;
        }

        atomic_set(&fp->refcnt, 1);
        fp->len = fprog->len;

        err = sk_chk_filter(fp->insns, fp->len);
        if (!err) {
                struct sk_filter *old_fp;

                spin_lock_bh(&sk->sk_lock.slock);
                old_fp = sk->sk_filter;
                sk->sk_filter = fp;
                spin_unlock_bh(&sk->sk_lock.slock);
                fp = old_fp;
        }

        if (fp)
                sk_filter_release(sk, fp);
        return err;
}

EXPORT_SYMBOL(sk_chk_filter);
EXPORT_SYMBOL(sk_run_filter);
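
/*
 * Illustrative sketch (not part of the original file): how user space
 * reaches sk_attach_filter().  setsockopt(SO_ATTACH_FILTER) hands a
 * struct sock_fprog down to this function; the helper below is
 * hypothetical userspace code, shown here only as a usage example.
 */
#if 0	/* userspace example only, never compiled here */
#include <sys/socket.h>
#include <linux/filter.h>

static int attach_accept_all(int fd)
{
        static struct sock_filter insns[] = {
                BPF_STMT(BPF_RET | BPF_K, 0xffffffff),  /* accept every packet in full */
        };
        struct sock_fprog prog = {
                .len    = sizeof(insns) / sizeof(insns[0]),
                .filter = insns,
        };

        return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
}
#endif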