1 | /* | |
2 | * Linux Socket Filter - Kernel level socket filtering | |
3 | * | |
4 | * Author: | |
5 | * Jay Schulist <jschlst@samba.org> | |
6 | * | |
7 | * Based on the design of: | |
8 | * - The Berkeley Packet Filter | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or | |
11 | * modify it under the terms of the GNU General Public License | |
12 | * as published by the Free Software Foundation; either version | |
13 | * 2 of the License, or (at your option) any later version. | |
14 | * | |
15 | * Andi Kleen - Fix a few bad bugs and races. | |
16 | * Kris Katterjohn - Added many additional checks in sk_chk_filter() | |
17 | */ | |
18 | ||
19 | #include <linux/module.h> | |
20 | #include <linux/types.h> | |
21 | #include <linux/mm.h> | |
22 | #include <linux/fcntl.h> | |
23 | #include <linux/socket.h> | |
24 | #include <linux/in.h> | |
25 | #include <linux/inet.h> | |
26 | #include <linux/netdevice.h> | |
27 | #include <linux/if_packet.h> | |
28 | #include <linux/gfp.h> | |
29 | #include <net/ip.h> | |
30 | #include <net/protocol.h> | |
31 | #include <net/netlink.h> | |
32 | #include <linux/skbuff.h> | |
33 | #include <net/sock.h> | |
34 | #include <linux/errno.h> | |
35 | #include <linux/timer.h> | |
36 | #include <asm/system.h> | |
37 | #include <asm/uaccess.h> | |
38 | #include <asm/unaligned.h> | |
39 | #include <linux/filter.h> | |
40 | ||
41 | /* No hurry in this branch */ | |
42 | static void *__load_pointer(struct sk_buff *skb, int k) | |
43 | { | |
44 | u8 *ptr = NULL; | |
45 | ||
46 | if (k >= SKF_NET_OFF) | |
47 | ptr = skb_network_header(skb) + k - SKF_NET_OFF; | |
48 | else if (k >= SKF_LL_OFF) | |
49 | ptr = skb_mac_header(skb) + k - SKF_LL_OFF; | |
50 | ||
51 | if (ptr >= skb->head && ptr < skb_tail_pointer(skb)) | |
52 | return ptr; | |
53 | return NULL; | |
54 | } | |
55 | ||
56 | static inline void *load_pointer(struct sk_buff *skb, int k, | |
57 | unsigned int size, void *buffer) | |
58 | { | |
59 | if (k >= 0) | |
60 | return skb_header_pointer(skb, k, size, buffer); | |
61 | else { | |
62 | if (k >= SKF_AD_OFF) | |
63 | return NULL; | |
64 | return __load_pointer(skb, k); | |
65 | } | |
66 | } | |
67 | ||
68 | /** | |
69 | * sk_filter - run a packet through a socket filter | |
70 | * @sk: sock associated with &sk_buff | |
71 | * @skb: buffer to filter | |
72 | * | |
73 | * Run the filter code and then cut skb->data to correct size returned by | |
74 | * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller | |
75 | * than pkt_len we keep whole skb->data. This is the socket level | |
76 | * wrapper to sk_run_filter. It returns 0 if the packet should | |
77 | * be accepted or -EPERM if the packet should be tossed. | |
78 | * | |
79 | */ | |
80 | int sk_filter(struct sock *sk, struct sk_buff *skb) | |
81 | { | |
82 | int err; | |
83 | struct sk_filter *filter; | |
84 | ||
85 | err = security_sock_rcv_skb(sk, skb); | |
86 | if (err) | |
87 | return err; | |
88 | ||
89 | rcu_read_lock_bh(); | |
90 | filter = rcu_dereference_bh(sk->sk_filter); | |
91 | if (filter) { | |
92 | unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len); | |
93 | ||
94 | err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; | |
95 | } | |
96 | rcu_read_unlock_bh(); | |
97 | ||
98 | return err; | |
99 | } | |
100 | EXPORT_SYMBOL(sk_filter); | |
101 | ||
102 | /** | |
103 | * sk_run_filter - run a filter on a socket | |
104 | * @skb: buffer to run the filter on | |
105 | * @filter: filter to apply | |
106 | * @flen: length of filter | |
107 | * | |
108 | * Decode and apply filter instructions to the skb->data. | |
109 | * Return length to keep, 0 for none. skb is the data we are | |
110 | * filtering, filter is the array of filter instructions, and | |
111 | * len is the number of filter blocks in the array. | |
112 | */ | |
113 | unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) | |
114 | { | |
115 | void *ptr; | |
116 | u32 A = 0; /* Accumulator */ | |
117 | u32 X = 0; /* Index Register */ | |
118 | u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ | |
119 | unsigned long memvalid = 0; | |
120 | u32 tmp; | |
121 | int k; | |
122 | int pc; | |
123 | ||
124 | BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG); | |
125 | /* | |
126 | * Process array of filter instructions. | |
127 | */ | |
128 | for (pc = 0; pc < flen; pc++) { | |
129 | const struct sock_filter *fentry = &filter[pc]; | |
130 | u32 f_k = fentry->k; | |
131 | ||
132 | switch (fentry->code) { | |
133 | case BPF_S_ALU_ADD_X: | |
134 | A += X; | |
135 | continue; | |
136 | case BPF_S_ALU_ADD_K: | |
137 | A += f_k; | |
138 | continue; | |
139 | case BPF_S_ALU_SUB_X: | |
140 | A -= X; | |
141 | continue; | |
142 | case BPF_S_ALU_SUB_K: | |
143 | A -= f_k; | |
144 | continue; | |
145 | case BPF_S_ALU_MUL_X: | |
146 | A *= X; | |
147 | continue; | |
148 | case BPF_S_ALU_MUL_K: | |
149 | A *= f_k; | |
150 | continue; | |
151 | case BPF_S_ALU_DIV_X: | |
152 | if (X == 0) | |
153 | return 0; | |
154 | A /= X; | |
155 | continue; | |
156 | case BPF_S_ALU_DIV_K: | |
157 | A /= f_k; | |
158 | continue; | |
159 | case BPF_S_ALU_AND_X: | |
160 | A &= X; | |
161 | continue; | |
162 | case BPF_S_ALU_AND_K: | |
163 | A &= f_k; | |
164 | continue; | |
165 | case BPF_S_ALU_OR_X: | |
166 | A |= X; | |
167 | continue; | |
168 | case BPF_S_ALU_OR_K: | |
169 | A |= f_k; | |
170 | continue; | |
171 | case BPF_S_ALU_LSH_X: | |
172 | A <<= X; | |
173 | continue; | |
174 | case BPF_S_ALU_LSH_K: | |
175 | A <<= f_k; | |
176 | continue; | |
177 | case BPF_S_ALU_RSH_X: | |
178 | A >>= X; | |
179 | continue; | |
180 | case BPF_S_ALU_RSH_K: | |
181 | A >>= f_k; | |
182 | continue; | |
183 | case BPF_S_ALU_NEG: | |
184 | A = -A; | |
185 | continue; | |
186 | case BPF_S_JMP_JA: | |
187 | pc += f_k; | |
188 | continue; | |
189 | case BPF_S_JMP_JGT_K: | |
190 | pc += (A > f_k) ? fentry->jt : fentry->jf; | |
191 | continue; | |
192 | case BPF_S_JMP_JGE_K: | |
193 | pc += (A >= f_k) ? fentry->jt : fentry->jf; | |
194 | continue; | |
195 | case BPF_S_JMP_JEQ_K: | |
196 | pc += (A == f_k) ? fentry->jt : fentry->jf; | |
197 | continue; | |
198 | case BPF_S_JMP_JSET_K: | |
199 | pc += (A & f_k) ? fentry->jt : fentry->jf; | |
200 | continue; | |
201 | case BPF_S_JMP_JGT_X: | |
202 | pc += (A > X) ? fentry->jt : fentry->jf; | |
203 | continue; | |
204 | case BPF_S_JMP_JGE_X: | |
205 | pc += (A >= X) ? fentry->jt : fentry->jf; | |
206 | continue; | |
207 | case BPF_S_JMP_JEQ_X: | |
208 | pc += (A == X) ? fentry->jt : fentry->jf; | |
209 | continue; | |
210 | case BPF_S_JMP_JSET_X: | |
211 | pc += (A & X) ? fentry->jt : fentry->jf; | |
212 | continue; | |
213 | case BPF_S_LD_W_ABS: | |
214 | k = f_k; | |
215 | load_w: | |
216 | ptr = load_pointer(skb, k, 4, &tmp); | |
217 | if (ptr != NULL) { | |
218 | A = get_unaligned_be32(ptr); | |
219 | continue; | |
220 | } | |
221 | break; | |
222 | case BPF_S_LD_H_ABS: | |
223 | k = f_k; | |
224 | load_h: | |
225 | ptr = load_pointer(skb, k, 2, &tmp); | |
226 | if (ptr != NULL) { | |
227 | A = get_unaligned_be16(ptr); | |
228 | continue; | |
229 | } | |
230 | break; | |
231 | case BPF_S_LD_B_ABS: | |
232 | k = f_k; | |
233 | load_b: | |
234 | ptr = load_pointer(skb, k, 1, &tmp); | |
235 | if (ptr != NULL) { | |
236 | A = *(u8 *)ptr; | |
237 | continue; | |
238 | } | |
239 | break; | |
240 | case BPF_S_LD_W_LEN: | |
241 | A = skb->len; | |
242 | continue; | |
243 | case BPF_S_LDX_W_LEN: | |
244 | X = skb->len; | |
245 | continue; | |
246 | case BPF_S_LD_W_IND: | |
247 | k = X + f_k; | |
248 | goto load_w; | |
249 | case BPF_S_LD_H_IND: | |
250 | k = X + f_k; | |
251 | goto load_h; | |
252 | case BPF_S_LD_B_IND: | |
253 | k = X + f_k; | |
254 | goto load_b; | |
255 | case BPF_S_LDX_B_MSH: | |
256 | ptr = load_pointer(skb, f_k, 1, &tmp); | |
257 | if (ptr != NULL) { | |
258 | X = (*(u8 *)ptr & 0xf) << 2; | |
259 | continue; | |
260 | } | |
261 | return 0; | |
262 | case BPF_S_LD_IMM: | |
263 | A = f_k; | |
264 | continue; | |
265 | case BPF_S_LDX_IMM: | |
266 | X = f_k; | |
267 | continue; | |
268 | case BPF_S_LD_MEM: | |
269 | A = (memvalid & (1UL << f_k)) ? | |
270 | mem[f_k] : 0; | |
271 | continue; | |
272 | case BPF_S_LDX_MEM: | |
273 | X = (memvalid & (1UL << f_k)) ? | |
274 | mem[f_k] : 0; | |
275 | continue; | |
276 | case BPF_S_MISC_TAX: | |
277 | X = A; | |
278 | continue; | |
279 | case BPF_S_MISC_TXA: | |
280 | A = X; | |
281 | continue; | |
282 | case BPF_S_RET_K: | |
283 | return f_k; | |
284 | case BPF_S_RET_A: | |
285 | return A; | |
286 | case BPF_S_ST: | |
287 | memvalid |= 1UL << f_k; | |
288 | mem[f_k] = A; | |
289 | continue; | |
290 | case BPF_S_STX: | |
291 | memvalid |= 1UL << f_k; | |
292 | mem[f_k] = X; | |
293 | continue; | |
294 | default: | |
295 | WARN_ON(1); | |
296 | return 0; | |
297 | } | |
298 | ||
299 | /* | |
300 | * Handle ancillary data, which are impossible | |
301 | * (or very difficult) to get parsing packet contents. | |
302 | */ | |
303 | switch (k-SKF_AD_OFF) { | |
304 | case SKF_AD_PROTOCOL: | |
305 | A = ntohs(skb->protocol); | |
306 | continue; | |
307 | case SKF_AD_PKTTYPE: | |
308 | A = skb->pkt_type; | |
309 | continue; | |
310 | case SKF_AD_IFINDEX: | |
311 | if (!skb->dev) | |
312 | return 0; | |
313 | A = skb->dev->ifindex; | |
314 | continue; | |
315 | case SKF_AD_MARK: | |
316 | A = skb->mark; | |
317 | continue; | |
318 | case SKF_AD_QUEUE: | |
319 | A = skb->queue_mapping; | |
320 | continue; | |
321 | case SKF_AD_HATYPE: | |
322 | if (!skb->dev) | |
323 | return 0; | |
324 | A = skb->dev->type; | |
325 | continue; | |
326 | case SKF_AD_NLATTR: { | |
327 | struct nlattr *nla; | |
328 | ||
329 | if (skb_is_nonlinear(skb)) | |
330 | return 0; | |
331 | if (A > skb->len - sizeof(struct nlattr)) | |
332 | return 0; | |
333 | ||
334 | nla = nla_find((struct nlattr *)&skb->data[A], | |
335 | skb->len - A, X); | |
336 | if (nla) | |
337 | A = (void *)nla - (void *)skb->data; | |
338 | else | |
339 | A = 0; | |
340 | continue; | |
341 | } | |
342 | case SKF_AD_NLATTR_NEST: { | |
343 | struct nlattr *nla; | |
344 | ||
345 | if (skb_is_nonlinear(skb)) | |
346 | return 0; | |
347 | if (A > skb->len - sizeof(struct nlattr)) | |
348 | return 0; | |
349 | ||
350 | nla = (struct nlattr *)&skb->data[A]; | |
351 | if (nla->nla_len > A - skb->len) | |
352 | return 0; | |
353 | ||
354 | nla = nla_find_nested(nla, X); | |
355 | if (nla) | |
356 | A = (void *)nla - (void *)skb->data; | |
357 | else | |
358 | A = 0; | |
359 | continue; | |
360 | } | |
361 | default: | |
362 | return 0; | |
363 | } | |
364 | } | |
365 | ||
366 | return 0; | |
367 | } | |
368 | EXPORT_SYMBOL(sk_run_filter); | |
369 | ||
/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 *
 * On success each instruction's ->code has been rewritten IN PLACE
 * from the userspace BPF_* encoding to the kernel-internal BPF_S_*
 * enum that sk_run_filter() switches on, so @filter must be the
 * kernel copy of the program, never a user buffer.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 *
	 * The table maps the userspace opcode to (BPF_S_* + 1); the +1
	 * lets the implicit zero of unlisted entries mean "invalid",
	 * since BPF_S_ALU_ADD_K itself may be 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K + 1,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X + 1,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K + 1,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X + 1,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K + 1,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X + 1,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X + 1,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K + 1,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X + 1,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K + 1,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X + 1,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K + 1,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X + 1,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K + 1,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X + 1,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG + 1,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS + 1,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS + 1,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS + 1,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN + 1,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND + 1,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND + 1,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND + 1,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM + 1,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN + 1,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH + 1,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM + 1,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX + 1,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA + 1,
		[BPF_RET|BPF_K]          = BPF_S_RET_K + 1,
		[BPF_RET|BPF_A]          = BPF_S_RET_A + 1,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K + 1,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM + 1,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM + 1,
		[BPF_ST]                 = BPF_S_ST + 1,
		[BPF_STX]                = BPF_S_STX + 1,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA + 1,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K + 1,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X + 1,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K + 1,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X + 1,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K + 1,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X + 1,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K + 1,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X + 1,
	};
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		/* Undo the '+ 1' in codes[] after validation. */
		if (!code--)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned)(flen-pc-1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		}
		/* Commit the translated opcode for sk_run_filter(). */
		ftest->code = code;
	}

	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
503 | ||
504 | /** | |
505 | * sk_filter_rcu_release: Release a socket filter by rcu_head | |
506 | * @rcu: rcu_head that contains the sk_filter to free | |
507 | */ | |
508 | static void sk_filter_rcu_release(struct rcu_head *rcu) | |
509 | { | |
510 | struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); | |
511 | ||
512 | sk_filter_release(fp); | |
513 | } | |
514 | ||
515 | static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp) | |
516 | { | |
517 | unsigned int size = sk_filter_len(fp); | |
518 | ||
519 | atomic_sub(size, &sk->sk_omem_alloc); | |
520 | call_rcu_bh(&fp->rcu, sk_filter_rcu_release); | |
521 | } | |
522 | ||
523 | /** | |
524 | * sk_attach_filter - attach a socket filter | |
525 | * @fprog: the filter program | |
526 | * @sk: the socket to use | |
527 | * | |
528 | * Attach the user's filter code. We first run some sanity checks on | |
529 | * it to make sure it does not explode on us later. If an error | |
530 | * occurs or there is insufficient memory for the filter a negative | |
531 | * errno code is returned. On success the return is zero. | |
532 | */ | |
533 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) | |
534 | { | |
535 | struct sk_filter *fp, *old_fp; | |
536 | unsigned int fsize = sizeof(struct sock_filter) * fprog->len; | |
537 | int err; | |
538 | ||
539 | /* Make sure new filter is there and in the right amounts. */ | |
540 | if (fprog->filter == NULL) | |
541 | return -EINVAL; | |
542 | ||
543 | fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL); | |
544 | if (!fp) | |
545 | return -ENOMEM; | |
546 | if (copy_from_user(fp->insns, fprog->filter, fsize)) { | |
547 | sock_kfree_s(sk, fp, fsize+sizeof(*fp)); | |
548 | return -EFAULT; | |
549 | } | |
550 | ||
551 | atomic_set(&fp->refcnt, 1); | |
552 | fp->len = fprog->len; | |
553 | ||
554 | err = sk_chk_filter(fp->insns, fp->len); | |
555 | if (err) { | |
556 | sk_filter_uncharge(sk, fp); | |
557 | return err; | |
558 | } | |
559 | ||
560 | old_fp = rcu_dereference_protected(sk->sk_filter, | |
561 | sock_owned_by_user(sk)); | |
562 | rcu_assign_pointer(sk->sk_filter, fp); | |
563 | ||
564 | if (old_fp) | |
565 | sk_filter_delayed_uncharge(sk, old_fp); | |
566 | return 0; | |
567 | } | |
568 | EXPORT_SYMBOL_GPL(sk_attach_filter); | |
569 | ||
570 | int sk_detach_filter(struct sock *sk) | |
571 | { | |
572 | int ret = -ENOENT; | |
573 | struct sk_filter *filter; | |
574 | ||
575 | filter = rcu_dereference_protected(sk->sk_filter, | |
576 | sock_owned_by_user(sk)); | |
577 | if (filter) { | |
578 | rcu_assign_pointer(sk->sk_filter, NULL); | |
579 | sk_filter_delayed_uncharge(sk, filter); | |
580 | ret = 0; | |
581 | } | |
582 | return ret; | |
583 | } | |
584 | EXPORT_SYMBOL_GPL(sk_detach_filter); |