/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *     Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *     - The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>

/* No hurry in this branch */
static void *__load_pointer(struct sk_buff *skb, int k)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr < skb_tail_pointer(skb))
		return ptr;
	return NULL;
}

static inline void *load_pointer(struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	if (k >= SKF_AD_OFF)
		return NULL;
	return __load_pointer(skb, k);
}

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to correct size returned by
 * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock_bh();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = sk_run_filter(skb, filter->insns,
						     filter->len);
		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(sk_filter);
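
/*
 * Illustrative sketch: a receive-path caller is expected to run
 * sk_filter() before queueing the skb to the socket and to drop the
 * packet when the filter (or the security hook) rejects it.  The helper
 * below is hypothetical and only shows the calling convention.
 */
static inline int example_filter_and_queue(struct sock *sk,
					   struct sk_buff *skb)
{
	int err = sk_filter(sk, skb);

	if (err) {
		kfree_skb(skb);		/* filter said drop */
		return err;
	}
	/* ... queue skb on sk's receive queue ... */
	return 0;
}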

/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@filter: filter to apply
 *	@flen: length of filter
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. skb is the data we are
 * filtering, filter is the array of filter instructions, and
 * flen is the number of filter blocks in the array.
 */
unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
{
	struct sock_filter *fentry;	/* We walk down these */
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;
	int pc;

	/*
	 * Process array of filter instructions.
	 */
	for (pc = 0; pc < flen; pc++) {
		fentry = &filter[pc];

		switch (fentry->code) {
		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;
		case BPF_ALU|BPF_ADD|BPF_K:
			A += fentry->k;
			continue;
		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;
		case BPF_ALU|BPF_SUB|BPF_K:
			A -= fentry->k;
			continue;
		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;
		case BPF_ALU|BPF_MUL|BPF_K:
			A *= fentry->k;
			continue;
		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_ALU|BPF_DIV|BPF_K:
			A /= fentry->k;
			continue;
		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;
		case BPF_ALU|BPF_AND|BPF_K:
			A &= fentry->k;
			continue;
		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;
		case BPF_ALU|BPF_OR|BPF_K:
			A |= fentry->k;
			continue;
		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;
		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= fentry->k;
			continue;
		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;
		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= fentry->k;
			continue;
		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;
		case BPF_JMP|BPF_JA:
			pc += fentry->k;
			continue;
		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_LD|BPF_W|BPF_ABS:
			k = fentry->k;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			break;
		case BPF_LD|BPF_H|BPF_ABS:
			k = fentry->k;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			break;
		case BPF_LD|BPF_B|BPF_ABS:
			k = fentry->k;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			break;
		case BPF_LD|BPF_W|BPF_LEN:
			A = skb->len;
			continue;
		case BPF_LDX|BPF_W|BPF_LEN:
			X = skb->len;
			continue;
		case BPF_LD|BPF_W|BPF_IND:
			k = X + fentry->k;
			goto load_w;
		case BPF_LD|BPF_H|BPF_IND:
			k = X + fentry->k;
			goto load_h;
		case BPF_LD|BPF_B|BPF_IND:
			k = X + fentry->k;
			goto load_b;
		case BPF_LDX|BPF_B|BPF_MSH:
			ptr = load_pointer(skb, fentry->k, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_LD|BPF_IMM:
			A = fentry->k;
			continue;
		case BPF_LDX|BPF_IMM:
			X = fentry->k;
			continue;
		case BPF_LD|BPF_MEM:
			A = mem[fentry->k];
			continue;
		case BPF_LDX|BPF_MEM:
			X = mem[fentry->k];
			continue;
		case BPF_MISC|BPF_TAX:
			X = A;
			continue;
		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		case BPF_RET|BPF_K:
			return fentry->k;
		case BPF_RET|BPF_A:
			return A;
		case BPF_ST:
			mem[fentry->k] = A;
			continue;
		case BPF_STX:
			mem[fentry->k] = X;
			continue;
		default:
			WARN_ON(1);
			return 0;
		}

		/*
		 * Handle ancillary data, which is impossible
		 * (or very difficult) to obtain by parsing the
		 * packet contents.
		 */
		switch (k-SKF_AD_OFF) {
		case SKF_AD_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case SKF_AD_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case SKF_AD_IFINDEX:
			A = skb->dev->ifindex;
			continue;
		case SKF_AD_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
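
/*
 * Illustrative sketch: a minimal filter program for sk_run_filter().
 * The first instruction loads skb->len into the accumulator, the second
 * returns it, i.e. "keep the whole packet".  The wrapper name is
 * hypothetical.
 */
static inline unsigned int example_run_keep_all(struct sk_buff *skb)
{
	static struct sock_filter insns[] = {
		{ BPF_LD | BPF_W | BPF_LEN, 0, 0, 0 },	/* A = skb->len */
		{ BPF_RET | BPF_A, 0, 0, 0 },		/* return A */
	};

	return sk_run_filter(skb, insns, ARRAY_SIZE(insns));
}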

/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
	struct sock_filter *ftest;
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		ftest = &filter[pc];

		/* Only allow valid instructions */
		switch (ftest->code) {
		case BPF_ALU|BPF_ADD|BPF_K:
		case BPF_ALU|BPF_ADD|BPF_X:
		case BPF_ALU|BPF_SUB|BPF_K:
		case BPF_ALU|BPF_SUB|BPF_X:
		case BPF_ALU|BPF_MUL|BPF_K:
		case BPF_ALU|BPF_MUL|BPF_X:
		case BPF_ALU|BPF_DIV|BPF_X:
		case BPF_ALU|BPF_AND|BPF_K:
		case BPF_ALU|BPF_AND|BPF_X:
		case BPF_ALU|BPF_OR|BPF_K:
		case BPF_ALU|BPF_OR|BPF_X:
		case BPF_ALU|BPF_LSH|BPF_K:
		case BPF_ALU|BPF_LSH|BPF_X:
		case BPF_ALU|BPF_RSH|BPF_K:
		case BPF_ALU|BPF_RSH|BPF_X:
		case BPF_ALU|BPF_NEG:
		case BPF_LD|BPF_W|BPF_ABS:
		case BPF_LD|BPF_H|BPF_ABS:
		case BPF_LD|BPF_B|BPF_ABS:
		case BPF_LD|BPF_W|BPF_LEN:
		case BPF_LD|BPF_W|BPF_IND:
		case BPF_LD|BPF_H|BPF_IND:
		case BPF_LD|BPF_B|BPF_IND:
		case BPF_LD|BPF_IMM:
		case BPF_LDX|BPF_W|BPF_LEN:
		case BPF_LDX|BPF_B|BPF_MSH:
		case BPF_LDX|BPF_IMM:
		case BPF_MISC|BPF_TAX:
		case BPF_MISC|BPF_TXA:
		case BPF_RET|BPF_K:
		case BPF_RET|BPF_A:
			break;

		/* Some instructions need special checks */

		case BPF_ALU|BPF_DIV|BPF_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;

		case BPF_LD|BPF_MEM:
		case BPF_LDX|BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;

		case BPF_JMP|BPF_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned)(flen-pc-1))
				return -EINVAL;
			break;

		case BPF_JMP|BPF_JEQ|BPF_K:
		case BPF_JMP|BPF_JEQ|BPF_X:
		case BPF_JMP|BPF_JGE|BPF_K:
		case BPF_JMP|BPF_JGE|BPF_X:
		case BPF_JMP|BPF_JGT|BPF_K:
		case BPF_JMP|BPF_JGT|BPF_X:
		case BPF_JMP|BPF_JSET|BPF_K:
		case BPF_JMP|BPF_JSET|BPF_X:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	}

	return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
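
/*
 * Illustrative sketch: sk_chk_filter() should reject a program whose
 * BPF_ALU|BPF_DIV|BPF_K instruction has k == 0 (and, independently, any
 * program that does not end in a RET).  The function name below is
 * hypothetical.
 */
static inline int example_check_rejects_div_by_zero(void)
{
	struct sock_filter bad[] = {
		{ BPF_ALU | BPF_DIV | BPF_K, 0, 0, 0 },	/* A /= 0 */
	};

	return sk_chk_filter(bad, ARRAY_SIZE(bad));	/* expected: -EINVAL */
}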

/**
 *	sk_filter_rcu_release - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_rcu_release(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	sk_filter_release(fp);
}

static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);
	call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	rcu_read_lock_bh();
	old_fp = rcu_dereference(sk->sk_filter);
	rcu_assign_pointer(sk->sk_filter, fp);
	rcu_read_unlock_bh();

	if (old_fp)
		sk_filter_delayed_uncharge(sk, old_fp);
	return 0;
}
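
/*
 * Illustrative sketch: sk_attach_filter() is normally reached from
 * sock_setsockopt() when userspace attaches a filter with
 * SO_ATTACH_FILTER, roughly:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffff },	// accept every packet
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * A failing sk_chk_filter() or copy_from_user() propagates a negative
 * errno back to that setsockopt() call.
 */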

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	rcu_read_lock_bh();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		rcu_assign_pointer(sk->sk_filter, NULL);
		sk_filter_delayed_uncharge(sk, filter);
		ret = 0;
	}
	rcu_read_unlock_bh();
	return ret;
}