/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	7.5 (Berkeley) 7/15/91
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <pcap/pcap-inttypes.h>
#include "pcap-types.h"
#include "extract.h"

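/*
 * BPF loads fetch multi-byte packet fields in network (big-endian) byte
 * order, so map the interpreter's generic names onto the big-endian
 * extraction macros from extract.h.
 */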
#define EXTRACT_SHORT	EXTRACT_BE_U_2
#define EXTRACT_LONG	EXTRACT_BE_U_4

#ifndef _WIN32
#include <sys/param.h>
#include <sys/types.h>
#include <sys/time.h>
#endif /* _WIN32 */

#include <pcap-int.h>

#include <stdlib.h>

#ifdef __linux__
#include <linux/types.h>
#include <linux/if_packet.h>
#include <linux/filter.h>
#endif

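/*
 * Linux "ancillary data" load types; the userland interpreter below can
 * emulate the VLAN tag value and VLAN-tag-present loads when the kernel
 * rejects a filter (see the SKF_AD_* cases in pcap_filter_with_aux_data()).
 */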
enum {
	BPF_S_ANC_NONE,
	BPF_S_ANC_VLAN_TAG,
	BPF_S_ANC_VLAN_TAG_PRESENT,
};

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet; buflen is the amount of
 * packet data present.
 * aux_data is auxiliary data, currently used only when interpreting
 * filters intended for the Linux kernel in cases where the kernel
 * rejects the filter; it contains VLAN tag information.
 * For the kernel, p is assumed to be a pointer to an mbuf if buflen is 0;
 * in all other cases, p is a pointer to a buffer and buflen is its size.
 *
 * Thanks to Ani Sinha <ani@arista.com> for providing the initial
 * implementation.
 */
#if defined(SKF_AD_VLAN_TAG_PRESENT)
u_int
pcap_filter_with_aux_data(const struct bpf_insn *pc, const u_char *p,
    u_int wirelen, u_int buflen, const struct bpf_aux_data *aux_data)
#else
u_int
pcap_filter_with_aux_data(const struct bpf_insn *pc, const u_char *p,
    u_int wirelen, u_int buflen, const struct bpf_aux_data *aux_data _U_)
#endif
{
	register uint32_t A, X;
	register bpf_u_int32 k;
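	/* Scratch memory for BPF_ST/BPF_STX stores and BPF_LD/BPF_LDX|BPF_MEM loads. */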
	uint32_t mem[BPF_MEMWORDS];

	if (pc == 0)
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	A = 0;
	X = 0;
	--pc;
	for (;;) {
		++pc;
		switch (pc->code) {

		default:
			abort();
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
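			/*
			 * The bounds test is written so that "k + sizeof(...)"
			 * is never computed directly, which keeps the check
			 * safe from unsigned overflow when k is near the top
			 * of the bpf_u_int32 range.
			 */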
			if (k > buflen || sizeof(int32_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int16_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			switch (pc->k) {

#if defined(SKF_AD_VLAN_TAG_PRESENT)
			case SKF_AD_OFF + SKF_AD_VLAN_TAG:
				if (!aux_data)
					return 0;
				A = aux_data->vlan_tag;
				break;

			case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
				if (!aux_data)
					return 0;
				A = aux_data->vlan_tag_present;
				break;
#endif
			default:
				k = pc->k;
				if (k >= buflen) {
					return 0;
				}
				A = p[k];
				break;
			}
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int32_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (X > buflen || pc->k > buflen - X ||
			    sizeof(int16_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (pc->k >= buflen || X >= buflen - pc->k) {
				return 0;
			}
			A = p[k];
			continue;

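		/*
		 * Load the low 4 bits of p[k], multiplied by 4, into X;
		 * classically used to pull the IPv4 header length, in
		 * bytes, out of the IHL field.
		 */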
		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= buflen) {
				return 0;
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = mem[pc->k];
			continue;

		case BPF_ST:
			mem[pc->k] = A;
			continue;

		case BPF_STX:
			mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			/*
			 * XXX - we currently implement "ip6 protochain"
			 * with backward jumps, so sign-extend pc->k.
			 */
			pc += (bpf_int32)pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_MOD|BPF_X:
			if (X == 0)
				return 0;
			A %= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_XOR|BPF_X:
			A ^= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			if (X < 32)
				A <<= X;
			else
				A = 0;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			if (X < 32)
				A >>= X;
			else
				A = 0;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

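		/*
		 * The BPF_K forms of DIV and MOD have no run-time zero
		 * check; pcap_validate_filter() below rejects programs
		 * that divide or take a modulus by a constant zero.
		 */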
		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_MOD|BPF_K:
			A %= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_XOR|BPF_K:
			A ^= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			/*
			 * Most BPF arithmetic is unsigned, but negation
			 * can't be unsigned; respecify it as subtracting
			 * the accumulator from 0U, so that 1) we don't
			 * get compiler warnings about negating an unsigned
			 * value and 2) don't get UBSan warnings about
			 * the result of negating 0x80000000 being undefined.
			 */
			A = (0U - A);
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		}
	}
}

u_int
pcap_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	return pcap_filter_with_aux_data(pc, p, wirelen, buflen, NULL);
}

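/*
 * Illustrative sketch (not part of the library): running a hand-built
 * program through pcap_filter().  The four-instruction program below
 * accepts Ethernet frames whose EtherType (the 16-bit field at offset 12)
 * is 0x0800 (IPv4) and rejects everything else; "pkt" and "pktlen" are
 * hypothetical names for a captured frame and its captured length.
 *
 *	struct bpf_insn prog[] = {
 *		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),
 *		BPF_STMT(BPF_RET|BPF_K, 65535),
 *		BPF_STMT(BPF_RET|BPF_K, 0),
 *	};
 *	u_int keep = pcap_filter(prog, pkt, pktlen, pktlen);
 *
 * A non-zero return (here 65535, the amount of the packet to keep) means
 * the packet matched; 0 means it was rejected.
 */
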
/*
 * Return true if 'f' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * code, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
int
pcap_validate_filter(const struct bpf_insn *f, int len)
{
	u_int i, from;
	const struct bpf_insn *p;

	if (len < 1)
		return 0;

	for (i = 0; i < (u_int)len; ++i) {
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_IMM:
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
				/*
				 * There's no maximum packet data size
				 * in userland.  The runtime packet length
				 * check suffices.
				 */
				break;
			case BPF_MEM:
				if (p->k >= BPF_MEMWORDS)
					return 0;
				break;
			case BPF_LEN:
				break;
			default:
				return 0;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				return 0;
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_XOR:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
			case BPF_MOD:
				/*
				 * Check for constant division or modulus
				 * by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= (u_int)len)
					return 0;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= (u_int)len || from + p->jf >= (u_int)len)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			return 0;
		}
	}
	return BPF_CLASS(f[len - 1].code) == BPF_RET;
}

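/*
 * Illustrative sketch (not part of the library): validate a program before
 * handing it to the interpreter.  "prog" is the hypothetical instruction
 * array from the pcap_filter() sketch above.
 *
 *	if (!pcap_validate_filter(prog, sizeof prog / sizeof prog[0]))
 *		handle_bad_filter();
 *
 * handle_bad_filter() stands in for whatever error handling the caller
 * wants; the important point is that only programs that pass validation
 * should be run.
 */
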
/*
 * Exported because older versions of libpcap exported them.
 */
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	return pcap_filter(pc, p, wirelen, buflen);
}

int
bpf_validate(const struct bpf_insn *f, int len)
{
	return pcap_validate_filter(f, len);
}