]> The Tcpdump Group git mirrors - libpcap/blob - optimize.c
Merge pull request #1082 from luizluca/realtek_tag_2
[libpcap] / optimize.c
1 /*
2 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that: (1) source code distributions
7 * retain the above copyright notice and this paragraph in its entirety, (2)
8 * distributions including binary code include the above copyright notice and
9 * this paragraph in its entirety in the documentation or other materials
10 * provided with the distribution, and (3) all advertising materials mentioning
11 * features or use of this software display the following acknowledgement:
12 * ``This product includes software developed by the University of California,
13 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
14 * the University nor the names of its contributors may be used to endorse
15 * or promote products derived from this software without specific prior
16 * written permission.
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20 *
21 * Optimization module for BPF code intermediate representation.
22 */
23
24 #ifdef HAVE_CONFIG_H
25 #include <config.h>
26 #endif
27
28 #include <pcap-types.h>
29
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <memory.h>
33 #include <setjmp.h>
34 #include <string.h>
35 #include <limits.h> /* for SIZE_MAX */
36 #include <errno.h>
37
38 #include "pcap-int.h"
39
40 #include "gencode.h"
41 #include "optimize.h"
42 #include "diag-control.h"
43
44 #ifdef HAVE_OS_PROTO_H
45 #include "os-proto.h"
46 #endif
47
48 #ifdef BDEBUG
49 /*
50 * The internal "debug printout" flag for the filter expression optimizer.
51 * The code to print that stuff is present only if BDEBUG is defined, so
52 * the flag, and the routine to set it, are defined only if BDEBUG is
53 * defined.
54 */
55 static int pcap_optimizer_debug;
56
57 /*
58 * Routine to set that flag.
59 *
60 * This is intended for libpcap developers, not for general use.
61 * If you want to set these in a program, you'll have to declare this
62 * routine yourself, with the appropriate DLL import attribute on Windows;
63 * it's not declared in any header file, and won't be declared in any
64 * header file provided by libpcap.
65 */
66 PCAP_API void pcap_set_optimizer_debug(int value);
67
68 PCAP_API_DEF void
69 pcap_set_optimizer_debug(int value)
70 {
71 pcap_optimizer_debug = value;
72 }
73
74 /*
75 * The internal "print dot graph" flag for the filter expression optimizer.
76 * The code to print that stuff is present only if BDEBUG is defined, so
77 * the flag, and the routine to set it, are defined only if BDEBUG is
78 * defined.
79 */
80 static int pcap_print_dot_graph;
81
82 /*
83 * Routine to set that flag.
84 *
85 * This is intended for libpcap developers, not for general use.
86 * If you want to set these in a program, you'll have to declare this
87 * routine yourself, with the appropriate DLL import attribute on Windows;
88 * it's not declared in any header file, and won't be declared in any
89 * header file provided by libpcap.
90 */
91 PCAP_API void pcap_set_print_dot_graph(int value);
92
93 PCAP_API_DEF void
94 pcap_set_print_dot_graph(int value)
95 {
96 pcap_print_dot_graph = value;
97 }
98
99 #endif
100
101 /*
102 * lowest_set_bit().
103 *
104 * Takes a 32-bit integer as an argument.
105 *
106 * If handed a non-zero value, returns the index of the lowest set bit,
107 * counting upwards from zero.
108 *
109 * If handed zero, the results are platform- and compiler-dependent.
110 * Keep it out of the light, don't give it any water, don't feed it
111 * after midnight, and don't pass zero to it.
112 *
113 * This is the same as the count of trailing zeroes in the word.
114 */
115 #if PCAP_IS_AT_LEAST_GNUC_VERSION(3,4)
116 /*
117 * GCC 3.4 and later; we have __builtin_ctz().
118 */
119 #define lowest_set_bit(mask) ((u_int)__builtin_ctz(mask))
120 #elif defined(_MSC_VER)
121 /*
122 * Visual Studio; we support only 2005 and later, so use
123 * _BitScanForward().
124 */
125 #include <intrin.h>
126
127 #ifndef __clang__
128 #pragma intrinsic(_BitScanForward)
129 #endif
130
131 static __forceinline u_int
132 lowest_set_bit(int mask)
133 {
134 unsigned long bit;
135
136 /*
137 * Don't sign-extend mask if long is longer than int.
138 * (It's currently not, in MSVC, even on 64-bit platforms, but....)
139 */
140 if (_BitScanForward(&bit, (unsigned int)mask) == 0)
141 abort(); /* mask is zero */
142 return (u_int)bit;
143 }
144 #else
145 /*
146 * None of the above.
147 * Use a perfect-hash-function-based function.
148 */
149 static u_int
150 lowest_set_bit(int mask)
151 {
152 unsigned int v = (unsigned int)mask;
153
154 static const u_int MultiplyDeBruijnBitPosition[32] = {
155 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
156 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
157 };
158
159 /*
160 * We strip off all but the lowermost set bit (v & ~v),
161 * and perform a minimal perfect hash on it to look up the
162 * number of low-order zero bits in a table.
163 *
164 * See:
165 *
166 * http://7ooo.mooo.com/text/ComputingTrailingZerosHOWTO.pdf
167 *
168 * http://supertech.csail.mit.edu/papers/debruijn.pdf
169 */
170 return (MultiplyDeBruijnBitPosition[((v & -v) * 0x077CB531U) >> 27]);
171 }
172 #endif
173
174 /*
175 * Represents a deleted instruction.
176 */
177 #define NOP -1
178
179 /*
180 * Register numbers for use-def values.
181 * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
182 * location. A_ATOM is the accumulator and X_ATOM is the index
183 * register.
184 */
185 #define A_ATOM BPF_MEMWORDS
186 #define X_ATOM (BPF_MEMWORDS+1)
187
188 /*
189 * This define is used to represent *both* the accumulator and
190 * x register in use-def computations.
191 * Currently, the use-def code assumes only one definition per instruction.
192 */
193 #define AX_ATOM N_ATOMS
194
195 /*
196 * These data structures are used in a Cocke and Schwartz style
197 * value numbering scheme. Since the flowgraph is acyclic,
198 * exit values can be propagated from a node's predecessors
199 * provided it is uniquely defined.
200 */
201 struct valnode {
202 int code;
203 bpf_u_int32 v0, v1;
204 int val; /* the value number */
205 struct valnode *next;
206 };
207
208 /* Integer constants mapped with the load immediate opcode. */
209 #define K(i) F(opt_state, BPF_LD|BPF_IMM|BPF_W, i, 0U)
210
211 struct vmapinfo {
212 int is_const;
213 bpf_u_int32 const_val;
214 };
215
216 typedef struct {
217 /*
218 * Place to longjmp to on an error.
219 */
220 jmp_buf top_ctx;
221
222 /*
223 * The buffer into which to put error message.
224 */
225 char *errbuf;
226
227 /*
228 * A flag to indicate that further optimization is needed.
229 * Iterative passes are continued until a given pass yields no
230 * code simplification or branch movement.
231 */
232 int done;
233
234 /*
235 * XXX - detect loops that do nothing but repeated AND/OR pullups
236 * and edge moves.
237 * If 100 passes in a row do nothing but that, treat that as a
238 * sign that we're in a loop that just shuffles in a cycle in
239 * which each pass just shuffles the code and we eventually
240 * get back to the original configuration.
241 *
242 * XXX - we need a non-heuristic way of detecting, or preventing,
243 * such a cycle.
244 */
245 int non_branch_movement_performed;
246
247 u_int n_blocks; /* number of blocks in the CFG; guaranteed to be > 0, as it's a RET instruction at a minimum */
248 struct block **blocks;
249 u_int n_edges; /* twice n_blocks, so guaranteed to be > 0 */
250 struct edge **edges;
251
252 /*
253 * A bit vector set representation of the dominators.
254 * We round up the set size to the next power of two.
255 */
256 u_int nodewords; /* number of 32-bit words for a bit vector of "number of nodes" bits; guaranteed to be > 0 */
257 u_int edgewords; /* number of 32-bit words for a bit vector of "number of edges" bits; guaranteed to be > 0 */
258 struct block **levels;
259 bpf_u_int32 *space;
260
261 #define BITS_PER_WORD (8*sizeof(bpf_u_int32))
262 /*
263 * True if a is in uset {p}
264 */
265 #define SET_MEMBER(p, a) \
266 ((p)[(unsigned)(a) / BITS_PER_WORD] & ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD)))
267
268 /*
269 * Add 'a' to uset p.
270 */
271 #define SET_INSERT(p, a) \
272 (p)[(unsigned)(a) / BITS_PER_WORD] |= ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
273
274 /*
275 * Delete 'a' from uset p.
276 */
277 #define SET_DELETE(p, a) \
278 (p)[(unsigned)(a) / BITS_PER_WORD] &= ~((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
279
280 /*
281 * a := a intersect b
282 * n must be guaranteed to be > 0
283 */
284 #define SET_INTERSECT(a, b, n)\
285 {\
286 register bpf_u_int32 *_x = a, *_y = b;\
287 register u_int _n = n;\
288 do *_x++ &= *_y++; while (--_n != 0);\
289 }
290
291 /*
292 * a := a - b
293 * n must be guaranteed to be > 0
294 */
295 #define SET_SUBTRACT(a, b, n)\
296 {\
297 register bpf_u_int32 *_x = a, *_y = b;\
298 register u_int _n = n;\
299 do *_x++ &=~ *_y++; while (--_n != 0);\
300 }
301
302 /*
303 * a := a union b
304 * n must be guaranteed to be > 0
305 */
306 #define SET_UNION(a, b, n)\
307 {\
308 register bpf_u_int32 *_x = a, *_y = b;\
309 register u_int _n = n;\
310 do *_x++ |= *_y++; while (--_n != 0);\
311 }
312
313 uset all_dom_sets;
314 uset all_closure_sets;
315 uset all_edge_sets;
316
317 #define MODULUS 213
318 struct valnode *hashtbl[MODULUS];
319 bpf_u_int32 curval;
320 bpf_u_int32 maxval;
321
322 struct vmapinfo *vmap;
323 struct valnode *vnode_base;
324 struct valnode *next_vnode;
325 } opt_state_t;
326
327 typedef struct {
328 /*
329 * Place to longjmp to on an error.
330 */
331 jmp_buf top_ctx;
332
333 /*
334 * The buffer into which to put error message.
335 */
336 char *errbuf;
337
338 /*
339 * Some pointers used to convert the basic block form of the code,
340 * into the array form that BPF requires. 'fstart' will point to
341 * the malloc'd array while 'ftail' is used during the recursive
342 * traversal.
343 */
344 struct bpf_insn *fstart;
345 struct bpf_insn *ftail;
346 } conv_state_t;
347
348 static void opt_init(opt_state_t *, struct icode *);
349 static void opt_cleanup(opt_state_t *);
350 static void PCAP_NORETURN opt_error(opt_state_t *, const char *, ...)
351 PCAP_PRINTFLIKE(2, 3);
352
353 static void intern_blocks(opt_state_t *, struct icode *);
354
355 static void find_inedges(opt_state_t *, struct block *);
356 #ifdef BDEBUG
357 static void opt_dump(opt_state_t *, struct icode *);
358 #endif
359
360 #ifndef MAX
361 #define MAX(a,b) ((a)>(b)?(a):(b))
362 #endif
363
364 static void
365 find_levels_r(opt_state_t *opt_state, struct icode *ic, struct block *b)
366 {
367 int level;
368
369 if (isMarked(ic, b))
370 return;
371
372 Mark(ic, b);
373 b->link = 0;
374
375 if (JT(b)) {
376 find_levels_r(opt_state, ic, JT(b));
377 find_levels_r(opt_state, ic, JF(b));
378 level = MAX(JT(b)->level, JF(b)->level) + 1;
379 } else
380 level = 0;
381 b->level = level;
382 b->link = opt_state->levels[level];
383 opt_state->levels[level] = b;
384 }
385
386 /*
387 * Level graph. The levels go from 0 at the leaves to
388 * N_LEVELS at the root. The opt_state->levels[] array points to the
389 * first node of the level list, whose elements are linked
390 * with the 'link' field of the struct block.
391 */
392 static void
393 find_levels(opt_state_t *opt_state, struct icode *ic)
394 {
395 memset((char *)opt_state->levels, 0, opt_state->n_blocks * sizeof(*opt_state->levels));
396 unMarkAll(ic);
397 find_levels_r(opt_state, ic, ic->root);
398 }
399
400 /*
401 * Find dominator relationships.
402 * Assumes graph has been leveled.
403 */
404 static void
405 find_dom(opt_state_t *opt_state, struct block *root)
406 {
407 u_int i;
408 int level;
409 struct block *b;
410 bpf_u_int32 *x;
411
412 /*
413 * Initialize sets to contain all nodes.
414 */
415 x = opt_state->all_dom_sets;
416 /*
417 * In opt_init(), we've made sure the product doesn't overflow.
418 */
419 i = opt_state->n_blocks * opt_state->nodewords;
420 while (i != 0) {
421 --i;
422 *x++ = 0xFFFFFFFFU;
423 }
424 /* Root starts off empty. */
425 for (i = opt_state->nodewords; i != 0;) {
426 --i;
427 root->dom[i] = 0;
428 }
429
430 /* root->level is the highest level no found. */
431 for (level = root->level; level >= 0; --level) {
432 for (b = opt_state->levels[level]; b; b = b->link) {
433 SET_INSERT(b->dom, b->id);
434 if (JT(b) == 0)
435 continue;
436 SET_INTERSECT(JT(b)->dom, b->dom, opt_state->nodewords);
437 SET_INTERSECT(JF(b)->dom, b->dom, opt_state->nodewords);
438 }
439 }
440 }
441
442 static void
443 propedom(opt_state_t *opt_state, struct edge *ep)
444 {
445 SET_INSERT(ep->edom, ep->id);
446 if (ep->succ) {
447 SET_INTERSECT(ep->succ->et.edom, ep->edom, opt_state->edgewords);
448 SET_INTERSECT(ep->succ->ef.edom, ep->edom, opt_state->edgewords);
449 }
450 }
451
452 /*
453 * Compute edge dominators.
454 * Assumes graph has been leveled and predecessors established.
455 */
456 static void
457 find_edom(opt_state_t *opt_state, struct block *root)
458 {
459 u_int i;
460 uset x;
461 int level;
462 struct block *b;
463
464 x = opt_state->all_edge_sets;
465 /*
466 * In opt_init(), we've made sure the product doesn't overflow.
467 */
468 for (i = opt_state->n_edges * opt_state->edgewords; i != 0; ) {
469 --i;
470 x[i] = 0xFFFFFFFFU;
471 }
472
473 /* root->level is the highest level no found. */
474 memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
475 memset(root->ef.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
476 for (level = root->level; level >= 0; --level) {
477 for (b = opt_state->levels[level]; b != 0; b = b->link) {
478 propedom(opt_state, &b->et);
479 propedom(opt_state, &b->ef);
480 }
481 }
482 }
483
484 /*
485 * Find the backwards transitive closure of the flow graph. These sets
486 * are backwards in the sense that we find the set of nodes that reach
487 * a given node, not the set of nodes that can be reached by a node.
488 *
489 * Assumes graph has been leveled.
490 */
491 static void
492 find_closure(opt_state_t *opt_state, struct block *root)
493 {
494 int level;
495 struct block *b;
496
497 /*
498 * Initialize sets to contain no nodes.
499 */
500 memset((char *)opt_state->all_closure_sets, 0,
501 opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->all_closure_sets));
502
503 /* root->level is the highest level no found. */
504 for (level = root->level; level >= 0; --level) {
505 for (b = opt_state->levels[level]; b; b = b->link) {
506 SET_INSERT(b->closure, b->id);
507 if (JT(b) == 0)
508 continue;
509 SET_UNION(JT(b)->closure, b->closure, opt_state->nodewords);
510 SET_UNION(JF(b)->closure, b->closure, opt_state->nodewords);
511 }
512 }
513 }
514
515 /*
516 * Return the register number that is used by s.
517 *
518 * Returns ATOM_A if A is used, ATOM_X if X is used, AX_ATOM if both A and X
519 * are used, the scratch memory location's number if a scratch memory
520 * location is used (e.g., 0 for M[0]), or -1 if none of those are used.
521 *
522 * The implementation should probably change to an array access.
523 */
524 static int
525 atomuse(struct stmt *s)
526 {
527 register int c = s->code;
528
529 if (c == NOP)
530 return -1;
531
532 switch (BPF_CLASS(c)) {
533
534 case BPF_RET:
535 return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
536 (BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;
537
538 case BPF_LD:
539 case BPF_LDX:
540 /*
541 * As there are fewer than 2^31 memory locations,
542 * s->k should be convertible to int without problems.
543 */
544 return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
545 (BPF_MODE(c) == BPF_MEM) ? (int)s->k : -1;
546
547 case BPF_ST:
548 return A_ATOM;
549
550 case BPF_STX:
551 return X_ATOM;
552
553 case BPF_JMP:
554 case BPF_ALU:
555 if (BPF_SRC(c) == BPF_X)
556 return AX_ATOM;
557 return A_ATOM;
558
559 case BPF_MISC:
560 return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
561 }
562 abort();
563 /* NOTREACHED */
564 }
565
566 /*
567 * Return the register number that is defined by 's'. We assume that
568 * a single stmt cannot define more than one register. If no register
569 * is defined, return -1.
570 *
571 * The implementation should probably change to an array access.
572 */
573 static int
574 atomdef(struct stmt *s)
575 {
576 if (s->code == NOP)
577 return -1;
578
579 switch (BPF_CLASS(s->code)) {
580
581 case BPF_LD:
582 case BPF_ALU:
583 return A_ATOM;
584
585 case BPF_LDX:
586 return X_ATOM;
587
588 case BPF_ST:
589 case BPF_STX:
590 return s->k;
591
592 case BPF_MISC:
593 return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
594 }
595 return -1;
596 }
597
598 /*
599 * Compute the sets of registers used, defined, and killed by 'b'.
600 *
601 * "Used" means that a statement in 'b' uses the register before any
602 * statement in 'b' defines it, i.e. it uses the value left in
603 * that register by a predecessor block of this block.
604 * "Defined" means that a statement in 'b' defines it.
605 * "Killed" means that a statement in 'b' defines it before any
606 * statement in 'b' uses it, i.e. it kills the value left in that
607 * register by a predecessor block of this block.
608 */
609 static void
610 compute_local_ud(struct block *b)
611 {
612 struct slist *s;
613 atomset def = 0, use = 0, killed = 0;
614 int atom;
615
616 for (s = b->stmts; s; s = s->next) {
617 if (s->s.code == NOP)
618 continue;
619 atom = atomuse(&s->s);
620 if (atom >= 0) {
621 if (atom == AX_ATOM) {
622 if (!ATOMELEM(def, X_ATOM))
623 use |= ATOMMASK(X_ATOM);
624 if (!ATOMELEM(def, A_ATOM))
625 use |= ATOMMASK(A_ATOM);
626 }
627 else if (atom < N_ATOMS) {
628 if (!ATOMELEM(def, atom))
629 use |= ATOMMASK(atom);
630 }
631 else
632 abort();
633 }
634 atom = atomdef(&s->s);
635 if (atom >= 0) {
636 if (!ATOMELEM(use, atom))
637 killed |= ATOMMASK(atom);
638 def |= ATOMMASK(atom);
639 }
640 }
641 if (BPF_CLASS(b->s.code) == BPF_JMP) {
642 /*
643 * XXX - what about RET?
644 */
645 atom = atomuse(&b->s);
646 if (atom >= 0) {
647 if (atom == AX_ATOM) {
648 if (!ATOMELEM(def, X_ATOM))
649 use |= ATOMMASK(X_ATOM);
650 if (!ATOMELEM(def, A_ATOM))
651 use |= ATOMMASK(A_ATOM);
652 }
653 else if (atom < N_ATOMS) {
654 if (!ATOMELEM(def, atom))
655 use |= ATOMMASK(atom);
656 }
657 else
658 abort();
659 }
660 }
661
662 b->def = def;
663 b->kill = killed;
664 b->in_use = use;
665 }
666
667 /*
668 * Assume graph is already leveled.
669 */
670 static void
671 find_ud(opt_state_t *opt_state, struct block *root)
672 {
673 int i, maxlevel;
674 struct block *p;
675
676 /*
677 * root->level is the highest level no found;
678 * count down from there.
679 */
680 maxlevel = root->level;
681 for (i = maxlevel; i >= 0; --i)
682 for (p = opt_state->levels[i]; p; p = p->link) {
683 compute_local_ud(p);
684 p->out_use = 0;
685 }
686
687 for (i = 1; i <= maxlevel; ++i) {
688 for (p = opt_state->levels[i]; p; p = p->link) {
689 p->out_use |= JT(p)->in_use | JF(p)->in_use;
690 p->in_use |= p->out_use &~ p->kill;
691 }
692 }
693 }
694 static void
695 init_val(opt_state_t *opt_state)
696 {
697 opt_state->curval = 0;
698 opt_state->next_vnode = opt_state->vnode_base;
699 memset((char *)opt_state->vmap, 0, opt_state->maxval * sizeof(*opt_state->vmap));
700 memset((char *)opt_state->hashtbl, 0, sizeof opt_state->hashtbl);
701 }
702
703 /*
704 * Because we really don't have an IR, this stuff is a little messy.
705 *
706 * This routine looks in the table of existing value number for a value
707 * with generated from an operation with the specified opcode and
708 * the specified values. If it finds it, it returns its value number,
709 * otherwise it makes a new entry in the table and returns the
710 * value number of that entry.
711 */
712 static bpf_u_int32
713 F(opt_state_t *opt_state, int code, bpf_u_int32 v0, bpf_u_int32 v1)
714 {
715 u_int hash;
716 bpf_u_int32 val;
717 struct valnode *p;
718
719 hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
720 hash %= MODULUS;
721
722 for (p = opt_state->hashtbl[hash]; p; p = p->next)
723 if (p->code == code && p->v0 == v0 && p->v1 == v1)
724 return p->val;
725
726 /*
727 * Not found. Allocate a new value, and assign it a new
728 * value number.
729 *
730 * opt_state->curval starts out as 0, which means VAL_UNKNOWN; we
731 * increment it before using it as the new value number, which
732 * means we never assign VAL_UNKNOWN.
733 *
734 * XXX - unless we overflow, but we probably won't have 2^32-1
735 * values; we treat 32 bits as effectively infinite.
736 */
737 val = ++opt_state->curval;
738 if (BPF_MODE(code) == BPF_IMM &&
739 (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
740 opt_state->vmap[val].const_val = v0;
741 opt_state->vmap[val].is_const = 1;
742 }
743 p = opt_state->next_vnode++;
744 p->val = val;
745 p->code = code;
746 p->v0 = v0;
747 p->v1 = v1;
748 p->next = opt_state->hashtbl[hash];
749 opt_state->hashtbl[hash] = p;
750
751 return val;
752 }
753
754 static inline void
755 vstore(struct stmt *s, bpf_u_int32 *valp, bpf_u_int32 newval, int alter)
756 {
757 if (alter && newval != VAL_UNKNOWN && *valp == newval)
758 s->code = NOP;
759 else
760 *valp = newval;
761 }
762
763 /*
764 * Do constant-folding on binary operators.
765 * (Unary operators are handled elsewhere.)
766 */
767 static void
768 fold_op(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 v0, bpf_u_int32 v1)
769 {
770 bpf_u_int32 a, b;
771
772 a = opt_state->vmap[v0].const_val;
773 b = opt_state->vmap[v1].const_val;
774
775 switch (BPF_OP(s->code)) {
776 case BPF_ADD:
777 a += b;
778 break;
779
780 case BPF_SUB:
781 a -= b;
782 break;
783
784 case BPF_MUL:
785 a *= b;
786 break;
787
788 case BPF_DIV:
789 if (b == 0)
790 opt_error(opt_state, "division by zero");
791 a /= b;
792 break;
793
794 case BPF_MOD:
795 if (b == 0)
796 opt_error(opt_state, "modulus by zero");
797 a %= b;
798 break;
799
800 case BPF_AND:
801 a &= b;
802 break;
803
804 case BPF_OR:
805 a |= b;
806 break;
807
808 case BPF_XOR:
809 a ^= b;
810 break;
811
812 case BPF_LSH:
813 /*
814 * A left shift of more than the width of the type
815 * is undefined in C; we'll just treat it as shifting
816 * all the bits out.
817 *
818 * XXX - the BPF interpreter doesn't check for this,
819 * so its behavior is dependent on the behavior of
820 * the processor on which it's running. There are
821 * processors on which it shifts all the bits out
822 * and processors on which it does no shift.
823 */
824 if (b < 32)
825 a <<= b;
826 else
827 a = 0;
828 break;
829
830 case BPF_RSH:
831 /*
832 * A right shift of more than the width of the type
833 * is undefined in C; we'll just treat it as shifting
834 * all the bits out.
835 *
836 * XXX - the BPF interpreter doesn't check for this,
837 * so its behavior is dependent on the behavior of
838 * the processor on which it's running. There are
839 * processors on which it shifts all the bits out
840 * and processors on which it does no shift.
841 */
842 if (b < 32)
843 a >>= b;
844 else
845 a = 0;
846 break;
847
848 default:
849 abort();
850 }
851 s->k = a;
852 s->code = BPF_LD|BPF_IMM;
853 /*
854 * XXX - optimizer loop detection.
855 */
856 opt_state->non_branch_movement_performed = 1;
857 opt_state->done = 0;
858 }
859
860 static inline struct slist *
861 this_op(struct slist *s)
862 {
863 while (s != 0 && s->s.code == NOP)
864 s = s->next;
865 return s;
866 }
867
868 static void
869 opt_not(struct block *b)
870 {
871 struct block *tmp = JT(b);
872
873 JT(b) = JF(b);
874 JF(b) = tmp;
875 }
876
877 static void
878 opt_peep(opt_state_t *opt_state, struct block *b)
879 {
880 struct slist *s;
881 struct slist *next, *last;
882 bpf_u_int32 val;
883
884 s = b->stmts;
885 if (s == 0)
886 return;
887
888 last = s;
889 for (/*empty*/; /*empty*/; s = next) {
890 /*
891 * Skip over nops.
892 */
893 s = this_op(s);
894 if (s == 0)
895 break; /* nothing left in the block */
896
897 /*
898 * Find the next real instruction after that one
899 * (skipping nops).
900 */
901 next = this_op(s->next);
902 if (next == 0)
903 break; /* no next instruction */
904 last = next;
905
906 /*
907 * st M[k] --> st M[k]
908 * ldx M[k] tax
909 */
910 if (s->s.code == BPF_ST &&
911 next->s.code == (BPF_LDX|BPF_MEM) &&
912 s->s.k == next->s.k) {
913 /*
914 * XXX - optimizer loop detection.
915 */
916 opt_state->non_branch_movement_performed = 1;
917 opt_state->done = 0;
918 next->s.code = BPF_MISC|BPF_TAX;
919 }
920 /*
921 * ld #k --> ldx #k
922 * tax txa
923 */
924 if (s->s.code == (BPF_LD|BPF_IMM) &&
925 next->s.code == (BPF_MISC|BPF_TAX)) {
926 s->s.code = BPF_LDX|BPF_IMM;
927 next->s.code = BPF_MISC|BPF_TXA;
928 /*
929 * XXX - optimizer loop detection.
930 */
931 opt_state->non_branch_movement_performed = 1;
932 opt_state->done = 0;
933 }
934 /*
935 * This is an ugly special case, but it happens
936 * when you say tcp[k] or udp[k] where k is a constant.
937 */
938 if (s->s.code == (BPF_LD|BPF_IMM)) {
939 struct slist *add, *tax, *ild;
940
941 /*
942 * Check that X isn't used on exit from this
943 * block (which the optimizer might cause).
944 * We know the code generator won't generate
945 * any local dependencies.
946 */
947 if (ATOMELEM(b->out_use, X_ATOM))
948 continue;
949
950 /*
951 * Check that the instruction following the ldi
952 * is an addx, or it's an ldxms with an addx
953 * following it (with 0 or more nops between the
954 * ldxms and addx).
955 */
956 if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
957 add = next;
958 else
959 add = this_op(next->next);
960 if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
961 continue;
962
963 /*
964 * Check that a tax follows that (with 0 or more
965 * nops between them).
966 */
967 tax = this_op(add->next);
968 if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
969 continue;
970
971 /*
972 * Check that an ild follows that (with 0 or more
973 * nops between them).
974 */
975 ild = this_op(tax->next);
976 if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
977 BPF_MODE(ild->s.code) != BPF_IND)
978 continue;
979 /*
980 * We want to turn this sequence:
981 *
982 * (004) ldi #0x2 {s}
983 * (005) ldxms [14] {next} -- optional
984 * (006) addx {add}
985 * (007) tax {tax}
986 * (008) ild [x+0] {ild}
987 *
988 * into this sequence:
989 *
990 * (004) nop
991 * (005) ldxms [14]
992 * (006) nop
993 * (007) nop
994 * (008) ild [x+2]
995 *
996 * XXX We need to check that X is not
997 * subsequently used, because we want to change
998 * what'll be in it after this sequence.
999 *
1000 * We know we can eliminate the accumulator
1001 * modifications earlier in the sequence since
1002 * it is defined by the last stmt of this sequence
1003 * (i.e., the last statement of the sequence loads
1004 * a value into the accumulator, so we can eliminate
1005 * earlier operations on the accumulator).
1006 */
1007 ild->s.k += s->s.k;
1008 s->s.code = NOP;
1009 add->s.code = NOP;
1010 tax->s.code = NOP;
1011 /*
1012 * XXX - optimizer loop detection.
1013 */
1014 opt_state->non_branch_movement_performed = 1;
1015 opt_state->done = 0;
1016 }
1017 }
1018 /*
1019 * If the comparison at the end of a block is an equality
1020 * comparison against a constant, and nobody uses the value
1021 * we leave in the A register at the end of a block, and
1022 * the operation preceding the comparison is an arithmetic
1023 * operation, we can sometime optimize it away.
1024 */
1025 if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
1026 !ATOMELEM(b->out_use, A_ATOM)) {
1027 /*
1028 * We can optimize away certain subtractions of the
1029 * X register.
1030 */
1031 if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
1032 val = b->val[X_ATOM];
1033 if (opt_state->vmap[val].is_const) {
1034 /*
1035 * If we have a subtract to do a comparison,
1036 * and the X register is a known constant,
1037 * we can merge this value into the
1038 * comparison:
1039 *
1040 * sub x -> nop
1041 * jeq #y jeq #(x+y)
1042 */
1043 b->s.k += opt_state->vmap[val].const_val;
1044 last->s.code = NOP;
1045 /*
1046 * XXX - optimizer loop detection.
1047 */
1048 opt_state->non_branch_movement_performed = 1;
1049 opt_state->done = 0;
1050 } else if (b->s.k == 0) {
1051 /*
1052 * If the X register isn't a constant,
1053 * and the comparison in the test is
1054 * against 0, we can compare with the
1055 * X register, instead:
1056 *
1057 * sub x -> nop
1058 * jeq #0 jeq x
1059 */
1060 last->s.code = NOP;
1061 b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
1062 /*
1063 * XXX - optimizer loop detection.
1064 */
1065 opt_state->non_branch_movement_performed = 1;
1066 opt_state->done = 0;
1067 }
1068 }
1069 /*
1070 * Likewise, a constant subtract can be simplified:
1071 *
1072 * sub #x -> nop
1073 * jeq #y -> jeq #(x+y)
1074 */
1075 else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
1076 last->s.code = NOP;
1077 b->s.k += last->s.k;
1078 /*
1079 * XXX - optimizer loop detection.
1080 */
1081 opt_state->non_branch_movement_performed = 1;
1082 opt_state->done = 0;
1083 }
1084 /*
1085 * And, similarly, a constant AND can be simplified
1086 * if we're testing against 0, i.e.:
1087 *
1088 * and #k nop
1089 * jeq #0 -> jset #k
1090 */
1091 else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
1092 b->s.k == 0) {
1093 b->s.k = last->s.k;
1094 b->s.code = BPF_JMP|BPF_K|BPF_JSET;
1095 last->s.code = NOP;
1096 /*
1097 * XXX - optimizer loop detection.
1098 */
1099 opt_state->non_branch_movement_performed = 1;
1100 opt_state->done = 0;
1101 opt_not(b);
1102 }
1103 }
1104 /*
1105 * jset #0 -> never
1106 * jset #ffffffff -> always
1107 */
1108 if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
1109 if (b->s.k == 0)
1110 JT(b) = JF(b);
1111 if (b->s.k == 0xffffffffU)
1112 JF(b) = JT(b);
1113 }
1114 /*
1115 * If we're comparing against the index register, and the index
1116 * register is a known constant, we can just compare against that
1117 * constant.
1118 */
1119 val = b->val[X_ATOM];
1120 if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
1121 bpf_u_int32 v = opt_state->vmap[val].const_val;
1122 b->s.code &= ~BPF_X;
1123 b->s.k = v;
1124 }
1125 /*
1126 * If the accumulator is a known constant, we can compute the
1127 * comparison result.
1128 */
1129 val = b->val[A_ATOM];
1130 if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
1131 bpf_u_int32 v = opt_state->vmap[val].const_val;
1132 switch (BPF_OP(b->s.code)) {
1133
1134 case BPF_JEQ:
1135 v = v == b->s.k;
1136 break;
1137
1138 case BPF_JGT:
1139 v = v > b->s.k;
1140 break;
1141
1142 case BPF_JGE:
1143 v = v >= b->s.k;
1144 break;
1145
1146 case BPF_JSET:
1147 v &= b->s.k;
1148 break;
1149
1150 default:
1151 abort();
1152 }
1153 if (JF(b) != JT(b)) {
1154 /*
1155 * XXX - optimizer loop detection.
1156 */
1157 opt_state->non_branch_movement_performed = 1;
1158 opt_state->done = 0;
1159 }
1160 if (v)
1161 JF(b) = JT(b);
1162 else
1163 JT(b) = JF(b);
1164 }
1165 }
1166
1167 /*
1168 * Compute the symbolic value of expression of 's', and update
1169 * anything it defines in the value table 'val'. If 'alter' is true,
1170 * do various optimizations. This code would be cleaner if symbolic
1171 * evaluation and code transformations weren't folded together.
1172 */
1173 static void
1174 opt_stmt(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 val[], int alter)
1175 {
1176 int op;
1177 bpf_u_int32 v;
1178
1179 switch (s->code) {
1180
1181 case BPF_LD|BPF_ABS|BPF_W:
1182 case BPF_LD|BPF_ABS|BPF_H:
1183 case BPF_LD|BPF_ABS|BPF_B:
1184 v = F(opt_state, s->code, s->k, 0L);
1185 vstore(s, &val[A_ATOM], v, alter);
1186 break;
1187
1188 case BPF_LD|BPF_IND|BPF_W:
1189 case BPF_LD|BPF_IND|BPF_H:
1190 case BPF_LD|BPF_IND|BPF_B:
1191 v = val[X_ATOM];
1192 if (alter && opt_state->vmap[v].is_const) {
1193 s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
1194 s->k += opt_state->vmap[v].const_val;
1195 v = F(opt_state, s->code, s->k, 0L);
1196 /*
1197 * XXX - optimizer loop detection.
1198 */
1199 opt_state->non_branch_movement_performed = 1;
1200 opt_state->done = 0;
1201 }
1202 else
1203 v = F(opt_state, s->code, s->k, v);
1204 vstore(s, &val[A_ATOM], v, alter);
1205 break;
1206
1207 case BPF_LD|BPF_LEN:
1208 v = F(opt_state, s->code, 0L, 0L);
1209 vstore(s, &val[A_ATOM], v, alter);
1210 break;
1211
1212 case BPF_LD|BPF_IMM:
1213 v = K(s->k);
1214 vstore(s, &val[A_ATOM], v, alter);
1215 break;
1216
1217 case BPF_LDX|BPF_IMM:
1218 v = K(s->k);
1219 vstore(s, &val[X_ATOM], v, alter);
1220 break;
1221
1222 case BPF_LDX|BPF_MSH|BPF_B:
1223 v = F(opt_state, s->code, s->k, 0L);
1224 vstore(s, &val[X_ATOM], v, alter);
1225 break;
1226
1227 case BPF_ALU|BPF_NEG:
1228 if (alter && opt_state->vmap[val[A_ATOM]].is_const) {
1229 s->code = BPF_LD|BPF_IMM;
1230 /*
1231 * Do this negation as unsigned arithmetic; that's
1232 * what modern BPF engines do, and it guarantees
1233 * that all possible values can be negated. (Yeah,
1234 * negating 0x80000000, the minimum signed 32-bit
1235 * two's-complement value, results in 0x80000000,
1236 * so it's still negative, but we *should* be doing
1237 * all unsigned arithmetic here, to match what
1238 * modern BPF engines do.)
1239 *
1240 * Express it as 0U - (unsigned value) so that we
1241 * don't get compiler warnings about negating an
1242 * unsigned value and don't get UBSan warnings
1243 * about the result of negating 0x80000000 being
1244 * undefined.
1245 */
1246 s->k = 0U - opt_state->vmap[val[A_ATOM]].const_val;
1247 val[A_ATOM] = K(s->k);
1248 }
1249 else
1250 val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], 0L);
1251 break;
1252
1253 case BPF_ALU|BPF_ADD|BPF_K:
1254 case BPF_ALU|BPF_SUB|BPF_K:
1255 case BPF_ALU|BPF_MUL|BPF_K:
1256 case BPF_ALU|BPF_DIV|BPF_K:
1257 case BPF_ALU|BPF_MOD|BPF_K:
1258 case BPF_ALU|BPF_AND|BPF_K:
1259 case BPF_ALU|BPF_OR|BPF_K:
1260 case BPF_ALU|BPF_XOR|BPF_K:
1261 case BPF_ALU|BPF_LSH|BPF_K:
1262 case BPF_ALU|BPF_RSH|BPF_K:
1263 op = BPF_OP(s->code);
1264 if (alter) {
1265 if (s->k == 0) {
1266 /*
1267 * Optimize operations where the constant
1268 * is zero.
1269 *
1270 * Don't optimize away "sub #0"
1271 * as it may be needed later to
1272 * fixup the generated math code.
1273 *
1274 * Fail if we're dividing by zero or taking
1275 * a modulus by zero.
1276 */
1277 if (op == BPF_ADD ||
1278 op == BPF_LSH || op == BPF_RSH ||
1279 op == BPF_OR || op == BPF_XOR) {
1280 s->code = NOP;
1281 break;
1282 }
1283 if (op == BPF_MUL || op == BPF_AND) {
1284 s->code = BPF_LD|BPF_IMM;
1285 val[A_ATOM] = K(s->k);
1286 break;
1287 }
1288 if (op == BPF_DIV)
1289 opt_error(opt_state,
1290 "division by zero");
1291 if (op == BPF_MOD)
1292 opt_error(opt_state,
1293 "modulus by zero");
1294 }
1295 if (opt_state->vmap[val[A_ATOM]].is_const) {
1296 fold_op(opt_state, s, val[A_ATOM], K(s->k));
1297 val[A_ATOM] = K(s->k);
1298 break;
1299 }
1300 }
1301 val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], K(s->k));
1302 break;
1303
1304 case BPF_ALU|BPF_ADD|BPF_X:
1305 case BPF_ALU|BPF_SUB|BPF_X:
1306 case BPF_ALU|BPF_MUL|BPF_X:
1307 case BPF_ALU|BPF_DIV|BPF_X:
1308 case BPF_ALU|BPF_MOD|BPF_X:
1309 case BPF_ALU|BPF_AND|BPF_X:
1310 case BPF_ALU|BPF_OR|BPF_X:
1311 case BPF_ALU|BPF_XOR|BPF_X:
1312 case BPF_ALU|BPF_LSH|BPF_X:
1313 case BPF_ALU|BPF_RSH|BPF_X:
1314 op = BPF_OP(s->code);
1315 if (alter && opt_state->vmap[val[X_ATOM]].is_const) {
1316 if (opt_state->vmap[val[A_ATOM]].is_const) {
1317 fold_op(opt_state, s, val[A_ATOM], val[X_ATOM]);
1318 val[A_ATOM] = K(s->k);
1319 }
1320 else {
1321 s->code = BPF_ALU|BPF_K|op;
1322 s->k = opt_state->vmap[val[X_ATOM]].const_val;
1323 if ((op == BPF_LSH || op == BPF_RSH) &&
1324 s->k > 31)
1325 opt_error(opt_state,
1326 "shift by more than 31 bits");
1327 /*
1328 * XXX - optimizer loop detection.
1329 */
1330 opt_state->non_branch_movement_performed = 1;
1331 opt_state->done = 0;
1332 val[A_ATOM] =
1333 F(opt_state, s->code, val[A_ATOM], K(s->k));
1334 }
1335 break;
1336 }
1337 /*
1338 * Check if we're doing something to an accumulator
1339 * that is 0, and simplify. This may not seem like
1340 * much of a simplification but it could open up further
1341 * optimizations.
1342 * XXX We could also check for mul by 1, etc.
1343 */
1344 if (alter && opt_state->vmap[val[A_ATOM]].is_const
1345 && opt_state->vmap[val[A_ATOM]].const_val == 0) {
1346 if (op == BPF_ADD || op == BPF_OR || op == BPF_XOR) {
1347 s->code = BPF_MISC|BPF_TXA;
1348 vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1349 break;
1350 }
1351 else if (op == BPF_MUL || op == BPF_DIV || op == BPF_MOD ||
1352 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
1353 s->code = BPF_LD|BPF_IMM;
1354 s->k = 0;
1355 vstore(s, &val[A_ATOM], K(s->k), alter);
1356 break;
1357 }
1358 else if (op == BPF_NEG) {
1359 s->code = NOP;
1360 break;
1361 }
1362 }
1363 val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], val[X_ATOM]);
1364 break;
1365
1366 case BPF_MISC|BPF_TXA:
1367 vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1368 break;
1369
1370 case BPF_LD|BPF_MEM:
1371 v = val[s->k];
1372 if (alter && opt_state->vmap[v].is_const) {
1373 s->code = BPF_LD|BPF_IMM;
1374 s->k = opt_state->vmap[v].const_val;
1375 /*
1376 * XXX - optimizer loop detection.
1377 */
1378 opt_state->non_branch_movement_performed = 1;
1379 opt_state->done = 0;
1380 }
1381 vstore(s, &val[A_ATOM], v, alter);
1382 break;
1383
1384 case BPF_MISC|BPF_TAX:
1385 vstore(s, &val[X_ATOM], val[A_ATOM], alter);
1386 break;
1387
1388 case BPF_LDX|BPF_MEM:
1389 v = val[s->k];
1390 if (alter && opt_state->vmap[v].is_const) {
1391 s->code = BPF_LDX|BPF_IMM;
1392 s->k = opt_state->vmap[v].const_val;
1393 /*
1394 * XXX - optimizer loop detection.
1395 */
1396 opt_state->non_branch_movement_performed = 1;
1397 opt_state->done = 0;
1398 }
1399 vstore(s, &val[X_ATOM], v, alter);
1400 break;
1401
1402 case BPF_ST:
1403 vstore(s, &val[s->k], val[A_ATOM], alter);
1404 break;
1405
1406 case BPF_STX:
1407 vstore(s, &val[s->k], val[X_ATOM], alter);
1408 break;
1409 }
1410 }
1411
1412 static void
1413 deadstmt(opt_state_t *opt_state, register struct stmt *s, register struct stmt *last[])
1414 {
1415 register int atom;
1416
1417 atom = atomuse(s);
1418 if (atom >= 0) {
1419 if (atom == AX_ATOM) {
1420 last[X_ATOM] = 0;
1421 last[A_ATOM] = 0;
1422 }
1423 else
1424 last[atom] = 0;
1425 }
1426 atom = atomdef(s);
1427 if (atom >= 0) {
1428 if (last[atom]) {
1429 /*
1430 * XXX - optimizer loop detection.
1431 */
1432 opt_state->non_branch_movement_performed = 1;
1433 opt_state->done = 0;
1434 last[atom]->code = NOP;
1435 }
1436 last[atom] = s;
1437 }
1438 }
1439
1440 static void
1441 opt_deadstores(opt_state_t *opt_state, register struct block *b)
1442 {
1443 register struct slist *s;
1444 register int atom;
1445 struct stmt *last[N_ATOMS];
1446
1447 memset((char *)last, 0, sizeof last);
1448
1449 for (s = b->stmts; s != 0; s = s->next)
1450 deadstmt(opt_state, &s->s, last);
1451 deadstmt(opt_state, &b->s, last);
1452
1453 for (atom = 0; atom < N_ATOMS; ++atom)
1454 if (last[atom] && !ATOMELEM(b->out_use, atom)) {
1455 last[atom]->code = NOP;
1456 /*
1457 * XXX - optimizer loop detection.
1458 */
1459 opt_state->non_branch_movement_performed = 1;
1460 opt_state->done = 0;
1461 }
1462 }
1463
1464 static void
1465 opt_blk(opt_state_t *opt_state, struct block *b, int do_stmts)
1466 {
1467 struct slist *s;
1468 struct edge *p;
1469 int i;
1470 bpf_u_int32 aval, xval;
1471
1472 #if 0
1473 for (s = b->stmts; s && s->next; s = s->next)
1474 if (BPF_CLASS(s->s.code) == BPF_JMP) {
1475 do_stmts = 0;
1476 break;
1477 }
1478 #endif
1479
1480 /*
1481 * Initialize the atom values.
1482 */
1483 p = b->in_edges;
1484 if (p == 0) {
1485 /*
1486 * We have no predecessors, so everything is undefined
1487 * upon entry to this block.
1488 */
1489 memset((char *)b->val, 0, sizeof(b->val));
1490 } else {
1491 /*
1492 * Inherit values from our predecessors.
1493 *
1494 * First, get the values from the predecessor along the
1495 * first edge leading to this node.
1496 */
1497 memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
1498 /*
1499 * Now look at all the other nodes leading to this node.
1500 * If, for the predecessor along that edge, a register
1501 * has a different value from the one we have (i.e.,
1502 * control paths are merging, and the merging paths
1503 * assign different values to that register), give the
1504 * register the undefined value of 0.
1505 */
1506 while ((p = p->next) != NULL) {
1507 for (i = 0; i < N_ATOMS; ++i)
1508 if (b->val[i] != p->pred->val[i])
1509 b->val[i] = 0;
1510 }
1511 }
1512 aval = b->val[A_ATOM];
1513 xval = b->val[X_ATOM];
1514 for (s = b->stmts; s; s = s->next)
1515 opt_stmt(opt_state, &s->s, b->val, do_stmts);
1516
1517 /*
1518 * This is a special case: if we don't use anything from this
1519 * block, and we load the accumulator or index register with a
1520 * value that is already there, or if this block is a return,
1521 * eliminate all the statements.
1522 *
1523 * XXX - what if it does a store? Presumably that falls under
1524 * the heading of "if we don't use anything from this block",
1525 * i.e., if we use any memory location set to a different
1526 * value by this block, then we use something from this block.
1527 *
1528 * XXX - why does it matter whether we use anything from this
1529 * block? If the accumulator or index register doesn't change
1530 * its value, isn't that OK even if we use that value?
1531 *
1532 * XXX - if we load the accumulator with a different value,
1533 * and the block ends with a conditional branch, we obviously
1534 * can't eliminate it, as the branch depends on that value.
1535 * For the index register, the conditional branch only depends
1536 * on the index register value if the test is against the index
1537 * register value rather than a constant; if nothing uses the
1538 * value we put into the index register, and we're not testing
1539 * against the index register's value, and there aren't any
1540 * other problems that would keep us from eliminating this
1541 * block, can we eliminate it?
1542 */
1543 if (do_stmts &&
1544 ((b->out_use == 0 &&
1545 aval != VAL_UNKNOWN && b->val[A_ATOM] == aval &&
1546 xval != VAL_UNKNOWN && b->val[X_ATOM] == xval) ||
1547 BPF_CLASS(b->s.code) == BPF_RET)) {
1548 if (b->stmts != 0) {
1549 b->stmts = 0;
1550 /*
1551 * XXX - optimizer loop detection.
1552 */
1553 opt_state->non_branch_movement_performed = 1;
1554 opt_state->done = 0;
1555 }
1556 } else {
1557 opt_peep(opt_state, b);
1558 opt_deadstores(opt_state, b);
1559 }
1560 /*
1561 * Set up values for branch optimizer.
1562 */
1563 if (BPF_SRC(b->s.code) == BPF_K)
1564 b->oval = K(b->s.k);
1565 else
1566 b->oval = b->val[X_ATOM];
1567 b->et.code = b->s.code;
1568 b->ef.code = -b->s.code;
1569 }
1570
1571 /*
1572 * Return true if any register that is used on exit from 'succ', has
1573 * an exit value that is different from the corresponding exit value
1574 * from 'b'.
1575 */
1576 static int
1577 use_conflict(struct block *b, struct block *succ)
1578 {
1579 int atom;
1580 atomset use = succ->out_use;
1581
1582 if (use == 0)
1583 return 0;
1584
1585 for (atom = 0; atom < N_ATOMS; ++atom)
1586 if (ATOMELEM(use, atom))
1587 if (b->val[atom] != succ->val[atom])
1588 return 1;
1589 return 0;
1590 }
1591
1592 /*
1593 * Given a block that is the successor of an edge, and an edge that
1594 * dominates that edge, return either a pointer to a child of that
1595 * block (a block to which that block jumps) if that block is a
1596 * candidate to replace the successor of the latter edge or NULL
1597 * if neither of the children of the first block are candidates.
1598 */
1599 static struct block *
1600 fold_edge(struct block *child, struct edge *ep)
1601 {
1602 int sense;
1603 bpf_u_int32 aval0, aval1, oval0, oval1;
1604 int code = ep->code;
1605
1606 if (code < 0) {
1607 /*
1608 * This edge is a "branch if false" edge.
1609 */
1610 code = -code;
1611 sense = 0;
1612 } else {
1613 /*
1614 * This edge is a "branch if true" edge.
1615 */
1616 sense = 1;
1617 }
1618
1619 /*
1620 * If the opcode for the branch at the end of the block we
1621 * were handed isn't the same as the opcode for the branch
1622 * to which the edge we were handed corresponds, the tests
1623 * for those branches aren't testing the same conditions,
1624 * so the blocks to which the first block branches aren't
1625 * candidates to replace the successor of the edge.
1626 */
1627 if (child->s.code != code)
1628 return 0;
1629
1630 aval0 = child->val[A_ATOM];
1631 oval0 = child->oval;
1632 aval1 = ep->pred->val[A_ATOM];
1633 oval1 = ep->pred->oval;
1634
1635 /*
1636 * If the A register value on exit from the successor block
1637 * isn't the same as the A register value on exit from the
1638 * predecessor of the edge, the blocks to which the first
1639 * block branches aren't candidates to replace the successor
1640 * of the edge.
1641 */
1642 if (aval0 != aval1)
1643 return 0;
1644
1645 if (oval0 == oval1)
1646 /*
1647 * The operands of the branch instructions are
1648 * identical, so the branches are testing the
1649 * same condition, and the result is true if a true
1650 * branch was taken to get here, otherwise false.
1651 */
1652 return sense ? JT(child) : JF(child);
1653
1654 if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
1655 /*
1656 * At this point, we only know the comparison if we
1657 * came down the true branch, and it was an equality
1658 * comparison with a constant.
1659 *
1660 * I.e., if we came down the true branch, and the branch
1661 * was an equality comparison with a constant, we know the
1662 * accumulator contains that constant. If we came down
1663 * the false branch, or the comparison wasn't with a
1664 * constant, we don't know what was in the accumulator.
1665 *
1666 * We rely on the fact that distinct constants have distinct
1667 * value numbers.
1668 */
1669 return JF(child);
1670
1671 return 0;
1672 }
1673
1674 /*
1675 * If we can make this edge go directly to a child of the edge's current
1676 * successor, do so.
1677 */
1678 static void
1679 opt_j(opt_state_t *opt_state, struct edge *ep)
1680 {
1681 register u_int i, k;
1682 register struct block *target;
1683
1684 /*
1685 * Does this edge go to a block where, if the test
1686 * at the end of it succeeds, it goes to a block
1687 * that's a leaf node of the DAG, i.e. a return
1688 * statement?
1689 * If so, there's nothing to optimize.
1690 */
1691 if (JT(ep->succ) == 0)
1692 return;
1693
1694 /*
1695 * Does this edge go to a block that goes, in turn, to
1696 * the same block regardless of whether the test at the
1697 * end succeeds or fails?
1698 */
1699 if (JT(ep->succ) == JF(ep->succ)) {
1700 /*
1701 * Common branch targets can be eliminated, provided
1702 * there is no data dependency.
1703 *
1704 * Check whether any register used on exit from the
1705 * block to which the successor of this edge goes
1706 * has a value at that point that's different from
1707 * the value it has on exit from the predecessor of
1708 * this edge. If not, the predecessor of this edge
1709 * can just go to the block to which the successor
1710 * of this edge goes, bypassing the successor of this
1711 * edge, as the successor of this edge isn't doing
1712 * any calculations whose results are different
1713 * from what the blocks before it did and isn't
1714 * doing any tests the results of which matter.
1715 */
1716 if (!use_conflict(ep->pred, JT(ep->succ))) {
1717 /*
1718 * No, there isn't.
1719 * Make this edge go to the block to
1720 * which the successor of that edge
1721 * goes.
1722 *
1723 * XXX - optimizer loop detection.
1724 */
1725 opt_state->non_branch_movement_performed = 1;
1726 opt_state->done = 0;
1727 ep->succ = JT(ep->succ);
1728 }
1729 }
1730 /*
1731 * For each edge dominator that matches the successor of this
1732 * edge, promote the edge successor to the its grandchild.
1733 *
1734 * XXX We violate the set abstraction here in favor a reasonably
1735 * efficient loop.
1736 */
1737 top:
1738 for (i = 0; i < opt_state->edgewords; ++i) {
1739 /* i'th word in the bitset of dominators */
1740 register bpf_u_int32 x = ep->edom[i];
1741
1742 while (x != 0) {
1743 /* Find the next dominator in that word and mark it as found */
1744 k = lowest_set_bit(x);
1745 x &=~ ((bpf_u_int32)1 << k);
1746 k += i * BITS_PER_WORD;
1747
1748 target = fold_edge(ep->succ, opt_state->edges[k]);
1749 /*
1750 * We have a candidate to replace the successor
1751 * of ep.
1752 *
1753 * Check that there is no data dependency between
1754 * nodes that will be violated if we move the edge;
1755 * i.e., if any register used on exit from the
1756 * candidate has a value at that point different
1757 * from the value it has when we exit the
1758 * predecessor of that edge, there's a data
1759 * dependency that will be violated.
1760 */
1761 if (target != 0 && !use_conflict(ep->pred, target)) {
1762 /*
1763 * It's safe to replace the successor of
1764 * ep; do so, and note that we've made
1765 * at least one change.
1766 *
1767 * XXX - this is one of the operations that
1768 * happens when the optimizer gets into
1769 * one of those infinite loops.
1770 */
1771 opt_state->done = 0;
1772 ep->succ = target;
1773 if (JT(target) != 0)
1774 /*
1775 * Start over unless we hit a leaf.
1776 */
1777 goto top;
1778 return;
1779 }
1780 }
1781 }
1782 }
1783
1784 /*
1785 * XXX - is this, and and_pullup(), what's described in section 6.1.2
1786 * "Predicate Assertion Propagation" in the BPF+ paper?
1787 *
1788 * Note that this looks at block dominators, not edge dominators.
1789 * Don't think so.
1790 *
1791 * "A or B" compiles into
1792 *
1793 * A
1794 * t / \ f
1795 * / B
1796 * / t / \ f
1797 * \ /
1798 * \ /
1799 * X
1800 *
1801 *
1802 */
1803 static void
1804 or_pullup(opt_state_t *opt_state, struct block *b)
1805 {
1806 bpf_u_int32 val;
1807 int at_top;
1808 struct block *pull;
1809 struct block **diffp, **samep;
1810 struct edge *ep;
1811
1812 ep = b->in_edges;
1813 if (ep == 0)
1814 return;
1815
1816 /*
1817 * Make sure each predecessor loads the same value.
1818 * XXX why?
1819 */
1820 val = ep->pred->val[A_ATOM];
1821 for (ep = ep->next; ep != 0; ep = ep->next)
1822 if (val != ep->pred->val[A_ATOM])
1823 return;
1824
1825 /*
1826 * For the first edge in the list of edges coming into this block,
1827 * see whether the predecessor of that edge comes here via a true
1828 * branch or a false branch.
1829 */
1830 if (JT(b->in_edges->pred) == b)
1831 diffp = &JT(b->in_edges->pred); /* jt */
1832 else
1833 diffp = &JF(b->in_edges->pred); /* jf */
1834
1835 /*
1836 * diffp is a pointer to a pointer to the block.
1837 *
1838 * Go down the false chain looking as far as you can,
1839 * making sure that each jump-compare is doing the
1840 * same as the original block.
1841 *
1842 * If you reach the bottom before you reach a
1843 * different jump-compare, just exit. There's nothing
1844 * to do here. XXX - no, this version is checking for
1845 * the value leaving the block; that's from the BPF+
1846 * pullup routine.
1847 */
1848 at_top = 1;
1849 for (;;) {
1850 /*
1851 * Done if that's not going anywhere XXX
1852 */
1853 if (*diffp == 0)
1854 return;
1855
1856 /*
1857 * Done if that predecessor blah blah blah isn't
1858 * going the same place we're going XXX
1859 *
1860 * Does the true edge of this block point to the same
1861 * location as the true edge of b?
1862 */
1863 if (JT(*diffp) != JT(b))
1864 return;
1865
1866 /*
1867 * Done if this node isn't a dominator of that
1868 * node blah blah blah XXX
1869 *
1870 * Does b dominate diffp?
1871 */
1872 if (!SET_MEMBER((*diffp)->dom, b->id))
1873 return;
1874
1875 /*
1876 * Break out of the loop if that node's value of A
1877 * isn't the value of A above XXX
1878 */
1879 if ((*diffp)->val[A_ATOM] != val)
1880 break;
1881
1882 /*
1883 * Get the JF for that node XXX
1884 * Go down the false path.
1885 */
1886 diffp = &JF(*diffp);
1887 at_top = 0;
1888 }
1889
1890 /*
1891 * Now that we've found a different jump-compare in a chain
1892 * below b, search further down until we find another
1893 * jump-compare that looks at the original value. This
1894 * jump-compare should get pulled up. XXX again we're
1895 * comparing values not jump-compares.
1896 */
1897 samep = &JF(*diffp);
1898 for (;;) {
1899 /*
1900 * Done if that's not going anywhere XXX
1901 */
1902 if (*samep == 0)
1903 return;
1904
1905 /*
1906 * Done if that predecessor blah blah blah isn't
1907 * going the same place we're going XXX
1908 */
1909 if (JT(*samep) != JT(b))
1910 return;
1911
1912 /*
1913 * Done if this node isn't a dominator of that
1914 * node blah blah blah XXX
1915 *
1916 * Does b dominate samep?
1917 */
1918 if (!SET_MEMBER((*samep)->dom, b->id))
1919 return;
1920
1921 /*
1922 * Break out of the loop if that node's value of A
1923 * is the value of A above XXX
1924 */
1925 if ((*samep)->val[A_ATOM] == val)
1926 break;
1927
1928 /* XXX Need to check that there are no data dependencies
1929 between dp0 and dp1. Currently, the code generator
1930 will not produce such dependencies. */
1931 samep = &JF(*samep);
1932 }
1933 #ifdef notdef
1934 /* XXX This doesn't cover everything. */
1935 for (i = 0; i < N_ATOMS; ++i)
1936 if ((*samep)->val[i] != pred->val[i])
1937 return;
1938 #endif
1939 /* Pull up the node. */
1940 pull = *samep;
1941 *samep = JF(pull);
1942 JF(pull) = *diffp;
1943
1944 /*
1945 * At the top of the chain, each predecessor needs to point at the
1946 * pulled up node. Inside the chain, there is only one predecessor
1947 * to worry about.
1948 */
1949 if (at_top) {
1950 for (ep = b->in_edges; ep != 0; ep = ep->next) {
1951 if (JT(ep->pred) == b)
1952 JT(ep->pred) = pull;
1953 else
1954 JF(ep->pred) = pull;
1955 }
1956 }
1957 else
1958 *diffp = pull;
1959
1960 /*
1961 * XXX - this is one of the operations that happens when the
1962 * optimizer gets into one of those infinite loops.
1963 */
1964 opt_state->done = 0;
1965 }
1966
1967 static void
1968 and_pullup(opt_state_t *opt_state, struct block *b)
1969 {
1970 bpf_u_int32 val;
1971 int at_top;
1972 struct block *pull;
1973 struct block **diffp, **samep;
1974 struct edge *ep;
1975
1976 ep = b->in_edges;
1977 if (ep == 0)
1978 return;
1979
1980 /*
1981 * Make sure each predecessor loads the same value.
1982 */
1983 val = ep->pred->val[A_ATOM];
1984 for (ep = ep->next; ep != 0; ep = ep->next)
1985 if (val != ep->pred->val[A_ATOM])
1986 return;
1987
1988 if (JT(b->in_edges->pred) == b)
1989 diffp = &JT(b->in_edges->pred);
1990 else
1991 diffp = &JF(b->in_edges->pred);
1992
1993 at_top = 1;
1994 for (;;) {
1995 if (*diffp == 0)
1996 return;
1997
1998 if (JF(*diffp) != JF(b))
1999 return;
2000
2001 if (!SET_MEMBER((*diffp)->dom, b->id))
2002 return;
2003
2004 if ((*diffp)->val[A_ATOM] != val)
2005 break;
2006
2007 diffp = &JT(*diffp);
2008 at_top = 0;
2009 }
2010 samep = &JT(*diffp);
2011 for (;;) {
2012 if (*samep == 0)
2013 return;
2014
2015 if (JF(*samep) != JF(b))
2016 return;
2017
2018 if (!SET_MEMBER((*samep)->dom, b->id))
2019 return;
2020
2021 if ((*samep)->val[A_ATOM] == val)
2022 break;
2023
2024 /* XXX Need to check that there are no data dependencies
2025 between diffp and samep. Currently, the code generator
2026 will not produce such dependencies. */
2027 samep = &JT(*samep);
2028 }
2029 #ifdef notdef
2030 /* XXX This doesn't cover everything. */
2031 for (i = 0; i < N_ATOMS; ++i)
2032 if ((*samep)->val[i] != pred->val[i])
2033 return;
2034 #endif
2035 /* Pull up the node. */
2036 pull = *samep;
2037 *samep = JT(pull);
2038 JT(pull) = *diffp;
2039
2040 /*
2041 * At the top of the chain, each predecessor needs to point at the
2042 * pulled up node. Inside the chain, there is only one predecessor
2043 * to worry about.
2044 */
2045 if (at_top) {
2046 for (ep = b->in_edges; ep != 0; ep = ep->next) {
2047 if (JT(ep->pred) == b)
2048 JT(ep->pred) = pull;
2049 else
2050 JF(ep->pred) = pull;
2051 }
2052 }
2053 else
2054 *diffp = pull;
2055
2056 /*
2057 * XXX - this is one of the operations that happens when the
2058 * optimizer gets into one of those infinite loops.
2059 */
2060 opt_state->done = 0;
2061 }
2062
2063 static void
2064 opt_blks(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2065 {
2066 int i, maxlevel;
2067 struct block *p;
2068
2069 init_val(opt_state);
2070 maxlevel = ic->root->level;
2071
2072 find_inedges(opt_state, ic->root);
2073 for (i = maxlevel; i >= 0; --i)
2074 for (p = opt_state->levels[i]; p; p = p->link)
2075 opt_blk(opt_state, p, do_stmts);
2076
2077 if (do_stmts)
2078 /*
2079 * No point trying to move branches; it can't possibly
2080 * make a difference at this point.
2081 *
2082 * XXX - this might be after we detect a loop where
2083 * we were just looping infinitely moving branches
2084 * in such a fashion that we went through two or more
2085 * versions of the machine code, eventually returning
2086 * to the first version. (We're really not doing a
2087 * full loop detection, we're just testing for two
2088 * passes in a row where we do nothing but
2089 * move branches.)
2090 */
2091 return;
2092
2093 /*
2094 * Is this what the BPF+ paper describes in sections 6.1.1,
2095 * 6.1.2, and 6.1.3?
2096 */
2097 for (i = 1; i <= maxlevel; ++i) {
2098 for (p = opt_state->levels[i]; p; p = p->link) {
2099 opt_j(opt_state, &p->et);
2100 opt_j(opt_state, &p->ef);
2101 }
2102 }
2103
2104 find_inedges(opt_state, ic->root);
2105 for (i = 1; i <= maxlevel; ++i) {
2106 for (p = opt_state->levels[i]; p; p = p->link) {
2107 or_pullup(opt_state, p);
2108 and_pullup(opt_state, p);
2109 }
2110 }
2111 }
2112
2113 static inline void
2114 link_inedge(struct edge *parent, struct block *child)
2115 {
2116 parent->next = child->in_edges;
2117 child->in_edges = parent;
2118 }
2119
2120 static void
2121 find_inedges(opt_state_t *opt_state, struct block *root)
2122 {
2123 u_int i;
2124 int level;
2125 struct block *b;
2126
2127 for (i = 0; i < opt_state->n_blocks; ++i)
2128 opt_state->blocks[i]->in_edges = 0;
2129
2130 /*
2131 * Traverse the graph, adding each edge to the predecessor
2132 * list of its successors. Skip the leaves (i.e. level 0).
2133 */
2134 for (level = root->level; level > 0; --level) {
2135 for (b = opt_state->levels[level]; b != 0; b = b->link) {
2136 link_inedge(&b->et, JT(b));
2137 link_inedge(&b->ef, JF(b));
2138 }
2139 }
2140 }
2141
2142 static void
2143 opt_root(struct block **b)
2144 {
2145 struct slist *tmp, *s;
2146
2147 s = (*b)->stmts;
2148 (*b)->stmts = 0;
2149 while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
2150 *b = JT(*b);
2151
2152 tmp = (*b)->stmts;
2153 if (tmp != 0)
2154 sappend(s, tmp);
2155 (*b)->stmts = s;
2156
2157 /*
2158 * If the root node is a return, then there is no
2159 * point executing any statements (since the bpf machine
2160 * has no side effects).
2161 */
2162 if (BPF_CLASS((*b)->s.code) == BPF_RET)
2163 (*b)->stmts = 0;
2164 }
2165
2166 static void
2167 opt_loop(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2168 {
2169
2170 #ifdef BDEBUG
2171 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2172 printf("opt_loop(root, %d) begin\n", do_stmts);
2173 opt_dump(opt_state, ic);
2174 }
2175 #endif
2176
2177 /*
2178 * XXX - optimizer loop detection.
2179 */
2180 int loop_count = 0;
2181 for (;;) {
2182 opt_state->done = 1;
2183 /*
2184 * XXX - optimizer loop detection.
2185 */
2186 opt_state->non_branch_movement_performed = 0;
2187 find_levels(opt_state, ic);
2188 find_dom(opt_state, ic->root);
2189 find_closure(opt_state, ic->root);
2190 find_ud(opt_state, ic->root);
2191 find_edom(opt_state, ic->root);
2192 opt_blks(opt_state, ic, do_stmts);
2193 #ifdef BDEBUG
2194 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2195 printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, opt_state->done);
2196 opt_dump(opt_state, ic);
2197 }
2198 #endif
2199
2200 /*
2201 * Was anything done in this optimizer pass?
2202 */
2203 if (opt_state->done) {
2204 /*
2205 * No, so we've reached a fixed point.
2206 * We're done.
2207 */
2208 break;
2209 }
2210
2211 /*
2212 * XXX - was anything done other than branch movement
2213 * in this pass?
2214 */
2215 if (opt_state->non_branch_movement_performed) {
2216 /*
2217 * Yes. Clear any loop-detection counter;
2218 * we're making some form of progress (assuming
2219 * we can't get into a cycle doing *other*
2220 * optimizations...).
2221 */
2222 loop_count = 0;
2223 } else {
2224 /*
2225 * No - increment the counter, and quit if
2226 * it's up to 100.
2227 */
2228 loop_count++;
2229 if (loop_count >= 100) {
2230 /*
2231 * We've done nothing but branch movement
2232 * for 100 passes; we're probably
2233 * in a cycle and will never reach a
2234 * fixed point.
2235 *
2236 * XXX - yes, we really need a non-
2237 * heuristic way of detecting a cycle.
2238 */
2239 opt_state->done = 1;
2240 break;
2241 }
2242 }
2243 }
2244 }
2245
2246 /*
2247 * Optimize the filter code in its dag representation.
2248 * Return 0 on success, -1 on error.
2249 */
2250 int
2251 bpf_optimize(struct icode *ic, char *errbuf)
2252 {
2253 opt_state_t opt_state;
2254
2255 memset(&opt_state, 0, sizeof(opt_state));
2256 opt_state.errbuf = errbuf;
2257 opt_state.non_branch_movement_performed = 0;
2258 if (setjmp(opt_state.top_ctx)) {
2259 opt_cleanup(&opt_state);
2260 return -1;
2261 }
2262 opt_init(&opt_state, ic);
2263 opt_loop(&opt_state, ic, 0);
2264 opt_loop(&opt_state, ic, 1);
2265 intern_blocks(&opt_state, ic);
2266 #ifdef BDEBUG
2267 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2268 printf("after intern_blocks()\n");
2269 opt_dump(&opt_state, ic);
2270 }
2271 #endif
2272 opt_root(&ic->root);
2273 #ifdef BDEBUG
2274 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2275 printf("after opt_root()\n");
2276 opt_dump(&opt_state, ic);
2277 }
2278 #endif
2279 opt_cleanup(&opt_state);
2280 return 0;
2281 }
2282
2283 static void
2284 make_marks(struct icode *ic, struct block *p)
2285 {
2286 if (!isMarked(ic, p)) {
2287 Mark(ic, p);
2288 if (BPF_CLASS(p->s.code) != BPF_RET) {
2289 make_marks(ic, JT(p));
2290 make_marks(ic, JF(p));
2291 }
2292 }
2293 }
2294
2295 /*
2296 * Mark code array such that isMarked(ic->cur_mark, i) is true
2297 * only for nodes that are alive.
2298 */
2299 static void
2300 mark_code(struct icode *ic)
2301 {
2302 ic->cur_mark += 1;
2303 make_marks(ic, ic->root);
2304 }
2305
2306 /*
2307 * True iff the two stmt lists load the same value from the packet into
2308 * the accumulator.
2309 */
2310 static int
2311 eq_slist(struct slist *x, struct slist *y)
2312 {
2313 for (;;) {
2314 while (x && x->s.code == NOP)
2315 x = x->next;
2316 while (y && y->s.code == NOP)
2317 y = y->next;
2318 if (x == 0)
2319 return y == 0;
2320 if (y == 0)
2321 return x == 0;
2322 if (x->s.code != y->s.code || x->s.k != y->s.k)
2323 return 0;
2324 x = x->next;
2325 y = y->next;
2326 }
2327 }
2328
2329 static inline int
2330 eq_blk(struct block *b0, struct block *b1)
2331 {
2332 if (b0->s.code == b1->s.code &&
2333 b0->s.k == b1->s.k &&
2334 b0->et.succ == b1->et.succ &&
2335 b0->ef.succ == b1->ef.succ)
2336 return eq_slist(b0->stmts, b1->stmts);
2337 return 0;
2338 }
2339
2340 static void
2341 intern_blocks(opt_state_t *opt_state, struct icode *ic)
2342 {
2343 struct block *p;
2344 u_int i, j;
2345 int done1; /* don't shadow global */
2346 top:
2347 done1 = 1;
2348 for (i = 0; i < opt_state->n_blocks; ++i)
2349 opt_state->blocks[i]->link = 0;
2350
2351 mark_code(ic);
2352
2353 for (i = opt_state->n_blocks - 1; i != 0; ) {
2354 --i;
2355 if (!isMarked(ic, opt_state->blocks[i]))
2356 continue;
2357 for (j = i + 1; j < opt_state->n_blocks; ++j) {
2358 if (!isMarked(ic, opt_state->blocks[j]))
2359 continue;
2360 if (eq_blk(opt_state->blocks[i], opt_state->blocks[j])) {
2361 opt_state->blocks[i]->link = opt_state->blocks[j]->link ?
2362 opt_state->blocks[j]->link : opt_state->blocks[j];
2363 break;
2364 }
2365 }
2366 }
2367 for (i = 0; i < opt_state->n_blocks; ++i) {
2368 p = opt_state->blocks[i];
2369 if (JT(p) == 0)
2370 continue;
2371 if (JT(p)->link) {
2372 done1 = 0;
2373 JT(p) = JT(p)->link;
2374 }
2375 if (JF(p)->link) {
2376 done1 = 0;
2377 JF(p) = JF(p)->link;
2378 }
2379 }
2380 if (!done1)
2381 goto top;
2382 }
2383
2384 static void
2385 opt_cleanup(opt_state_t *opt_state)
2386 {
2387 free((void *)opt_state->vnode_base);
2388 free((void *)opt_state->vmap);
2389 free((void *)opt_state->edges);
2390 free((void *)opt_state->space);
2391 free((void *)opt_state->levels);
2392 free((void *)opt_state->blocks);
2393 }
2394
2395 /*
2396 * For optimizer errors.
2397 */
2398 static void PCAP_NORETURN
2399 opt_error(opt_state_t *opt_state, const char *fmt, ...)
2400 {
2401 va_list ap;
2402
2403 if (opt_state->errbuf != NULL) {
2404 va_start(ap, fmt);
2405 (void)vsnprintf(opt_state->errbuf,
2406 PCAP_ERRBUF_SIZE, fmt, ap);
2407 va_end(ap);
2408 }
2409 longjmp(opt_state->top_ctx, 1);
2410 /* NOTREACHED */
2411 #ifdef _AIX
2412 PCAP_UNREACHABLE
2413 #endif /* _AIX */
2414 }
2415
2416 /*
2417 * Return the number of stmts in 's'.
2418 */
2419 static u_int
2420 slength(struct slist *s)
2421 {
2422 u_int n = 0;
2423
2424 for (; s; s = s->next)
2425 if (s->s.code != NOP)
2426 ++n;
2427 return n;
2428 }
2429
2430 /*
2431 * Return the number of nodes reachable by 'p'.
2432 * All nodes should be initially unmarked.
2433 */
2434 static int
2435 count_blocks(struct icode *ic, struct block *p)
2436 {
2437 if (p == 0 || isMarked(ic, p))
2438 return 0;
2439 Mark(ic, p);
2440 return count_blocks(ic, JT(p)) + count_blocks(ic, JF(p)) + 1;
2441 }
2442
2443 /*
2444 * Do a depth first search on the flow graph, numbering the
2445 * the basic blocks, and entering them into the 'blocks' array.`
2446 */
2447 static void
2448 number_blks_r(opt_state_t *opt_state, struct icode *ic, struct block *p)
2449 {
2450 u_int n;
2451
2452 if (p == 0 || isMarked(ic, p))
2453 return;
2454
2455 Mark(ic, p);
2456 n = opt_state->n_blocks++;
2457 if (opt_state->n_blocks == 0) {
2458 /*
2459 * Overflow.
2460 */
2461 opt_error(opt_state, "filter is too complex to optimize");
2462 }
2463 p->id = n;
2464 opt_state->blocks[n] = p;
2465
2466 number_blks_r(opt_state, ic, JT(p));
2467 number_blks_r(opt_state, ic, JF(p));
2468 }
2469
2470 /*
2471 * Return the number of stmts in the flowgraph reachable by 'p'.
2472 * The nodes should be unmarked before calling.
2473 *
2474 * Note that "stmts" means "instructions", and that this includes
2475 *
2476 * side-effect statements in 'p' (slength(p->stmts));
2477 *
2478 * statements in the true branch from 'p' (count_stmts(JT(p)));
2479 *
2480 * statements in the false branch from 'p' (count_stmts(JF(p)));
2481 *
2482 * the conditional jump itself (1);
2483 *
2484 * an extra long jump if the true branch requires it (p->longjt);
2485 *
2486 * an extra long jump if the false branch requires it (p->longjf).
2487 */
2488 static u_int
2489 count_stmts(struct icode *ic, struct block *p)
2490 {
2491 u_int n;
2492
2493 if (p == 0 || isMarked(ic, p))
2494 return 0;
2495 Mark(ic, p);
2496 n = count_stmts(ic, JT(p)) + count_stmts(ic, JF(p));
2497 return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
2498 }
2499
2500 /*
2501 * Allocate memory. All allocation is done before optimization
2502 * is begun. A linear bound on the size of all data structures is computed
2503 * from the total number of blocks and/or statements.
2504 */
2505 static void
2506 opt_init(opt_state_t *opt_state, struct icode *ic)
2507 {
2508 bpf_u_int32 *p;
2509 int i, n, max_stmts;
2510 u_int product;
2511 size_t block_memsize, edge_memsize;
2512
2513 /*
2514 * First, count the blocks, so we can malloc an array to map
2515 * block number to block. Then, put the blocks into the array.
2516 */
2517 unMarkAll(ic);
2518 n = count_blocks(ic, ic->root);
2519 opt_state->blocks = (struct block **)calloc(n, sizeof(*opt_state->blocks));
2520 if (opt_state->blocks == NULL)
2521 opt_error(opt_state, "malloc");
2522 unMarkAll(ic);
2523 opt_state->n_blocks = 0;
2524 number_blks_r(opt_state, ic, ic->root);
2525
2526 /*
2527 * This "should not happen".
2528 */
2529 if (opt_state->n_blocks == 0)
2530 opt_error(opt_state, "filter has no instructions; please report this as a libpcap issue");
2531
2532 opt_state->n_edges = 2 * opt_state->n_blocks;
2533 if ((opt_state->n_edges / 2) != opt_state->n_blocks) {
2534 /*
2535 * Overflow.
2536 */
2537 opt_error(opt_state, "filter is too complex to optimize");
2538 }
2539 opt_state->edges = (struct edge **)calloc(opt_state->n_edges, sizeof(*opt_state->edges));
2540 if (opt_state->edges == NULL) {
2541 opt_error(opt_state, "malloc");
2542 }
2543
2544 /*
2545 * The number of levels is bounded by the number of nodes.
2546 */
2547 opt_state->levels = (struct block **)calloc(opt_state->n_blocks, sizeof(*opt_state->levels));
2548 if (opt_state->levels == NULL) {
2549 opt_error(opt_state, "malloc");
2550 }
2551
2552 opt_state->edgewords = opt_state->n_edges / BITS_PER_WORD + 1;
2553 opt_state->nodewords = opt_state->n_blocks / BITS_PER_WORD + 1;
2554
2555 /*
2556 * Make sure opt_state->n_blocks * opt_state->nodewords fits
2557 * in a u_int; we use it as a u_int number-of-iterations
2558 * value.
2559 */
2560 product = opt_state->n_blocks * opt_state->nodewords;
2561 if ((product / opt_state->n_blocks) != opt_state->nodewords) {
2562 /*
2563 * XXX - just punt and don't try to optimize?
2564 * In practice, this is unlikely to happen with
2565 * a normal filter.
2566 */
2567 opt_error(opt_state, "filter is too complex to optimize");
2568 }
2569
2570 /*
2571 * Make sure the total memory required for that doesn't
2572 * overflow.
2573 */
2574 block_memsize = (size_t)2 * product * sizeof(*opt_state->space);
2575 if ((block_memsize / product) != 2 * sizeof(*opt_state->space)) {
2576 opt_error(opt_state, "filter is too complex to optimize");
2577 }
2578
2579 /*
2580 * Make sure opt_state->n_edges * opt_state->edgewords fits
2581 * in a u_int; we use it as a u_int number-of-iterations
2582 * value.
2583 */
2584 product = opt_state->n_edges * opt_state->edgewords;
2585 if ((product / opt_state->n_edges) != opt_state->edgewords) {
2586 opt_error(opt_state, "filter is too complex to optimize");
2587 }
2588
2589 /*
2590 * Make sure the total memory required for that doesn't
2591 * overflow.
2592 */
2593 edge_memsize = (size_t)product * sizeof(*opt_state->space);
2594 if (edge_memsize / product != sizeof(*opt_state->space)) {
2595 opt_error(opt_state, "filter is too complex to optimize");
2596 }
2597
2598 /*
2599 * Make sure the total memory required for both of them doesn't
2600 * overflow.
2601 */
2602 if (block_memsize > SIZE_MAX - edge_memsize) {
2603 opt_error(opt_state, "filter is too complex to optimize");
2604 }
2605
2606 /* XXX */
2607 opt_state->space = (bpf_u_int32 *)malloc(block_memsize + edge_memsize);
2608 if (opt_state->space == NULL) {
2609 opt_error(opt_state, "malloc");
2610 }
2611 p = opt_state->space;
2612 opt_state->all_dom_sets = p;
2613 for (i = 0; i < n; ++i) {
2614 opt_state->blocks[i]->dom = p;
2615 p += opt_state->nodewords;
2616 }
2617 opt_state->all_closure_sets = p;
2618 for (i = 0; i < n; ++i) {
2619 opt_state->blocks[i]->closure = p;
2620 p += opt_state->nodewords;
2621 }
2622 opt_state->all_edge_sets = p;
2623 for (i = 0; i < n; ++i) {
2624 register struct block *b = opt_state->blocks[i];
2625
2626 b->et.edom = p;
2627 p += opt_state->edgewords;
2628 b->ef.edom = p;
2629 p += opt_state->edgewords;
2630 b->et.id = i;
2631 opt_state->edges[i] = &b->et;
2632 b->ef.id = opt_state->n_blocks + i;
2633 opt_state->edges[opt_state->n_blocks + i] = &b->ef;
2634 b->et.pred = b;
2635 b->ef.pred = b;
2636 }
2637 max_stmts = 0;
2638 for (i = 0; i < n; ++i)
2639 max_stmts += slength(opt_state->blocks[i]->stmts) + 1;
2640 /*
2641 * We allocate at most 3 value numbers per statement,
2642 * so this is an upper bound on the number of valnodes
2643 * we'll need.
2644 */
2645 opt_state->maxval = 3 * max_stmts;
2646 opt_state->vmap = (struct vmapinfo *)calloc(opt_state->maxval, sizeof(*opt_state->vmap));
2647 if (opt_state->vmap == NULL) {
2648 opt_error(opt_state, "malloc");
2649 }
2650 opt_state->vnode_base = (struct valnode *)calloc(opt_state->maxval, sizeof(*opt_state->vnode_base));
2651 if (opt_state->vnode_base == NULL) {
2652 opt_error(opt_state, "malloc");
2653 }
2654 }
2655
2656 /*
2657 * This is only used when supporting optimizer debugging. It is
2658 * global state, so do *not* do more than one compile in parallel
2659 * and expect it to provide meaningful information.
2660 */
2661 #ifdef BDEBUG
2662 int bids[NBIDS];
2663 #endif
2664
2665 static void PCAP_NORETURN conv_error(conv_state_t *, const char *, ...)
2666 PCAP_PRINTFLIKE(2, 3);
2667
2668 /*
2669 * Returns true if successful. Returns false if a branch has
2670 * an offset that is too large. If so, we have marked that
2671 * branch so that on a subsequent iteration, it will be treated
2672 * properly.
2673 */
2674 static int
2675 convert_code_r(conv_state_t *conv_state, struct icode *ic, struct block *p)
2676 {
2677 struct bpf_insn *dst;
2678 struct slist *src;
2679 u_int slen;
2680 u_int off;
2681 struct slist **offset = NULL;
2682
2683 if (p == 0 || isMarked(ic, p))
2684 return (1);
2685 Mark(ic, p);
2686
2687 if (convert_code_r(conv_state, ic, JF(p)) == 0)
2688 return (0);
2689 if (convert_code_r(conv_state, ic, JT(p)) == 0)
2690 return (0);
2691
2692 slen = slength(p->stmts);
2693 dst = conv_state->ftail -= (slen + 1 + p->longjt + p->longjf);
2694 /* inflate length by any extra jumps */
2695
2696 p->offset = (int)(dst - conv_state->fstart);
2697
2698 /* generate offset[] for convenience */
2699 if (slen) {
2700 offset = (struct slist **)calloc(slen, sizeof(struct slist *));
2701 if (!offset) {
2702 conv_error(conv_state, "not enough core");
2703 /*NOTREACHED*/
2704 }
2705 }
2706 src = p->stmts;
2707 for (off = 0; off < slen && src; off++) {
2708 #if 0
2709 printf("off=%d src=%x\n", off, src);
2710 #endif
2711 offset[off] = src;
2712 src = src->next;
2713 }
2714
2715 off = 0;
2716 for (src = p->stmts; src; src = src->next) {
2717 if (src->s.code == NOP)
2718 continue;
2719 dst->code = (u_short)src->s.code;
2720 dst->k = src->s.k;
2721
2722 /* fill block-local relative jump */
2723 if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
2724 #if 0
2725 if (src->s.jt || src->s.jf) {
2726 free(offset);
2727 conv_error(conv_state, "illegal jmp destination");
2728 /*NOTREACHED*/
2729 }
2730 #endif
2731 goto filled;
2732 }
2733 if (off == slen - 2) /*???*/
2734 goto filled;
2735
2736 {
2737 u_int i;
2738 int jt, jf;
2739 const char ljerr[] = "%s for block-local relative jump: off=%d";
2740
2741 #if 0
2742 printf("code=%x off=%d %x %x\n", src->s.code,
2743 off, src->s.jt, src->s.jf);
2744 #endif
2745
2746 if (!src->s.jt || !src->s.jf) {
2747 free(offset);
2748 conv_error(conv_state, ljerr, "no jmp destination", off);
2749 /*NOTREACHED*/
2750 }
2751
2752 jt = jf = 0;
2753 for (i = 0; i < slen; i++) {
2754 if (offset[i] == src->s.jt) {
2755 if (jt) {
2756 free(offset);
2757 conv_error(conv_state, ljerr, "multiple matches", off);
2758 /*NOTREACHED*/
2759 }
2760
2761 if (i - off - 1 >= 256) {
2762 free(offset);
2763 conv_error(conv_state, ljerr, "out-of-range jump", off);
2764 /*NOTREACHED*/
2765 }
2766 dst->jt = (u_char)(i - off - 1);
2767 jt++;
2768 }
2769 if (offset[i] == src->s.jf) {
2770 if (jf) {
2771 free(offset);
2772 conv_error(conv_state, ljerr, "multiple matches", off);
2773 /*NOTREACHED*/
2774 }
2775 if (i - off - 1 >= 256) {
2776 free(offset);
2777 conv_error(conv_state, ljerr, "out-of-range jump", off);
2778 /*NOTREACHED*/
2779 }
2780 dst->jf = (u_char)(i - off - 1);
2781 jf++;
2782 }
2783 }
2784 if (!jt || !jf) {
2785 free(offset);
2786 conv_error(conv_state, ljerr, "no destination found", off);
2787 /*NOTREACHED*/
2788 }
2789 }
2790 filled:
2791 ++dst;
2792 ++off;
2793 }
2794 if (offset)
2795 free(offset);
2796
2797 #ifdef BDEBUG
2798 if (dst - conv_state->fstart < NBIDS)
2799 bids[dst - conv_state->fstart] = p->id + 1;
2800 #endif
2801 dst->code = (u_short)p->s.code;
2802 dst->k = p->s.k;
2803 if (JT(p)) {
2804 /* number of extra jumps inserted */
2805 u_char extrajmps = 0;
2806 off = JT(p)->offset - (p->offset + slen) - 1;
2807 if (off >= 256) {
2808 /* offset too large for branch, must add a jump */
2809 if (p->longjt == 0) {
2810 /* mark this instruction and retry */
2811 p->longjt++;
2812 return(0);
2813 }
2814 dst->jt = extrajmps;
2815 extrajmps++;
2816 dst[extrajmps].code = BPF_JMP|BPF_JA;
2817 dst[extrajmps].k = off - extrajmps;
2818 }
2819 else
2820 dst->jt = (u_char)off;
2821 off = JF(p)->offset - (p->offset + slen) - 1;
2822 if (off >= 256) {
2823 /* offset too large for branch, must add a jump */
2824 if (p->longjf == 0) {
2825 /* mark this instruction and retry */
2826 p->longjf++;
2827 return(0);
2828 }
2829 /* branch if F to following jump */
2830 /* if two jumps are inserted, F goes to second one */
2831 dst->jf = extrajmps;
2832 extrajmps++;
2833 dst[extrajmps].code = BPF_JMP|BPF_JA;
2834 dst[extrajmps].k = off - extrajmps;
2835 }
2836 else
2837 dst->jf = (u_char)off;
2838 }
2839 return (1);
2840 }
2841
2842
2843 /*
2844 * Convert flowgraph intermediate representation to the
2845 * BPF array representation. Set *lenp to the number of instructions.
2846 *
2847 * This routine does *NOT* leak the memory pointed to by fp. It *must
2848 * not* do free(fp) before returning fp; doing so would make no sense,
2849 * as the BPF array pointed to by the return value of icode_to_fcode()
2850 * must be valid - it's being returned for use in a bpf_program structure.
2851 *
2852 * If it appears that icode_to_fcode() is leaking, the problem is that
2853 * the program using pcap_compile() is failing to free the memory in
2854 * the BPF program when it's done - the leak is in the program, not in
2855 * the routine that happens to be allocating the memory. (By analogy, if
2856 * a program calls fopen() without ever calling fclose() on the FILE *,
2857 * it will leak the FILE structure; the leak is not in fopen(), it's in
2858 * the program.) Change the program to use pcap_freecode() when it's
2859 * done with the filter program. See the pcap man page.
2860 */
2861 struct bpf_insn *
2862 icode_to_fcode(struct icode *ic, struct block *root, u_int *lenp,
2863 char *errbuf)
2864 {
2865 u_int n;
2866 struct bpf_insn *fp;
2867 conv_state_t conv_state;
2868
2869 conv_state.fstart = NULL;
2870 conv_state.errbuf = errbuf;
2871 if (setjmp(conv_state.top_ctx) != 0) {
2872 free(conv_state.fstart);
2873 return NULL;
2874 }
2875
2876 /*
2877 * Loop doing convert_code_r() until no branches remain
2878 * with too-large offsets.
2879 */
2880 for (;;) {
2881 unMarkAll(ic);
2882 n = *lenp = count_stmts(ic, root);
2883
2884 fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
2885 if (fp == NULL) {
2886 (void)snprintf(errbuf, PCAP_ERRBUF_SIZE,
2887 "malloc");
2888 return NULL;
2889 }
2890 memset((char *)fp, 0, sizeof(*fp) * n);
2891 conv_state.fstart = fp;
2892 conv_state.ftail = fp + n;
2893
2894 unMarkAll(ic);
2895 if (convert_code_r(&conv_state, ic, root))
2896 break;
2897 free(fp);
2898 }
2899
2900 return fp;
2901 }
2902
2903 /*
2904 * For iconv_to_fconv() errors.
2905 */
2906 static void PCAP_NORETURN
2907 conv_error(conv_state_t *conv_state, const char *fmt, ...)
2908 {
2909 va_list ap;
2910
2911 va_start(ap, fmt);
2912 (void)vsnprintf(conv_state->errbuf,
2913 PCAP_ERRBUF_SIZE, fmt, ap);
2914 va_end(ap);
2915 longjmp(conv_state->top_ctx, 1);
2916 /* NOTREACHED */
2917 #ifdef _AIX
2918 PCAP_UNREACHABLE
2919 #endif /* _AIX */
2920 }
2921
2922 /*
2923 * Make a copy of a BPF program and put it in the "fcode" member of
2924 * a "pcap_t".
2925 *
2926 * If we fail to allocate memory for the copy, fill in the "errbuf"
2927 * member of the "pcap_t" with an error message, and return -1;
2928 * otherwise, return 0.
2929 */
2930 int
2931 pcap_install_bpf_program(pcap_t *p, struct bpf_program *fp)
2932 {
2933 size_t prog_size;
2934
2935 /*
2936 * Validate the program.
2937 */
2938 if (!pcap_validate_filter(fp->bf_insns, fp->bf_len)) {
2939 snprintf(p->errbuf, sizeof(p->errbuf),
2940 "BPF program is not valid");
2941 return (-1);
2942 }
2943
2944 /*
2945 * Free up any already installed program.
2946 */
2947 pcap_freecode(&p->fcode);
2948
2949 prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
2950 p->fcode.bf_len = fp->bf_len;
2951 p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
2952 if (p->fcode.bf_insns == NULL) {
2953 pcap_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
2954 errno, "malloc");
2955 return (-1);
2956 }
2957 memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
2958 return (0);
2959 }
2960
2961 #ifdef BDEBUG
2962 static void
2963 dot_dump_node(struct icode *ic, struct block *block, struct bpf_program *prog,
2964 FILE *out)
2965 {
2966 int icount, noffset;
2967 int i;
2968
2969 if (block == NULL || isMarked(ic, block))
2970 return;
2971 Mark(ic, block);
2972
2973 icount = slength(block->stmts) + 1 + block->longjt + block->longjf;
2974 noffset = min(block->offset + icount, (int)prog->bf_len);
2975
2976 fprintf(out, "\tblock%u [shape=ellipse, id=\"block-%u\" label=\"BLOCK%u\\n", block->id, block->id, block->id);
2977 for (i = block->offset; i < noffset; i++) {
2978 fprintf(out, "\\n%s", bpf_image(prog->bf_insns + i, i));
2979 }
2980 fprintf(out, "\" tooltip=\"");
2981 for (i = 0; i < BPF_MEMWORDS; i++)
2982 if (block->val[i] != VAL_UNKNOWN)
2983 fprintf(out, "val[%d]=%d ", i, block->val[i]);
2984 fprintf(out, "val[A]=%d ", block->val[A_ATOM]);
2985 fprintf(out, "val[X]=%d", block->val[X_ATOM]);
2986 fprintf(out, "\"");
2987 if (JT(block) == NULL)
2988 fprintf(out, ", peripheries=2");
2989 fprintf(out, "];\n");
2990
2991 dot_dump_node(ic, JT(block), prog, out);
2992 dot_dump_node(ic, JF(block), prog, out);
2993 }
2994
2995 static void
2996 dot_dump_edge(struct icode *ic, struct block *block, FILE *out)
2997 {
2998 if (block == NULL || isMarked(ic, block))
2999 return;
3000 Mark(ic, block);
3001
3002 if (JT(block)) {
3003 fprintf(out, "\t\"block%u\":se -> \"block%u\":n [label=\"T\"]; \n",
3004 block->id, JT(block)->id);
3005 fprintf(out, "\t\"block%u\":sw -> \"block%u\":n [label=\"F\"]; \n",
3006 block->id, JF(block)->id);
3007 }
3008 dot_dump_edge(ic, JT(block), out);
3009 dot_dump_edge(ic, JF(block), out);
3010 }
3011
3012 /* Output the block CFG using graphviz/DOT language
3013 * In the CFG, block's code, value index for each registers at EXIT,
3014 * and the jump relationship is show.
3015 *
3016 * example DOT for BPF `ip src host 1.1.1.1' is:
3017 digraph BPF {
3018 block0 [shape=ellipse, id="block-0" label="BLOCK0\n\n(000) ldh [12]\n(001) jeq #0x800 jt 2 jf 5" tooltip="val[A]=0 val[X]=0"];
3019 block1 [shape=ellipse, id="block-1" label="BLOCK1\n\n(002) ld [26]\n(003) jeq #0x1010101 jt 4 jf 5" tooltip="val[A]=0 val[X]=0"];
3020 block2 [shape=ellipse, id="block-2" label="BLOCK2\n\n(004) ret #68" tooltip="val[A]=0 val[X]=0", peripheries=2];
3021 block3 [shape=ellipse, id="block-3" label="BLOCK3\n\n(005) ret #0" tooltip="val[A]=0 val[X]=0", peripheries=2];
3022 "block0":se -> "block1":n [label="T"];
3023 "block0":sw -> "block3":n [label="F"];
3024 "block1":se -> "block2":n [label="T"];
3025 "block1":sw -> "block3":n [label="F"];
3026 }
3027 *
3028 * After install graphviz on https://www.graphviz.org/, save it as bpf.dot
3029 * and run `dot -Tpng -O bpf.dot' to draw the graph.
3030 */
3031 static int
3032 dot_dump(struct icode *ic, char *errbuf)
3033 {
3034 struct bpf_program f;
3035 FILE *out = stdout;
3036
3037 memset(bids, 0, sizeof bids);
3038 f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3039 if (f.bf_insns == NULL)
3040 return -1;
3041
3042 fprintf(out, "digraph BPF {\n");
3043 unMarkAll(ic);
3044 dot_dump_node(ic, ic->root, &f, out);
3045 unMarkAll(ic);
3046 dot_dump_edge(ic, ic->root, out);
3047 fprintf(out, "}\n");
3048
3049 free((char *)f.bf_insns);
3050 return 0;
3051 }
3052
3053 static int
3054 plain_dump(struct icode *ic, char *errbuf)
3055 {
3056 struct bpf_program f;
3057
3058 memset(bids, 0, sizeof bids);
3059 f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3060 if (f.bf_insns == NULL)
3061 return -1;
3062 bpf_dump(&f, 1);
3063 putchar('\n');
3064 free((char *)f.bf_insns);
3065 return 0;
3066 }
3067
3068 static void
3069 opt_dump(opt_state_t *opt_state, struct icode *ic)
3070 {
3071 int status;
3072 char errbuf[PCAP_ERRBUF_SIZE];
3073
3074 /*
3075 * If the CFG, in DOT format, is requested, output it rather than
3076 * the code that would be generated from that graph.
3077 */
3078 if (pcap_print_dot_graph)
3079 status = dot_dump(ic, errbuf);
3080 else
3081 status = plain_dump(ic, errbuf);
3082 if (status == -1)
3083 opt_error(opt_state, "opt_dump: icode_to_fcode failed: %s", errbuf);
3084 }
3085 #endif