/*
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Optimization module for BPF code intermediate representation.
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <pcap-types.h>

#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <setjmp.h>
#include <string.h>
#include <limits.h> /* for SIZE_MAX */
#include <errno.h>

#include "pcap-int.h"

#include "gencode.h"
#include "optimize.h"
#include "diag-control.h"

#ifdef HAVE_OS_PROTO_H
#include "os-proto.h"
#endif

#ifdef BDEBUG
/*
 * The internal "debug printout" flag for the filter expression optimizer.
 * The code to print that stuff is present only if BDEBUG is defined, so
 * the flag, and the routine to set it, are defined only if BDEBUG is
 * defined.
 */
static int pcap_optimizer_debug;

/*
 * Routine to set that flag.
 *
 * This is intended for libpcap developers, not for general use.
 * If you want to set these in a program, you'll have to declare this
 * routine yourself, with the appropriate DLL import attribute on Windows;
 * it's not declared in any header file, and won't be declared in any
 * header file provided by libpcap.
 */
PCAP_API void pcap_set_optimizer_debug(int value);

PCAP_API_DEF void
pcap_set_optimizer_debug(int value)
{
	pcap_optimizer_debug = value;
}

/*
 * The internal "print dot graph" flag for the filter expression optimizer.
 * The code to print that stuff is present only if BDEBUG is defined, so
 * the flag, and the routine to set it, are defined only if BDEBUG is
 * defined.
 */
static int pcap_print_dot_graph;

/*
 * Routine to set that flag.
 *
 * This is intended for libpcap developers, not for general use.
 * If you want to set these in a program, you'll have to declare this
 * routine yourself, with the appropriate DLL import attribute on Windows;
 * it's not declared in any header file, and won't be declared in any
 * header file provided by libpcap.
 */
PCAP_API void pcap_set_print_dot_graph(int value);

PCAP_API_DEF void
pcap_set_print_dot_graph(int value)
{
	pcap_print_dot_graph = value;
}

#endif

/*
 * lowest_set_bit().
 *
 * Takes a 32-bit integer as an argument.
 *
 * If handed a non-zero value, returns the index of the lowest set bit,
 * counting upwards from zero.
 *
 * If handed zero, the results are platform- and compiler-dependent.
 * Keep it out of the light, don't give it any water, don't feed it
 * after midnight, and don't pass zero to it.
 *
 * This is the same as the count of trailing zeroes in the word.
 */
#if PCAP_IS_AT_LEAST_GNUC_VERSION(3,4)
/*
 * GCC 3.4 and later; we have __builtin_ctz().
 */
#define lowest_set_bit(mask) ((u_int)__builtin_ctz(mask))
#elif defined(_MSC_VER)
/*
 * Visual Studio; we support only 2005 and later, so use
 * _BitScanForward().
 */
#include <intrin.h>

#ifndef __clang__
#pragma intrinsic(_BitScanForward)
#endif

static __forceinline u_int
lowest_set_bit(int mask)
{
	unsigned long bit;

	/*
	 * Don't sign-extend mask if long is longer than int.
	 * (It's currently not, in MSVC, even on 64-bit platforms, but....)
	 */
	if (_BitScanForward(&bit, (unsigned int)mask) == 0)
		abort();	/* mask is zero */
	return (u_int)bit;
}
#else
/*
 * None of the above.
 * Use a perfect-hash-function-based function.
 */
static u_int
lowest_set_bit(int mask)
{
	unsigned int v = (unsigned int)mask;

	static const u_int MultiplyDeBruijnBitPosition[32] = {
		0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
		31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
	};

	/*
	 * We strip off all but the lowermost set bit (v & -v),
	 * and perform a minimal perfect hash on it to look up the
	 * number of low-order zero bits in a table.
	 *
	 * See:
	 *
	 *	http://7ooo.mooo.com/text/ComputingTrailingZerosHOWTO.pdf
	 *
	 *	http://supertech.csail.mit.edu/papers/debruijn.pdf
	 */
	return (MultiplyDeBruijnBitPosition[((v & -v) * 0x077CB531U) >> 27]);
}
#endif
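
/*
 * Worked example for the de Bruijn variant above (illustrative only,
 * not part of the build): for mask = 0x18 (binary 11000), v & -v
 * isolates the lowest set bit, 0x8.  Then 0x8 * 0x077CB531U is
 * 0x3BE5A988, and shifting that right by 27 leaves the top five bits,
 * 7.  MultiplyDeBruijnBitPosition[7] is 3, the index of the lowest set
 * bit of 0x18.  Each of the 32 possible one-bit values lands on a
 * distinct table slot, which is what makes the hash perfect.
 */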

/*
 * Represents a deleted instruction.
 */
#define NOP -1

/*
 * Register numbers for use-def values.
 * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
 * location.  A_ATOM is the accumulator and X_ATOM is the index
 * register.
 */
#define A_ATOM BPF_MEMWORDS
#define X_ATOM (BPF_MEMWORDS+1)

/*
 * This define is used to represent *both* the accumulator and
 * x register in use-def computations.
 * Currently, the use-def code assumes only one definition per instruction.
 */
#define AX_ATOM N_ATOMS

/*
 * These data structures are used in a Cocke and Schwartz style
 * value numbering scheme.  Since the flowgraph is acyclic,
 * exit values can be propagated from a node's predecessors
 * provided it is uniquely defined.
 */
struct valnode {
	int code;
	bpf_u_int32 v0, v1;
	int val;		/* the value number */
	struct valnode *next;
};

/* Integer constants mapped with the load immediate opcode. */
#define K(i) F(opt_state, BPF_LD|BPF_IMM|BPF_W, i, 0U)

struct vmapinfo {
	int is_const;
	bpf_u_int32 const_val;
};

typedef struct {
	/*
	 * Place to longjmp to on an error.
	 */
	jmp_buf top_ctx;

	/*
	 * The buffer into which to put the error message.
	 */
	char *errbuf;

	/*
	 * A flag to indicate that further optimization is needed.
	 * Iterative passes are continued until a given pass yields no
	 * code simplification or branch movement.
	 */
	int done;

	/*
	 * XXX - detect loops that do nothing but repeated AND/OR pullups
	 * and edge moves.
	 * If 100 passes in a row do nothing but that, treat that as a
	 * sign that we're in a cycle in which each pass just shuffles
	 * the code around and we eventually get back to the original
	 * configuration.
	 *
	 * XXX - we need a non-heuristic way of detecting, or preventing,
	 * such a cycle.
	 */
	int non_branch_movement_performed;

	u_int n_blocks;		/* number of blocks in the CFG; guaranteed to be > 0, as it's a RET instruction at a minimum */
	struct block **blocks;
	u_int n_edges;		/* twice n_blocks, so guaranteed to be > 0 */
	struct edge **edges;

	/*
	 * A bit vector set representation of the dominators.
	 * We round up the set size to the next power of two.
	 */
	u_int nodewords;	/* number of 32-bit words for a bit vector of "number of nodes" bits; guaranteed to be > 0 */
	u_int edgewords;	/* number of 32-bit words for a bit vector of "number of edges" bits; guaranteed to be > 0 */
	struct block **levels;
	bpf_u_int32 *space;

#define BITS_PER_WORD (8*sizeof(bpf_u_int32))
/*
 * True if a is in uset {p}
 */
#define SET_MEMBER(p, a) \
((p)[(unsigned)(a) / BITS_PER_WORD] & ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD)))

/*
 * Add 'a' to uset p.
 */
#define SET_INSERT(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] |= ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * Delete 'a' from uset p.
 */
#define SET_DELETE(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] &= ~((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * a := a intersect b
 * n must be guaranteed to be > 0
 */
#define SET_INTERSECT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register u_int _n = n;\
	do *_x++ &= *_y++; while (--_n != 0);\
}

/*
 * a := a - b
 * n must be guaranteed to be > 0
 */
#define SET_SUBTRACT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register u_int _n = n;\
	do *_x++ &=~ *_y++; while (--_n != 0);\
}

/*
 * a := a union b
 * n must be guaranteed to be > 0
 */
#define SET_UNION(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register u_int _n = n;\
	do *_x++ |= *_y++; while (--_n != 0);\
}
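
/*
 * For example, with BITS_PER_WORD == 32, element 37 of a uset lives in
 * word 37 / 32 == 1 at bit 37 % 32 == 5, so SET_MEMBER(p, 37) tests
 * p[1] & (1U << 5), and SET_INSERT(p, 37) sets that same bit.
 */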

	uset all_dom_sets;
	uset all_closure_sets;
	uset all_edge_sets;

#define MODULUS 213
	struct valnode *hashtbl[MODULUS];
	bpf_u_int32 curval;
	bpf_u_int32 maxval;

	struct vmapinfo *vmap;
	struct valnode *vnode_base;
	struct valnode *next_vnode;
} opt_state_t;

typedef struct {
	/*
	 * Place to longjmp to on an error.
	 */
	jmp_buf top_ctx;

	/*
	 * The buffer into which to put the error message.
	 */
	char *errbuf;

	/*
	 * Some pointers used to convert the basic block form of the code
	 * into the array form that BPF requires.  'fstart' will point to
	 * the malloc'd array while 'ftail' is used during the recursive
	 * traversal.
	 */
	struct bpf_insn *fstart;
	struct bpf_insn *ftail;
} conv_state_t;

static void opt_init(opt_state_t *, struct icode *);
static void opt_cleanup(opt_state_t *);
static void PCAP_NORETURN opt_error(opt_state_t *, const char *, ...)
    PCAP_PRINTFLIKE(2, 3);

static void intern_blocks(opt_state_t *, struct icode *);

static void find_inedges(opt_state_t *, struct block *);
#ifdef BDEBUG
static void opt_dump(opt_state_t *, struct icode *);
#endif

#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif

static void
find_levels_r(opt_state_t *opt_state, struct icode *ic, struct block *b)
{
	int level;

	if (isMarked(ic, b))
		return;

	Mark(ic, b);
	b->link = 0;

	if (JT(b)) {
		find_levels_r(opt_state, ic, JT(b));
		find_levels_r(opt_state, ic, JF(b));
		level = MAX(JT(b)->level, JF(b)->level) + 1;
	} else
		level = 0;
	b->level = level;
	b->link = opt_state->levels[level];
	opt_state->levels[level] = b;
}

/*
 * Level graph.  The levels go from 0 at the leaves to
 * N_LEVELS at the root.  The opt_state->levels[] array points to the
 * first node of the level list, whose elements are linked
 * with the 'link' field of the struct block.
 */
static void
find_levels(opt_state_t *opt_state, struct icode *ic)
{
	memset((char *)opt_state->levels, 0, opt_state->n_blocks * sizeof(*opt_state->levels));
	unMarkAll(ic);
	find_levels_r(opt_state, ic, ic->root);
}

/*
 * Find dominator relationships.
 * Assumes graph has been leveled.
 */
static void
find_dom(opt_state_t *opt_state, struct block *root)
{
	u_int i;
	int level;
	struct block *b;
	bpf_u_int32 *x;

	/*
	 * Initialize sets to contain all nodes.
	 */
	x = opt_state->all_dom_sets;
	/*
	 * In opt_init(), we've made sure the product doesn't overflow.
	 */
	i = opt_state->n_blocks * opt_state->nodewords;
	while (i != 0) {
		--i;
		*x++ = 0xFFFFFFFFU;
	}
	/* Root starts off empty. */
	for (i = opt_state->nodewords; i != 0;) {
		--i;
		root->dom[i] = 0;
	}

	/* root->level is the highest level number found. */
	for (level = root->level; level >= 0; --level) {
		for (b = opt_state->levels[level]; b; b = b->link) {
			SET_INSERT(b->dom, b->id);
			if (JT(b) == 0)
				continue;
			SET_INTERSECT(JT(b)->dom, b->dom, opt_state->nodewords);
			SET_INTERSECT(JF(b)->dom, b->dom, opt_state->nodewords);
		}
	}
}
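
/*
 * Illustrative example: in a diamond CFG where root branches to b1 and
 * b2, and both branch to b3, b3's set is intersected with {root, b1}
 * while b1 is processed and with {root, b2} while b2 is processed,
 * leaving {root}; b3 then inserts itself when its own level is
 * reached, so dom(b3) = {root, b3}.  Neither b1 nor b2 dominates b3.
 */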

static void
propedom(opt_state_t *opt_state, struct edge *ep)
{
	SET_INSERT(ep->edom, ep->id);
	if (ep->succ) {
		SET_INTERSECT(ep->succ->et.edom, ep->edom, opt_state->edgewords);
		SET_INTERSECT(ep->succ->ef.edom, ep->edom, opt_state->edgewords);
	}
}

/*
 * Compute edge dominators.
 * Assumes graph has been leveled and predecessors established.
 */
static void
find_edom(opt_state_t *opt_state, struct block *root)
{
	u_int i;
	uset x;
	int level;
	struct block *b;

	x = opt_state->all_edge_sets;
	/*
	 * In opt_init(), we've made sure the product doesn't overflow.
	 */
	for (i = opt_state->n_edges * opt_state->edgewords; i != 0; ) {
		--i;
		x[i] = 0xFFFFFFFFU;
	}

	/* root->level is the highest level number found. */
	memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
	memset(root->ef.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
	for (level = root->level; level >= 0; --level) {
		for (b = opt_state->levels[level]; b != 0; b = b->link) {
			propedom(opt_state, &b->et);
			propedom(opt_state, &b->ef);
		}
	}
}

/*
 * Find the backwards transitive closure of the flow graph.  These sets
 * are backwards in the sense that we find the set of nodes that reach
 * a given node, not the set of nodes that can be reached by a node.
 *
 * Assumes graph has been leveled.
 */
static void
find_closure(opt_state_t *opt_state, struct block *root)
{
	int level;
	struct block *b;

	/*
	 * Initialize sets to contain no nodes.
	 */
	memset((char *)opt_state->all_closure_sets, 0,
	    opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->all_closure_sets));

	/* root->level is the highest level number found. */
	for (level = root->level; level >= 0; --level) {
		for (b = opt_state->levels[level]; b; b = b->link) {
			SET_INSERT(b->closure, b->id);
			if (JT(b) == 0)
				continue;
			SET_UNION(JT(b)->closure, b->closure, opt_state->nodewords);
			SET_UNION(JF(b)->closure, b->closure, opt_state->nodewords);
		}
	}
}
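
/*
 * For the diamond example above, the unions accumulate instead of
 * intersecting: closure(b3) = {root, b1, b2, b3}, i.e. every node
 * that can reach b3.  find_dom() intersects ("on every path"), while
 * find_closure() unions ("on some path").
 */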

/*
 * Return the register number that is used by s.
 *
 * Returns A_ATOM if A is used, X_ATOM if X is used, AX_ATOM if both A and X
 * are used, the scratch memory location's number if a scratch memory
 * location is used (e.g., 0 for M[0]), or -1 if none of those are used.
 *
 * The implementation should probably change to an array access.
 */
static int
atomuse(struct stmt *s)
{
	register int c = s->code;

	if (c == NOP)
		return -1;

	switch (BPF_CLASS(c)) {

	case BPF_RET:
		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;

	case BPF_LD:
	case BPF_LDX:
		/*
		 * As there are fewer than 2^31 memory locations,
		 * s->k should be convertible to int without problems.
		 */
		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
			(BPF_MODE(c) == BPF_MEM) ? (int)s->k : -1;

	case BPF_ST:
		return A_ATOM;

	case BPF_STX:
		return X_ATOM;

	case BPF_JMP:
	case BPF_ALU:
		if (BPF_SRC(c) == BPF_X)
			return AX_ATOM;
		return A_ATOM;

	case BPF_MISC:
		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
	}
	abort();
	/* NOTREACHED */
}

/*
 * Return the register number that is defined by 's'.  We assume that
 * a single stmt cannot define more than one register.  If no register
 * is defined, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomdef(struct stmt *s)
{
	if (s->code == NOP)
		return -1;

	switch (BPF_CLASS(s->code)) {

	case BPF_LD:
	case BPF_ALU:
		return A_ATOM;

	case BPF_LDX:
		return X_ATOM;

	case BPF_ST:
	case BPF_STX:
		return s->k;

	case BPF_MISC:
		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
	}
	return -1;
}

/*
 * Compute the sets of registers used, defined, and killed by 'b'.
 *
 * "Used" means that a statement in 'b' uses the register before any
 * statement in 'b' defines it, i.e. it uses the value left in
 * that register by a predecessor block of this block.
 * "Defined" means that a statement in 'b' defines it.
 * "Killed" means that a statement in 'b' defines it before any
 * statement in 'b' uses it, i.e. it kills the value left in that
 * register by a predecessor block of this block.
 */
static void
compute_local_ud(struct block *b)
{
	struct slist *s;
	atomset def = 0, use = 0, killed = 0;
	int atom;

	for (s = b->stmts; s; s = s->next) {
		if (s->s.code == NOP)
			continue;
		atom = atomuse(&s->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
			else
				abort();
		}
		atom = atomdef(&s->s);
		if (atom >= 0) {
			if (!ATOMELEM(use, atom))
				killed |= ATOMMASK(atom);
			def |= ATOMMASK(atom);
		}
	}
	if (BPF_CLASS(b->s.code) == BPF_JMP) {
		/*
		 * XXX - what about RET?
		 */
		atom = atomuse(&b->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
			else
				abort();
		}
	}

	b->def = def;
	b->kill = killed;
	b->in_use = use;
}
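
/*
 * Worked example (illustrative): for a block whose statements are
 * "tax" followed by "ld [14]", the tax uses A before anything in the
 * block defines it, so A lands in in_use; it defines X before any use
 * of X, so X lands in both def and kill; the ld then defines A, so A
 * also lands in def, but not in kill, since A was used first.
 * Result: in_use = {A}, def = {A, X}, kill = {X}.
 */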

/*
 * Assume graph is already leveled.
 */
static void
find_ud(opt_state_t *opt_state, struct block *root)
{
	int i, maxlevel;
	struct block *p;

	/*
	 * root->level is the highest level number found;
	 * count down from there.
	 */
	maxlevel = root->level;
	for (i = maxlevel; i >= 0; --i)
		for (p = opt_state->levels[i]; p; p = p->link) {
			compute_local_ud(p);
			p->out_use = 0;
		}

	for (i = 1; i <= maxlevel; ++i) {
		for (p = opt_state->levels[i]; p; p = p->link) {
			p->out_use |= JT(p)->in_use | JF(p)->in_use;
			p->in_use |= p->out_use &~ p->kill;
		}
	}
}
static void
init_val(opt_state_t *opt_state)
{
	opt_state->curval = 0;
	opt_state->next_vnode = opt_state->vnode_base;
	memset((char *)opt_state->vmap, 0, opt_state->maxval * sizeof(*opt_state->vmap));
	memset((char *)opt_state->hashtbl, 0, sizeof opt_state->hashtbl);
}

/*
 * Because we really don't have an IR, this stuff is a little messy.
 *
 * This routine looks in the table of existing value numbers for a value
 * generated from an operation with the specified opcode and
 * the specified values.  If it finds it, it returns its value number,
 * otherwise it makes a new entry in the table and returns the
 * value number of that entry.
 */
static bpf_u_int32
F(opt_state_t *opt_state, int code, bpf_u_int32 v0, bpf_u_int32 v1)
{
	u_int hash;
	bpf_u_int32 val;
	struct valnode *p;

	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
	hash %= MODULUS;

	for (p = opt_state->hashtbl[hash]; p; p = p->next)
		if (p->code == code && p->v0 == v0 && p->v1 == v1)
			return p->val;

	/*
	 * Not found.  Allocate a new value, and assign it a new
	 * value number.
	 *
	 * opt_state->curval starts out as 0, which means VAL_UNKNOWN; we
	 * increment it before using it as the new value number, which
	 * means we never assign VAL_UNKNOWN.
	 *
	 * XXX - unless we overflow, but we probably won't have 2^32-1
	 * values; we treat 32 bits as effectively infinite.
	 */
	val = ++opt_state->curval;
	if (BPF_MODE(code) == BPF_IMM &&
	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
		opt_state->vmap[val].const_val = v0;
		opt_state->vmap[val].is_const = 1;
	}
	p = opt_state->next_vnode++;
	p->val = val;
	p->code = code;
	p->v0 = v0;
	p->v1 = v1;
	p->next = opt_state->hashtbl[hash];
	opt_state->hashtbl[hash] = p;

	return val;
}
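
/*
 * Example (illustrative): the first "ldh [12]" seen in a pass hashes
 * the triple (BPF_LD|BPF_ABS|BPF_H, 12, 0), finds no entry, and mints
 * a fresh value number, say 3.  A second load of the same halfword
 * later in the pass hashes to the same entry and gets 3 back, so
 * vstore() below can turn that statement into a NOP when the
 * accumulator is already known to hold value 3.
 */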

static inline void
vstore(struct stmt *s, bpf_u_int32 *valp, bpf_u_int32 newval, int alter)
{
	if (alter && newval != VAL_UNKNOWN && *valp == newval)
		s->code = NOP;
	else
		*valp = newval;
}

/*
 * Do constant-folding on binary operators.
 * (Unary operators are handled elsewhere.)
 */
static void
fold_op(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 v0, bpf_u_int32 v1)
{
	bpf_u_int32 a, b;

	a = opt_state->vmap[v0].const_val;
	b = opt_state->vmap[v1].const_val;

	switch (BPF_OP(s->code)) {
	case BPF_ADD:
		a += b;
		break;

	case BPF_SUB:
		a -= b;
		break;

	case BPF_MUL:
		a *= b;
		break;

	case BPF_DIV:
		if (b == 0)
			opt_error(opt_state, "division by zero");
		a /= b;
		break;

	case BPF_MOD:
		if (b == 0)
			opt_error(opt_state, "modulus by zero");
		a %= b;
		break;

	case BPF_AND:
		a &= b;
		break;

	case BPF_OR:
		a |= b;
		break;

	case BPF_XOR:
		a ^= b;
		break;

	case BPF_LSH:
		/*
		 * A left shift of more than the width of the type
		 * is undefined in C; we'll just treat it as shifting
		 * all the bits out.
		 *
		 * XXX - the BPF interpreter doesn't check for this,
		 * so its behavior is dependent on the behavior of
		 * the processor on which it's running.  There are
		 * processors on which it shifts all the bits out
		 * and processors on which it does no shift.
		 */
		if (b < 32)
			a <<= b;
		else
			a = 0;
		break;

	case BPF_RSH:
		/*
		 * A right shift of more than the width of the type
		 * is undefined in C; we'll just treat it as shifting
		 * all the bits out.
		 *
		 * XXX - the BPF interpreter doesn't check for this,
		 * so its behavior is dependent on the behavior of
		 * the processor on which it's running.  There are
		 * processors on which it shifts all the bits out
		 * and processors on which it does no shift.
		 */
		if (b < 32)
			a >>= b;
		else
			a = 0;
		break;

	default:
		abort();
	}
	s->k = a;
	s->code = BPF_LD|BPF_IMM;
	opt_state->done = 0;
	/*
	 * XXX - optimizer loop detection.
	 */
	opt_state->non_branch_movement_performed = 1;
}
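
/*
 * Example (illustrative): if the accumulator is known to hold 0x8100
 * and the statement is "and #0xfff", both operands are constants, so
 * the statement is rewritten in place as "ld #0x100"; later passes can
 * then fold any branch that compares against that now-constant value.
 */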

static inline struct slist *
this_op(struct slist *s)
{
	while (s != 0 && s->s.code == NOP)
		s = s->next;
	return s;
}

static void
opt_not(struct block *b)
{
	struct block *tmp = JT(b);

	JT(b) = JF(b);
	JF(b) = tmp;
}

static void
opt_peep(opt_state_t *opt_state, struct block *b)
{
	struct slist *s;
	struct slist *next, *last;
	bpf_u_int32 val;

	s = b->stmts;
	if (s == 0)
		return;

	last = s;
	for (/*empty*/; /*empty*/; s = next) {
		/*
		 * Skip over nops.
		 */
		s = this_op(s);
		if (s == 0)
			break;	/* nothing left in the block */

		/*
		 * Find the next real instruction after that one
		 * (skipping nops).
		 */
		next = this_op(s->next);
		if (next == 0)
			break;	/* no next instruction */
		last = next;

		/*
		 * st  M[k]	-->	st  M[k]
		 * ldx M[k]		tax
		 */
		if (s->s.code == BPF_ST &&
		    next->s.code == (BPF_LDX|BPF_MEM) &&
		    s->s.k == next->s.k) {
			opt_state->done = 0;
			next->s.code = BPF_MISC|BPF_TAX;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		/*
		 * ld  #k	-->	ldx #k
		 * tax			txa
		 */
		if (s->s.code == (BPF_LD|BPF_IMM) &&
		    next->s.code == (BPF_MISC|BPF_TAX)) {
			s->s.code = BPF_LDX|BPF_IMM;
			next->s.code = BPF_MISC|BPF_TXA;
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		/*
		 * This is an ugly special case, but it happens
		 * when you say tcp[k] or udp[k] where k is a constant.
		 */
		if (s->s.code == (BPF_LD|BPF_IMM)) {
			struct slist *add, *tax, *ild;

			/*
			 * Check that X isn't used on exit from this
			 * block (which the optimizer might cause).
			 * We know the code generator won't generate
			 * any local dependencies.
			 */
			if (ATOMELEM(b->out_use, X_ATOM))
				continue;

			/*
			 * Check that the instruction following the ldi
			 * is an addx, or it's an ldxms with an addx
			 * following it (with 0 or more nops between the
			 * ldxms and addx).
			 */
			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
				add = next;
			else
				add = this_op(next->next);
			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
				continue;

			/*
			 * Check that a tax follows that (with 0 or more
			 * nops between them).
			 */
			tax = this_op(add->next);
			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
				continue;

			/*
			 * Check that an ild follows that (with 0 or more
			 * nops between them).
			 */
			ild = this_op(tax->next);
			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
			    BPF_MODE(ild->s.code) != BPF_IND)
				continue;
			/*
			 * We want to turn this sequence:
			 *
			 * (004) ldi     #0x2		{s}
			 * (005) ldxms   [14]		{next}  -- optional
			 * (006) addx			{add}
			 * (007) tax			{tax}
			 * (008) ild     [x+0]		{ild}
			 *
			 * into this sequence:
			 *
			 * (004) nop
			 * (005) ldxms   [14]
			 * (006) nop
			 * (007) nop
			 * (008) ild     [x+2]
			 *
			 * XXX We need to check that X is not
			 * subsequently used, because we want to change
			 * what'll be in it after this sequence.
			 *
			 * We know we can eliminate the accumulator
			 * modifications earlier in the sequence since
			 * it is defined by the last stmt of this sequence
			 * (i.e., the last statement of the sequence loads
			 * a value into the accumulator, so we can eliminate
			 * earlier operations on the accumulator).
			 */
			ild->s.k += s->s.k;
			s->s.code = NOP;
			add->s.code = NOP;
			tax->s.code = NOP;
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
	}
	/*
	 * If the comparison at the end of a block is an equality
	 * comparison against a constant, and nobody uses the value
	 * we leave in the A register at the end of a block, and
	 * the operation preceding the comparison is an arithmetic
	 * operation, we can sometimes optimize it away.
	 */
	if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
		/*
		 * We can optimize away certain subtractions of the
		 * X register.
		 */
		if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
			val = b->val[X_ATOM];
			if (opt_state->vmap[val].is_const) {
				/*
				 * If we have a subtract to do a comparison,
				 * and the X register is a known constant,
				 * we can merge this value into the
				 * comparison:
				 *
				 * sub x	->	nop
				 * jeq #y		jeq #(x+y)
				 */
				b->s.k += opt_state->vmap[val].const_val;
				last->s.code = NOP;
				opt_state->done = 0;
				/*
				 * XXX - optimizer loop detection.
				 */
				opt_state->non_branch_movement_performed = 1;
			} else if (b->s.k == 0) {
				/*
				 * If the X register isn't a constant,
				 * and the comparison in the test is
				 * against 0, we can compare with the
				 * X register, instead:
				 *
				 * sub x	->	nop
				 * jeq #0		jeq x
				 */
				last->s.code = NOP;
				b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
				opt_state->done = 0;
				/*
				 * XXX - optimizer loop detection.
				 */
				opt_state->non_branch_movement_performed = 1;
			}
		}
		/*
		 * Likewise, a constant subtract can be simplified:
		 *
		 * sub #x	->	nop
		 * jeq #y	->	jeq #(x+y)
		 */
		else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
			last->s.code = NOP;
			b->s.k += last->s.k;
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		/*
		 * And, similarly, a constant AND can be simplified
		 * if we're testing against 0, i.e.:
		 *
		 * and #k	nop
		 * jeq #0  ->	jset #k
		 */
		else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
		    b->s.k == 0) {
			b->s.k = last->s.k;
			b->s.code = BPF_JMP|BPF_K|BPF_JSET;
			last->s.code = NOP;
			opt_state->done = 0;
			opt_not(b);
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
	}
	/*
	 * jset #0        ->   never
	 * jset #ffffffff ->   always
	 */
	if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
		if (b->s.k == 0)
			JT(b) = JF(b);
		if (b->s.k == 0xffffffffU)
			JF(b) = JT(b);
	}
	/*
	 * If we're comparing against the index register, and the index
	 * register is a known constant, we can just compare against that
	 * constant.
	 */
	val = b->val[X_ATOM];
	if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
		bpf_u_int32 v = opt_state->vmap[val].const_val;
		b->s.code &= ~BPF_X;
		b->s.k = v;
	}
	/*
	 * If the accumulator is a known constant, we can compute the
	 * comparison result.
	 */
	val = b->val[A_ATOM];
	if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
		bpf_u_int32 v = opt_state->vmap[val].const_val;
		switch (BPF_OP(b->s.code)) {

		case BPF_JEQ:
			v = v == b->s.k;
			break;

		case BPF_JGT:
			v = v > b->s.k;
			break;

		case BPF_JGE:
			v = v >= b->s.k;
			break;

		case BPF_JSET:
			v &= b->s.k;
			break;

		default:
			abort();
		}
		if (JF(b) != JT(b)) {
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		if (v)
			JF(b) = JT(b);
		else
			JT(b) = JF(b);
	}
}

/*
 * Compute the symbolic value of the expression of 's', and update
 * anything it defines in the value table 'val'.  If 'alter' is true,
 * do various optimizations.  This code would be cleaner if symbolic
 * evaluation and code transformations weren't folded together.
 */
static void
opt_stmt(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 val[], int alter)
{
	int op;
	bpf_u_int32 v;

	switch (s->code) {

	case BPF_LD|BPF_ABS|BPF_W:
	case BPF_LD|BPF_ABS|BPF_H:
	case BPF_LD|BPF_ABS|BPF_B:
		v = F(opt_state, s->code, s->k, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IND|BPF_W:
	case BPF_LD|BPF_IND|BPF_H:
	case BPF_LD|BPF_IND|BPF_B:
		v = val[X_ATOM];
		if (alter && opt_state->vmap[v].is_const) {
			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
			s->k += opt_state->vmap[v].const_val;
			v = F(opt_state, s->code, s->k, 0L);
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
		else
			v = F(opt_state, s->code, s->k, v);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_LEN:
		v = F(opt_state, s->code, 0L, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_MSH|BPF_B:
		v = F(opt_state, s->code, s->k, 0L);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ALU|BPF_NEG:
		if (alter && opt_state->vmap[val[A_ATOM]].is_const) {
			s->code = BPF_LD|BPF_IMM;
			/*
			 * Do this negation as unsigned arithmetic; that's
			 * what modern BPF engines do, and it guarantees
			 * that all possible values can be negated.  (Yeah,
			 * negating 0x80000000, the minimum signed 32-bit
			 * two's-complement value, results in 0x80000000,
			 * so it's still negative, but we *should* be doing
			 * all unsigned arithmetic here, to match what
			 * modern BPF engines do.)
			 *
			 * Express it as 0U - (unsigned value) so that we
			 * don't get compiler warnings about negating an
			 * unsigned value and don't get UBSan warnings
			 * about the result of negating 0x80000000 being
			 * undefined.
			 */
			s->k = 0U - opt_state->vmap[val[A_ATOM]].const_val;
			val[A_ATOM] = K(s->k);
		}
		else
			val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], 0L);
		break;

	case BPF_ALU|BPF_ADD|BPF_K:
	case BPF_ALU|BPF_SUB|BPF_K:
	case BPF_ALU|BPF_MUL|BPF_K:
	case BPF_ALU|BPF_DIV|BPF_K:
	case BPF_ALU|BPF_MOD|BPF_K:
	case BPF_ALU|BPF_AND|BPF_K:
	case BPF_ALU|BPF_OR|BPF_K:
	case BPF_ALU|BPF_XOR|BPF_K:
	case BPF_ALU|BPF_LSH|BPF_K:
	case BPF_ALU|BPF_RSH|BPF_K:
		op = BPF_OP(s->code);
		if (alter) {
			if (s->k == 0) {
				/*
				 * Optimize operations where the constant
				 * is zero.
				 *
				 * Don't optimize away "sub #0"
				 * as it may be needed later to
				 * fixup the generated math code.
				 *
				 * Fail if we're dividing by zero or taking
				 * a modulus by zero.
				 */
				if (op == BPF_ADD ||
				    op == BPF_LSH || op == BPF_RSH ||
				    op == BPF_OR || op == BPF_XOR) {
					s->code = NOP;
					break;
				}
				if (op == BPF_MUL || op == BPF_AND) {
					s->code = BPF_LD|BPF_IMM;
					val[A_ATOM] = K(s->k);
					break;
				}
				if (op == BPF_DIV)
					opt_error(opt_state,
					    "division by zero");
				if (op == BPF_MOD)
					opt_error(opt_state,
					    "modulus by zero");
			}
			if (opt_state->vmap[val[A_ATOM]].is_const) {
				fold_op(opt_state, s, val[A_ATOM], K(s->k));
				val[A_ATOM] = K(s->k);
				break;
			}
		}
		val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], K(s->k));
		break;

	case BPF_ALU|BPF_ADD|BPF_X:
	case BPF_ALU|BPF_SUB|BPF_X:
	case BPF_ALU|BPF_MUL|BPF_X:
	case BPF_ALU|BPF_DIV|BPF_X:
	case BPF_ALU|BPF_MOD|BPF_X:
	case BPF_ALU|BPF_AND|BPF_X:
	case BPF_ALU|BPF_OR|BPF_X:
	case BPF_ALU|BPF_XOR|BPF_X:
	case BPF_ALU|BPF_LSH|BPF_X:
	case BPF_ALU|BPF_RSH|BPF_X:
		op = BPF_OP(s->code);
		if (alter && opt_state->vmap[val[X_ATOM]].is_const) {
			if (opt_state->vmap[val[A_ATOM]].is_const) {
				fold_op(opt_state, s, val[A_ATOM], val[X_ATOM]);
				val[A_ATOM] = K(s->k);
			}
			else {
				s->code = BPF_ALU|BPF_K|op;
				s->k = opt_state->vmap[val[X_ATOM]].const_val;
				if ((op == BPF_LSH || op == BPF_RSH) &&
				    s->k > 31)
					opt_error(opt_state,
					    "shift by more than 31 bits");
				/*
				 * XXX - optimizer loop detection.
				 */
				opt_state->non_branch_movement_performed = 1;
				opt_state->done = 0;
				val[A_ATOM] =
					F(opt_state, s->code, val[A_ATOM], K(s->k));
			}
			break;
		}
		/*
		 * Check if we're doing something to an accumulator
		 * that is 0, and simplify.  This may not seem like
		 * much of a simplification but it could open up further
		 * optimizations.
		 * XXX We could also check for mul by 1, etc.
		 */
		if (alter && opt_state->vmap[val[A_ATOM]].is_const
		    && opt_state->vmap[val[A_ATOM]].const_val == 0) {
			if (op == BPF_ADD || op == BPF_OR || op == BPF_XOR) {
				s->code = BPF_MISC|BPF_TXA;
				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
				break;
			}
			else if (op == BPF_MUL || op == BPF_DIV || op == BPF_MOD ||
				 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
				s->code = BPF_LD|BPF_IMM;
				s->k = 0;
				vstore(s, &val[A_ATOM], K(s->k), alter);
				break;
			}
			else if (op == BPF_NEG) {
				s->code = NOP;
				break;
			}
		}
		val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], val[X_ATOM]);
		break;

	case BPF_MISC|BPF_TXA:
		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
		break;

	case BPF_LD|BPF_MEM:
		v = val[s->k];
		if (alter && opt_state->vmap[v].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = opt_state->vmap[v].const_val;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_MISC|BPF_TAX:
		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
		break;

	case BPF_LDX|BPF_MEM:
		v = val[s->k];
		if (alter && opt_state->vmap[v].is_const) {
			s->code = BPF_LDX|BPF_IMM;
			s->k = opt_state->vmap[v].const_val;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ST:
		vstore(s, &val[s->k], val[A_ATOM], alter);
		break;

	case BPF_STX:
		vstore(s, &val[s->k], val[X_ATOM], alter);
		break;
	}
}

static void
deadstmt(opt_state_t *opt_state, register struct stmt *s, register struct stmt *last[])
{
	register int atom;

	atom = atomuse(s);
	if (atom >= 0) {
		if (atom == AX_ATOM) {
			last[X_ATOM] = 0;
			last[A_ATOM] = 0;
		}
		else
			last[atom] = 0;
	}
	atom = atomdef(s);
	if (atom >= 0) {
		if (last[atom]) {
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
			last[atom]->code = NOP;
		}
		last[atom] = s;
	}
}

static void
opt_deadstores(opt_state_t *opt_state, register struct block *b)
{
	register struct slist *s;
	register int atom;
	struct stmt *last[N_ATOMS];

	memset((char *)last, 0, sizeof last);

	for (s = b->stmts; s != 0; s = s->next)
		deadstmt(opt_state, &s->s, last);
	deadstmt(opt_state, &b->s, last);

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
			last[atom]->code = NOP;
			/*
			 * The store was removed as it's dead,
			 * so the atom it stored into now has
			 * an unknown value.
			 */
			vstore(0, &b->val[atom], VAL_UNKNOWN, 0);
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
}

static void
opt_blk(opt_state_t *opt_state, struct block *b, int do_stmts)
{
	struct slist *s;
	struct edge *p;
	int i;
	bpf_u_int32 aval, xval;

#if 0
	for (s = b->stmts; s && s->next; s = s->next)
		if (BPF_CLASS(s->s.code) == BPF_JMP) {
			do_stmts = 0;
			break;
		}
#endif

	/*
	 * Initialize the atom values.
	 */
	p = b->in_edges;
	if (p == 0) {
		/*
		 * We have no predecessors, so everything is undefined
		 * upon entry to this block.
		 */
		memset((char *)b->val, 0, sizeof(b->val));
	} else {
		/*
		 * Inherit values from our predecessors.
		 *
		 * First, get the values from the predecessor along the
		 * first edge leading to this node.
		 */
		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
		/*
		 * Now look at all the other nodes leading to this node.
		 * If, for the predecessor along that edge, a register
		 * has a different value from the one we have (i.e.,
		 * control paths are merging, and the merging paths
		 * assign different values to that register), give the
		 * register the undefined value of 0.
		 */
		while ((p = p->next) != NULL) {
			for (i = 0; i < N_ATOMS; ++i)
				if (b->val[i] != p->pred->val[i])
					b->val[i] = 0;
		}
	}
	aval = b->val[A_ATOM];
	xval = b->val[X_ATOM];
	for (s = b->stmts; s; s = s->next)
		opt_stmt(opt_state, &s->s, b->val, do_stmts);

	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator or index register with a
	 * value that is already there, or if this block is a return,
	 * eliminate all the statements.
	 *
	 * XXX - what if it does a store?  Presumably that falls under
	 * the heading of "if we don't use anything from this block",
	 * i.e., if we use any memory location set to a different
	 * value by this block, then we use something from this block.
	 *
	 * XXX - why does it matter whether we use anything from this
	 * block?  If the accumulator or index register doesn't change
	 * its value, isn't that OK even if we use that value?
	 *
	 * XXX - if we load the accumulator with a different value,
	 * and the block ends with a conditional branch, we obviously
	 * can't eliminate it, as the branch depends on that value.
	 * For the index register, the conditional branch only depends
	 * on the index register value if the test is against the index
	 * register value rather than a constant; if nothing uses the
	 * value we put into the index register, and we're not testing
	 * against the index register's value, and there aren't any
	 * other problems that would keep us from eliminating this
	 * block, can we eliminate it?
	 */
	if (do_stmts &&
	    ((b->out_use == 0 &&
	      aval != VAL_UNKNOWN && b->val[A_ATOM] == aval &&
	      xval != VAL_UNKNOWN && b->val[X_ATOM] == xval) ||
	     BPF_CLASS(b->s.code) == BPF_RET)) {
		if (b->stmts != 0) {
			b->stmts = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
	} else {
		opt_peep(opt_state, b);
		opt_deadstores(opt_state, b);
	}
	/*
	 * Set up values for branch optimizer.
	 */
	if (BPF_SRC(b->s.code) == BPF_K)
		b->oval = K(b->s.k);
	else
		b->oval = b->val[X_ATOM];
	b->et.code = b->s.code;
	b->ef.code = -b->s.code;
}

/*
 * Return true if any register that is used on exit from 'succ' has
 * an exit value that is different from the corresponding exit value
 * from 'b'.
 */
static int
use_conflict(struct block *b, struct block *succ)
{
	int atom;
	atomset use = succ->out_use;

	if (use == 0)
		return 0;

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (ATOMELEM(use, atom))
			if (b->val[atom] != succ->val[atom])
				return 1;
	return 0;
}

/*
 * Given a block that is the successor of an edge, and an edge that
 * dominates that edge, return either a pointer to a child of that
 * block (a block to which that block jumps) if that block is a
 * candidate to replace the successor of the latter edge or NULL
 * if neither of the children of the first block are candidates.
 */
static struct block *
fold_edge(struct block *child, struct edge *ep)
{
	int sense;
	bpf_u_int32 aval0, aval1, oval0, oval1;
	int code = ep->code;

	if (code < 0) {
		/*
		 * This edge is a "branch if false" edge.
		 */
		code = -code;
		sense = 0;
	} else {
		/*
		 * This edge is a "branch if true" edge.
		 */
		sense = 1;
	}

	/*
	 * If the opcode for the branch at the end of the block we
	 * were handed isn't the same as the opcode for the branch
	 * to which the edge we were handed corresponds, the tests
	 * for those branches aren't testing the same conditions,
	 * so the blocks to which the first block branches aren't
	 * candidates to replace the successor of the edge.
	 */
	if (child->s.code != code)
		return 0;

	aval0 = child->val[A_ATOM];
	oval0 = child->oval;
	aval1 = ep->pred->val[A_ATOM];
	oval1 = ep->pred->oval;

	/*
	 * If the A register value on exit from the successor block
	 * isn't the same as the A register value on exit from the
	 * predecessor of the edge, the blocks to which the first
	 * block branches aren't candidates to replace the successor
	 * of the edge.
	 */
	if (aval0 != aval1)
		return 0;

	if (oval0 == oval1)
		/*
		 * The operands of the branch instructions are
		 * identical, so the branches are testing the
		 * same condition, and the result is true if a true
		 * branch was taken to get here, otherwise false.
		 */
		return sense ? JT(child) : JF(child);

	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
		/*
		 * At this point, we only know the comparison if we
		 * came down the true branch, and it was an equality
		 * comparison with a constant.
		 *
		 * I.e., if we came down the true branch, and the branch
		 * was an equality comparison with a constant, we know the
		 * accumulator contains that constant.  If we came down
		 * the false branch, or the comparison wasn't with a
		 * constant, we don't know what was in the accumulator.
		 *
		 * We rely on the fact that distinct constants have distinct
		 * value numbers.
		 */
		return JF(child);

	return 0;
}
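
/*
 * Example (illustrative): a filter like "tcp or udp" tests the
 * Ethernet type against 0x800 once for each alternative, so two
 * blocks can end up doing the same "jeq #0x800" on the same A value.
 * When opt_j() below walks the dominating edges of an edge into the
 * second test, fold_edge() recognizes the repeated comparison and
 * returns the child the second test must branch to, letting the edge
 * be retargeted past the redundant test.
 */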

/*
 * If we can make this edge go directly to a child of the edge's current
 * successor, do so.
 */
static void
opt_j(opt_state_t *opt_state, struct edge *ep)
{
	register u_int i, k;
	register struct block *target;

	/*
	 * Does this edge go to a block where, if the test
	 * at the end of it succeeds, it goes to a block
	 * that's a leaf node of the DAG, i.e. a return
	 * statement?
	 * If so, there's nothing to optimize.
	 */
	if (JT(ep->succ) == 0)
		return;

	/*
	 * Does this edge go to a block that goes, in turn, to
	 * the same block regardless of whether the test at the
	 * end succeeds or fails?
	 */
	if (JT(ep->succ) == JF(ep->succ)) {
		/*
		 * Common branch targets can be eliminated, provided
		 * there is no data dependency.
		 *
		 * Check whether any register used on exit from the
		 * block to which the successor of this edge goes
		 * has a value at that point that's different from
		 * the value it has on exit from the predecessor of
		 * this edge.  If not, the predecessor of this edge
		 * can just go to the block to which the successor
		 * of this edge goes, bypassing the successor of this
		 * edge, as the successor of this edge isn't doing
		 * any calculations whose results are different
		 * from what the blocks before it did and isn't
		 * doing any tests the results of which matter.
		 */
		if (!use_conflict(ep->pred, JT(ep->succ))) {
			/*
			 * No, there isn't.
			 * Make this edge go to the block to
			 * which the successor of that edge
			 * goes.
			 *
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
			ep->succ = JT(ep->succ);
		}
	}
	/*
	 * For each edge dominator that matches the successor of this
	 * edge, promote the edge successor to its grandchild.
	 *
	 * XXX We violate the set abstraction here in favor of a reasonably
	 * efficient loop.
	 */
 top:
	for (i = 0; i < opt_state->edgewords; ++i) {
		/* i'th word in the bitset of dominators */
		register bpf_u_int32 x = ep->edom[i];

		while (x != 0) {
			/* Find the next dominator in that word and mark it as found */
			k = lowest_set_bit(x);
			x &=~ ((bpf_u_int32)1 << k);
			k += i * BITS_PER_WORD;

			target = fold_edge(ep->succ, opt_state->edges[k]);
			/*
			 * We have a candidate to replace the successor
			 * of ep.
			 *
			 * Check that there is no data dependency between
			 * nodes that will be violated if we move the edge;
			 * i.e., if any register used on exit from the
			 * candidate has a value at that point different
			 * from the value it has when we exit the
			 * predecessor of that edge, there's a data
			 * dependency that will be violated.
			 */
			if (target != 0 && !use_conflict(ep->pred, target)) {
				/*
				 * It's safe to replace the successor of
				 * ep; do so, and note that we've made
				 * at least one change.
				 *
				 * XXX - this is one of the operations that
				 * happens when the optimizer gets into
				 * one of those infinite loops.
				 */
				opt_state->done = 0;
				ep->succ = target;
				if (JT(target) != 0)
					/*
					 * Start over unless we hit a leaf.
					 */
					goto top;
				return;
			}
		}
	}
}

/*
 * XXX - is this, and and_pullup(), what's described in section 6.1.2
 * "Predicate Assertion Propagation" in the BPF+ paper?
 *
 * Note that this looks at block dominators, not edge dominators.
 * Don't think so.
 *
 * "A or B" compiles into
 *
 *          A
 *       t / \ f
 *        /   B
 *       / t / \ f
 *      \   /
 *       \ /
 *        X
 *
 *
 */
static void
or_pullup(opt_state_t *opt_state, struct block *b, struct block *root)
{
	bpf_u_int32 val;
	int at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 * XXX why?
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	/*
	 * For the first edge in the list of edges coming into this block,
	 * see whether the predecessor of that edge comes here via a true
	 * branch or a false branch.
	 */
	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);	/* jt */
	else
		diffp = &JF(b->in_edges->pred);	/* jf */

	/*
	 * diffp is a pointer to a pointer to the block.
	 *
	 * Go down the false chain looking as far as you can,
	 * making sure that each jump-compare is doing the
	 * same as the original block.
	 *
	 * If you reach the bottom before you reach a
	 * different jump-compare, just exit.  There's nothing
	 * to do here.  XXX - no, this version is checking for
	 * the value leaving the block; that's from the BPF+
	 * pullup routine.
	 */
	at_top = 1;
	for (;;) {
		/*
		 * Done if that's not going anywhere XXX
		 */
		if (*diffp == 0)
			return;

		/*
		 * Done if that predecessor blah blah blah isn't
		 * going the same place we're going XXX
		 *
		 * Does the true edge of this block point to the same
		 * location as the true edge of b?
		 */
		if (JT(*diffp) != JT(b))
			return;

		/*
		 * Done if this node isn't a dominator of that
		 * node blah blah blah XXX
		 *
		 * Does b dominate diffp?
		 */
		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		/*
		 * Break out of the loop if that node's value of A
		 * isn't the value of A above XXX
		 */
		if ((*diffp)->val[A_ATOM] != val)
			break;

		/*
		 * Get the JF for that node XXX
		 * Go down the false path.
		 */
		diffp = &JF(*diffp);
		at_top = 0;
	}

	/*
	 * Now that we've found a different jump-compare in a chain
	 * below b, search further down until we find another
	 * jump-compare that looks at the original value.  This
	 * jump-compare should get pulled up.  XXX again we're
	 * comparing values not jump-compares.
	 */
	samep = &JF(*diffp);
	for (;;) {
		/*
		 * Done if that's not going anywhere XXX
		 */
		if (*samep == 0)
			return;

		/*
		 * Done if that predecessor blah blah blah isn't
		 * going the same place we're going XXX
		 */
		if (JT(*samep) != JT(b))
			return;

		/*
		 * Done if this node isn't a dominator of that
		 * node blah blah blah XXX
		 *
		 * Does b dominate samep?
		 */
		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		/*
		 * Break out of the loop if that node's value of A
		 * is the value of A above XXX
		 */
		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between dp0 and dp1.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JF(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JF(pull);
	JF(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	/*
	 * XXX - this is one of the operations that happens when the
	 * optimizer gets into one of those infinite loops.
	 */
	opt_state->done = 0;

	/*
	 * Recompute dominator sets as control flow graph has changed.
	 */
	find_dom(opt_state, root);
}
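
/*
 * Sketch of the effect (illustrative): in a chain of "or" tests that
 * all leave the same value in A, a lower test identical to b is
 * spliced out of its place (*samep = JF(pull)) and reinserted above
 * the first differing test (JF(pull) = *diffp), so tests that can be
 * folded together become adjacent for opt_j() and fold_edge().
 */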

static void
and_pullup(opt_state_t *opt_state, struct block *b, struct block *root)
{
	bpf_u_int32 val;
	int at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	for (;;) {
		if (*diffp == 0)
			return;

		if (JF(*diffp) != JF(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JT(*diffp);
		at_top = 0;
	}
	samep = &JT(*diffp);
	for (;;) {
		if (*samep == 0)
			return;

		if (JF(*samep) != JF(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between diffp and samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JT(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JT(pull);
	JT(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	/*
	 * XXX - this is one of the operations that happens when the
	 * optimizer gets into one of those infinite loops.
	 */
	opt_state->done = 0;

	/*
	 * Recompute dominator sets as control flow graph has changed.
	 */
	find_dom(opt_state, root);
}

static void
opt_blks(opt_state_t *opt_state, struct icode *ic, int do_stmts)
{
	int i, maxlevel;
	struct block *p;

	init_val(opt_state);
	maxlevel = ic->root->level;

	find_inedges(opt_state, ic->root);
	for (i = maxlevel; i >= 0; --i)
		for (p = opt_state->levels[i]; p; p = p->link)
			opt_blk(opt_state, p, do_stmts);

	if (do_stmts)
		/*
		 * No point trying to move branches; it can't possibly
		 * make a difference at this point.
		 *
		 * XXX - this might be after we detect a loop where
		 * we were just looping infinitely moving branches
		 * in such a fashion that we went through two or more
		 * versions of the machine code, eventually returning
		 * to the first version.  (We're really not doing a
		 * full loop detection, we're just testing for two
		 * passes in a row where we do nothing but
		 * move branches.)
		 */
		return;

	/*
	 * Is this what the BPF+ paper describes in sections 6.1.1,
	 * 6.1.2, and 6.1.3?
	 */
	for (i = 1; i <= maxlevel; ++i) {
		for (p = opt_state->levels[i]; p; p = p->link) {
			opt_j(opt_state, &p->et);
			opt_j(opt_state, &p->ef);
		}
	}

	find_inedges(opt_state, ic->root);
	for (i = 1; i <= maxlevel; ++i) {
		for (p = opt_state->levels[i]; p; p = p->link) {
			or_pullup(opt_state, p, ic->root);
			and_pullup(opt_state, p, ic->root);
		}
	}
}
2128
2129 static inline void
2130 link_inedge(struct edge *parent, struct block *child)
2131 {
2132 parent->next = child->in_edges;
2133 child->in_edges = parent;
2134 }
2135
2136 static void
2137 find_inedges(opt_state_t *opt_state, struct block *root)
2138 {
2139 u_int i;
2140 int level;
2141 struct block *b;
2142
2143 for (i = 0; i < opt_state->n_blocks; ++i)
2144 opt_state->blocks[i]->in_edges = 0;
2145
2146 /*
2147 * Traverse the graph, adding each edge to the predecessor
2148 * list of its successors. Skip the leaves (i.e. level 0).
2149 */
2150 for (level = root->level; level > 0; --level) {
2151 for (b = opt_state->levels[level]; b != 0; b = b->link) {
2152 link_inedge(&b->et, JT(b));
2153 link_inedge(&b->ef, JF(b));
2154 }
2155 }
2156 }
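
/*
 * Once find_inedges() has run, every predecessor of a block can be
 * walked through the intrusive list just built, as or_pullup() and
 * and_pullup() do above.  A minimal sketch, given some
 * struct block *b (visit() is a hypothetical callback):
 */
#if 0
	struct edge *ep;

	for (ep = b->in_edges; ep != 0; ep = ep->next)
		visit(ep->pred);	/* ep->pred branches to b */
#endif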
2157
2158 static void
2159 opt_root(struct block **b)
2160 {
2161 struct slist *tmp, *s;
2162
2163 s = (*b)->stmts;
2164 (*b)->stmts = 0;
2165 while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
2166 *b = JT(*b);
2167
2168 tmp = (*b)->stmts;
2169 if (tmp != 0)
2170 sappend(s, tmp);
2171 (*b)->stmts = s;
2172
2173 /*
2174 * If the root node is a return, then there is no
2175 * point executing any statements (since the bpf machine
2176 * has no side effects).
2177 */
2178 if (BPF_CLASS((*b)->s.code) == BPF_RET)
2179 (*b)->stmts = 0;
2180 }
2181
2182 static void
2183 opt_loop(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2184 {
2185
2186 #ifdef BDEBUG
2187 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2188 printf("opt_loop(root, %d) begin\n", do_stmts);
2189 opt_dump(opt_state, ic);
2190 }
2191 #endif
2192
2193 /*
2194 * XXX - optimizer loop detection.
2195 */
2196 int loop_count = 0;
2197 for (;;) {
2198 opt_state->done = 1;
2199 /*
2200 * XXX - optimizer loop detection.
2201 */
2202 opt_state->non_branch_movement_performed = 0;
2203 find_levels(opt_state, ic);
2204 find_dom(opt_state, ic->root);
2205 find_closure(opt_state, ic->root);
2206 find_ud(opt_state, ic->root);
2207 find_edom(opt_state, ic->root);
2208 opt_blks(opt_state, ic, do_stmts);
2209 #ifdef BDEBUG
2210 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2211 printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, opt_state->done);
2212 opt_dump(opt_state, ic);
2213 }
2214 #endif
2215
2216 /*
2217 * Was anything done in this optimizer pass?
2218 */
2219 if (opt_state->done) {
2220 /*
2221 * No, so we've reached a fixed point.
2222 * We're done.
2223 */
2224 break;
2225 }
2226
2227 /*
2228 * XXX - was anything done other than branch movement
2229 * in this pass?
2230 */
2231 if (opt_state->non_branch_movement_performed) {
2232 /*
2233 * Yes. Clear any loop-detection counter;
2234 * we're making some form of progress (assuming
2235 * we can't get into a cycle doing *other*
2236 * optimizations...).
2237 */
2238 loop_count = 0;
2239 } else {
2240 /*
2241 * No - increment the counter, and quit if
2242 * it's up to 100.
2243 */
2244 loop_count++;
2245 if (loop_count >= 100) {
2246 /*
2247 * We've done nothing but branch movement
2248 * for 100 passes; we're probably
2249 * in a cycle and will never reach a
2250 * fixed point.
2251 *
2252 * XXX - yes, we really need a non-
2253 * heuristic way of detecting a cycle.
2254 */
2255 opt_state->done = 1;
2256 break;
2257 }
2258 }
2259 }
2260 }
2261
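/*
 * A minimal, self-contained sketch of the fixed-point-with-fuse
 * pattern used above (run_pass() and its output flags are
 * hypothetical names, not libpcap APIs):
 */
#if 0
static void
fixed_point(void)
{
	int fuse = 0;

	for (;;) {
		int changed = 0, real_progress = 0;

		run_pass(&changed, &real_progress);
		if (!changed)
			break;		/* true fixed point reached */
		if (real_progress)
			fuse = 0;	/* something other than branch
					 * movement happened */
		else if (++fuse >= 100)
			break;		/* probably cycling; give up */
	}
}
#endif
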
2262 /*
2263  * Optimize the filter code in its DAG representation.
2264 * Return 0 on success, -1 on error.
2265 */
2266 int
2267 bpf_optimize(struct icode *ic, char *errbuf)
2268 {
2269 opt_state_t opt_state;
2270
2271 memset(&opt_state, 0, sizeof(opt_state));
2272 opt_state.errbuf = errbuf;
2273 opt_state.non_branch_movement_performed = 0;
2274 if (setjmp(opt_state.top_ctx)) {
2275 opt_cleanup(&opt_state);
2276 return -1;
2277 }
2278 opt_init(&opt_state, ic);
2279 opt_loop(&opt_state, ic, 0);
2280 opt_loop(&opt_state, ic, 1);
2281 intern_blocks(&opt_state, ic);
2282 #ifdef BDEBUG
2283 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2284 printf("after intern_blocks()\n");
2285 opt_dump(&opt_state, ic);
2286 }
2287 #endif
2288 opt_root(&ic->root);
2289 #ifdef BDEBUG
2290 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2291 printf("after opt_root()\n");
2292 opt_dump(&opt_state, ic);
2293 }
2294 #endif
2295 opt_cleanup(&opt_state);
2296 return 0;
2297 }
2298
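/*
 * Caller-side sketch (hypothetical fragment; in libpcap the optimizer
 * is invoked from pcap_compile() in gencode.c).  'ic' is an
 * already-built intermediate-code DAG, and the buffer must hold at
 * least PCAP_ERRBUF_SIZE bytes:
 */
#if 0
	char errbuf[PCAP_ERRBUF_SIZE];

	if (bpf_optimize(ic, errbuf) == -1) {
		/* errbuf now contains the optimizer's error message */
		return (-1);
	}
#endif
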
2299 static void
2300 make_marks(struct icode *ic, struct block *p)
2301 {
2302 if (!isMarked(ic, p)) {
2303 Mark(ic, p);
2304 if (BPF_CLASS(p->s.code) != BPF_RET) {
2305 make_marks(ic, JT(p));
2306 make_marks(ic, JF(p));
2307 }
2308 }
2309 }
2310
2311 /*
2312  * Mark the icode array such that isMarked(ic, p) is true
2313 * only for nodes that are alive.
2314 */
2315 static void
2316 mark_code(struct icode *ic)
2317 {
2318 ic->cur_mark += 1;
2319 make_marks(ic, ic->root);
2320 }
2321
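/*
 * The cur_mark bump above is what makes unmarking O(1): a node counts
 * as marked only if its mark matches the current generation, so
 * incrementing the generation invalidates every mark at once.  A
 * stand-alone sketch of the idiom (these names are illustrative, not
 * the macros from gencode.h):
 */
#if 0
struct node { int mark; };
static int cur_mark;

#define Mark(p)		((p)->mark = cur_mark)
#define isMarked(p)	((p)->mark == cur_mark)
#define unMarkAll()	(cur_mark += 1)	/* lazily clears every mark */
#endif
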
2322 /*
2323 * True iff the two stmt lists load the same value from the packet into
2324 * the accumulator.
2325 */
2326 static int
2327 eq_slist(struct slist *x, struct slist *y)
2328 {
2329 for (;;) {
2330 while (x && x->s.code == NOP)
2331 x = x->next;
2332 while (y && y->s.code == NOP)
2333 y = y->next;
2334 if (x == 0)
2335 return y == 0;
2336 if (y == 0)
2337 return x == 0;
2338 if (x->s.code != y->s.code || x->s.k != y->s.k)
2339 return 0;
2340 x = x->next;
2341 y = y->next;
2342 }
2343 }
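
/*
 * For example (made-up statement lists), "ldh [12]; nop; ldx #4" and
 * "ldh [12]; ldx #4" compare equal here: NOPs left behind by earlier
 * passes are skipped on both sides before each pairwise comparison.
 */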
2344
2345 static inline int
2346 eq_blk(struct block *b0, struct block *b1)
2347 {
2348 if (b0->s.code == b1->s.code &&
2349 b0->s.k == b1->s.k &&
2350 b0->et.succ == b1->et.succ &&
2351 b0->ef.succ == b1->ef.succ)
2352 return eq_slist(b0->stmts, b1->stmts);
2353 return 0;
2354 }
2355
2356 static void
2357 intern_blocks(opt_state_t *opt_state, struct icode *ic)
2358 {
2359 struct block *p;
2360 u_int i, j;
2361 int done1; /* don't shadow global */
2362 top:
2363 done1 = 1;
2364 for (i = 0; i < opt_state->n_blocks; ++i)
2365 opt_state->blocks[i]->link = 0;
2366
2367 mark_code(ic);
2368
2369 for (i = opt_state->n_blocks - 1; i != 0; ) {
2370 --i;
2371 if (!isMarked(ic, opt_state->blocks[i]))
2372 continue;
2373 for (j = i + 1; j < opt_state->n_blocks; ++j) {
2374 if (!isMarked(ic, opt_state->blocks[j]))
2375 continue;
2376 if (eq_blk(opt_state->blocks[i], opt_state->blocks[j])) {
2377 opt_state->blocks[i]->link = opt_state->blocks[j]->link ?
2378 opt_state->blocks[j]->link : opt_state->blocks[j];
2379 break;
2380 }
2381 }
2382 }
2383 for (i = 0; i < opt_state->n_blocks; ++i) {
2384 p = opt_state->blocks[i];
2385 if (JT(p) == 0)
2386 continue;
2387 if (JT(p)->link) {
2388 done1 = 0;
2389 JT(p) = JT(p)->link;
2390 }
2391 if (JF(p)->link) {
2392 done1 = 0;
2393 JF(p) = JF(p)->link;
2394 }
2395 }
2396 if (!done1)
2397 goto top;
2398 }
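
/*
 * A made-up example of the effect: an optimized filter often ends with
 * several structurally identical "ret #0" blocks reached along
 * different paths.  eq_blk() reports them equal, the loop above chains
 * them together via 'link', and every JT/JF edge is then redirected to
 * a single equivalent block; the process repeats until no edge
 * changes.  The now-unreachable copies are simply never emitted, since
 * code generation only walks blocks reachable from the root.
 */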
2399
2400 static void
2401 opt_cleanup(opt_state_t *opt_state)
2402 {
2403 free((void *)opt_state->vnode_base);
2404 free((void *)opt_state->vmap);
2405 free((void *)opt_state->edges);
2406 free((void *)opt_state->space);
2407 free((void *)opt_state->levels);
2408 free((void *)opt_state->blocks);
2409 }
2410
2411 /*
2412 * For optimizer errors.
2413 */
2414 static void PCAP_NORETURN
2415 opt_error(opt_state_t *opt_state, const char *fmt, ...)
2416 {
2417 va_list ap;
2418
2419 if (opt_state->errbuf != NULL) {
2420 va_start(ap, fmt);
2421 (void)vsnprintf(opt_state->errbuf,
2422 PCAP_ERRBUF_SIZE, fmt, ap);
2423 va_end(ap);
2424 }
2425 longjmp(opt_state->top_ctx, 1);
2426 /* NOTREACHED */
2427 #ifdef _AIX
2428 PCAP_UNREACHABLE
2429 #endif /* _AIX */
2430 }
2431
2432 /*
2433 * Return the number of stmts in 's'.
2434 */
2435 static u_int
2436 slength(struct slist *s)
2437 {
2438 u_int n = 0;
2439
2440 for (; s; s = s->next)
2441 if (s->s.code != NOP)
2442 ++n;
2443 return n;
2444 }
2445
2446 /*
2447  * Return the number of nodes reachable from 'p'.
2448 * All nodes should be initially unmarked.
2449 */
2450 static int
2451 count_blocks(struct icode *ic, struct block *p)
2452 {
2453 if (p == 0 || isMarked(ic, p))
2454 return 0;
2455 Mark(ic, p);
2456 return count_blocks(ic, JT(p)) + count_blocks(ic, JF(p)) + 1;
2457 }
2458
2459 /*
2460  * Do a depth-first search on the flow graph, numbering the
2461  * basic blocks and entering them into the 'blocks' array.
2462 */
2463 static void
2464 number_blks_r(opt_state_t *opt_state, struct icode *ic, struct block *p)
2465 {
2466 u_int n;
2467
2468 if (p == 0 || isMarked(ic, p))
2469 return;
2470
2471 Mark(ic, p);
2472 n = opt_state->n_blocks++;
2473 if (opt_state->n_blocks == 0) {
2474 /*
2475 * Overflow.
2476 */
2477 opt_error(opt_state, "filter is too complex to optimize");
2478 }
2479 p->id = n;
2480 opt_state->blocks[n] = p;
2481
2482 number_blks_r(opt_state, ic, JT(p));
2483 number_blks_r(opt_state, ic, JF(p));
2484 }
2485
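/*
 * The overflow test above exploits well-defined unsigned wraparound:
 * if n_blocks held UINT_MAX, the post-increment wraps it to zero.  A
 * stand-alone sketch of the idiom (hypothetical names):
 */
#if 0
	u_int id = counter++;		/* claim the next id */
	if (counter == 0)		/* wrapped: UINT_MAX ids in use */
		handle_overflow();
	use_id(id);
#endif
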
2486 /*
2487  * Return the number of stmts in the flowgraph reachable from 'p'.
2488 * The nodes should be unmarked before calling.
2489 *
2490 * Note that "stmts" means "instructions", and that this includes
2491 *
2492 * side-effect statements in 'p' (slength(p->stmts));
2493 *
2494 * statements in the true branch from 'p' (count_stmts(JT(p)));
2495 *
2496 * statements in the false branch from 'p' (count_stmts(JF(p)));
2497 *
2498 * the conditional jump itself (1);
2499 *
2500 * an extra long jump if the true branch requires it (p->longjt);
2501 *
2502 * an extra long jump if the false branch requires it (p->longjf).
2503 */
2504 static u_int
2505 count_stmts(struct icode *ic, struct block *p)
2506 {
2507 u_int n;
2508
2509 if (p == 0 || isMarked(ic, p))
2510 return 0;
2511 Mark(ic, p);
2512 n = count_stmts(ic, JT(p)) + count_stmts(ic, JF(p));
2513 return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
2514 }
2515
2516 /*
2517 * Allocate memory. All allocation is done before optimization
2518 * is begun. A linear bound on the size of all data structures is computed
2519 * from the total number of blocks and/or statements.
2520 */
2521 static void
2522 opt_init(opt_state_t *opt_state, struct icode *ic)
2523 {
2524 bpf_u_int32 *p;
2525 int i, n, max_stmts;
2526 u_int product;
2527 size_t block_memsize, edge_memsize;
2528
2529 /*
2530 * First, count the blocks, so we can malloc an array to map
2531 * block number to block. Then, put the blocks into the array.
2532 */
2533 unMarkAll(ic);
2534 n = count_blocks(ic, ic->root);
2535 opt_state->blocks = (struct block **)calloc(n, sizeof(*opt_state->blocks));
2536 if (opt_state->blocks == NULL)
2537 opt_error(opt_state, "malloc");
2538 unMarkAll(ic);
2539 opt_state->n_blocks = 0;
2540 number_blks_r(opt_state, ic, ic->root);
2541
2542 /*
2543 * This "should not happen".
2544 */
2545 if (opt_state->n_blocks == 0)
2546 opt_error(opt_state, "filter has no instructions; please report this as a libpcap issue");
2547
2548 opt_state->n_edges = 2 * opt_state->n_blocks;
2549 if ((opt_state->n_edges / 2) != opt_state->n_blocks) {
2550 /*
2551 * Overflow.
2552 */
2553 opt_error(opt_state, "filter is too complex to optimize");
2554 }
2555 opt_state->edges = (struct edge **)calloc(opt_state->n_edges, sizeof(*opt_state->edges));
2556 if (opt_state->edges == NULL) {
2557 opt_error(opt_state, "malloc");
2558 }
2559
2560 /*
2561 * The number of levels is bounded by the number of nodes.
2562 */
2563 opt_state->levels = (struct block **)calloc(opt_state->n_blocks, sizeof(*opt_state->levels));
2564 if (opt_state->levels == NULL) {
2565 opt_error(opt_state, "malloc");
2566 }
2567
2568 opt_state->edgewords = opt_state->n_edges / BITS_PER_WORD + 1;
2569 opt_state->nodewords = opt_state->n_blocks / BITS_PER_WORD + 1;
2570
2571 /*
2572 * Make sure opt_state->n_blocks * opt_state->nodewords fits
2573 * in a u_int; we use it as a u_int number-of-iterations
2574 * value.
2575 */
2576 product = opt_state->n_blocks * opt_state->nodewords;
2577 if ((product / opt_state->n_blocks) != opt_state->nodewords) {
2578 /*
2579 * XXX - just punt and don't try to optimize?
2580 * In practice, this is unlikely to happen with
2581 * a normal filter.
2582 */
2583 opt_error(opt_state, "filter is too complex to optimize");
2584 }
2585
2586 /*
2587 * Make sure the total memory required for that doesn't
2588 * overflow.
2589 */
2590 block_memsize = (size_t)2 * product * sizeof(*opt_state->space);
2591 if ((block_memsize / product) != 2 * sizeof(*opt_state->space)) {
2592 opt_error(opt_state, "filter is too complex to optimize");
2593 }
2594
2595 /*
2596 * Make sure opt_state->n_edges * opt_state->edgewords fits
2597 * in a u_int; we use it as a u_int number-of-iterations
2598 * value.
2599 */
2600 product = opt_state->n_edges * opt_state->edgewords;
2601 if ((product / opt_state->n_edges) != opt_state->edgewords) {
2602 opt_error(opt_state, "filter is too complex to optimize");
2603 }
2604
2605 /*
2606 * Make sure the total memory required for that doesn't
2607 * overflow.
2608 */
2609 edge_memsize = (size_t)product * sizeof(*opt_state->space);
2610 if (edge_memsize / product != sizeof(*opt_state->space)) {
2611 opt_error(opt_state, "filter is too complex to optimize");
2612 }
2613
2614 /*
2615 * Make sure the total memory required for both of them doesn't
2616 * overflow.
2617 */
2618 if (block_memsize > SIZE_MAX - edge_memsize) {
2619 opt_error(opt_state, "filter is too complex to optimize");
2620 }
2621
2622 /* XXX */
2623 opt_state->space = (bpf_u_int32 *)malloc(block_memsize + edge_memsize);
2624 if (opt_state->space == NULL) {
2625 opt_error(opt_state, "malloc");
2626 }
2627 p = opt_state->space;
2628 opt_state->all_dom_sets = p;
2629 for (i = 0; i < n; ++i) {
2630 opt_state->blocks[i]->dom = p;
2631 p += opt_state->nodewords;
2632 }
2633 opt_state->all_closure_sets = p;
2634 for (i = 0; i < n; ++i) {
2635 opt_state->blocks[i]->closure = p;
2636 p += opt_state->nodewords;
2637 }
2638 opt_state->all_edge_sets = p;
2639 for (i = 0; i < n; ++i) {
2640 register struct block *b = opt_state->blocks[i];
2641
2642 b->et.edom = p;
2643 p += opt_state->edgewords;
2644 b->ef.edom = p;
2645 p += opt_state->edgewords;
2646 b->et.id = i;
2647 opt_state->edges[i] = &b->et;
2648 b->ef.id = opt_state->n_blocks + i;
2649 opt_state->edges[opt_state->n_blocks + i] = &b->ef;
2650 b->et.pred = b;
2651 b->ef.pred = b;
2652 }
2653 max_stmts = 0;
2654 for (i = 0; i < n; ++i)
2655 max_stmts += slength(opt_state->blocks[i]->stmts) + 1;
2656 /*
2657 * We allocate at most 3 value numbers per statement,
2658 * so this is an upper bound on the number of valnodes
2659 * we'll need.
2660 */
2661 opt_state->maxval = 3 * max_stmts;
2662 opt_state->vmap = (struct vmapinfo *)calloc(opt_state->maxval, sizeof(*opt_state->vmap));
2663 if (opt_state->vmap == NULL) {
2664 opt_error(opt_state, "malloc");
2665 }
2666 opt_state->vnode_base = (struct valnode *)calloc(opt_state->maxval, sizeof(*opt_state->vnode_base));
2667 if (opt_state->vnode_base == NULL) {
2668 opt_error(opt_state, "malloc");
2669 }
2670 }
2671
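/*
 * The division-based tests above are the portable idiom for detecting
 * unsigned multiplication overflow without a wider integer type.  A
 * self-contained sketch (mul_overflows() is a hypothetical name);
 * opt_init() can skip the a != 0 guard because n_blocks and n_edges
 * are already known to be nonzero by the time it multiplies:
 */
#if 0
static int
mul_overflows(u_int a, u_int b, u_int *product)
{
	u_int p = a * b;	/* may wrap; well-defined for u_int */

	if (a != 0 && p / a != b)
		return 1;	/* a * b doesn't fit in a u_int */
	*product = p;
	return 0;
}
#endif
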
2672 /*
2673 * This is only used when supporting optimizer debugging. It is
2674 * global state, so do *not* do more than one compile in parallel
2675 * and expect it to provide meaningful information.
2676 */
2677 #ifdef BDEBUG
2678 int bids[NBIDS];
2679 #endif
2680
2681 static void PCAP_NORETURN conv_error(conv_state_t *, const char *, ...)
2682 PCAP_PRINTFLIKE(2, 3);
2683
2684 /*
2685 * Returns true if successful. Returns false if a branch has
2686 * an offset that is too large. If so, we have marked that
2687 * branch so that on a subsequent iteration, it will be treated
2688 * properly.
2689 */
2690 static int
2691 convert_code_r(conv_state_t *conv_state, struct icode *ic, struct block *p)
2692 {
2693 struct bpf_insn *dst;
2694 struct slist *src;
2695 u_int slen;
2696 u_int off;
2697 struct slist **offset = NULL;
2698
2699 if (p == 0 || isMarked(ic, p))
2700 return (1);
2701 Mark(ic, p);
2702
2703 if (convert_code_r(conv_state, ic, JF(p)) == 0)
2704 return (0);
2705 if (convert_code_r(conv_state, ic, JT(p)) == 0)
2706 return (0);
2707
2708 slen = slength(p->stmts);
2709 dst = conv_state->ftail -= (slen + 1 + p->longjt + p->longjf);
2710 /* inflate length by any extra jumps */
2711
2712 p->offset = (int)(dst - conv_state->fstart);
2713
2714 /* generate offset[] for convenience */
2715 if (slen) {
2716 offset = (struct slist **)calloc(slen, sizeof(struct slist *));
2717 if (!offset) {
2718 conv_error(conv_state, "not enough core");
2719 /*NOTREACHED*/
2720 }
2721 }
2722 src = p->stmts;
2723 for (off = 0; off < slen && src; off++) {
2724 #if 0
2725 printf("off=%d src=%x\n", off, src);
2726 #endif
2727 offset[off] = src;
2728 src = src->next;
2729 }
2730
2731 off = 0;
2732 for (src = p->stmts; src; src = src->next) {
2733 if (src->s.code == NOP)
2734 continue;
2735 dst->code = (u_short)src->s.code;
2736 dst->k = src->s.k;
2737
2738 /* fill block-local relative jump */
2739 if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
2740 #if 0
2741 if (src->s.jt || src->s.jf) {
2742 free(offset);
2743 conv_error(conv_state, "illegal jmp destination");
2744 /*NOTREACHED*/
2745 }
2746 #endif
2747 goto filled;
2748 }
2749 if (off == slen - 2) /*???*/
2750 goto filled;
2751
2752 {
2753 u_int i;
2754 int jt, jf;
2755 const char ljerr[] = "%s for block-local relative jump: off=%d";
2756
2757 #if 0
2758 printf("code=%x off=%d %x %x\n", src->s.code,
2759 off, src->s.jt, src->s.jf);
2760 #endif
2761
2762 if (!src->s.jt || !src->s.jf) {
2763 free(offset);
2764 conv_error(conv_state, ljerr, "no jmp destination", off);
2765 /*NOTREACHED*/
2766 }
2767
2768 jt = jf = 0;
2769 for (i = 0; i < slen; i++) {
2770 if (offset[i] == src->s.jt) {
2771 if (jt) {
2772 free(offset);
2773 conv_error(conv_state, ljerr, "multiple matches", off);
2774 /*NOTREACHED*/
2775 }
2776
2777 if (i - off - 1 >= 256) {
2778 free(offset);
2779 conv_error(conv_state, ljerr, "out-of-range jump", off);
2780 /*NOTREACHED*/
2781 }
2782 dst->jt = (u_char)(i - off - 1);
2783 jt++;
2784 }
2785 if (offset[i] == src->s.jf) {
2786 if (jf) {
2787 free(offset);
2788 conv_error(conv_state, ljerr, "multiple matches", off);
2789 /*NOTREACHED*/
2790 }
2791 if (i - off - 1 >= 256) {
2792 free(offset);
2793 conv_error(conv_state, ljerr, "out-of-range jump", off);
2794 /*NOTREACHED*/
2795 }
2796 dst->jf = (u_char)(i - off - 1);
2797 jf++;
2798 }
2799 }
2800 if (!jt || !jf) {
2801 free(offset);
2802 conv_error(conv_state, ljerr, "no destination found", off);
2803 /*NOTREACHED*/
2804 }
2805 }
2806 filled:
2807 ++dst;
2808 ++off;
2809 }
2810 if (offset)
2811 free(offset);
2812
2813 #ifdef BDEBUG
2814 if (dst - conv_state->fstart < NBIDS)
2815 bids[dst - conv_state->fstart] = p->id + 1;
2816 #endif
2817 dst->code = (u_short)p->s.code;
2818 dst->k = p->s.k;
2819 if (JT(p)) {
2820 /* number of extra jumps inserted */
2821 u_char extrajmps = 0;
2822 off = JT(p)->offset - (p->offset + slen) - 1;
2823 if (off >= 256) {
2824 /* offset too large for branch, must add a jump */
2825 if (p->longjt == 0) {
2826 /* mark this instruction and retry */
2827 p->longjt++;
2828 return(0);
2829 }
2830 dst->jt = extrajmps;
2831 extrajmps++;
2832 dst[extrajmps].code = BPF_JMP|BPF_JA;
2833 dst[extrajmps].k = off - extrajmps;
2834 }
2835 else
2836 dst->jt = (u_char)off;
2837 off = JF(p)->offset - (p->offset + slen) - 1;
2838 if (off >= 256) {
2839 /* offset too large for branch, must add a jump */
2840 if (p->longjf == 0) {
2841 /* mark this instruction and retry */
2842 p->longjf++;
2843 return(0);
2844 }
2845 /* branch if F to following jump */
2846 /* if two jumps are inserted, F goes to second one */
2847 dst->jf = extrajmps;
2848 extrajmps++;
2849 dst[extrajmps].code = BPF_JMP|BPF_JA;
2850 dst[extrajmps].k = off - extrajmps;
2851 }
2852 else
2853 dst->jf = (u_char)off;
2854 }
2855 return (1);
2856 }
2857
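/*
 * Schematically, what the longjt/longjf machinery above emits (the
 * offsets here are made up): a conditional branch has only an 8-bit
 * displacement field, so when a target is 256 or more instructions
 * away the branch is retargeted at an unconditional jump placed
 * immediately after it, and that BPF_JA instruction carries the full
 * displacement:
 *
 *	jeq #k	jt 0  jf ...	; true branch: fall to the next insn
 *	ja  +300		; which long-jumps to the real target
 *
 * The first time a too-long branch is seen, the block is only flagged
 * (p->longjt or p->longjf) and 0 is returned, making icode_to_fcode()
 * redo the conversion with room reserved for the extra jump.
 */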
2858
2859 /*
2860 * Convert flowgraph intermediate representation to the
2861 * BPF array representation. Set *lenp to the number of instructions.
2862 *
2863 * This routine does *NOT* leak the memory pointed to by fp. It *must
2864 * not* do free(fp) before returning fp; doing so would make no sense,
2865 * as the BPF array pointed to by the return value of icode_to_fcode()
2866 * must be valid - it's being returned for use in a bpf_program structure.
2867 *
2868 * If it appears that icode_to_fcode() is leaking, the problem is that
2869 * the program using pcap_compile() is failing to free the memory in
2870 * the BPF program when it's done - the leak is in the program, not in
2871 * the routine that happens to be allocating the memory. (By analogy, if
2872 * a program calls fopen() without ever calling fclose() on the FILE *,
2873 * it will leak the FILE structure; the leak is not in fopen(), it's in
2874 * the program.) Change the program to use pcap_freecode() when it's
2875 * done with the filter program. See the pcap man page.
2876 */
2877 struct bpf_insn *
2878 icode_to_fcode(struct icode *ic, struct block *root, u_int *lenp,
2879 char *errbuf)
2880 {
2881 u_int n;
2882 struct bpf_insn *fp;
2883 conv_state_t conv_state;
2884
2885 conv_state.fstart = NULL;
2886 conv_state.errbuf = errbuf;
2887 if (setjmp(conv_state.top_ctx) != 0) {
2888 free(conv_state.fstart);
2889 return NULL;
2890 }
2891
2892 /*
2893 * Loop doing convert_code_r() until no branches remain
2894 * with too-large offsets.
2895 */
2896 for (;;) {
2897 unMarkAll(ic);
2898 n = *lenp = count_stmts(ic, root);
2899
2900 fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
2901 if (fp == NULL) {
2902 (void)snprintf(errbuf, PCAP_ERRBUF_SIZE,
2903 "malloc");
2904 return NULL;
2905 }
2906 memset((char *)fp, 0, sizeof(*fp) * n);
2907 conv_state.fstart = fp;
2908 conv_state.ftail = fp + n;
2909
2910 unMarkAll(ic);
2911 if (convert_code_r(&conv_state, ic, root))
2912 break;
2913 free(fp);
2914 }
2915
2916 return fp;
2917 }
2918
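/*
 * A sketch of the ownership contract described above (hypothetical
 * caller fragment): the returned array is handed to a bpf_program, and
 * whoever owns that program eventually releases it.
 */
#if 0
	struct bpf_program prog;

	prog.bf_insns = icode_to_fcode(ic, ic->root, &prog.bf_len, errbuf);
	if (prog.bf_insns == NULL)
		return (-1);		/* errbuf holds the details */
	/* ... install or run the program ... */
	pcap_freecode(&prog);		/* the caller frees, per the
					 * comment above */
#endif
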
2919 /*
2920  * For icode_to_fcode() errors.
2921 */
2922 static void PCAP_NORETURN
2923 conv_error(conv_state_t *conv_state, const char *fmt, ...)
2924 {
2925 va_list ap;
2926
2927 va_start(ap, fmt);
2928 (void)vsnprintf(conv_state->errbuf,
2929 PCAP_ERRBUF_SIZE, fmt, ap);
2930 va_end(ap);
2931 longjmp(conv_state->top_ctx, 1);
2932 /* NOTREACHED */
2933 #ifdef _AIX
2934 PCAP_UNREACHABLE
2935 #endif /* _AIX */
2936 }
2937
2938 /*
2939 * Make a copy of a BPF program and put it in the "fcode" member of
2940 * a "pcap_t".
2941 *
2942 * If we fail to allocate memory for the copy, fill in the "errbuf"
2943 * member of the "pcap_t" with an error message, and return -1;
2944 * otherwise, return 0.
2945 */
2946 int
2947 pcapint_install_bpf_program(pcap_t *p, struct bpf_program *fp)
2948 {
2949 size_t prog_size;
2950
2951 /*
2952 * Validate the program.
2953 */
2954 if (!pcapint_validate_filter(fp->bf_insns, fp->bf_len)) {
2955 snprintf(p->errbuf, sizeof(p->errbuf),
2956 "BPF program is not valid");
2957 return (-1);
2958 }
2959
2960 /*
2961 * Free up any already installed program.
2962 */
2963 pcap_freecode(&p->fcode);
2964
2965 prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
2966 p->fcode.bf_len = fp->bf_len;
2967 p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
2968 if (p->fcode.bf_insns == NULL) {
2969 pcapint_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
2970 errno, "malloc");
2971 return (-1);
2972 }
2973 memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
2974 return (0);
2975 }
2976
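/*
 * Hedged sketch of a typical use by a capture module's filter routine
 * (my_setfilter() is a made-up name; the helper itself is real):
 */
#if 0
static int
my_setfilter(pcap_t *p, struct bpf_program *fp)
{
	/* Validates fp and stores a private copy in p->fcode. */
	if (pcapint_install_bpf_program(p, fp) < 0)
		return (-1);	/* p->errbuf has already been filled in */
	return (0);
}
#endif
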
2977 #ifdef BDEBUG
2978 static void
2979 dot_dump_node(struct icode *ic, struct block *block, struct bpf_program *prog,
2980 FILE *out)
2981 {
2982 int icount, noffset;
2983 int i;
2984
2985 if (block == NULL || isMarked(ic, block))
2986 return;
2987 Mark(ic, block);
2988
2989 icount = slength(block->stmts) + 1 + block->longjt + block->longjf;
2990 noffset = min(block->offset + icount, (int)prog->bf_len);
2991
2992 fprintf(out, "\tblock%u [shape=ellipse, id=\"block-%u\" label=\"BLOCK%u\\n", block->id, block->id, block->id);
2993 for (i = block->offset; i < noffset; i++) {
2994 fprintf(out, "\\n%s", bpf_image(prog->bf_insns + i, i));
2995 }
2996 fprintf(out, "\" tooltip=\"");
2997 for (i = 0; i < BPF_MEMWORDS; i++)
2998 if (block->val[i] != VAL_UNKNOWN)
2999 fprintf(out, "val[%d]=%d ", i, block->val[i]);
3000 fprintf(out, "val[A]=%d ", block->val[A_ATOM]);
3001 fprintf(out, "val[X]=%d", block->val[X_ATOM]);
3002 fprintf(out, "\"");
3003 if (JT(block) == NULL)
3004 fprintf(out, ", peripheries=2");
3005 fprintf(out, "];\n");
3006
3007 dot_dump_node(ic, JT(block), prog, out);
3008 dot_dump_node(ic, JF(block), prog, out);
3009 }
3010
3011 static void
3012 dot_dump_edge(struct icode *ic, struct block *block, FILE *out)
3013 {
3014 if (block == NULL || isMarked(ic, block))
3015 return;
3016 Mark(ic, block);
3017
3018 if (JT(block)) {
3019 fprintf(out, "\t\"block%u\":se -> \"block%u\":n [label=\"T\"]; \n",
3020 block->id, JT(block)->id);
3021 fprintf(out, "\t\"block%u\":sw -> \"block%u\":n [label=\"F\"]; \n",
3022 block->id, JF(block)->id);
3023 }
3024 dot_dump_edge(ic, JT(block), out);
3025 dot_dump_edge(ic, JF(block), out);
3026 }
3027
3028 /* Output the block CFG using the graphviz/DOT language.
3029  * The CFG shows each block's code, the value index of each register
3030  * at block exit, and the jump relationships.
3031 *
3032 * example DOT for BPF `ip src host 1.1.1.1' is:
3033 digraph BPF {
3034 block0 [shape=ellipse, id="block-0" label="BLOCK0\n\n(000) ldh [12]\n(001) jeq #0x800 jt 2 jf 5" tooltip="val[A]=0 val[X]=0"];
3035 block1 [shape=ellipse, id="block-1" label="BLOCK1\n\n(002) ld [26]\n(003) jeq #0x1010101 jt 4 jf 5" tooltip="val[A]=0 val[X]=0"];
3036 block2 [shape=ellipse, id="block-2" label="BLOCK2\n\n(004) ret #68" tooltip="val[A]=0 val[X]=0", peripheries=2];
3037 block3 [shape=ellipse, id="block-3" label="BLOCK3\n\n(005) ret #0" tooltip="val[A]=0 val[X]=0", peripheries=2];
3038 "block0":se -> "block1":n [label="T"];
3039 "block0":sw -> "block3":n [label="F"];
3040 "block1":se -> "block2":n [label="T"];
3041 "block1":sw -> "block3":n [label="F"];
3042 }
3043 *
3044  * After installing graphviz from https://www.graphviz.org/, save the output as bpf.dot
3045 * and run `dot -Tpng -O bpf.dot' to draw the graph.
3046 */
3047 static int
3048 dot_dump(struct icode *ic, char *errbuf)
3049 {
3050 struct bpf_program f;
3051 FILE *out = stdout;
3052
3053 memset(bids, 0, sizeof bids);
3054 f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3055 if (f.bf_insns == NULL)
3056 return -1;
3057
3058 fprintf(out, "digraph BPF {\n");
3059 unMarkAll(ic);
3060 dot_dump_node(ic, ic->root, &f, out);
3061 unMarkAll(ic);
3062 dot_dump_edge(ic, ic->root, out);
3063 fprintf(out, "}\n");
3064
3065 free((char *)f.bf_insns);
3066 return 0;
3067 }
3068
3069 static int
3070 plain_dump(struct icode *ic, char *errbuf)
3071 {
3072 struct bpf_program f;
3073
3074 memset(bids, 0, sizeof bids);
3075 f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3076 if (f.bf_insns == NULL)
3077 return -1;
3078 bpf_dump(&f, 1);
3079 putchar('\n');
3080 free((char *)f.bf_insns);
3081 return 0;
3082 }
3083
3084 static void
3085 opt_dump(opt_state_t *opt_state, struct icode *ic)
3086 {
3087 int status;
3088 char errbuf[PCAP_ERRBUF_SIZE];
3089
3090 /*
3091 * If the CFG, in DOT format, is requested, output it rather than
3092 * the code that would be generated from that graph.
3093 */
3094 if (pcap_print_dot_graph)
3095 status = dot_dump(ic, errbuf);
3096 else
3097 status = plain_dump(ic, errbuf);
3098 if (status == -1)
3099 opt_error(opt_state, "opt_dump: icode_to_fcode failed: %s", errbuf);
3100 }
3101 #endif