1 /*
2 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that: (1) source code distributions
7 * retain the above copyright notice and this paragraph in its entirety, (2)
8 * distributions including binary code include the above copyright notice and
9 * this paragraph in its entirety in the documentation or other materials
10 * provided with the distribution, and (3) all advertising materials mentioning
11 * features or use of this software display the following acknowledgement:
12 * ``This product includes software developed by the University of California,
13 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
14 * the University nor the names of its contributors may be used to endorse
15 * or promote products derived from this software without specific prior
16 * written permission.
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20 *
21 * Optimization module for BPF code intermediate representation.
22 */
23
24 #ifdef HAVE_CONFIG_H
25 #include <config.h>
26 #endif
27
28 #include <pcap-types.h>
29
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <memory.h>
33 #include <setjmp.h>
34 #include <string.h>
35 #include <limits.h> /* for SIZE_MAX */
36 #include <errno.h>
37
38 #include "pcap-int.h"
39
40 #include "gencode.h"
41 #include "optimize.h"
42 #include "diag-control.h"
43
44 #ifdef HAVE_OS_PROTO_H
45 #include "os-proto.h"
46 #endif
47
48 #ifdef BDEBUG
49 /*
50 * The internal "debug printout" flag for the filter expression optimizer.
51 * The code to print that stuff is present only if BDEBUG is defined, so
52 * the flag, and the routine to set it, are defined only if BDEBUG is
53 * defined.
54 */
55 static int pcap_optimizer_debug;
56
57 /*
58 * Routine to set that flag.
59 *
60 * This is intended for libpcap developers, not for general use.
61 * If you want to set these in a program, you'll have to declare this
62 * routine yourself, with the appropriate DLL import attribute on Windows;
63 * it's not declared in any header file, and won't be declared in any
64 * header file provided by libpcap.
65 */
66 PCAP_API void pcap_set_optimizer_debug(int value);
67
68 PCAP_API_DEF void
69 pcap_set_optimizer_debug(int value)
70 {
71 pcap_optimizer_debug = value;
72 }
73
74 /*
75 * The internal "print dot graph" flag for the filter expression optimizer.
76 * The code to print that stuff is present only if BDEBUG is defined, so
77 * the flag, and the routine to set it, are defined only if BDEBUG is
78 * defined.
79 */
80 static int pcap_print_dot_graph;
81
82 /*
83 * Routine to set that flag.
84 *
85 * This is intended for libpcap developers, not for general use.
86 * If you want to set these in a program, you'll have to declare this
87 * routine yourself, with the appropriate DLL import attribute on Windows;
88 * it's not declared in any header file, and won't be declared in any
89 * header file provided by libpcap.
90 */
91 PCAP_API void pcap_set_print_dot_graph(int value);
92
93 PCAP_API_DEF void
94 pcap_set_print_dot_graph(int value)
95 {
96 pcap_print_dot_graph = value;
97 }
98
99 #endif
100
101 /*
102 * lowest_set_bit().
103 *
104 * Takes a 32-bit integer as an argument.
105 *
106 * If handed a non-zero value, returns the index of the lowest set bit,
107 * counting upwards from zero.
108 *
109 * If handed zero, the results are platform- and compiler-dependent.
110 * Keep it out of the light, don't give it any water, don't feed it
111 * after midnight, and don't pass zero to it.
112 *
113 * This is the same as the count of trailing zeroes in the word.
114 */
115 #if PCAP_IS_AT_LEAST_GNUC_VERSION(3,4)
116 /*
117 * GCC 3.4 and later; we have __builtin_ctz().
118 */
119 #define lowest_set_bit(mask) ((u_int)__builtin_ctz(mask))
120 #elif defined(_MSC_VER)
121 /*
122 * Visual Studio; we support only 2005 and later, so use
123 * _BitScanForward().
124 */
125 #include <intrin.h>
126
127 #ifndef __clang__
128 #pragma intrinsic(_BitScanForward)
129 #endif
130
131 static __forceinline u_int
132 lowest_set_bit(int mask)
133 {
134 unsigned long bit;
135
136 /*
137 * Don't sign-extend mask if long is longer than int.
138 * (It's currently not, in MSVC, even on 64-bit platforms, but....)
139 */
140 if (_BitScanForward(&bit, (unsigned int)mask) == 0)
141 abort(); /* mask is zero */
142 return (u_int)bit;
143 }
144 #else
145 /*
146 * None of the above.
147 * Use a perfect-hash-function-based function.
148 */
149 static u_int
150 lowest_set_bit(int mask)
151 {
152 unsigned int v = (unsigned int)mask;
153
154 static const u_int MultiplyDeBruijnBitPosition[32] = {
155 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
156 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
157 };
158
159 /*
160 	 * We strip off all but the lowermost set bit (v & -v),
161 * and perform a minimal perfect hash on it to look up the
162 * number of low-order zero bits in a table.
163 *
164 * See:
165 *
166 	 * http://7ooo.mooo.com/text/ComputingTrailingZerosHOWTO.pdf
167 *
168 	 * http://supertech.csail.mit.edu/papers/debruijn.pdf
169 */
170 return (MultiplyDeBruijnBitPosition[((v & -v) * 0x077CB531U) >> 27]);
171 }
172 #endif
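/*
 * For example, with mask = 0x28 (binary 101000) the lowest set bit is
 * bit 3, so lowest_set_bit(0x28) is 3.  In the De Bruijn version above,
 * v & -v isolates that bit (0x08), multiplying by 0x077CB531U gives
 * 0x3BE5A988, and the top five bits (0x3BE5A988 >> 27 == 7) select the
 * table entry holding 3.
 */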
173
174 /*
175 * Represents a deleted instruction.
176 */
177 #define NOP -1
178
179 /*
180 * Register numbers for use-def values.
181 * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
182 * location. A_ATOM is the accumulator and X_ATOM is the index
183 * register.
184 */
185 #define A_ATOM BPF_MEMWORDS
186 #define X_ATOM (BPF_MEMWORDS+1)
187
188 /*
189 * This define is used to represent *both* the accumulator and
190 * x register in use-def computations.
191 * Currently, the use-def code assumes only one definition per instruction.
192 */
193 #define AX_ATOM N_ATOMS
194
195 /*
196 * These data structures are used in a Cocke and Schwartz style
197 * value numbering scheme. Since the flowgraph is acyclic,
198 * exit values can be propagated from a node's predecessors
199 * provided it is uniquely defined.
200 */
201 struct valnode {
202 int code;
203 bpf_u_int32 v0, v1;
204 int val; /* the value number */
205 struct valnode *next;
206 };
207
208 /* Integer constants mapped with the load immediate opcode. */
209 #define K(i) F(opt_state, BPF_LD|BPF_IMM|BPF_W, i, 0U)
210
211 struct vmapinfo {
212 int is_const;
213 bpf_u_int32 const_val;
214 };
215
216 typedef struct {
217 /*
218 * Place to longjmp to on an error.
219 */
220 jmp_buf top_ctx;
221
222 /*
223 	 * The buffer into which to put an error message.
224 */
225 char *errbuf;
226
227 /*
228 * A flag to indicate that further optimization is needed.
229 * Iterative passes are continued until a given pass yields no
230 * code simplification or branch movement.
231 */
232 int done;
233
234 /*
235 * XXX - detect loops that do nothing but repeated AND/OR pullups
236 * and edge moves.
237 * If 100 passes in a row do nothing but that, treat that as a
238 * sign that we're in a loop that just shuffles in a cycle in
239 * which each pass just shuffles the code and we eventually
240 * get back to the original configuration.
241 *
242 * XXX - we need a non-heuristic way of detecting, or preventing,
243 * such a cycle.
244 */
245 int non_branch_movement_performed;
246
247 	u_int n_blocks;		/* number of blocks in the CFG; guaranteed to be > 0, as there's at least a RET instruction */
248 struct block **blocks;
249 u_int n_edges; /* twice n_blocks, so guaranteed to be > 0 */
250 struct edge **edges;
251
252 /*
253 * A bit vector set representation of the dominators.
254 * We round up the set size to the next power of two.
255 */
256 u_int nodewords; /* number of 32-bit words for a bit vector of "number of nodes" bits; guaranteed to be > 0 */
257 u_int edgewords; /* number of 32-bit words for a bit vector of "number of edges" bits; guaranteed to be > 0 */
258 struct block **levels;
259 bpf_u_int32 *space;
260
261 #define BITS_PER_WORD (8*sizeof(bpf_u_int32))
262 /*
263 * True if a is in uset {p}
264 */
265 #define SET_MEMBER(p, a) \
266 ((p)[(unsigned)(a) / BITS_PER_WORD] & ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD)))
267
268 /*
269 * Add 'a' to uset p.
270 */
271 #define SET_INSERT(p, a) \
272 (p)[(unsigned)(a) / BITS_PER_WORD] |= ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
273
274 /*
275 * Delete 'a' from uset p.
276 */
277 #define SET_DELETE(p, a) \
278 (p)[(unsigned)(a) / BITS_PER_WORD] &= ~((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
279
280 /*
281 * a := a intersect b
282 * n must be guaranteed to be > 0
283 */
284 #define SET_INTERSECT(a, b, n)\
285 {\
286 register bpf_u_int32 *_x = a, *_y = b;\
287 register u_int _n = n;\
288 do *_x++ &= *_y++; while (--_n != 0);\
289 }
290
291 /*
292 * a := a - b
293 * n must be guaranteed to be > 0
294 */
295 #define SET_SUBTRACT(a, b, n)\
296 {\
297 register bpf_u_int32 *_x = a, *_y = b;\
298 register u_int _n = n;\
299 do *_x++ &=~ *_y++; while (--_n != 0);\
300 }
301
302 /*
303 * a := a union b
304 * n must be guaranteed to be > 0
305 */
306 #define SET_UNION(a, b, n)\
307 {\
308 register bpf_u_int32 *_x = a, *_y = b;\
309 register u_int _n = n;\
310 do *_x++ |= *_y++; while (--_n != 0);\
311 }
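/*
 * As a small illustration of the uset macros above: with BITS_PER_WORD
 * equal to 32, SET_INSERT(p, 37) sets bit 5 of p[1], SET_MEMBER(p, 37)
 * tests that same bit, and SET_INTERSECT(a, b, n) ANDs the vectors word
 * by word, so an element survives in 'a' only if it was present in both
 * sets.
 */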
312
313 uset all_dom_sets;
314 uset all_closure_sets;
315 uset all_edge_sets;
316
317 #define MODULUS 213
318 struct valnode *hashtbl[MODULUS];
319 bpf_u_int32 curval;
320 bpf_u_int32 maxval;
321
322 struct vmapinfo *vmap;
323 struct valnode *vnode_base;
324 struct valnode *next_vnode;
325 } opt_state_t;
326
327 typedef struct {
328 /*
329 * Place to longjmp to on an error.
330 */
331 jmp_buf top_ctx;
332
333 /*
334 * The buffer into which to put error message.
335 */
336 char *errbuf;
337
338 /*
339 * Some pointers used to convert the basic block form of the code,
340 * into the array form that BPF requires. 'fstart' will point to
341 * the malloc'd array while 'ftail' is used during the recursive
342 * traversal.
343 */
344 struct bpf_insn *fstart;
345 struct bpf_insn *ftail;
346 } conv_state_t;
347
348 static void opt_init(opt_state_t *, struct icode *);
349 static void opt_cleanup(opt_state_t *);
350 static void PCAP_NORETURN opt_error(opt_state_t *, const char *, ...)
351 PCAP_PRINTFLIKE(2, 3);
352
353 static void intern_blocks(opt_state_t *, struct icode *);
354
355 static void find_inedges(opt_state_t *, struct block *);
356 #ifdef BDEBUG
357 static void opt_dump(opt_state_t *, struct icode *);
358 #endif
359
360 #ifndef MAX
361 #define MAX(a,b) ((a)>(b)?(a):(b))
362 #endif
363
364 static void
365 find_levels_r(opt_state_t *opt_state, struct icode *ic, struct block *b)
366 {
367 int level;
368
369 if (isMarked(ic, b))
370 return;
371
372 Mark(ic, b);
373 b->link = 0;
374
375 if (JT(b)) {
376 find_levels_r(opt_state, ic, JT(b));
377 find_levels_r(opt_state, ic, JF(b));
378 level = MAX(JT(b)->level, JF(b)->level) + 1;
379 } else
380 level = 0;
381 b->level = level;
382 b->link = opt_state->levels[level];
383 opt_state->levels[level] = b;
384 }
385
386 /*
387 * Level graph. The levels go from 0 at the leaves to
388 * N_LEVELS at the root. The opt_state->levels[] array points to the
389 * first node of the level list, whose elements are linked
390 * with the 'link' field of the struct block.
391 */
392 static void
393 find_levels(opt_state_t *opt_state, struct icode *ic)
394 {
395 memset((char *)opt_state->levels, 0, opt_state->n_blocks * sizeof(*opt_state->levels));
396 unMarkAll(ic);
397 find_levels_r(opt_state, ic, ic->root);
398 }
399
400 /*
401 * Find dominator relationships.
402 * Assumes graph has been leveled.
403 */
404 static void
405 find_dom(opt_state_t *opt_state, struct block *root)
406 {
407 u_int i;
408 int level;
409 struct block *b;
410 bpf_u_int32 *x;
411
412 /*
413 * Initialize sets to contain all nodes.
414 */
415 x = opt_state->all_dom_sets;
416 /*
417 * In opt_init(), we've made sure the product doesn't overflow.
418 */
419 i = opt_state->n_blocks * opt_state->nodewords;
420 while (i != 0) {
421 --i;
422 *x++ = 0xFFFFFFFFU;
423 }
424 /* Root starts off empty. */
425 for (i = opt_state->nodewords; i != 0;) {
426 --i;
427 root->dom[i] = 0;
428 }
429
430 	/* root->level is the highest level number found. */
431 for (level = root->level; level >= 0; --level) {
432 for (b = opt_state->levels[level]; b; b = b->link) {
433 SET_INSERT(b->dom, b->id);
434 if (JT(b) == 0)
435 continue;
436 SET_INTERSECT(JT(b)->dom, b->dom, opt_state->nodewords);
437 SET_INTERSECT(JF(b)->dom, b->dom, opt_state->nodewords);
438 }
439 }
440 }
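/*
 * Illustrative sketch: in a diamond-shaped CFG where the root R branches
 * to B and C, both of which branch to D, the loop above intersects D's
 * set with both B's ({R, B}) and C's ({R, C}), so D->dom ends up as
 * {R, D}: neither B nor C alone dominates D, but R does.
 */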
441
442 static void
443 propedom(opt_state_t *opt_state, struct edge *ep)
444 {
445 SET_INSERT(ep->edom, ep->id);
446 if (ep->succ) {
447 SET_INTERSECT(ep->succ->et.edom, ep->edom, opt_state->edgewords);
448 SET_INTERSECT(ep->succ->ef.edom, ep->edom, opt_state->edgewords);
449 }
450 }
451
452 /*
453 * Compute edge dominators.
454 * Assumes graph has been leveled and predecessors established.
455 */
456 static void
457 find_edom(opt_state_t *opt_state, struct block *root)
458 {
459 u_int i;
460 uset x;
461 int level;
462 struct block *b;
463
464 x = opt_state->all_edge_sets;
465 /*
466 * In opt_init(), we've made sure the product doesn't overflow.
467 */
468 for (i = opt_state->n_edges * opt_state->edgewords; i != 0; ) {
469 --i;
470 x[i] = 0xFFFFFFFFU;
471 }
472
473 	/* root->level is the highest level number found. */
474 memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
475 memset(root->ef.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
476 for (level = root->level; level >= 0; --level) {
477 for (b = opt_state->levels[level]; b != 0; b = b->link) {
478 propedom(opt_state, &b->et);
479 propedom(opt_state, &b->ef);
480 }
481 }
482 }
483
484 /*
485 * Find the backwards transitive closure of the flow graph. These sets
486 * are backwards in the sense that we find the set of nodes that reach
487 * a given node, not the set of nodes that can be reached by a node.
488 *
489 * Assumes graph has been leveled.
490 */
491 static void
492 find_closure(opt_state_t *opt_state, struct block *root)
493 {
494 int level;
495 struct block *b;
496
497 /*
498 * Initialize sets to contain no nodes.
499 */
500 memset((char *)opt_state->all_closure_sets, 0,
501 opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->all_closure_sets));
502
503 	/* root->level is the highest level number found. */
504 for (level = root->level; level >= 0; --level) {
505 for (b = opt_state->levels[level]; b; b = b->link) {
506 SET_INSERT(b->closure, b->id);
507 if (JT(b) == 0)
508 continue;
509 SET_UNION(JT(b)->closure, b->closure, opt_state->nodewords);
510 SET_UNION(JF(b)->closure, b->closure, opt_state->nodewords);
511 }
512 }
513 }
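/*
 * Illustrative sketch: with the same diamond CFG (R branches to B and C,
 * both of which branch to D), the unions above give
 * D->closure == {R, B, C, D}, i.e. every node from which D can be
 * reached, plus D itself.
 */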
514
515 /*
516 * Return the register number that is used by s.
517 *
518 	 * Returns A_ATOM if A is used, X_ATOM if X is used, AX_ATOM if both A and X
519 * are used, the scratch memory location's number if a scratch memory
520 * location is used (e.g., 0 for M[0]), or -1 if none of those are used.
521 *
522 * The implementation should probably change to an array access.
523 */
524 static int
525 atomuse(struct stmt *s)
526 {
527 register int c = s->code;
528
529 if (c == NOP)
530 return -1;
531
532 switch (BPF_CLASS(c)) {
533
534 case BPF_RET:
535 return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
536 (BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;
537
538 case BPF_LD:
539 case BPF_LDX:
540 /*
541 * As there are fewer than 2^31 memory locations,
542 * s->k should be convertible to int without problems.
543 */
544 return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
545 (BPF_MODE(c) == BPF_MEM) ? (int)s->k : -1;
546
547 case BPF_ST:
548 return A_ATOM;
549
550 case BPF_STX:
551 return X_ATOM;
552
553 case BPF_JMP:
554 case BPF_ALU:
555 if (BPF_SRC(c) == BPF_X)
556 return AX_ATOM;
557 return A_ATOM;
558
559 case BPF_MISC:
560 return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
561 }
562 abort();
563 /* NOTREACHED */
564 }
565
566 /*
567 * Return the register number that is defined by 's'. We assume that
568 * a single stmt cannot define more than one register. If no register
569 * is defined, return -1.
570 *
571 * The implementation should probably change to an array access.
572 */
573 static int
574 atomdef(struct stmt *s)
575 {
576 if (s->code == NOP)
577 return -1;
578
579 switch (BPF_CLASS(s->code)) {
580
581 case BPF_LD:
582 case BPF_ALU:
583 return A_ATOM;
584
585 case BPF_LDX:
586 return X_ATOM;
587
588 case BPF_ST:
589 case BPF_STX:
590 return s->k;
591
592 case BPF_MISC:
593 return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
594 }
595 return -1;
596 }
597
598 /*
599 * Compute the sets of registers used, defined, and killed by 'b'.
600 *
601 * "Used" means that a statement in 'b' uses the register before any
602 * statement in 'b' defines it, i.e. it uses the value left in
603 * that register by a predecessor block of this block.
604 * "Defined" means that a statement in 'b' defines it.
605 * "Killed" means that a statement in 'b' defines it before any
606 * statement in 'b' uses it, i.e. it kills the value left in that
607 * register by a predecessor block of this block.
608 */
609 static void
610 compute_local_ud(struct block *b)
611 {
612 struct slist *s;
613 atomset def = 0, use = 0, killed = 0;
614 int atom;
615
616 for (s = b->stmts; s; s = s->next) {
617 if (s->s.code == NOP)
618 continue;
619 atom = atomuse(&s->s);
620 if (atom >= 0) {
621 if (atom == AX_ATOM) {
622 if (!ATOMELEM(def, X_ATOM))
623 use |= ATOMMASK(X_ATOM);
624 if (!ATOMELEM(def, A_ATOM))
625 use |= ATOMMASK(A_ATOM);
626 }
627 else if (atom < N_ATOMS) {
628 if (!ATOMELEM(def, atom))
629 use |= ATOMMASK(atom);
630 }
631 else
632 abort();
633 }
634 atom = atomdef(&s->s);
635 if (atom >= 0) {
636 if (!ATOMELEM(use, atom))
637 killed |= ATOMMASK(atom);
638 def |= ATOMMASK(atom);
639 }
640 }
641 if (BPF_CLASS(b->s.code) == BPF_JMP) {
642 /*
643 * XXX - what about RET?
644 */
645 atom = atomuse(&b->s);
646 if (atom >= 0) {
647 if (atom == AX_ATOM) {
648 if (!ATOMELEM(def, X_ATOM))
649 use |= ATOMMASK(X_ATOM);
650 if (!ATOMELEM(def, A_ATOM))
651 use |= ATOMMASK(A_ATOM);
652 }
653 else if (atom < N_ATOMS) {
654 if (!ATOMELEM(def, atom))
655 use |= ATOMMASK(atom);
656 }
657 else
658 abort();
659 }
660 }
661
662 b->def = def;
663 b->kill = killed;
664 b->in_use = use;
665 }
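/*
 * Illustrative sketch: for a block whose statements are
 * "ldx M[3]; st M[3]; ld [14]", M[3] and the accumulator are both used
 * before anything in the block defines them (the store reads A), so they
 * end up in b->in_use; the index register is defined before any use, so
 * it is killed; and A, X and M[3] are all defined.
 */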
666
667 /*
668 * Assume graph is already leveled.
669 */
670 static void
671 find_ud(opt_state_t *opt_state, struct block *root)
672 {
673 int i, maxlevel;
674 struct block *p;
675
676 /*
677 	 * root->level is the highest level number found;
678 * count down from there.
679 */
680 maxlevel = root->level;
681 for (i = maxlevel; i >= 0; --i)
682 for (p = opt_state->levels[i]; p; p = p->link) {
683 compute_local_ud(p);
684 p->out_use = 0;
685 }
686
687 for (i = 1; i <= maxlevel; ++i) {
688 for (p = opt_state->levels[i]; p; p = p->link) {
689 p->out_use |= JT(p)->in_use | JF(p)->in_use;
690 p->in_use |= p->out_use &~ p->kill;
691 }
692 }
693 }
694 static void
695 init_val(opt_state_t *opt_state)
696 {
697 opt_state->curval = 0;
698 opt_state->next_vnode = opt_state->vnode_base;
699 memset((char *)opt_state->vmap, 0, opt_state->maxval * sizeof(*opt_state->vmap));
700 memset((char *)opt_state->hashtbl, 0, sizeof opt_state->hashtbl);
701 }
702
703 /*
704 * Because we really don't have an IR, this stuff is a little messy.
705 *
706  * This routine looks in the table of existing value numbers for a value
707  * generated from an operation with the specified opcode and
708 * the specified values. If it finds it, it returns its value number,
709 * otherwise it makes a new entry in the table and returns the
710 * value number of that entry.
711 */
712 static bpf_u_int32
713 F(opt_state_t *opt_state, int code, bpf_u_int32 v0, bpf_u_int32 v1)
714 {
715 u_int hash;
716 bpf_u_int32 val;
717 struct valnode *p;
718
719 hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
720 hash %= MODULUS;
721
722 for (p = opt_state->hashtbl[hash]; p; p = p->next)
723 if (p->code == code && p->v0 == v0 && p->v1 == v1)
724 return p->val;
725
726 /*
727 * Not found. Allocate a new value, and assign it a new
728 * value number.
729 *
730 * opt_state->curval starts out as 0, which means VAL_UNKNOWN; we
731 * increment it before using it as the new value number, which
732 * means we never assign VAL_UNKNOWN.
733 *
734 * XXX - unless we overflow, but we probably won't have 2^32-1
735 * values; we treat 32 bits as effectively infinite.
736 */
737 val = ++opt_state->curval;
738 if (BPF_MODE(code) == BPF_IMM &&
739 (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
740 opt_state->vmap[val].const_val = v0;
741 opt_state->vmap[val].is_const = 1;
742 }
743 p = opt_state->next_vnode++;
744 p->val = val;
745 p->code = code;
746 p->v0 = v0;
747 p->v1 = v1;
748 p->next = opt_state->hashtbl[hash];
749 opt_state->hashtbl[hash] = p;
750
751 return val;
752 }
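/*
 * Illustrative sketch: two separate "ld #5" statements present the same
 * (code, v0, v1) triple, so the second lookup finds the entry created for
 * the first and both loads get the same value number; K(5) is exactly
 * that lookup.  Because BPF_LD|BPF_IMM is a constant load, vmap[] also
 * records that this value number stands for the constant 5, which is
 * what later lets opt_peep() and fold_op() treat it as a known constant.
 */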
753
754 static inline void
755 vstore(struct stmt *s, bpf_u_int32 *valp, bpf_u_int32 newval, int alter)
756 {
757 if (alter && newval != VAL_UNKNOWN && *valp == newval)
758 s->code = NOP;
759 else
760 *valp = newval;
761 }
762
763 /*
764 * Do constant-folding on binary operators.
765 * (Unary operators are handled elsewhere.)
766 */
767 static void
768 fold_op(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 v0, bpf_u_int32 v1)
769 {
770 bpf_u_int32 a, b;
771
772 a = opt_state->vmap[v0].const_val;
773 b = opt_state->vmap[v1].const_val;
774
775 switch (BPF_OP(s->code)) {
776 case BPF_ADD:
777 a += b;
778 break;
779
780 case BPF_SUB:
781 a -= b;
782 break;
783
784 case BPF_MUL:
785 a *= b;
786 break;
787
788 case BPF_DIV:
789 if (b == 0)
790 opt_error(opt_state, "division by zero");
791 a /= b;
792 break;
793
794 case BPF_MOD:
795 if (b == 0)
796 opt_error(opt_state, "modulus by zero");
797 a %= b;
798 break;
799
800 case BPF_AND:
801 a &= b;
802 break;
803
804 case BPF_OR:
805 a |= b;
806 break;
807
808 case BPF_XOR:
809 a ^= b;
810 break;
811
812 case BPF_LSH:
813 /*
814 * A left shift of more than the width of the type
815 * is undefined in C; we'll just treat it as shifting
816 * all the bits out.
817 *
818 * XXX - the BPF interpreter doesn't check for this,
819 * so its behavior is dependent on the behavior of
820 * the processor on which it's running. There are
821 * processors on which it shifts all the bits out
822 * and processors on which it does no shift.
823 */
824 if (b < 32)
825 a <<= b;
826 else
827 a = 0;
828 break;
829
830 case BPF_RSH:
831 /*
832 * A right shift of more than the width of the type
833 * is undefined in C; we'll just treat it as shifting
834 * all the bits out.
835 *
836 * XXX - the BPF interpreter doesn't check for this,
837 * so its behavior is dependent on the behavior of
838 * the processor on which it's running. There are
839 * processors on which it shifts all the bits out
840 * and processors on which it does no shift.
841 */
842 if (b < 32)
843 a >>= b;
844 else
845 a = 0;
846 break;
847
848 default:
849 abort();
850 }
851 s->k = a;
852 s->code = BPF_LD|BPF_IMM;
853 /*
854 * XXX - optimizer loop detection.
855 */
856 opt_state->non_branch_movement_performed = 1;
857 opt_state->done = 0;
858 }
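/*
 * Illustrative sketch: if the value numbers passed in say the accumulator
 * holds 6 and the index register holds 3, a "div x" statement is
 * rewritten in place to "ld #2": s->k becomes the folded constant and
 * s->code becomes BPF_LD|BPF_IMM, so later passes see a plain constant
 * load.
 */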
859
860 static inline struct slist *
861 this_op(struct slist *s)
862 {
863 while (s != 0 && s->s.code == NOP)
864 s = s->next;
865 return s;
866 }
867
868 static void
869 opt_not(struct block *b)
870 {
871 struct block *tmp = JT(b);
872
873 JT(b) = JF(b);
874 JF(b) = tmp;
875 }
876
877 static void
878 opt_peep(opt_state_t *opt_state, struct block *b)
879 {
880 struct slist *s;
881 struct slist *next, *last;
882 bpf_u_int32 val;
883
884 s = b->stmts;
885 if (s == 0)
886 return;
887
888 last = s;
889 for (/*empty*/; /*empty*/; s = next) {
890 /*
891 * Skip over nops.
892 */
893 s = this_op(s);
894 if (s == 0)
895 break; /* nothing left in the block */
896
897 /*
898 * Find the next real instruction after that one
899 * (skipping nops).
900 */
901 next = this_op(s->next);
902 if (next == 0)
903 break; /* no next instruction */
904 last = next;
905
906 /*
907 * st M[k] --> st M[k]
908 * ldx M[k] tax
909 */
910 if (s->s.code == BPF_ST &&
911 next->s.code == (BPF_LDX|BPF_MEM) &&
912 s->s.k == next->s.k) {
913 /*
914 * XXX - optimizer loop detection.
915 */
916 opt_state->non_branch_movement_performed = 1;
917 opt_state->done = 0;
918 next->s.code = BPF_MISC|BPF_TAX;
919 }
920 /*
921 * ld #k --> ldx #k
922 * tax txa
923 */
924 if (s->s.code == (BPF_LD|BPF_IMM) &&
925 next->s.code == (BPF_MISC|BPF_TAX)) {
926 s->s.code = BPF_LDX|BPF_IMM;
927 next->s.code = BPF_MISC|BPF_TXA;
928 /*
929 * XXX - optimizer loop detection.
930 */
931 opt_state->non_branch_movement_performed = 1;
932 opt_state->done = 0;
933 }
934 /*
935 * This is an ugly special case, but it happens
936 * when you say tcp[k] or udp[k] where k is a constant.
937 */
938 if (s->s.code == (BPF_LD|BPF_IMM)) {
939 struct slist *add, *tax, *ild;
940
941 /*
942 * Check that X isn't used on exit from this
943 * block (which the optimizer might cause).
944 * We know the code generator won't generate
945 * any local dependencies.
946 */
947 if (ATOMELEM(b->out_use, X_ATOM))
948 continue;
949
950 /*
951 * Check that the instruction following the ldi
952 * is an addx, or it's an ldxms with an addx
953 * following it (with 0 or more nops between the
954 * ldxms and addx).
955 */
956 if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
957 add = next;
958 else
959 add = this_op(next->next);
960 if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
961 continue;
962
963 /*
964 * Check that a tax follows that (with 0 or more
965 * nops between them).
966 */
967 tax = this_op(add->next);
968 if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
969 continue;
970
971 /*
972 * Check that an ild follows that (with 0 or more
973 * nops between them).
974 */
975 ild = this_op(tax->next);
976 if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
977 BPF_MODE(ild->s.code) != BPF_IND)
978 continue;
979 /*
980 * We want to turn this sequence:
981 *
982 * (004) ldi #0x2 {s}
983 * (005) ldxms [14] {next} -- optional
984 * (006) addx {add}
985 * (007) tax {tax}
986 * (008) ild [x+0] {ild}
987 *
988 * into this sequence:
989 *
990 * (004) nop
991 * (005) ldxms [14]
992 * (006) nop
993 * (007) nop
994 * (008) ild [x+2]
995 *
996 * XXX We need to check that X is not
997 * subsequently used, because we want to change
998 * what'll be in it after this sequence.
999 *
1000 * We know we can eliminate the accumulator
1001 * modifications earlier in the sequence since
1002 * it is defined by the last stmt of this sequence
1003 * (i.e., the last statement of the sequence loads
1004 * a value into the accumulator, so we can eliminate
1005 * earlier operations on the accumulator).
1006 */
1007 ild->s.k += s->s.k;
1008 s->s.code = NOP;
1009 add->s.code = NOP;
1010 tax->s.code = NOP;
1011 /*
1012 * XXX - optimizer loop detection.
1013 */
1014 opt_state->non_branch_movement_performed = 1;
1015 opt_state->done = 0;
1016 }
1017 }
1018 /*
1019 * If the comparison at the end of a block is an equality
1020 * comparison against a constant, and nobody uses the value
1021 * we leave in the A register at the end of a block, and
1022 * the operation preceding the comparison is an arithmetic
1023 	 * operation, we can sometimes optimize it away.
1024 */
1025 if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
1026 !ATOMELEM(b->out_use, A_ATOM)) {
1027 /*
1028 * We can optimize away certain subtractions of the
1029 * X register.
1030 */
1031 if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
1032 val = b->val[X_ATOM];
1033 if (opt_state->vmap[val].is_const) {
1034 /*
1035 * If we have a subtract to do a comparison,
1036 * and the X register is a known constant,
1037 * we can merge this value into the
1038 * comparison:
1039 *
1040 * sub x -> nop
1041 * jeq #y jeq #(x+y)
1042 */
1043 b->s.k += opt_state->vmap[val].const_val;
1044 last->s.code = NOP;
1045 /*
1046 * XXX - optimizer loop detection.
1047 */
1048 opt_state->non_branch_movement_performed = 1;
1049 opt_state->done = 0;
1050 } else if (b->s.k == 0) {
1051 /*
1052 * If the X register isn't a constant,
1053 * and the comparison in the test is
1054 * against 0, we can compare with the
1055 * X register, instead:
1056 *
1057 * sub x -> nop
1058 * jeq #0 jeq x
1059 */
1060 last->s.code = NOP;
1061 b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
1062 /*
1063 * XXX - optimizer loop detection.
1064 */
1065 opt_state->non_branch_movement_performed = 1;
1066 opt_state->done = 0;
1067 }
1068 }
1069 /*
1070 * Likewise, a constant subtract can be simplified:
1071 *
1072 * sub #x -> nop
1073 * jeq #y -> jeq #(x+y)
1074 */
1075 else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
1076 last->s.code = NOP;
1077 b->s.k += last->s.k;
1078 /*
1079 * XXX - optimizer loop detection.
1080 */
1081 opt_state->non_branch_movement_performed = 1;
1082 opt_state->done = 0;
1083 }
1084 /*
1085 * And, similarly, a constant AND can be simplified
1086 * if we're testing against 0, i.e.:
1087 *
1088 * and #k nop
1089 * jeq #0 -> jset #k
1090 */
1091 else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
1092 b->s.k == 0) {
1093 b->s.k = last->s.k;
1094 b->s.code = BPF_JMP|BPF_K|BPF_JSET;
1095 last->s.code = NOP;
1096 /*
1097 * XXX - optimizer loop detection.
1098 */
1099 opt_state->non_branch_movement_performed = 1;
1100 opt_state->done = 0;
1101 opt_not(b);
1102 }
1103 }
1104 /*
1105 * jset #0 -> never
1106 * jset #ffffffff -> always
1107 */
1108 if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
1109 if (b->s.k == 0)
1110 JT(b) = JF(b);
1111 if (b->s.k == 0xffffffffU)
1112 JF(b) = JT(b);
1113 }
1114 /*
1115 * If we're comparing against the index register, and the index
1116 * register is a known constant, we can just compare against that
1117 * constant.
1118 */
1119 val = b->val[X_ATOM];
1120 if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
1121 bpf_u_int32 v = opt_state->vmap[val].const_val;
1122 b->s.code &= ~BPF_X;
1123 b->s.k = v;
1124 }
1125 /*
1126 * If the accumulator is a known constant, we can compute the
1127 * comparison result.
1128 */
1129 val = b->val[A_ATOM];
1130 if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
1131 bpf_u_int32 v = opt_state->vmap[val].const_val;
1132 switch (BPF_OP(b->s.code)) {
1133
1134 case BPF_JEQ:
1135 v = v == b->s.k;
1136 break;
1137
1138 case BPF_JGT:
1139 v = v > b->s.k;
1140 break;
1141
1142 case BPF_JGE:
1143 v = v >= b->s.k;
1144 break;
1145
1146 case BPF_JSET:
1147 v &= b->s.k;
1148 break;
1149
1150 default:
1151 abort();
1152 }
1153 if (JF(b) != JT(b)) {
1154 /*
1155 * XXX - optimizer loop detection.
1156 */
1157 opt_state->non_branch_movement_performed = 1;
1158 opt_state->done = 0;
1159 }
1160 if (v)
1161 JF(b) = JT(b);
1162 else
1163 JT(b) = JF(b);
1164 }
1165 }
1166
1167 /*
1168  * Compute the symbolic value of the expression of 's', and update
1169 * anything it defines in the value table 'val'. If 'alter' is true,
1170 * do various optimizations. This code would be cleaner if symbolic
1171 * evaluation and code transformations weren't folded together.
1172 */
1173 static void
1174 opt_stmt(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 val[], int alter)
1175 {
1176 int op;
1177 bpf_u_int32 v;
1178
1179 switch (s->code) {
1180
1181 case BPF_LD|BPF_ABS|BPF_W:
1182 case BPF_LD|BPF_ABS|BPF_H:
1183 case BPF_LD|BPF_ABS|BPF_B:
1184 v = F(opt_state, s->code, s->k, 0L);
1185 vstore(s, &val[A_ATOM], v, alter);
1186 break;
1187
1188 case BPF_LD|BPF_IND|BPF_W:
1189 case BPF_LD|BPF_IND|BPF_H:
1190 case BPF_LD|BPF_IND|BPF_B:
1191 v = val[X_ATOM];
1192 if (alter && opt_state->vmap[v].is_const) {
1193 s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
1194 s->k += opt_state->vmap[v].const_val;
1195 v = F(opt_state, s->code, s->k, 0L);
1196 /*
1197 * XXX - optimizer loop detection.
1198 */
1199 opt_state->non_branch_movement_performed = 1;
1200 opt_state->done = 0;
1201 }
1202 else
1203 v = F(opt_state, s->code, s->k, v);
1204 vstore(s, &val[A_ATOM], v, alter);
1205 break;
1206
1207 case BPF_LD|BPF_LEN:
1208 v = F(opt_state, s->code, 0L, 0L);
1209 vstore(s, &val[A_ATOM], v, alter);
1210 break;
1211
1212 case BPF_LD|BPF_IMM:
1213 v = K(s->k);
1214 vstore(s, &val[A_ATOM], v, alter);
1215 break;
1216
1217 case BPF_LDX|BPF_IMM:
1218 v = K(s->k);
1219 vstore(s, &val[X_ATOM], v, alter);
1220 break;
1221
1222 case BPF_LDX|BPF_MSH|BPF_B:
1223 v = F(opt_state, s->code, s->k, 0L);
1224 vstore(s, &val[X_ATOM], v, alter);
1225 break;
1226
1227 case BPF_ALU|BPF_NEG:
1228 if (alter && opt_state->vmap[val[A_ATOM]].is_const) {
1229 s->code = BPF_LD|BPF_IMM;
1230 /*
1231 * Do this negation as unsigned arithmetic; that's
1232 * what modern BPF engines do, and it guarantees
1233 * that all possible values can be negated. (Yeah,
1234 * negating 0x80000000, the minimum signed 32-bit
1235 * two's-complement value, results in 0x80000000,
1236 * so it's still negative, but we *should* be doing
1237 * all unsigned arithmetic here, to match what
1238 * modern BPF engines do.)
1239 *
1240 * Express it as 0U - (unsigned value) so that we
1241 * don't get compiler warnings about negating an
1242 * unsigned value and don't get UBSan warnings
1243 * about the result of negating 0x80000000 being
1244 * undefined.
1245 */
1246 s->k = 0U - opt_state->vmap[val[A_ATOM]].const_val;
1247 val[A_ATOM] = K(s->k);
1248 }
1249 else
1250 val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], 0L);
1251 break;
1252
1253 case BPF_ALU|BPF_ADD|BPF_K:
1254 case BPF_ALU|BPF_SUB|BPF_K:
1255 case BPF_ALU|BPF_MUL|BPF_K:
1256 case BPF_ALU|BPF_DIV|BPF_K:
1257 case BPF_ALU|BPF_MOD|BPF_K:
1258 case BPF_ALU|BPF_AND|BPF_K:
1259 case BPF_ALU|BPF_OR|BPF_K:
1260 case BPF_ALU|BPF_XOR|BPF_K:
1261 case BPF_ALU|BPF_LSH|BPF_K:
1262 case BPF_ALU|BPF_RSH|BPF_K:
1263 op = BPF_OP(s->code);
1264 if (alter) {
1265 if (s->k == 0) {
1266 /*
1267 * Optimize operations where the constant
1268 * is zero.
1269 *
1270 * Don't optimize away "sub #0"
1271 * as it may be needed later to
1272 * fixup the generated math code.
1273 *
1274 * Fail if we're dividing by zero or taking
1275 * a modulus by zero.
1276 */
1277 if (op == BPF_ADD ||
1278 op == BPF_LSH || op == BPF_RSH ||
1279 op == BPF_OR || op == BPF_XOR) {
1280 s->code = NOP;
1281 break;
1282 }
1283 if (op == BPF_MUL || op == BPF_AND) {
1284 s->code = BPF_LD|BPF_IMM;
1285 val[A_ATOM] = K(s->k);
1286 break;
1287 }
1288 if (op == BPF_DIV)
1289 opt_error(opt_state,
1290 "division by zero");
1291 if (op == BPF_MOD)
1292 opt_error(opt_state,
1293 "modulus by zero");
1294 }
1295 if (opt_state->vmap[val[A_ATOM]].is_const) {
1296 fold_op(opt_state, s, val[A_ATOM], K(s->k));
1297 val[A_ATOM] = K(s->k);
1298 break;
1299 }
1300 }
1301 val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], K(s->k));
1302 break;
1303
1304 case BPF_ALU|BPF_ADD|BPF_X:
1305 case BPF_ALU|BPF_SUB|BPF_X:
1306 case BPF_ALU|BPF_MUL|BPF_X:
1307 case BPF_ALU|BPF_DIV|BPF_X:
1308 case BPF_ALU|BPF_MOD|BPF_X:
1309 case BPF_ALU|BPF_AND|BPF_X:
1310 case BPF_ALU|BPF_OR|BPF_X:
1311 case BPF_ALU|BPF_XOR|BPF_X:
1312 case BPF_ALU|BPF_LSH|BPF_X:
1313 case BPF_ALU|BPF_RSH|BPF_X:
1314 op = BPF_OP(s->code);
1315 if (alter && opt_state->vmap[val[X_ATOM]].is_const) {
1316 if (opt_state->vmap[val[A_ATOM]].is_const) {
1317 fold_op(opt_state, s, val[A_ATOM], val[X_ATOM]);
1318 val[A_ATOM] = K(s->k);
1319 }
1320 else {
1321 s->code = BPF_ALU|BPF_K|op;
1322 s->k = opt_state->vmap[val[X_ATOM]].const_val;
1323 if ((op == BPF_LSH || op == BPF_RSH) &&
1324 s->k > 31)
1325 opt_error(opt_state,
1326 "shift by more than 31 bits");
1327 /*
1328 * XXX - optimizer loop detection.
1329 */
1330 opt_state->non_branch_movement_performed = 1;
1331 opt_state->done = 0;
1332 val[A_ATOM] =
1333 F(opt_state, s->code, val[A_ATOM], K(s->k));
1334 }
1335 break;
1336 }
1337 /*
1338 * Check if we're doing something to an accumulator
1339 * that is 0, and simplify. This may not seem like
1340 * much of a simplification but it could open up further
1341 * optimizations.
1342 * XXX We could also check for mul by 1, etc.
1343 */
1344 if (alter && opt_state->vmap[val[A_ATOM]].is_const
1345 && opt_state->vmap[val[A_ATOM]].const_val == 0) {
1346 if (op == BPF_ADD || op == BPF_OR || op == BPF_XOR) {
1347 s->code = BPF_MISC|BPF_TXA;
1348 vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1349 break;
1350 }
1351 else if (op == BPF_MUL || op == BPF_DIV || op == BPF_MOD ||
1352 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
1353 s->code = BPF_LD|BPF_IMM;
1354 s->k = 0;
1355 vstore(s, &val[A_ATOM], K(s->k), alter);
1356 break;
1357 }
1358 else if (op == BPF_NEG) {
1359 s->code = NOP;
1360 break;
1361 }
1362 }
1363 val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], val[X_ATOM]);
1364 break;
1365
1366 case BPF_MISC|BPF_TXA:
1367 vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1368 break;
1369
1370 case BPF_LD|BPF_MEM:
1371 v = val[s->k];
1372 if (alter && opt_state->vmap[v].is_const) {
1373 s->code = BPF_LD|BPF_IMM;
1374 s->k = opt_state->vmap[v].const_val;
1375 /*
1376 * XXX - optimizer loop detection.
1377 */
1378 opt_state->non_branch_movement_performed = 1;
1379 opt_state->done = 0;
1380 }
1381 vstore(s, &val[A_ATOM], v, alter);
1382 break;
1383
1384 case BPF_MISC|BPF_TAX:
1385 vstore(s, &val[X_ATOM], val[A_ATOM], alter);
1386 break;
1387
1388 case BPF_LDX|BPF_MEM:
1389 v = val[s->k];
1390 if (alter && opt_state->vmap[v].is_const) {
1391 s->code = BPF_LDX|BPF_IMM;
1392 s->k = opt_state->vmap[v].const_val;
1393 /*
1394 * XXX - optimizer loop detection.
1395 */
1396 opt_state->non_branch_movement_performed = 1;
1397 opt_state->done = 0;
1398 }
1399 vstore(s, &val[X_ATOM], v, alter);
1400 break;
1401
1402 case BPF_ST:
1403 vstore(s, &val[s->k], val[A_ATOM], alter);
1404 break;
1405
1406 case BPF_STX:
1407 vstore(s, &val[s->k], val[X_ATOM], alter);
1408 break;
1409 }
1410 }
1411
1412 static void
1413 deadstmt(opt_state_t *opt_state, register struct stmt *s, register struct stmt *last[])
1414 {
1415 register int atom;
1416
1417 atom = atomuse(s);
1418 if (atom >= 0) {
1419 if (atom == AX_ATOM) {
1420 last[X_ATOM] = 0;
1421 last[A_ATOM] = 0;
1422 }
1423 else
1424 last[atom] = 0;
1425 }
1426 atom = atomdef(s);
1427 if (atom >= 0) {
1428 if (last[atom]) {
1429 /*
1430 * XXX - optimizer loop detection.
1431 */
1432 opt_state->non_branch_movement_performed = 1;
1433 opt_state->done = 0;
1434 last[atom]->code = NOP;
1435 }
1436 last[atom] = s;
1437 }
1438 }
1439
1440 static void
1441 opt_deadstores(opt_state_t *opt_state, register struct block *b)
1442 {
1443 register struct slist *s;
1444 register int atom;
1445 struct stmt *last[N_ATOMS];
1446
1447 memset((char *)last, 0, sizeof last);
1448
1449 for (s = b->stmts; s != 0; s = s->next)
1450 deadstmt(opt_state, &s->s, last);
1451 deadstmt(opt_state, &b->s, last);
1452
1453 for (atom = 0; atom < N_ATOMS; ++atom)
1454 if (last[atom] && !ATOMELEM(b->out_use, atom)) {
1455 last[atom]->code = NOP;
1456 /*
1457 * XXX - optimizer loop detection.
1458 */
1459 opt_state->non_branch_movement_performed = 1;
1460 opt_state->done = 0;
1461 vstore(0, &b->val[atom], VAL_UNKNOWN, 0);
1462 }
1463 }
1464
1465 static void
1466 opt_blk(opt_state_t *opt_state, struct block *b, int do_stmts)
1467 {
1468 struct slist *s;
1469 struct edge *p;
1470 int i;
1471 bpf_u_int32 aval, xval;
1472
1473 #if 0
1474 for (s = b->stmts; s && s->next; s = s->next)
1475 if (BPF_CLASS(s->s.code) == BPF_JMP) {
1476 do_stmts = 0;
1477 break;
1478 }
1479 #endif
1480
1481 /*
1482 * Initialize the atom values.
1483 */
1484 p = b->in_edges;
1485 if (p == 0) {
1486 /*
1487 * We have no predecessors, so everything is undefined
1488 * upon entry to this block.
1489 */
1490 memset((char *)b->val, 0, sizeof(b->val));
1491 } else {
1492 /*
1493 * Inherit values from our predecessors.
1494 *
1495 * First, get the values from the predecessor along the
1496 * first edge leading to this node.
1497 */
1498 memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
1499 /*
1500 * Now look at all the other nodes leading to this node.
1501 * If, for the predecessor along that edge, a register
1502 * has a different value from the one we have (i.e.,
1503 * control paths are merging, and the merging paths
1504 * assign different values to that register), give the
1505 * register the undefined value of 0.
1506 */
1507 while ((p = p->next) != NULL) {
1508 for (i = 0; i < N_ATOMS; ++i)
1509 if (b->val[i] != p->pred->val[i])
1510 b->val[i] = 0;
1511 }
1512 }
1513 aval = b->val[A_ATOM];
1514 xval = b->val[X_ATOM];
1515 for (s = b->stmts; s; s = s->next)
1516 opt_stmt(opt_state, &s->s, b->val, do_stmts);
1517
1518 /*
1519 * This is a special case: if we don't use anything from this
1520 * block, and we load the accumulator or index register with a
1521 * value that is already there, or if this block is a return,
1522 * eliminate all the statements.
1523 *
1524 * XXX - what if it does a store? Presumably that falls under
1525 * the heading of "if we don't use anything from this block",
1526 * i.e., if we use any memory location set to a different
1527 * value by this block, then we use something from this block.
1528 *
1529 * XXX - why does it matter whether we use anything from this
1530 * block? If the accumulator or index register doesn't change
1531 * its value, isn't that OK even if we use that value?
1532 *
1533 * XXX - if we load the accumulator with a different value,
1534 * and the block ends with a conditional branch, we obviously
1535 * can't eliminate it, as the branch depends on that value.
1536 * For the index register, the conditional branch only depends
1537 * on the index register value if the test is against the index
1538 * register value rather than a constant; if nothing uses the
1539 * value we put into the index register, and we're not testing
1540 * against the index register's value, and there aren't any
1541 * other problems that would keep us from eliminating this
1542 * block, can we eliminate it?
1543 */
1544 if (do_stmts &&
1545 ((b->out_use == 0 &&
1546 aval != VAL_UNKNOWN && b->val[A_ATOM] == aval &&
1547 xval != VAL_UNKNOWN && b->val[X_ATOM] == xval) ||
1548 BPF_CLASS(b->s.code) == BPF_RET)) {
1549 if (b->stmts != 0) {
1550 b->stmts = 0;
1551 /*
1552 * XXX - optimizer loop detection.
1553 */
1554 opt_state->non_branch_movement_performed = 1;
1555 opt_state->done = 0;
1556 }
1557 } else {
1558 opt_peep(opt_state, b);
1559 opt_deadstores(opt_state, b);
1560 }
1561 /*
1562 * Set up values for branch optimizer.
1563 */
1564 if (BPF_SRC(b->s.code) == BPF_K)
1565 b->oval = K(b->s.k);
1566 else
1567 b->oval = b->val[X_ATOM];
1568 b->et.code = b->s.code;
1569 b->ef.code = -b->s.code;
1570 }
1571
1572 /*
1573 * Return true if any register that is used on exit from 'succ', has
1574 * an exit value that is different from the corresponding exit value
1575 * from 'b'.
1576 */
1577 static int
1578 use_conflict(struct block *b, struct block *succ)
1579 {
1580 int atom;
1581 atomset use = succ->out_use;
1582
1583 if (use == 0)
1584 return 0;
1585
1586 for (atom = 0; atom < N_ATOMS; ++atom)
1587 if (ATOMELEM(use, atom))
1588 if (b->val[atom] != succ->val[atom])
1589 return 1;
1590 return 0;
1591 }
1592
1593 /*
1594 * Given a block that is the successor of an edge, and an edge that
1595 * dominates that edge, return either a pointer to a child of that
1596 * block (a block to which that block jumps) if that block is a
1597 * candidate to replace the successor of the latter edge or NULL
1598 * if neither of the children of the first block are candidates.
1599 */
1600 static struct block *
1601 fold_edge(struct block *child, struct edge *ep)
1602 {
1603 int sense;
1604 bpf_u_int32 aval0, aval1, oval0, oval1;
1605 int code = ep->code;
1606
1607 if (code < 0) {
1608 /*
1609 * This edge is a "branch if false" edge.
1610 */
1611 code = -code;
1612 sense = 0;
1613 } else {
1614 /*
1615 * This edge is a "branch if true" edge.
1616 */
1617 sense = 1;
1618 }
1619
1620 /*
1621 * If the opcode for the branch at the end of the block we
1622 * were handed isn't the same as the opcode for the branch
1623 * to which the edge we were handed corresponds, the tests
1624 * for those branches aren't testing the same conditions,
1625 * so the blocks to which the first block branches aren't
1626 * candidates to replace the successor of the edge.
1627 */
1628 if (child->s.code != code)
1629 return 0;
1630
1631 aval0 = child->val[A_ATOM];
1632 oval0 = child->oval;
1633 aval1 = ep->pred->val[A_ATOM];
1634 oval1 = ep->pred->oval;
1635
1636 /*
1637 * If the A register value on exit from the successor block
1638 * isn't the same as the A register value on exit from the
1639 * predecessor of the edge, the blocks to which the first
1640 * block branches aren't candidates to replace the successor
1641 * of the edge.
1642 */
1643 if (aval0 != aval1)
1644 return 0;
1645
1646 if (oval0 == oval1)
1647 /*
1648 * The operands of the branch instructions are
1649 * identical, so the branches are testing the
1650 * same condition, and the result is true if a true
1651 * branch was taken to get here, otherwise false.
1652 */
1653 return sense ? JT(child) : JF(child);
1654
1655 if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
1656 /*
1657 * At this point, we only know the comparison if we
1658 * came down the true branch, and it was an equality
1659 * comparison with a constant.
1660 *
1661 * I.e., if we came down the true branch, and the branch
1662 * was an equality comparison with a constant, we know the
1663 * accumulator contains that constant. If we came down
1664 * the false branch, or the comparison wasn't with a
1665 * constant, we don't know what was in the accumulator.
1666 *
1667 * We rely on the fact that distinct constants have distinct
1668 * value numbers.
1669 */
1670 return JF(child);
1671
1672 return 0;
1673 }
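/*
 * Illustrative sketch: suppose the dominating edge and 'child' both end
 * in "jeq #2" with the same A value number.  Arriving on the true edge
 * means A == 2, so child's test must also succeed and JT(child) is
 * returned; arriving on the false edge, JF(child) is returned.  If
 * instead the dominating edge was the true branch of "jeq #2" and child
 * tests "jeq #5", A is known to be 2, the test must fail, and JF(child)
 * is returned.
 */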
1674
1675 /*
1676 * If we can make this edge go directly to a child of the edge's current
1677 * successor, do so.
1678 */
1679 static void
1680 opt_j(opt_state_t *opt_state, struct edge *ep)
1681 {
1682 register u_int i, k;
1683 register struct block *target;
1684
1685 /*
1686 	 * Does this edge go to a block that's a leaf node of
1687 	 * the DAG, i.e. a return statement?
1688 	 * If so, there's nothing to optimize: a return block
1689 	 * has no test at the end of it and thus no successors
1690 	 * to which this edge could be redirected.
1691 */
1692 if (JT(ep->succ) == 0)
1693 return;
1694
1695 /*
1696 * Does this edge go to a block that goes, in turn, to
1697 * the same block regardless of whether the test at the
1698 * end succeeds or fails?
1699 */
1700 if (JT(ep->succ) == JF(ep->succ)) {
1701 /*
1702 * Common branch targets can be eliminated, provided
1703 * there is no data dependency.
1704 *
1705 * Check whether any register used on exit from the
1706 * block to which the successor of this edge goes
1707 * has a value at that point that's different from
1708 * the value it has on exit from the predecessor of
1709 * this edge. If not, the predecessor of this edge
1710 * can just go to the block to which the successor
1711 * of this edge goes, bypassing the successor of this
1712 * edge, as the successor of this edge isn't doing
1713 * any calculations whose results are different
1714 * from what the blocks before it did and isn't
1715 * doing any tests the results of which matter.
1716 */
1717 if (!use_conflict(ep->pred, JT(ep->succ))) {
1718 /*
1719 * No, there isn't.
1720 * Make this edge go to the block to
1721 * which the successor of that edge
1722 * goes.
1723 *
1724 * XXX - optimizer loop detection.
1725 */
1726 opt_state->non_branch_movement_performed = 1;
1727 opt_state->done = 0;
1728 ep->succ = JT(ep->succ);
1729 }
1730 }
1731 /*
1732 * For each edge dominator that matches the successor of this
1733 	 * edge, promote the edge successor to its grandchild.
1734 *
1735 	 * XXX We violate the set abstraction here in favor of a reasonably
1736 * efficient loop.
1737 */
1738 top:
1739 for (i = 0; i < opt_state->edgewords; ++i) {
1740 /* i'th word in the bitset of dominators */
1741 register bpf_u_int32 x = ep->edom[i];
1742
1743 while (x != 0) {
1744 /* Find the next dominator in that word and mark it as found */
1745 k = lowest_set_bit(x);
1746 x &=~ ((bpf_u_int32)1 << k);
1747 k += i * BITS_PER_WORD;
1748
1749 target = fold_edge(ep->succ, opt_state->edges[k]);
1750 /*
1751 * We have a candidate to replace the successor
1752 * of ep.
1753 *
1754 * Check that there is no data dependency between
1755 * nodes that will be violated if we move the edge;
1756 * i.e., if any register used on exit from the
1757 * candidate has a value at that point different
1758 * from the value it has when we exit the
1759 * predecessor of that edge, there's a data
1760 * dependency that will be violated.
1761 */
1762 if (target != 0 && !use_conflict(ep->pred, target)) {
1763 /*
1764 * It's safe to replace the successor of
1765 * ep; do so, and note that we've made
1766 * at least one change.
1767 *
1768 * XXX - this is one of the operations that
1769 * happens when the optimizer gets into
1770 * one of those infinite loops.
1771 */
1772 opt_state->done = 0;
1773 ep->succ = target;
1774 if (JT(target) != 0)
1775 /*
1776 * Start over unless we hit a leaf.
1777 */
1778 goto top;
1779 return;
1780 }
1781 }
1782 }
1783 }
1784
1785 /*
1786 * XXX - is this, and and_pullup(), what's described in section 6.1.2
1787 * "Predicate Assertion Propagation" in the BPF+ paper?
1788 *
1789 * Note that this looks at block dominators, not edge dominators.
1790 * Don't think so.
1791 *
1792 * "A or B" compiles into
1793 *
1794 * A
1795 * t / \ f
1796 * / B
1797 * / t / \ f
1798 * \ /
1799 * \ /
1800 * X
1801 *
1802 *
1803 */
1804 static void
1805 or_pullup(opt_state_t *opt_state, struct block *b, struct block *root)
1806 {
1807 bpf_u_int32 val;
1808 int at_top;
1809 struct block *pull;
1810 struct block **diffp, **samep;
1811 struct edge *ep;
1812
1813 ep = b->in_edges;
1814 if (ep == 0)
1815 return;
1816
1817 /*
1818 * Make sure each predecessor loads the same value.
1819 * XXX why?
1820 */
1821 val = ep->pred->val[A_ATOM];
1822 for (ep = ep->next; ep != 0; ep = ep->next)
1823 if (val != ep->pred->val[A_ATOM])
1824 return;
1825
1826 /*
1827 * For the first edge in the list of edges coming into this block,
1828 * see whether the predecessor of that edge comes here via a true
1829 * branch or a false branch.
1830 */
1831 if (JT(b->in_edges->pred) == b)
1832 diffp = &JT(b->in_edges->pred); /* jt */
1833 else
1834 diffp = &JF(b->in_edges->pred); /* jf */
1835
1836 /*
1837 * diffp is a pointer to a pointer to the block.
1838 *
1839 * Go down the false chain looking as far as you can,
1840 * making sure that each jump-compare is doing the
1841 * same as the original block.
1842 *
1843 * If you reach the bottom before you reach a
1844 * different jump-compare, just exit. There's nothing
1845 * to do here. XXX - no, this version is checking for
1846 * the value leaving the block; that's from the BPF+
1847 * pullup routine.
1848 */
1849 at_top = 1;
1850 for (;;) {
1851 /*
1852 * Done if that's not going anywhere XXX
1853 */
1854 if (*diffp == 0)
1855 return;
1856
1857 /*
1858 * Done if that predecessor blah blah blah isn't
1859 * going the same place we're going XXX
1860 *
1861 * Does the true edge of this block point to the same
1862 * location as the true edge of b?
1863 */
1864 if (JT(*diffp) != JT(b))
1865 return;
1866
1867 /*
1868 * Done if this node isn't a dominator of that
1869 * node blah blah blah XXX
1870 *
1871 * Does b dominate diffp?
1872 */
1873 if (!SET_MEMBER((*diffp)->dom, b->id))
1874 return;
1875
1876 /*
1877 * Break out of the loop if that node's value of A
1878 * isn't the value of A above XXX
1879 */
1880 if ((*diffp)->val[A_ATOM] != val)
1881 break;
1882
1883 /*
1884 * Get the JF for that node XXX
1885 * Go down the false path.
1886 */
1887 diffp = &JF(*diffp);
1888 at_top = 0;
1889 }
1890
1891 /*
1892 * Now that we've found a different jump-compare in a chain
1893 * below b, search further down until we find another
1894 * jump-compare that looks at the original value. This
1895 * jump-compare should get pulled up. XXX again we're
1896 * comparing values not jump-compares.
1897 */
1898 samep = &JF(*diffp);
1899 for (;;) {
1900 /*
1901 * Done if that's not going anywhere XXX
1902 */
1903 if (*samep == 0)
1904 return;
1905
1906 /*
1907 * Done if that predecessor blah blah blah isn't
1908 * going the same place we're going XXX
1909 */
1910 if (JT(*samep) != JT(b))
1911 return;
1912
1913 /*
1914 * Done if this node isn't a dominator of that
1915 * node blah blah blah XXX
1916 *
1917 * Does b dominate samep?
1918 */
1919 if (!SET_MEMBER((*samep)->dom, b->id))
1920 return;
1921
1922 /*
1923 * Break out of the loop if that node's value of A
1924 * is the value of A above XXX
1925 */
1926 if ((*samep)->val[A_ATOM] == val)
1927 break;
1928
1929 /* XXX Need to check that there are no data dependencies
1930 	   between diffp and samep. Currently, the code generator
1931 will not produce such dependencies. */
1932 samep = &JF(*samep);
1933 }
1934 #ifdef notdef
1935 /* XXX This doesn't cover everything. */
1936 for (i = 0; i < N_ATOMS; ++i)
1937 if ((*samep)->val[i] != pred->val[i])
1938 return;
1939 #endif
1940 /* Pull up the node. */
1941 pull = *samep;
1942 *samep = JF(pull);
1943 JF(pull) = *diffp;
1944
1945 /*
1946 * At the top of the chain, each predecessor needs to point at the
1947 * pulled up node. Inside the chain, there is only one predecessor
1948 * to worry about.
1949 */
1950 if (at_top) {
1951 for (ep = b->in_edges; ep != 0; ep = ep->next) {
1952 if (JT(ep->pred) == b)
1953 JT(ep->pred) = pull;
1954 else
1955 JF(ep->pred) = pull;
1956 }
1957 }
1958 else
1959 *diffp = pull;
1960
1961 /*
1962 * XXX - this is one of the operations that happens when the
1963 * optimizer gets into one of those infinite loops.
1964 */
1965 opt_state->done = 0;
1966
1967 /*
1968 * Recompute dominator sets as control flow graph has changed.
1969 */
1970 find_dom(opt_state, root);
1971 }
1972
1973 static void
1974 and_pullup(opt_state_t *opt_state, struct block *b, struct block *root)
1975 {
1976 bpf_u_int32 val;
1977 int at_top;
1978 struct block *pull;
1979 struct block **diffp, **samep;
1980 struct edge *ep;
1981
1982 ep = b->in_edges;
1983 if (ep == 0)
1984 return;
1985
1986 /*
1987 * Make sure each predecessor loads the same value.
1988 */
1989 val = ep->pred->val[A_ATOM];
1990 for (ep = ep->next; ep != 0; ep = ep->next)
1991 if (val != ep->pred->val[A_ATOM])
1992 return;
1993
1994 if (JT(b->in_edges->pred) == b)
1995 diffp = &JT(b->in_edges->pred);
1996 else
1997 diffp = &JF(b->in_edges->pred);
1998
1999 at_top = 1;
2000 for (;;) {
2001 if (*diffp == 0)
2002 return;
2003
2004 if (JF(*diffp) != JF(b))
2005 return;
2006
2007 if (!SET_MEMBER((*diffp)->dom, b->id))
2008 return;
2009
2010 if ((*diffp)->val[A_ATOM] != val)
2011 break;
2012
2013 diffp = &JT(*diffp);
2014 at_top = 0;
2015 }
2016 samep = &JT(*diffp);
2017 for (;;) {
2018 if (*samep == 0)
2019 return;
2020
2021 if (JF(*samep) != JF(b))
2022 return;
2023
2024 if (!SET_MEMBER((*samep)->dom, b->id))
2025 return;
2026
2027 if ((*samep)->val[A_ATOM] == val)
2028 break;
2029
2030 /* XXX Need to check that there are no data dependencies
2031 between diffp and samep. Currently, the code generator
2032 will not produce such dependencies. */
2033 samep = &JT(*samep);
2034 }
2035 #ifdef notdef
2036 /* XXX This doesn't cover everything. */
2037 for (i = 0; i < N_ATOMS; ++i)
2038 if ((*samep)->val[i] != pred->val[i])
2039 return;
2040 #endif
2041 /* Pull up the node. */
2042 pull = *samep;
2043 *samep = JT(pull);
2044 JT(pull) = *diffp;
2045
2046 /*
2047 * At the top of the chain, each predecessor needs to point at the
2048 * pulled up node. Inside the chain, there is only one predecessor
2049 * to worry about.
2050 */
2051 if (at_top) {
2052 for (ep = b->in_edges; ep != 0; ep = ep->next) {
2053 if (JT(ep->pred) == b)
2054 JT(ep->pred) = pull;
2055 else
2056 JF(ep->pred) = pull;
2057 }
2058 }
2059 else
2060 *diffp = pull;
2061
2062 /*
2063 * XXX - this is one of the operations that happens when the
2064 * optimizer gets into one of those infinite loops.
2065 */
2066 opt_state->done = 0;
2067
2068 /*
2069 * Recompute dominator sets as control flow graph has changed.
2070 */
2071 find_dom(opt_state, root);
2072 }
2073
2074 static void
2075 opt_blks(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2076 {
2077 int i, maxlevel;
2078 struct block *p;
2079
2080 init_val(opt_state);
2081 maxlevel = ic->root->level;
2082
2083 find_inedges(opt_state, ic->root);
2084 for (i = maxlevel; i >= 0; --i)
2085 for (p = opt_state->levels[i]; p; p = p->link)
2086 opt_blk(opt_state, p, do_stmts);
2087
2088 if (do_stmts)
2089 /*
2090 * No point trying to move branches; it can't possibly
2091 * make a difference at this point.
2092 *
2093 * XXX - this might come after we detect a loop in
2094 * which we were just moving branches back and forth
2095 * forever, cycling through two or more versions of
2096 * the machine code and eventually returning to the
2097 * first version.  (We're not really doing full loop
2098 * detection; we're just testing for two passes in a
2099 * row in which we do nothing but move
2100 * branches.)
2101 */
2102 return;
2103
2104 /*
2105 * Is this what the BPF+ paper describes in sections 6.1.1,
2106 * 6.1.2, and 6.1.3?
2107 */
2108 for (i = 1; i <= maxlevel; ++i) {
2109 for (p = opt_state->levels[i]; p; p = p->link) {
2110 opt_j(opt_state, &p->et);
2111 opt_j(opt_state, &p->ef);
2112 }
2113 }
2114
2115 find_inedges(opt_state, ic->root);
2116 for (i = 1; i <= maxlevel; ++i) {
2117 for (p = opt_state->levels[i]; p; p = p->link) {
2118 or_pullup(opt_state, p, ic->root);
2119 and_pullup(opt_state, p, ic->root);
2120 }
2121 }
2122 }
2123
2124 static inline void
2125 link_inedge(struct edge *parent, struct block *child)
2126 {
2127 parent->next = child->in_edges;
2128 child->in_edges = parent;
2129 }
2130
2131 static void
2132 find_inedges(opt_state_t *opt_state, struct block *root)
2133 {
2134 u_int i;
2135 int level;
2136 struct block *b;
2137
2138 for (i = 0; i < opt_state->n_blocks; ++i)
2139 opt_state->blocks[i]->in_edges = 0;
2140
2141 /*
2142 * Traverse the graph, adding each edge to the predecessor
2143 * list of its successors. Skip the leaves (i.e. level 0).
2144 */
2145 for (level = root->level; level > 0; --level) {
2146 for (b = opt_state->levels[level]; b != 0; b = b->link) {
2147 link_inedge(&b->et, JT(b));
2148 link_inedge(&b->ef, JF(b));
2149 }
2150 }
2151 }
2152
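/*
 * Clean up the root of the CFG: while the root is a jump whose true and
 * false branches are the same block, replace it with that successor,
 * prepending the original root's statements to the new root's.
 */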
2153 static void
2154 opt_root(struct block **b)
2155 {
2156 struct slist *tmp, *s;
2157
2158 s = (*b)->stmts;
2159 (*b)->stmts = 0;
2160 while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
2161 *b = JT(*b);
2162
2163 tmp = (*b)->stmts;
2164 if (tmp != 0)
2165 sappend(s, tmp);
2166 (*b)->stmts = s;
2167
2168 /*
2169 * If the root node is a return, then there is no
2170 * point executing any statements (since the bpf machine
2171 * has no side effects).
2172 */
2173 if (BPF_CLASS((*b)->s.code) == BPF_RET)
2174 (*b)->stmts = 0;
2175 }
2176
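/*
 * Repeat full optimization passes until a fixed point is reached: each
 * iteration recomputes the analyses (levels, dominators, closures,
 * use/def chains, edge dominators) and then runs opt_blks().  A
 * heuristic counter bails out after 100 consecutive passes that did
 * nothing but move branches, since such passes can cycle forever.
 */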
2177 static void
2178 opt_loop(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2179 {
2180
2181 #ifdef BDEBUG
2182 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2183 printf("opt_loop(root, %d) begin\n", do_stmts);
2184 opt_dump(opt_state, ic);
2185 }
2186 #endif
2187
2188 /*
2189 * XXX - optimizer loop detection.
2190 */
2191 int loop_count = 0;
2192 for (;;) {
2193 opt_state->done = 1;
2194 /*
2195 * XXX - optimizer loop detection.
2196 */
2197 opt_state->non_branch_movement_performed = 0;
2198 find_levels(opt_state, ic);
2199 find_dom(opt_state, ic->root);
2200 find_closure(opt_state, ic->root);
2201 find_ud(opt_state, ic->root);
2202 find_edom(opt_state, ic->root);
2203 opt_blks(opt_state, ic, do_stmts);
2204 #ifdef BDEBUG
2205 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2206 printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, opt_state->done);
2207 opt_dump(opt_state, ic);
2208 }
2209 #endif
2210
2211 /*
2212 * Was anything done in this optimizer pass?
2213 */
2214 if (opt_state->done) {
2215 /*
2216 * No, so we've reached a fixed point.
2217 * We're done.
2218 */
2219 break;
2220 }
2221
2222 /*
2223 * XXX - was anything done other than branch movement
2224 * in this pass?
2225 */
2226 if (opt_state->non_branch_movement_performed) {
2227 /*
2228 * Yes. Clear any loop-detection counter;
2229 * we're making some form of progress (assuming
2230 * we can't get into a cycle doing *other*
2231 * optimizations...).
2232 */
2233 loop_count = 0;
2234 } else {
2235 /*
2236 * No - increment the counter, and quit if
2237 * it's up to 100.
2238 */
2239 loop_count++;
2240 if (loop_count >= 100) {
2241 /*
2242 * We've done nothing but branch movement
2243 * for 100 passes; we're probably
2244 * in a cycle and will never reach a
2245 * fixed point.
2246 *
2247 * XXX - yes, we really need a non-
2248 * heuristic way of detecting a cycle.
2249 */
2250 opt_state->done = 1;
2251 break;
2252 }
2253 }
2254 }
2255 }
2256
2257 /*
2258 * Optimize the filter code in its dag representation.
2259 * Return 0 on success, -1 on error.
2260 */
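/*
 * Errors detected inside the optimizer are reported through 'errbuf'
 * and a longjmp() back to the setjmp() below, after which this routine
 * cleans up and returns -1.  On the success path it runs opt_loop()
 * twice (with do_stmts 0 and then 1), merges equivalent blocks with
 * intern_blocks(), and finally tidies the root with opt_root().
 */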
2261 int
2262 bpf_optimize(struct icode *ic, char *errbuf)
2263 {
2264 opt_state_t opt_state;
2265
2266 memset(&opt_state, 0, sizeof(opt_state));
2267 opt_state.errbuf = errbuf;
2268 opt_state.non_branch_movement_performed = 0;
2269 if (setjmp(opt_state.top_ctx)) {
2270 opt_cleanup(&opt_state);
2271 return -1;
2272 }
2273 opt_init(&opt_state, ic);
2274 opt_loop(&opt_state, ic, 0);
2275 opt_loop(&opt_state, ic, 1);
2276 intern_blocks(&opt_state, ic);
2277 #ifdef BDEBUG
2278 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2279 printf("after intern_blocks()\n");
2280 opt_dump(&opt_state, ic);
2281 }
2282 #endif
2283 opt_root(&ic->root);
2284 #ifdef BDEBUG
2285 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2286 printf("after opt_root()\n");
2287 opt_dump(&opt_state, ic);
2288 }
2289 #endif
2290 opt_cleanup(&opt_state);
2291 return 0;
2292 }
2293
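/*
 * Recursively mark every block reachable from 'p'.  Recursion stops at
 * return blocks, which have no successors.
 */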
2294 static void
2295 make_marks(struct icode *ic, struct block *p)
2296 {
2297 if (!isMarked(ic, p)) {
2298 Mark(ic, p);
2299 if (BPF_CLASS(p->s.code) != BPF_RET) {
2300 make_marks(ic, JT(p));
2301 make_marks(ic, JF(p));
2302 }
2303 }
2304 }
2305
2306 /*
2307 * Mark the code graph so that isMarked(ic, p) is true
2308 * only for nodes that are alive, i.e. reachable from the root.
2309 */
2310 static void
2311 mark_code(struct icode *ic)
2312 {
2313 ic->cur_mark += 1;
2314 make_marks(ic, ic->root);
2315 }
2316
2317 /*
2318 * True iff the two stmt lists load the same value from the packet into
2319 * the accumulator.
2320 */
2321 static int
2322 eq_slist(struct slist *x, struct slist *y)
2323 {
2324 for (;;) {
2325 while (x && x->s.code == NOP)
2326 x = x->next;
2327 while (y && y->s.code == NOP)
2328 y = y->next;
2329 if (x == 0)
2330 return y == 0;
2331 if (y == 0)
2332 return x == 0;
2333 if (x->s.code != y->s.code || x->s.k != y->s.k)
2334 return 0;
2335 x = x->next;
2336 y = y->next;
2337 }
2338 }
2339
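/*
 * True iff the two blocks perform the same test on the same operand,
 * have the same successors, and have equivalent statement lists.
 */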
2340 static inline int
2341 eq_blk(struct block *b0, struct block *b1)
2342 {
2343 if (b0->s.code == b1->s.code &&
2344 b0->s.k == b1->s.k &&
2345 b0->et.succ == b1->et.succ &&
2346 b0->ef.succ == b1->ef.succ)
2347 return eq_slist(b0->stmts, b1->stmts);
2348 return 0;
2349 }
2350
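/*
 * Merge equivalent blocks: after marking the live blocks, link each live
 * block to a later equivalent block (if any), then redirect every
 * successor pointer through those links.  Repeat until no pointer
 * changes, since each round of merging can expose new equivalences.
 */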
2351 static void
2352 intern_blocks(opt_state_t *opt_state, struct icode *ic)
2353 {
2354 struct block *p;
2355 u_int i, j;
2356 int done1; /* don't shadow global */
2357 top:
2358 done1 = 1;
2359 for (i = 0; i < opt_state->n_blocks; ++i)
2360 opt_state->blocks[i]->link = 0;
2361
2362 mark_code(ic);
2363
2364 for (i = opt_state->n_blocks - 1; i != 0; ) {
2365 --i;
2366 if (!isMarked(ic, opt_state->blocks[i]))
2367 continue;
2368 for (j = i + 1; j < opt_state->n_blocks; ++j) {
2369 if (!isMarked(ic, opt_state->blocks[j]))
2370 continue;
2371 if (eq_blk(opt_state->blocks[i], opt_state->blocks[j])) {
2372 opt_state->blocks[i]->link = opt_state->blocks[j]->link ?
2373 opt_state->blocks[j]->link : opt_state->blocks[j];
2374 break;
2375 }
2376 }
2377 }
2378 for (i = 0; i < opt_state->n_blocks; ++i) {
2379 p = opt_state->blocks[i];
2380 if (JT(p) == 0)
2381 continue;
2382 if (JT(p)->link) {
2383 done1 = 0;
2384 JT(p) = JT(p)->link;
2385 }
2386 if (JF(p)->link) {
2387 done1 = 0;
2388 JF(p) = JF(p)->link;
2389 }
2390 }
2391 if (!done1)
2392 goto top;
2393 }
2394
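/*
 * Free everything allocated by opt_init().
 */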
2395 static void
2396 opt_cleanup(opt_state_t *opt_state)
2397 {
2398 free((void *)opt_state->vnode_base);
2399 free((void *)opt_state->vmap);
2400 free((void *)opt_state->edges);
2401 free((void *)opt_state->space);
2402 free((void *)opt_state->levels);
2403 free((void *)opt_state->blocks);
2404 }
2405
2406 /*
2407 * For optimizer errors.
2408 */
2409 static void PCAP_NORETURN
2410 opt_error(opt_state_t *opt_state, const char *fmt, ...)
2411 {
2412 va_list ap;
2413
2414 if (opt_state->errbuf != NULL) {
2415 va_start(ap, fmt);
2416 (void)vsnprintf(opt_state->errbuf,
2417 PCAP_ERRBUF_SIZE, fmt, ap);
2418 va_end(ap);
2419 }
2420 longjmp(opt_state->top_ctx, 1);
2421 /* NOTREACHED */
2422 #ifdef _AIX
2423 PCAP_UNREACHABLE
2424 #endif /* _AIX */
2425 }
2426
2427 /*
2428 * Return the number of stmts in 's'.
2429 */
2430 static u_int
2431 slength(struct slist *s)
2432 {
2433 u_int n = 0;
2434
2435 for (; s; s = s->next)
2436 if (s->s.code != NOP)
2437 ++n;
2438 return n;
2439 }
2440
2441 /*
2442 * Return the number of nodes reachable by 'p'.
2443 * All nodes should be initially unmarked.
2444 */
2445 static int
2446 count_blocks(struct icode *ic, struct block *p)
2447 {
2448 if (p == 0 || isMarked(ic, p))
2449 return 0;
2450 Mark(ic, p);
2451 return count_blocks(ic, JT(p)) + count_blocks(ic, JF(p)) + 1;
2452 }
2453
2454 /*
2455 * Do a depth first search on the flow graph, numbering the
2456 * basic blocks, and entering them into the 'blocks' array.
2457 */
2458 static void
2459 number_blks_r(opt_state_t *opt_state, struct icode *ic, struct block *p)
2460 {
2461 u_int n;
2462
2463 if (p == 0 || isMarked(ic, p))
2464 return;
2465
2466 Mark(ic, p);
2467 n = opt_state->n_blocks++;
2468 if (opt_state->n_blocks == 0) {
2469 /*
2470 * Overflow.
2471 */
2472 opt_error(opt_state, "filter is too complex to optimize");
2473 }
2474 p->id = n;
2475 opt_state->blocks[n] = p;
2476
2477 number_blks_r(opt_state, ic, JT(p));
2478 number_blks_r(opt_state, ic, JF(p));
2479 }
2480
2481 /*
2482 * Return the number of stmts in the flowgraph reachable by 'p'.
2483 * The nodes should be unmarked before calling.
2484 *
2485 * Note that "stmts" means "instructions", and that this includes
2486 *
2487 * side-effect statements in 'p' (slength(p->stmts));
2488 *
2489 * statements in the true branch from 'p' (count_stmts(JT(p)));
2490 *
2491 * statements in the false branch from 'p' (count_stmts(JF(p)));
2492 *
2493 * the conditional jump itself (1);
2494 *
2495 * an extra long jump if the true branch requires it (p->longjt);
2496 *
2497 * an extra long jump if the false branch requires it (p->longjf).
2498 */
2499 static u_int
2500 count_stmts(struct icode *ic, struct block *p)
2501 {
2502 u_int n;
2503
2504 if (p == 0 || isMarked(ic, p))
2505 return 0;
2506 Mark(ic, p);
2507 n = count_stmts(ic, JT(p)) + count_stmts(ic, JF(p));
2508 return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
2509 }
2510
2511 /*
2512 * Allocate memory. All allocation is done before optimization
2513 * is begun. A linear bound on the size of all data structures is computed
2514 * from the total number of blocks and/or statements.
2515 */
2516 static void
2517 opt_init(opt_state_t *opt_state, struct icode *ic)
2518 {
2519 bpf_u_int32 *p;
2520 int i, n, max_stmts;
2521 u_int product;
2522 size_t block_memsize, edge_memsize;
2523
2524 /*
2525 * First, count the blocks, so we can malloc an array to map
2526 * block number to block. Then, put the blocks into the array.
2527 */
2528 unMarkAll(ic);
2529 n = count_blocks(ic, ic->root);
2530 opt_state->blocks = (struct block **)calloc(n, sizeof(*opt_state->blocks));
2531 if (opt_state->blocks == NULL)
2532 opt_error(opt_state, "malloc");
2533 unMarkAll(ic);
2534 opt_state->n_blocks = 0;
2535 number_blks_r(opt_state, ic, ic->root);
2536
2537 /*
2538 * This "should not happen".
2539 */
2540 if (opt_state->n_blocks == 0)
2541 opt_error(opt_state, "filter has no instructions; please report this as a libpcap issue");
2542
2543 opt_state->n_edges = 2 * opt_state->n_blocks;
2544 if ((opt_state->n_edges / 2) != opt_state->n_blocks) {
2545 /*
2546 * Overflow.
2547 */
2548 opt_error(opt_state, "filter is too complex to optimize");
2549 }
2550 opt_state->edges = (struct edge **)calloc(opt_state->n_edges, sizeof(*opt_state->edges));
2551 if (opt_state->edges == NULL) {
2552 opt_error(opt_state, "malloc");
2553 }
2554
2555 /*
2556 * The number of levels is bounded by the number of nodes.
2557 */
2558 opt_state->levels = (struct block **)calloc(opt_state->n_blocks, sizeof(*opt_state->levels));
2559 if (opt_state->levels == NULL) {
2560 opt_error(opt_state, "malloc");
2561 }
2562
2563 opt_state->edgewords = opt_state->n_edges / BITS_PER_WORD + 1;
2564 opt_state->nodewords = opt_state->n_blocks / BITS_PER_WORD + 1;
2565
2566 /*
2567 * Make sure opt_state->n_blocks * opt_state->nodewords fits
2568 * in a u_int; we use it as a u_int number-of-iterations
2569 * value.
2570 */
2571 product = opt_state->n_blocks * opt_state->nodewords;
2572 if ((product / opt_state->n_blocks) != opt_state->nodewords) {
2573 /*
2574 * XXX - just punt and don't try to optimize?
2575 * In practice, this is unlikely to happen with
2576 * a normal filter.
2577 */
2578 opt_error(opt_state, "filter is too complex to optimize");
2579 }
2580
2581 /*
2582 * Make sure the total memory required for that doesn't
2583 * overflow.
2584 */
2585 block_memsize = (size_t)2 * product * sizeof(*opt_state->space);
2586 if ((block_memsize / product) != 2 * sizeof(*opt_state->space)) {
2587 opt_error(opt_state, "filter is too complex to optimize");
2588 }
2589
2590 /*
2591 * Make sure opt_state->n_edges * opt_state->edgewords fits
2592 * in a u_int; we use it as a u_int number-of-iterations
2593 * value.
2594 */
2595 product = opt_state->n_edges * opt_state->edgewords;
2596 if ((product / opt_state->n_edges) != opt_state->edgewords) {
2597 opt_error(opt_state, "filter is too complex to optimize");
2598 }
2599
2600 /*
2601 * Make sure the total memory required for that doesn't
2602 * overflow.
2603 */
2604 edge_memsize = (size_t)product * sizeof(*opt_state->space);
2605 if (edge_memsize / product != sizeof(*opt_state->space)) {
2606 opt_error(opt_state, "filter is too complex to optimize");
2607 }
2608
2609 /*
2610 * Make sure the total memory required for both of them doesn't
2611 * overflow.
2612 */
2613 if (block_memsize > SIZE_MAX - edge_memsize) {
2614 opt_error(opt_state, "filter is too complex to optimize");
2615 }
2616
2617 /* XXX */
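/*
 * One contiguous arena holds all of the bit-vector sets: each block gets
 * 'nodewords' words for its dominator set and another 'nodewords' for its
 * closure set, and each edge gets 'edgewords' words for its edge-dominator
 * set.  The loops below carve the arena up accordingly.
 */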
2618 opt_state->space = (bpf_u_int32 *)malloc(block_memsize + edge_memsize);
2619 if (opt_state->space == NULL) {
2620 opt_error(opt_state, "malloc");
2621 }
2622 p = opt_state->space;
2623 opt_state->all_dom_sets = p;
2624 for (i = 0; i < n; ++i) {
2625 opt_state->blocks[i]->dom = p;
2626 p += opt_state->nodewords;
2627 }
2628 opt_state->all_closure_sets = p;
2629 for (i = 0; i < n; ++i) {
2630 opt_state->blocks[i]->closure = p;
2631 p += opt_state->nodewords;
2632 }
2633 opt_state->all_edge_sets = p;
2634 for (i = 0; i < n; ++i) {
2635 register struct block *b = opt_state->blocks[i];
2636
2637 b->et.edom = p;
2638 p += opt_state->edgewords;
2639 b->ef.edom = p;
2640 p += opt_state->edgewords;
2641 b->et.id = i;
2642 opt_state->edges[i] = &b->et;
2643 b->ef.id = opt_state->n_blocks + i;
2644 opt_state->edges[opt_state->n_blocks + i] = &b->ef;
2645 b->et.pred = b;
2646 b->ef.pred = b;
2647 }
2648 max_stmts = 0;
2649 for (i = 0; i < n; ++i)
2650 max_stmts += slength(opt_state->blocks[i]->stmts) + 1;
2651 /*
2652 * We allocate at most 3 value numbers per statement,
2653 * so this is an upper bound on the number of valnodes
2654 * we'll need.
2655 */
2656 opt_state->maxval = 3 * max_stmts;
2657 opt_state->vmap = (struct vmapinfo *)calloc(opt_state->maxval, sizeof(*opt_state->vmap));
2658 if (opt_state->vmap == NULL) {
2659 opt_error(opt_state, "malloc");
2660 }
2661 opt_state->vnode_base = (struct valnode *)calloc(opt_state->maxval, sizeof(*opt_state->vnode_base));
2662 if (opt_state->vnode_base == NULL) {
2663 opt_error(opt_state, "malloc");
2664 }
2665 }
2666
2667 /*
2668 * This is only used when supporting optimizer debugging. It is
2669 * global state, so do *not* do more than one compile in parallel
2670 * and expect it to provide meaningful information.
2671 */
2672 #ifdef BDEBUG
2673 int bids[NBIDS];
2674 #endif
2675
2676 static void PCAP_NORETURN conv_error(conv_state_t *, const char *, ...)
2677 PCAP_PRINTFLIKE(2, 3);
2678
2679 /*
2680 * Returns true if successful. Returns false if a branch has
2681 * an offset that is too large. If so, we have marked that
2682 * branch so that on a subsequent iteration, it will be treated
2683 * properly.
2684 */
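/*
 * The output is filled in from the tail of the instruction array
 * backwards: both successors are converted first, then this block's
 * statements are copied in immediately before them and the conditional
 * jump's 8-bit jt/jf offsets are computed.  If an offset will not fit in
 * 8 bits, the block is flagged via longjt/longjf and 0 is returned, so
 * that the caller's retry loop leaves room for an extra BPF_JMP|BPF_JA
 * instruction on the next attempt.
 */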
2685 static int
2686 convert_code_r(conv_state_t *conv_state, struct icode *ic, struct block *p)
2687 {
2688 struct bpf_insn *dst;
2689 struct slist *src;
2690 u_int slen;
2691 u_int off;
2692 struct slist **offset = NULL;
2693
2694 if (p == 0 || isMarked(ic, p))
2695 return (1);
2696 Mark(ic, p);
2697
2698 if (convert_code_r(conv_state, ic, JF(p)) == 0)
2699 return (0);
2700 if (convert_code_r(conv_state, ic, JT(p)) == 0)
2701 return (0);
2702
2703 slen = slength(p->stmts);
2704 dst = conv_state->ftail -= (slen + 1 + p->longjt + p->longjf);
2705 /* inflate length by any extra jumps */
2706
2707 p->offset = (int)(dst - conv_state->fstart);
2708
2709 /* generate offset[] for convenience */
2710 if (slen) {
2711 offset = (struct slist **)calloc(slen, sizeof(struct slist *));
2712 if (!offset) {
2713 conv_error(conv_state, "not enough core");
2714 /*NOTREACHED*/
2715 }
2716 }
2717 src = p->stmts;
2718 for (off = 0; off < slen && src; off++) {
2719 #if 0
2720 printf("off=%d src=%x\n", off, src);
2721 #endif
2722 offset[off] = src;
2723 src = src->next;
2724 }
2725
2726 off = 0;
2727 for (src = p->stmts; src; src = src->next) {
2728 if (src->s.code == NOP)
2729 continue;
2730 dst->code = (u_short)src->s.code;
2731 dst->k = src->s.k;
2732
2733 /* fill block-local relative jump */
2734 if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
2735 #if 0
2736 if (src->s.jt || src->s.jf) {
2737 free(offset);
2738 conv_error(conv_state, "illegal jmp destination");
2739 /*NOTREACHED*/
2740 }
2741 #endif
2742 goto filled;
2743 }
2744 if (off == slen - 2) /*???*/
2745 goto filled;
2746
2747 {
2748 u_int i;
2749 int jt, jf;
2750 const char ljerr[] = "%s for block-local relative jump: off=%d";
2751
2752 #if 0
2753 printf("code=%x off=%d %x %x\n", src->s.code,
2754 off, src->s.jt, src->s.jf);
2755 #endif
2756
2757 if (!src->s.jt || !src->s.jf) {
2758 free(offset);
2759 conv_error(conv_state, ljerr, "no jmp destination", off);
2760 /*NOTREACHED*/
2761 }
2762
2763 jt = jf = 0;
2764 for (i = 0; i < slen; i++) {
2765 if (offset[i] == src->s.jt) {
2766 if (jt) {
2767 free(offset);
2768 conv_error(conv_state, ljerr, "multiple matches", off);
2769 /*NOTREACHED*/
2770 }
2771
2772 if (i - off - 1 >= 256) {
2773 free(offset);
2774 conv_error(conv_state, ljerr, "out-of-range jump", off);
2775 /*NOTREACHED*/
2776 }
2777 dst->jt = (u_char)(i - off - 1);
2778 jt++;
2779 }
2780 if (offset[i] == src->s.jf) {
2781 if (jf) {
2782 free(offset);
2783 conv_error(conv_state, ljerr, "multiple matches", off);
2784 /*NOTREACHED*/
2785 }
2786 if (i - off - 1 >= 256) {
2787 free(offset);
2788 conv_error(conv_state, ljerr, "out-of-range jump", off);
2789 /*NOTREACHED*/
2790 }
2791 dst->jf = (u_char)(i - off - 1);
2792 jf++;
2793 }
2794 }
2795 if (!jt || !jf) {
2796 free(offset);
2797 conv_error(conv_state, ljerr, "no destination found", off);
2798 /*NOTREACHED*/
2799 }
2800 }
2801 filled:
2802 ++dst;
2803 ++off;
2804 }
2805 if (offset)
2806 free(offset);
2807
2808 #ifdef BDEBUG
2809 if (dst - conv_state->fstart < NBIDS)
2810 bids[dst - conv_state->fstart] = p->id + 1;
2811 #endif
2812 dst->code = (u_short)p->s.code;
2813 dst->k = p->s.k;
2814 if (JT(p)) {
2815 /* number of extra jumps inserted */
2816 u_char extrajmps = 0;
2817 off = JT(p)->offset - (p->offset + slen) - 1;
2818 if (off >= 256) {
2819 /* offset too large for branch, must add a jump */
2820 if (p->longjt == 0) {
2821 /* mark this instruction and retry */
2822 p->longjt++;
2823 return(0);
2824 }
2825 dst->jt = extrajmps;
2826 extrajmps++;
2827 dst[extrajmps].code = BPF_JMP|BPF_JA;
2828 dst[extrajmps].k = off - extrajmps;
2829 }
2830 else
2831 dst->jt = (u_char)off;
2832 off = JF(p)->offset - (p->offset + slen) - 1;
2833 if (off >= 256) {
2834 /* offset too large for branch, must add a jump */
2835 if (p->longjf == 0) {
2836 /* mark this instruction and retry */
2837 p->longjf++;
2838 return(0);
2839 }
2840 /* branch if F to following jump */
2841 /* if two jumps are inserted, F goes to second one */
2842 dst->jf = extrajmps;
2843 extrajmps++;
2844 dst[extrajmps].code = BPF_JMP|BPF_JA;
2845 dst[extrajmps].k = off - extrajmps;
2846 }
2847 else
2848 dst->jf = (u_char)off;
2849 }
2850 return (1);
2851 }
2852
2853
2854 /*
2855 * Convert flowgraph intermediate representation to the
2856 * BPF array representation. Set *lenp to the number of instructions.
2857 *
2858 * This routine does *NOT* leak the memory pointed to by fp. It *must
2859 * not* do free(fp) before returning fp; doing so would make no sense,
2860 * as the BPF array pointed to by the return value of icode_to_fcode()
2861 * must be valid - it's being returned for use in a bpf_program structure.
2862 *
2863 * If it appears that icode_to_fcode() is leaking, the problem is that
2864 * the program using pcap_compile() is failing to free the memory in
2865 * the BPF program when it's done - the leak is in the program, not in
2866 * the routine that happens to be allocating the memory. (By analogy, if
2867 * a program calls fopen() without ever calling fclose() on the FILE *,
2868 * it will leak the FILE structure; the leak is not in fopen(), it's in
2869 * the program.) Change the program to use pcap_freecode() when it's
2870 * done with the filter program. See the pcap man page.
2871 */
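/*
 * A sketch (not part of libpcap itself) of the expected pattern in a
 * program using the compiler, where 'p' is assumed to be an open pcap_t
 * handle and "ip and tcp" is just an example filter:
 *
 *	struct bpf_program prog;
 *
 *	if (pcap_compile(p, &prog, "ip and tcp", 1, PCAP_NETMASK_UNKNOWN) == 0) {
 *		... use prog, e.g. with pcap_setfilter(p, &prog) ...
 *		pcap_freecode(&prog);	<- frees the insns returned below
 *	}
 */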
2872 struct bpf_insn *
2873 icode_to_fcode(struct icode *ic, struct block *root, u_int *lenp,
2874 char *errbuf)
2875 {
2876 u_int n;
2877 struct bpf_insn *fp;
2878 conv_state_t conv_state;
2879
2880 conv_state.fstart = NULL;
2881 conv_state.errbuf = errbuf;
2882 if (setjmp(conv_state.top_ctx) != 0) {
2883 free(conv_state.fstart);
2884 return NULL;
2885 }
2886
2887 /*
2888 * Loop doing convert_code_r() until no branches remain
2889 * with too-large offsets.
2890 */
2891 for (;;) {
2892 unMarkAll(ic);
2893 n = *lenp = count_stmts(ic, root);
2894
2895 fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
2896 if (fp == NULL) {
2897 (void)snprintf(errbuf, PCAP_ERRBUF_SIZE,
2898 "malloc");
2899 return NULL;
2900 }
2901 memset((char *)fp, 0, sizeof(*fp) * n);
2902 conv_state.fstart = fp;
2903 conv_state.ftail = fp + n;
2904
2905 unMarkAll(ic);
2906 if (convert_code_r(&conv_state, ic, root))
2907 break;
2908 free(fp);
2909 }
2910
2911 return fp;
2912 }
2913
2914 /*
2915 * For icode_to_fcode() errors.
2916 */
2917 static void PCAP_NORETURN
2918 conv_error(conv_state_t *conv_state, const char *fmt, ...)
2919 {
2920 va_list ap;
2921
2922 va_start(ap, fmt);
2923 (void)vsnprintf(conv_state->errbuf,
2924 PCAP_ERRBUF_SIZE, fmt, ap);
2925 va_end(ap);
2926 longjmp(conv_state->top_ctx, 1);
2927 /* NOTREACHED */
2928 #ifdef _AIX
2929 PCAP_UNREACHABLE
2930 #endif /* _AIX */
2931 }
2932
2933 /*
2934 * Make a copy of a BPF program and put it in the "fcode" member of
2935 * a "pcap_t".
2936 *
2937 * If we fail to allocate memory for the copy, fill in the "errbuf"
2938 * member of the "pcap_t" with an error message, and return -1;
2939 * otherwise, return 0.
2940 */
2941 int
2942 pcapint_install_bpf_program(pcap_t *p, struct bpf_program *fp)
2943 {
2944 size_t prog_size;
2945
2946 /*
2947 * Validate the program.
2948 */
2949 if (!pcapint_validate_filter(fp->bf_insns, fp->bf_len)) {
2950 snprintf(p->errbuf, sizeof(p->errbuf),
2951 "BPF program is not valid");
2952 return (-1);
2953 }
2954
2955 /*
2956 * Free up any already installed program.
2957 */
2958 pcap_freecode(&p->fcode);
2959
2960 prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
2961 p->fcode.bf_len = fp->bf_len;
2962 p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
2963 if (p->fcode.bf_insns == NULL) {
2964 pcapint_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
2965 errno, "malloc");
2966 return (-1);
2967 }
2968 memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
2969 return (0);
2970 }
2971
2972 #ifdef BDEBUG
2973 static void
2974 dot_dump_node(struct icode *ic, struct block *block, struct bpf_program *prog,
2975 FILE *out)
2976 {
2977 int icount, noffset;
2978 int i;
2979
2980 if (block == NULL || isMarked(ic, block))
2981 return;
2982 Mark(ic, block);
2983
2984 icount = slength(block->stmts) + 1 + block->longjt + block->longjf;
2985 noffset = min(block->offset + icount, (int)prog->bf_len);
2986
2987 fprintf(out, "\tblock%u [shape=ellipse, id=\"block-%u\" label=\"BLOCK%u\\n", block->id, block->id, block->id);
2988 for (i = block->offset; i < noffset; i++) {
2989 fprintf(out, "\\n%s", bpf_image(prog->bf_insns + i, i));
2990 }
2991 fprintf(out, "\" tooltip=\"");
2992 for (i = 0; i < BPF_MEMWORDS; i++)
2993 if (block->val[i] != VAL_UNKNOWN)
2994 fprintf(out, "val[%d]=%d ", i, block->val[i]);
2995 fprintf(out, "val[A]=%d ", block->val[A_ATOM]);
2996 fprintf(out, "val[X]=%d", block->val[X_ATOM]);
2997 fprintf(out, "\"");
2998 if (JT(block) == NULL)
2999 fprintf(out, ", peripheries=2");
3000 fprintf(out, "];\n");
3001
3002 dot_dump_node(ic, JT(block), prog, out);
3003 dot_dump_node(ic, JF(block), prog, out);
3004 }
3005
3006 static void
3007 dot_dump_edge(struct icode *ic, struct block *block, FILE *out)
3008 {
3009 if (block == NULL || isMarked(ic, block))
3010 return;
3011 Mark(ic, block);
3012
3013 if (JT(block)) {
3014 fprintf(out, "\t\"block%u\":se -> \"block%u\":n [label=\"T\"]; \n",
3015 block->id, JT(block)->id);
3016 fprintf(out, "\t\"block%u\":sw -> \"block%u\":n [label=\"F\"]; \n",
3017 block->id, JF(block)->id);
3018 }
3019 dot_dump_edge(ic, JT(block), out);
3020 dot_dump_edge(ic, JF(block), out);
3021 }
3022
3023 /* Output the block CFG using the graphviz/DOT language.
3024 * In the CFG, each block's code, the value index for each register at EXIT,
3025 * and the jump relationships are shown.
3026 *
3027 * example DOT for BPF `ip src host 1.1.1.1' is:
3028 digraph BPF {
3029 block0 [shape=ellipse, id="block-0" label="BLOCK0\n\n(000) ldh [12]\n(001) jeq #0x800 jt 2 jf 5" tooltip="val[A]=0 val[X]=0"];
3030 block1 [shape=ellipse, id="block-1" label="BLOCK1\n\n(002) ld [26]\n(003) jeq #0x1010101 jt 4 jf 5" tooltip="val[A]=0 val[X]=0"];
3031 block2 [shape=ellipse, id="block-2" label="BLOCK2\n\n(004) ret #68" tooltip="val[A]=0 val[X]=0", peripheries=2];
3032 block3 [shape=ellipse, id="block-3" label="BLOCK3\n\n(005) ret #0" tooltip="val[A]=0 val[X]=0", peripheries=2];
3033 "block0":se -> "block1":n [label="T"];
3034 "block0":sw -> "block3":n [label="F"];
3035 "block1":se -> "block2":n [label="T"];
3036 "block1":sw -> "block3":n [label="F"];
3037 }
3038 *
3039 * After installing graphviz from https://www.graphviz.org/, save the output as bpf.dot
3040 * and run `dot -Tpng -O bpf.dot' to draw the graph.
3041 */
3042 static int
3043 dot_dump(struct icode *ic, char *errbuf)
3044 {
3045 struct bpf_program f;
3046 FILE *out = stdout;
3047
3048 memset(bids, 0, sizeof bids);
3049 f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3050 if (f.bf_insns == NULL)
3051 return -1;
3052
3053 fprintf(out, "digraph BPF {\n");
3054 unMarkAll(ic);
3055 dot_dump_node(ic, ic->root, &f, out);
3056 unMarkAll(ic);
3057 dot_dump_edge(ic, ic->root, out);
3058 fprintf(out, "}\n");
3059
3060 free((char *)f.bf_insns);
3061 return 0;
3062 }
3063
3064 static int
3065 plain_dump(struct icode *ic, char *errbuf)
3066 {
3067 struct bpf_program f;
3068
3069 memset(bids, 0, sizeof bids);
3070 f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3071 if (f.bf_insns == NULL)
3072 return -1;
3073 bpf_dump(&f, 1);
3074 putchar('\n');
3075 free((char *)f.bf_insns);
3076 return 0;
3077 }
3078
3079 static void
3080 opt_dump(opt_state_t *opt_state, struct icode *ic)
3081 {
3082 int status;
3083 char errbuf[PCAP_ERRBUF_SIZE];
3084
3085 /*
3086 * If the CFG, in DOT format, is requested, output it rather than
3087 * the code that would be generated from that graph.
3088 */
3089 if (pcap_print_dot_graph)
3090 status = dot_dump(ic, errbuf);
3091 else
3092 status = plain_dump(ic, errbuf);
3093 if (status == -1)
3094 opt_error(opt_state, "opt_dump: icode_to_fcode failed: %s", errbuf);
3095 }
3096 #endif