1 /*
2 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that: (1) source code distributions
7 * retain the above copyright notice and this paragraph in its entirety, (2)
8 * distributions including binary code include the above copyright notice and
9 * this paragraph in its entirety in the documentation or other materials
10 * provided with the distribution, and (3) all advertising materials mentioning
11 * features or use of this software display the following acknowledgement:
12 * ``This product includes software developed by the University of California,
13 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
14 * the University nor the names of its contributors may be used to endorse
15 * or promote products derived from this software without specific prior
16 * written permission.
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20 *
21 * Optimization module for BPF code intermediate representation.
22 */
23
24 #ifdef HAVE_CONFIG_H
25 #include <config.h>
26 #endif
27
28 #include <pcap-types.h>
29
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <memory.h>
33 #include <setjmp.h>
34 #include <string.h>
35 #include <limits.h> /* for SIZE_MAX */
36 #include <errno.h>
37
38 #include "pcap-int.h"
39
40 #include "gencode.h"
41 #include "optimize.h"
42 #include "diag-control.h"
43
44 #ifdef HAVE_OS_PROTO_H
45 #include "os-proto.h"
46 #endif
47
48 #ifdef BDEBUG
49 /*
50 * The internal "debug printout" flag for the filter expression optimizer.
51 * The code to print that stuff is present only if BDEBUG is defined, so
52 * the flag, and the routine to set it, are defined only if BDEBUG is
53 * defined.
54 */
55 static int pcap_optimizer_debug;
56
57 /*
58 * Routine to set that flag.
59 *
60 * This is intended for libpcap developers, not for general use.
61 * If you want to set these in a program, you'll have to declare this
62 * routine yourself, with the appropriate DLL import attribute on Windows;
63 * it's not declared in any header file, and won't be declared in any
64 * header file provided by libpcap.
65 */
66 PCAP_API void pcap_set_optimizer_debug(int value);
67
68 PCAP_API_DEF void
69 pcap_set_optimizer_debug(int value)
70 {
71 pcap_optimizer_debug = value;
72 }
73
74 /*
75 * The internal "print dot graph" flag for the filter expression optimizer.
76 * The code to print that stuff is present only if BDEBUG is defined, so
77 * the flag, and the routine to set it, are defined only if BDEBUG is
78 * defined.
79 */
80 static int pcap_print_dot_graph;
81
82 /*
83 * Routine to set that flag.
84 *
85 * This is intended for libpcap developers, not for general use.
86 * If you want to set these in a program, you'll have to declare this
87 * routine yourself, with the appropriate DLL import attribute on Windows;
88 * it's not declared in any header file, and won't be declared in any
89 * header file provided by libpcap.
90 */
91 PCAP_API void pcap_set_print_dot_graph(int value);
92
93 PCAP_API_DEF void
94 pcap_set_print_dot_graph(int value)
95 {
96 pcap_print_dot_graph = value;
97 }
98
99 #endif
100
101 /*
102 * lowest_set_bit().
103 *
104 * Takes a 32-bit integer as an argument.
105 *
106 * If handed a non-zero value, returns the index of the lowest set bit,
107 * counting upwards from zero.
108 *
109 * If handed zero, the results are platform- and compiler-dependent.
110 * Keep it out of the light, don't give it any water, don't feed it
111 * after midnight, and don't pass zero to it.
112 *
113 * This is the same as the count of trailing zeroes in the word.
114 */
115 #if PCAP_IS_AT_LEAST_GNUC_VERSION(3,4)
116 /*
117 * GCC 3.4 and later; we have __builtin_ctz().
118 */
119 #define lowest_set_bit(mask) ((u_int)__builtin_ctz(mask))
120 #elif defined(_MSC_VER)
121 /*
122 * Visual Studio; we support only 2005 and later, so use
123 * _BitScanForward().
124 */
125 #include <intrin.h>
126
127 #ifndef __clang__
128 #pragma intrinsic(_BitScanForward)
129 #endif
130
131 static __forceinline u_int
132 lowest_set_bit(int mask)
133 {
134 unsigned long bit;
135
136 /*
137 * Don't sign-extend mask if long is longer than int.
138 * (It's currently not, in MSVC, even on 64-bit platforms, but....)
139 */
140 if (_BitScanForward(&bit, (unsigned int)mask) == 0)
141 abort(); /* mask is zero */
142 return (u_int)bit;
143 }
144 #elif (defined(MSDOS) && defined(__WATCOMC__)) || defined(STRINGS_H_DECLARES_FFS)
145 /*
146 * MS-DOS with Watcom C, which has <strings.h> and declares ffs() there,
147 * or some other platform (UN*X conforming to a sufficiently recent version
148 * of the Single UNIX Specification).
149 */
150 #include <strings.h>
151 #define lowest_set_bit(mask) (u_int)((ffs((mask)) - 1))
152 #elif (defined(MSDOS) && defined(__DJGPP__)) || defined(__hpux)
153 /*
154 * MS-DOS with DJGPP or HP-UX 11i v3, which declare ffs() in <string.h>,
155 * which we've already included. Place this branch after the <strings.h>
156 * branch, in case a later release of HP-UX makes the declaration available
157 * via the standard header.
158 */
159 #define lowest_set_bit(mask) ((u_int)(ffs((mask)) - 1))
160 #else
161 /*
162 * None of the above.
163 * Use a perfect-hash-function-based function.
164 */
165 static u_int
166 lowest_set_bit(int mask)
167 {
168 unsigned int v = (unsigned int)mask;
169
170 static const u_int MultiplyDeBruijnBitPosition[32] = {
171 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
172 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
173 };
174
175 /*
176 * We strip off all but the lowermost set bit (v & ~v),
177 * and perform a minimal perfect hash on it to look up the
178 * number of low-order zero bits in a table.
179 *
180 * See:
181 *
182 * http://7ooo.mooo.com/text/ComputingTrailingZerosHOWTO.pdf
183 *
184 * http://supertech.csail.mit.edu/papers/debruijn.pdf
185 */
186 return (MultiplyDeBruijnBitPosition[((v & -v) * 0x077CB531U) >> 27]);
187 }
188 #endif
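/*
 * Usage sketch (illustrative only, kept out of the build with "#if 0";
 * the function name is hypothetical): a few worked cases of the
 * contract described above.  lowest_set_bit(x) is the count of
 * trailing zero bits in x, i.e. the index of the lowest set bit.
 */
#if 0
#include <assert.h>

static void
lowest_set_bit_example(void)
{
	assert(lowest_set_bit(0x00000001) == 0);	/* ...0001 -> bit 0 */
	assert(lowest_set_bit(0x00000028) == 3);	/* 101000 -> bit 3 */
	assert(lowest_set_bit(0x40000000) == 30);	/* only bit 30 is set */
	/* lowest_set_bit(0) is deliberately left undefined; never pass 0. */
}
#endif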
189
190 /*
191 * Represents a deleted instruction.
192 */
193 #define NOP -1
194
195 /*
196 * Register numbers for use-def values.
197 * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
198 * location. A_ATOM is the accumulator and X_ATOM is the index
199 * register.
200 */
201 #define A_ATOM BPF_MEMWORDS
202 #define X_ATOM (BPF_MEMWORDS+1)
203
204 /*
205 * This define is used to represent *both* the accumulator and
206 * x register in use-def computations.
207 * Currently, the use-def code assumes only one definition per instruction.
208 */
209 #define AX_ATOM N_ATOMS
210
211 /*
212 * These data structures are used in a Cocke and Schwartz style
213 * value numbering scheme. Since the flowgraph is acyclic,
214 * exit values can be propagated from a node's predecessors
215 * provided it is uniquely defined.
216 */
217 struct valnode {
218 int code;
219 bpf_u_int32 v0, v1;
220 int val; /* the value number */
221 struct valnode *next;
222 };
223
224 /* Integer constants mapped with the load immediate opcode. */
225 #define K(i) F(opt_state, BPF_LD|BPF_IMM|BPF_W, i, 0U)
226
227 struct vmapinfo {
228 int is_const;
229 bpf_u_int32 const_val;
230 };
231
232 typedef struct {
233 /*
234 * Place to longjmp to on an error.
235 */
236 jmp_buf top_ctx;
237
238 /*
239 * The buffer into which to put an error message.
240 */
241 char *errbuf;
242
243 /*
244 * A flag to indicate that further optimization is needed.
245 * Iterative passes are continued until a given pass yields no
246 * code simplification or branch movement.
247 */
248 int done;
249
250 /*
251 * XXX - detect loops that do nothing but repeated AND/OR pullups
252 * and edge moves.
253 * If 100 passes in a row do nothing but that, treat that as a
254 * sign that we're in a loop that just shuffles in a cycle in
255 * which each pass just shuffles the code and we eventually
256 * get back to the original configuration.
257 *
258 * XXX - we need a non-heuristic way of detecting, or preventing,
259 * such a cycle.
260 */
261 int non_branch_movement_performed;
262
263 u_int n_blocks; /* number of blocks in the CFG; guaranteed to be > 0, as the CFG contains at least a RET instruction */
264 struct block **blocks;
265 u_int n_edges; /* twice n_blocks, so guaranteed to be > 0 */
266 struct edge **edges;
267
268 /*
269 * A bit vector set representation of the dominators.
270 * We round up the set size to the next power of two.
271 */
272 u_int nodewords; /* number of 32-bit words for a bit vector of "number of nodes" bits; guaranteed to be > 0 */
273 u_int edgewords; /* number of 32-bit words for a bit vector of "number of edges" bits; guaranteed to be > 0 */
274 struct block **levels;
275 bpf_u_int32 *space;
276
277 #define BITS_PER_WORD (8*sizeof(bpf_u_int32))
278 /*
279 * True if a is in uset {p}
280 */
281 #define SET_MEMBER(p, a) \
282 ((p)[(unsigned)(a) / BITS_PER_WORD] & ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD)))
283
284 /*
285 * Add 'a' to uset p.
286 */
287 #define SET_INSERT(p, a) \
288 (p)[(unsigned)(a) / BITS_PER_WORD] |= ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
289
290 /*
291 * Delete 'a' from uset p.
292 */
293 #define SET_DELETE(p, a) \
294 (p)[(unsigned)(a) / BITS_PER_WORD] &= ~((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
295
296 /*
297 * a := a intersect b
298 * n must be guaranteed to be > 0
299 */
300 #define SET_INTERSECT(a, b, n)\
301 {\
302 register bpf_u_int32 *_x = a, *_y = b;\
303 register u_int _n = n;\
304 do *_x++ &= *_y++; while (--_n != 0);\
305 }
306
307 /*
308 * a := a - b
309 * n must be guaranteed to be > 0
310 */
311 #define SET_SUBTRACT(a, b, n)\
312 {\
313 register bpf_u_int32 *_x = a, *_y = b;\
314 register u_int _n = n;\
315 do *_x++ &=~ *_y++; while (--_n != 0);\
316 }
317
318 /*
319 * a := a union b
320 * n must be guaranteed to be > 0
321 */
322 #define SET_UNION(a, b, n)\
323 {\
324 register bpf_u_int32 *_x = a, *_y = b;\
325 register u_int _n = n;\
326 do *_x++ |= *_y++; while (--_n != 0);\
327 }
328
329 uset all_dom_sets;
330 uset all_closure_sets;
331 uset all_edge_sets;
332
333 #define MODULUS 213
334 struct valnode *hashtbl[MODULUS];
335 bpf_u_int32 curval;
336 bpf_u_int32 maxval;
337
338 struct vmapinfo *vmap;
339 struct valnode *vnode_base;
340 struct valnode *next_vnode;
341 } opt_state_t;
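/*
 * Usage sketch (illustrative only, kept out of the build with "#if 0";
 * the function name is hypothetical) for the SET_* macros defined
 * above inside opt_state_t: IDs are stored one bit apiece in an array
 * of 32-bit words, so membership, insertion and deletion are an index,
 * a shift and a mask.
 */
#if 0
static void
uset_example(void)
{
	bpf_u_int32 set[2] = { 0, 0 };	/* room for 2 * BITS_PER_WORD IDs */

	SET_INSERT(set, 3);		/* sets bit 3 of set[0] */
	SET_INSERT(set, 40);		/* sets bit 8 of set[1] */
	if (SET_MEMBER(set, 3) && !SET_MEMBER(set, 41)) {
		/* membership is a single index, shift and test */
	}
	SET_DELETE(set, 3);		/* clears bit 3 of set[0] again */
}
#endif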
342
343 typedef struct {
344 /*
345 * Place to longjmp to on an error.
346 */
347 jmp_buf top_ctx;
348
349 /*
350 * The buffer into which to put error message.
351 */
352 char *errbuf;
353
354 /*
355 * Some pointers used to convert the basic block form of the code,
356 * into the array form that BPF requires. 'fstart' will point to
357 * the malloc'd array while 'ftail' is used during the recursive
358 * traversal.
359 */
360 struct bpf_insn *fstart;
361 struct bpf_insn *ftail;
362 } conv_state_t;
363
364 static void opt_init(opt_state_t *, struct icode *);
365 static void opt_cleanup(opt_state_t *);
366 static void PCAP_NORETURN opt_error(opt_state_t *, const char *, ...)
367 PCAP_PRINTFLIKE(2, 3);
368
369 static void intern_blocks(opt_state_t *, struct icode *);
370
371 static void find_inedges(opt_state_t *, struct block *);
372 #ifdef BDEBUG
373 static void opt_dump(opt_state_t *, struct icode *);
374 #endif
375
376 #ifndef MAX
377 #define MAX(a,b) ((a)>(b)?(a):(b))
378 #endif
379
380 static void
381 find_levels_r(opt_state_t *opt_state, struct icode *ic, struct block *b)
382 {
383 int level;
384
385 if (isMarked(ic, b))
386 return;
387
388 Mark(ic, b);
389 b->link = 0;
390
391 if (JT(b)) {
392 find_levels_r(opt_state, ic, JT(b));
393 find_levels_r(opt_state, ic, JF(b));
394 level = MAX(JT(b)->level, JF(b)->level) + 1;
395 } else
396 level = 0;
397 b->level = level;
398 b->link = opt_state->levels[level];
399 opt_state->levels[level] = b;
400 }
401
402 /*
403 * Level graph. The levels go from 0 at the leaves to
404 * N_LEVELS at the root. The opt_state->levels[] array points to the
405 * first node of the level list, whose elements are linked
406 * with the 'link' field of the struct block.
407 */
408 static void
409 find_levels(opt_state_t *opt_state, struct icode *ic)
410 {
411 memset((char *)opt_state->levels, 0, opt_state->n_blocks * sizeof(*opt_state->levels));
412 unMarkAll(ic);
413 find_levels_r(opt_state, ic, ic->root);
414 }
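/*
 * Illustrative example (hypothetical three-block graph): a root block
 * ending in a conditional branch whose two successors are both RET
 * leaves.  The two leaves get level 0 and are chained onto
 * opt_state->levels[0] through their 'link' fields; the root gets
 * level MAX(0, 0) + 1 = 1 and lands on opt_state->levels[1].
 */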
415
416 /*
417 * Find dominator relationships.
418 * Assumes graph has been leveled.
419 */
420 static void
421 find_dom(opt_state_t *opt_state, struct block *root)
422 {
423 u_int i;
424 int level;
425 struct block *b;
426 bpf_u_int32 *x;
427
428 /*
429 * Initialize sets to contain all nodes.
430 */
431 x = opt_state->all_dom_sets;
432 /*
433 * In opt_init(), we've made sure the product doesn't overflow.
434 */
435 i = opt_state->n_blocks * opt_state->nodewords;
436 while (i != 0) {
437 --i;
438 *x++ = 0xFFFFFFFFU;
439 }
440 /* Root starts off empty. */
441 for (i = opt_state->nodewords; i != 0;) {
442 --i;
443 root->dom[i] = 0;
444 }
445
446 /* root->level is the highest level number found. */
447 for (level = root->level; level >= 0; --level) {
448 for (b = opt_state->levels[level]; b; b = b->link) {
449 SET_INSERT(b->dom, b->id);
450 if (JT(b) == 0)
451 continue;
452 SET_INTERSECT(JT(b)->dom, b->dom, opt_state->nodewords);
453 SET_INTERSECT(JF(b)->dom, b->dom, opt_state->nodewords);
454 }
455 }
456 }
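/*
 * Illustrative example (hypothetical diamond-shaped flow graph): with
 * root R branching to A and B, both of which branch to a common return
 * block X, the pass above yields
 *
 *	dom(R) = {R}, dom(A) = {R, A}, dom(B) = {R, B},
 *	dom(X) = ({R, A} intersect {R, B}) plus X itself = {R, X}
 *
 * i.e. only R (and X itself) dominates X, as X can be reached while
 * avoiding either one of A and B.
 */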
457
458 static void
459 propedom(opt_state_t *opt_state, struct edge *ep)
460 {
461 SET_INSERT(ep->edom, ep->id);
462 if (ep->succ) {
463 SET_INTERSECT(ep->succ->et.edom, ep->edom, opt_state->edgewords);
464 SET_INTERSECT(ep->succ->ef.edom, ep->edom, opt_state->edgewords);
465 }
466 }
467
468 /*
469 * Compute edge dominators.
470 * Assumes graph has been leveled and predecessors established.
471 */
472 static void
473 find_edom(opt_state_t *opt_state, struct block *root)
474 {
475 u_int i;
476 uset x;
477 int level;
478 struct block *b;
479
480 x = opt_state->all_edge_sets;
481 /*
482 * In opt_init(), we've made sure the product doesn't overflow.
483 */
484 for (i = opt_state->n_edges * opt_state->edgewords; i != 0; ) {
485 --i;
486 x[i] = 0xFFFFFFFFU;
487 }
488
489 /* root->level is the highest level number found. */
490 memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
491 memset(root->ef.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
492 for (level = root->level; level >= 0; --level) {
493 for (b = opt_state->levels[level]; b != 0; b = b->link) {
494 propedom(opt_state, &b->et);
495 propedom(opt_state, &b->ef);
496 }
497 }
498 }
499
500 /*
501 * Find the backwards transitive closure of the flow graph. These sets
502 * are backwards in the sense that we find the set of nodes that reach
503 * a given node, not the set of nodes that can be reached by a node.
504 *
505 * Assumes graph has been leveled.
506 */
507 static void
508 find_closure(opt_state_t *opt_state, struct block *root)
509 {
510 int level;
511 struct block *b;
512
513 /*
514 * Initialize sets to contain no nodes.
515 */
516 memset((char *)opt_state->all_closure_sets, 0,
517 opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->all_closure_sets));
518
519 /* root->level is the highest level number found. */
520 for (level = root->level; level >= 0; --level) {
521 for (b = opt_state->levels[level]; b; b = b->link) {
522 SET_INSERT(b->closure, b->id);
523 if (JT(b) == 0)
524 continue;
525 SET_UNION(JT(b)->closure, b->closure, opt_state->nodewords);
526 SET_UNION(JF(b)->closure, b->closure, opt_state->nodewords);
527 }
528 }
529 }
530
531 /*
532 * Return the register number that is used by s.
533 *
534 * Returns A_ATOM if A is used, X_ATOM if X is used, AX_ATOM if both A and X
535 * are used, the scratch memory location's number if a scratch memory
536 * location is used (e.g., 0 for M[0]), or -1 if none of those are used.
537 *
538 * The implementation should probably change to an array access.
539 */
540 static int
541 atomuse(struct stmt *s)
542 {
543 register int c = s->code;
544
545 if (c == NOP)
546 return -1;
547
548 switch (BPF_CLASS(c)) {
549
550 case BPF_RET:
551 return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
552 (BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;
553
554 case BPF_LD:
555 case BPF_LDX:
556 /*
557 * As there are fewer than 2^31 memory locations,
558 * s->k should be convertible to int without problems.
559 */
560 return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
561 (BPF_MODE(c) == BPF_MEM) ? (int)s->k : -1;
562
563 case BPF_ST:
564 return A_ATOM;
565
566 case BPF_STX:
567 return X_ATOM;
568
569 case BPF_JMP:
570 case BPF_ALU:
571 if (BPF_SRC(c) == BPF_X)
572 return AX_ATOM;
573 return A_ATOM;
574
575 case BPF_MISC:
576 return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
577 }
578 abort();
579 /* NOTREACHED */
580 }
581
582 /*
583 * Return the register number that is defined by 's'. We assume that
584 * a single stmt cannot define more than one register. If no register
585 * is defined, return -1.
586 *
587 * The implementation should probably change to an array access.
588 */
589 static int
590 atomdef(struct stmt *s)
591 {
592 if (s->code == NOP)
593 return -1;
594
595 switch (BPF_CLASS(s->code)) {
596
597 case BPF_LD:
598 case BPF_ALU:
599 return A_ATOM;
600
601 case BPF_LDX:
602 return X_ATOM;
603
604 case BPF_ST:
605 case BPF_STX:
606 return s->k;
607
608 case BPF_MISC:
609 return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
610 }
611 return -1;
612 }
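/*
 * A few worked cases (illustrative only) for atomuse() and atomdef():
 *
 *	ld M[3]  (BPF_LD|BPF_MEM)	  uses atom 3	 defines A_ATOM
 *	add x	 (BPF_ALU|BPF_ADD|BPF_X)  uses AX_ATOM	 defines A_ATOM
 *	st M[1]	 (BPF_ST)		  uses A_ATOM	 defines atom 1
 *	tax	 (BPF_MISC|BPF_TAX)	  uses A_ATOM	 defines X_ATOM
 *	ret #0	 (BPF_RET with BPF_K)	  uses none (-1) defines none (-1)
 */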
613
614 /*
615 * Compute the sets of registers used, defined, and killed by 'b'.
616 *
617 * "Used" means that a statement in 'b' uses the register before any
618 * statement in 'b' defines it, i.e. it uses the value left in
619 * that register by a predecessor block of this block.
620 * "Defined" means that a statement in 'b' defines it.
621 * "Killed" means that a statement in 'b' defines it before any
622 * statement in 'b' uses it, i.e. it kills the value left in that
623 * register by a predecessor block of this block.
624 */
625 static void
626 compute_local_ud(struct block *b)
627 {
628 struct slist *s;
629 atomset def = 0, use = 0, killed = 0;
630 int atom;
631
632 for (s = b->stmts; s; s = s->next) {
633 if (s->s.code == NOP)
634 continue;
635 atom = atomuse(&s->s);
636 if (atom >= 0) {
637 if (atom == AX_ATOM) {
638 if (!ATOMELEM(def, X_ATOM))
639 use |= ATOMMASK(X_ATOM);
640 if (!ATOMELEM(def, A_ATOM))
641 use |= ATOMMASK(A_ATOM);
642 }
643 else if (atom < N_ATOMS) {
644 if (!ATOMELEM(def, atom))
645 use |= ATOMMASK(atom);
646 }
647 else
648 abort();
649 }
650 atom = atomdef(&s->s);
651 if (atom >= 0) {
652 if (!ATOMELEM(use, atom))
653 killed |= ATOMMASK(atom);
654 def |= ATOMMASK(atom);
655 }
656 }
657 if (BPF_CLASS(b->s.code) == BPF_JMP) {
658 /*
659 * XXX - what about RET?
660 */
661 atom = atomuse(&b->s);
662 if (atom >= 0) {
663 if (atom == AX_ATOM) {
664 if (!ATOMELEM(def, X_ATOM))
665 use |= ATOMMASK(X_ATOM);
666 if (!ATOMELEM(def, A_ATOM))
667 use |= ATOMMASK(A_ATOM);
668 }
669 else if (atom < N_ATOMS) {
670 if (!ATOMELEM(def, atom))
671 use |= ATOMMASK(atom);
672 }
673 else
674 abort();
675 }
676 }
677
678 b->def = def;
679 b->kill = killed;
680 b->in_use = use;
681 }
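/*
 * Worked example (hypothetical block): for a block whose statements are
 *
 *	tax; ld #1; st M[0]
 *
 * "tax" reads A before anything in the block writes it, so
 * in_use = {A_ATOM}; every register written here appears in
 * def = {A_ATOM, X_ATOM, atom 0}; X and M[0] are written before being
 * read, so kill = {X_ATOM, atom 0}.  A_ATOM is not killed because the
 * block consumed the incoming value of A first.
 */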
682
683 /*
684 * Assume graph is already leveled.
685 */
686 static void
687 find_ud(opt_state_t *opt_state, struct block *root)
688 {
689 int i, maxlevel;
690 struct block *p;
691
692 /*
693 * root->level is the highest level number found;
694 * count down from there.
695 */
696 maxlevel = root->level;
697 for (i = maxlevel; i >= 0; --i)
698 for (p = opt_state->levels[i]; p; p = p->link) {
699 compute_local_ud(p);
700 p->out_use = 0;
701 }
702
703 for (i = 1; i <= maxlevel; ++i) {
704 for (p = opt_state->levels[i]; p; p = p->link) {
705 p->out_use |= JT(p)->in_use | JF(p)->in_use;
706 p->in_use |= p->out_use &~ p->kill;
707 }
708 }
709 }
710 static void
711 init_val(opt_state_t *opt_state)
712 {
713 opt_state->curval = 0;
714 opt_state->next_vnode = opt_state->vnode_base;
715 memset((char *)opt_state->vmap, 0, opt_state->maxval * sizeof(*opt_state->vmap));
716 memset((char *)opt_state->hashtbl, 0, sizeof opt_state->hashtbl);
717 }
718
719 /*
720 * Because we really don't have an IR, this stuff is a little messy.
721 *
722 * This routine looks in the table of existing value numbers for a value
723 * generated from an operation with the specified opcode and
724 * the specified values. If it finds it, it returns its value number,
725 * otherwise it makes a new entry in the table and returns the
726 * value number of that entry.
727 */
728 static bpf_u_int32
729 F(opt_state_t *opt_state, int code, bpf_u_int32 v0, bpf_u_int32 v1)
730 {
731 u_int hash;
732 bpf_u_int32 val;
733 struct valnode *p;
734
735 hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
736 hash %= MODULUS;
737
738 for (p = opt_state->hashtbl[hash]; p; p = p->next)
739 if (p->code == code && p->v0 == v0 && p->v1 == v1)
740 return p->val;
741
742 /*
743 * Not found. Allocate a new value, and assign it a new
744 * value number.
745 *
746 * opt_state->curval starts out as 0, which means VAL_UNKNOWN; we
747 * increment it before using it as the new value number, which
748 * means we never assign VAL_UNKNOWN.
749 *
750 * XXX - unless we overflow, but we probably won't have 2^32-1
751 * values; we treat 32 bits as effectively infinite.
752 */
753 val = ++opt_state->curval;
754 if (BPF_MODE(code) == BPF_IMM &&
755 (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
756 opt_state->vmap[val].const_val = v0;
757 opt_state->vmap[val].is_const = 1;
758 }
759 p = opt_state->next_vnode++;
760 p->val = val;
761 p->code = code;
762 p->v0 = v0;
763 p->v1 = v1;
764 p->next = opt_state->hashtbl[hash];
765 opt_state->hashtbl[hash] = p;
766
767 return val;
768 }
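/*
 * Usage sketch (illustrative only, kept out of the build with "#if 0";
 * the function name is hypothetical): two loads of the same immediate
 * hash to the same (code, v0, v1) triple, so F() hands back one value
 * number for both and marks it as a known constant in the vmap; that
 * shared number is what lets later passes treat the second load as
 * redundant.
 */
#if 0
static void
value_numbering_example(opt_state_t *opt_state)
{
	bpf_u_int32 n1 = K(5);	/* F(opt_state, BPF_LD|BPF_IMM|BPF_W, 5, 0U) */
	bpf_u_int32 n2 = K(5);

	/* n1 == n2; opt_state->vmap[n1].is_const != 0, const_val == 5 */
}
#endif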
769
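/*
 * Set the value number pointed to by 'valp' to 'newval'.  If we're
 * allowed to alter the code, the new value is known, and it's the same
 * value that's already there, the store is redundant, so the statement
 * is turned into a NOP instead.
 */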
770 static inline void
771 vstore(struct stmt *s, bpf_u_int32 *valp, bpf_u_int32 newval, int alter)
772 {
773 if (alter && newval != VAL_UNKNOWN && *valp == newval)
774 s->code = NOP;
775 else
776 *valp = newval;
777 }
778
779 /*
780 * Do constant-folding on binary operators.
781 * (Unary operators are handled elsewhere.)
782 */
783 static void
784 fold_op(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 v0, bpf_u_int32 v1)
785 {
786 bpf_u_int32 a, b;
787
788 a = opt_state->vmap[v0].const_val;
789 b = opt_state->vmap[v1].const_val;
790
791 switch (BPF_OP(s->code)) {
792 case BPF_ADD:
793 a += b;
794 break;
795
796 case BPF_SUB:
797 a -= b;
798 break;
799
800 case BPF_MUL:
801 a *= b;
802 break;
803
804 case BPF_DIV:
805 if (b == 0)
806 opt_error(opt_state, "division by zero");
807 a /= b;
808 break;
809
810 case BPF_MOD:
811 if (b == 0)
812 opt_error(opt_state, "modulus by zero");
813 a %= b;
814 break;
815
816 case BPF_AND:
817 a &= b;
818 break;
819
820 case BPF_OR:
821 a |= b;
822 break;
823
824 case BPF_XOR:
825 a ^= b;
826 break;
827
828 case BPF_LSH:
829 /*
830 * A left shift of more than the width of the type
831 * is undefined in C; we'll just treat it as shifting
832 * all the bits out.
833 *
834 * XXX - the BPF interpreter doesn't check for this,
835 * so its behavior is dependent on the behavior of
836 * the processor on which it's running. There are
837 * processors on which it shifts all the bits out
838 * and processors on which it does no shift.
839 */
840 if (b < 32)
841 a <<= b;
842 else
843 a = 0;
844 break;
845
846 case BPF_RSH:
847 /*
848 * A right shift of more than the width of the type
849 * is undefined in C; we'll just treat it as shifting
850 * all the bits out.
851 *
852 * XXX - the BPF interpreter doesn't check for this,
853 * so its behavior is dependent on the behavior of
854 * the processor on which it's running. There are
855 * processors on which it shifts all the bits out
856 * and processors on which it does no shift.
857 */
858 if (b < 32)
859 a >>= b;
860 else
861 a = 0;
862 break;
863
864 default:
865 abort();
866 }
867 s->k = a;
868 s->code = BPF_LD|BPF_IMM;
869 /*
870 * XXX - optimizer loop detection.
871 */
872 opt_state->non_branch_movement_performed = 1;
873 opt_state->done = 0;
874 }
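/*
 * Illustrative example (hypothetical statement): if the A register's
 * value number maps to the constant 6 and the X register's maps to the
 * constant 3, a "div x" statement (BPF_ALU|BPF_DIV|BPF_X) handed to
 * fold_op() is rewritten in place into "ld #2" (BPF_LD|BPF_IMM with
 * s->k == 2); the division is done at compile time, and opt_state->done
 * is cleared so the simplification can propagate on the next pass.
 */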
875
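/*
 * Return the first statement at or after 's' that isn't a NOP, or a
 * null pointer if there are none left.
 */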
876 static inline struct slist *
877 this_op(struct slist *s)
878 {
879 while (s != 0 && s->s.code == NOP)
880 s = s->next;
881 return s;
882 }
883
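/*
 * Swap the true and false successors of 'b', i.e. logically negate its
 * branch.
 */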
884 static void
885 opt_not(struct block *b)
886 {
887 struct block *tmp = JT(b);
888
889 JT(b) = JF(b);
890 JF(b) = tmp;
891 }
892
893 static void
894 opt_peep(opt_state_t *opt_state, struct block *b)
895 {
896 struct slist *s;
897 struct slist *next, *last;
898 bpf_u_int32 val;
899
900 s = b->stmts;
901 if (s == 0)
902 return;
903
904 last = s;
905 for (/*empty*/; /*empty*/; s = next) {
906 /*
907 * Skip over nops.
908 */
909 s = this_op(s);
910 if (s == 0)
911 break; /* nothing left in the block */
912
913 /*
914 * Find the next real instruction after that one
915 * (skipping nops).
916 */
917 next = this_op(s->next);
918 if (next == 0)
919 break; /* no next instruction */
920 last = next;
921
922 /*
923 * st M[k] --> st M[k]
924 * ldx M[k] tax
925 */
926 if (s->s.code == BPF_ST &&
927 next->s.code == (BPF_LDX|BPF_MEM) &&
928 s->s.k == next->s.k) {
929 /*
930 * XXX - optimizer loop detection.
931 */
932 opt_state->non_branch_movement_performed = 1;
933 opt_state->done = 0;
934 next->s.code = BPF_MISC|BPF_TAX;
935 }
936 /*
937 * ld #k --> ldx #k
938 * tax txa
939 */
940 if (s->s.code == (BPF_LD|BPF_IMM) &&
941 next->s.code == (BPF_MISC|BPF_TAX)) {
942 s->s.code = BPF_LDX|BPF_IMM;
943 next->s.code = BPF_MISC|BPF_TXA;
944 /*
945 * XXX - optimizer loop detection.
946 */
947 opt_state->non_branch_movement_performed = 1;
948 opt_state->done = 0;
949 }
950 /*
951 * This is an ugly special case, but it happens
952 * when you say tcp[k] or udp[k] where k is a constant.
953 */
954 if (s->s.code == (BPF_LD|BPF_IMM)) {
955 struct slist *add, *tax, *ild;
956
957 /*
958 * Check that X isn't used on exit from this
959 * block (which the optimizer might cause).
960 * We know the code generator won't generate
961 * any local dependencies.
962 */
963 if (ATOMELEM(b->out_use, X_ATOM))
964 continue;
965
966 /*
967 * Check that the instruction following the ldi
968 * is an addx, or it's an ldxms with an addx
969 * following it (with 0 or more nops between the
970 * ldxms and addx).
971 */
972 if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
973 add = next;
974 else
975 add = this_op(next->next);
976 if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
977 continue;
978
979 /*
980 * Check that a tax follows that (with 0 or more
981 * nops between them).
982 */
983 tax = this_op(add->next);
984 if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
985 continue;
986
987 /*
988 * Check that an ild follows that (with 0 or more
989 * nops between them).
990 */
991 ild = this_op(tax->next);
992 if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
993 BPF_MODE(ild->s.code) != BPF_IND)
994 continue;
995 /*
996 * We want to turn this sequence:
997 *
998 * (004) ldi #0x2 {s}
999 * (005) ldxms [14] {next} -- optional
1000 * (006) addx {add}
1001 * (007) tax {tax}
1002 * (008) ild [x+0] {ild}
1003 *
1004 * into this sequence:
1005 *
1006 * (004) nop
1007 * (005) ldxms [14]
1008 * (006) nop
1009 * (007) nop
1010 * (008) ild [x+2]
1011 *
1012 * XXX We need to check that X is not
1013 * subsequently used, because we want to change
1014 * what'll be in it after this sequence.
1015 *
1016 * We know we can eliminate the accumulator
1017 * modifications earlier in the sequence since
1018 * it is defined by the last stmt of this sequence
1019 * (i.e., the last statement of the sequence loads
1020 * a value into the accumulator, so we can eliminate
1021 * earlier operations on the accumulator).
1022 */
1023 ild->s.k += s->s.k;
1024 s->s.code = NOP;
1025 add->s.code = NOP;
1026 tax->s.code = NOP;
1027 /*
1028 * XXX - optimizer loop detection.
1029 */
1030 opt_state->non_branch_movement_performed = 1;
1031 opt_state->done = 0;
1032 }
1033 }
1034 /*
1035 * If the comparison at the end of a block is an equality
1036 * comparison against a constant, and nobody uses the value
1037 * we leave in the A register at the end of a block, and
1038 * the operation preceding the comparison is an arithmetic
1039 * operation, we can sometimes optimize it away.
1040 */
1041 if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
1042 !ATOMELEM(b->out_use, A_ATOM)) {
1043 /*
1044 * We can optimize away certain subtractions of the
1045 * X register.
1046 */
1047 if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
1048 val = b->val[X_ATOM];
1049 if (opt_state->vmap[val].is_const) {
1050 /*
1051 * If we have a subtract to do a comparison,
1052 * and the X register is a known constant,
1053 * we can merge this value into the
1054 * comparison:
1055 *
1056 * sub x -> nop
1057 * jeq #y jeq #(x+y)
1058 */
1059 b->s.k += opt_state->vmap[val].const_val;
1060 last->s.code = NOP;
1061 /*
1062 * XXX - optimizer loop detection.
1063 */
1064 opt_state->non_branch_movement_performed = 1;
1065 opt_state->done = 0;
1066 } else if (b->s.k == 0) {
1067 /*
1068 * If the X register isn't a constant,
1069 * and the comparison in the test is
1070 * against 0, we can compare with the
1071 * X register, instead:
1072 *
1073 * sub x -> nop
1074 * jeq #0 jeq x
1075 */
1076 last->s.code = NOP;
1077 b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
1078 /*
1079 * XXX - optimizer loop detection.
1080 */
1081 opt_state->non_branch_movement_performed = 1;
1082 opt_state->done = 0;
1083 }
1084 }
1085 /*
1086 * Likewise, a constant subtract can be simplified:
1087 *
1088 * sub #x -> nop
1089 * jeq #y -> jeq #(x+y)
1090 */
1091 else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
1092 last->s.code = NOP;
1093 b->s.k += last->s.k;
1094 /*
1095 * XXX - optimizer loop detection.
1096 */
1097 opt_state->non_branch_movement_performed = 1;
1098 opt_state->done = 0;
1099 }
1100 /*
1101 * And, similarly, a constant AND can be simplified
1102 * if we're testing against 0, i.e.:
1103 *
1104 * and #k nop
1105 * jeq #0 -> jset #k
1106 */
1107 else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
1108 b->s.k == 0) {
1109 b->s.k = last->s.k;
1110 b->s.code = BPF_JMP|BPF_K|BPF_JSET;
1111 last->s.code = NOP;
1112 /*
1113 * XXX - optimizer loop detection.
1114 */
1115 opt_state->non_branch_movement_performed = 1;
1116 opt_state->done = 0;
1117 opt_not(b);
1118 }
1119 }
1120 /*
1121 * jset #0 -> never
1122 * jset #ffffffff -> always
1123 */
1124 if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
1125 if (b->s.k == 0)
1126 JT(b) = JF(b);
1127 if (b->s.k == 0xffffffffU)
1128 JF(b) = JT(b);
1129 }
1130 /*
1131 * If we're comparing against the index register, and the index
1132 * register is a known constant, we can just compare against that
1133 * constant.
1134 */
1135 val = b->val[X_ATOM];
1136 if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
1137 bpf_u_int32 v = opt_state->vmap[val].const_val;
1138 b->s.code &= ~BPF_X;
1139 b->s.k = v;
1140 }
1141 /*
1142 * If the accumulator is a known constant, we can compute the
1143 * comparison result.
1144 */
1145 val = b->val[A_ATOM];
1146 if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
1147 bpf_u_int32 v = opt_state->vmap[val].const_val;
1148 switch (BPF_OP(b->s.code)) {
1149
1150 case BPF_JEQ:
1151 v = v == b->s.k;
1152 break;
1153
1154 case BPF_JGT:
1155 v = v > b->s.k;
1156 break;
1157
1158 case BPF_JGE:
1159 v = v >= b->s.k;
1160 break;
1161
1162 case BPF_JSET:
1163 v &= b->s.k;
1164 break;
1165
1166 default:
1167 abort();
1168 }
1169 if (JF(b) != JT(b)) {
1170 /*
1171 * XXX - optimizer loop detection.
1172 */
1173 opt_state->non_branch_movement_performed = 1;
1174 opt_state->done = 0;
1175 }
1176 if (v)
1177 JF(b) = JT(b);
1178 else
1179 JT(b) = JF(b);
1180 }
1181 }
1182
1183 /*
1184 * Compute the symbolic value of the expression of 's', and update
1185 * anything it defines in the value table 'val'. If 'alter' is true,
1186 * do various optimizations. This code would be cleaner if symbolic
1187 * evaluation and code transformations weren't folded together.
1188 */
1189 static void
1190 opt_stmt(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 val[], int alter)
1191 {
1192 int op;
1193 bpf_u_int32 v;
1194
1195 switch (s->code) {
1196
1197 case BPF_LD|BPF_ABS|BPF_W:
1198 case BPF_LD|BPF_ABS|BPF_H:
1199 case BPF_LD|BPF_ABS|BPF_B:
1200 v = F(opt_state, s->code, s->k, 0L);
1201 vstore(s, &val[A_ATOM], v, alter);
1202 break;
1203
1204 case BPF_LD|BPF_IND|BPF_W:
1205 case BPF_LD|BPF_IND|BPF_H:
1206 case BPF_LD|BPF_IND|BPF_B:
1207 v = val[X_ATOM];
1208 if (alter && opt_state->vmap[v].is_const) {
1209 s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
1210 s->k += opt_state->vmap[v].const_val;
1211 v = F(opt_state, s->code, s->k, 0L);
1212 /*
1213 * XXX - optimizer loop detection.
1214 */
1215 opt_state->non_branch_movement_performed = 1;
1216 opt_state->done = 0;
1217 }
1218 else
1219 v = F(opt_state, s->code, s->k, v);
1220 vstore(s, &val[A_ATOM], v, alter);
1221 break;
1222
1223 case BPF_LD|BPF_LEN:
1224 v = F(opt_state, s->code, 0L, 0L);
1225 vstore(s, &val[A_ATOM], v, alter);
1226 break;
1227
1228 case BPF_LD|BPF_IMM:
1229 v = K(s->k);
1230 vstore(s, &val[A_ATOM], v, alter);
1231 break;
1232
1233 case BPF_LDX|BPF_IMM:
1234 v = K(s->k);
1235 vstore(s, &val[X_ATOM], v, alter);
1236 break;
1237
1238 case BPF_LDX|BPF_MSH|BPF_B:
1239 v = F(opt_state, s->code, s->k, 0L);
1240 vstore(s, &val[X_ATOM], v, alter);
1241 break;
1242
1243 case BPF_ALU|BPF_NEG:
1244 if (alter && opt_state->vmap[val[A_ATOM]].is_const) {
1245 s->code = BPF_LD|BPF_IMM;
1246 /*
1247 * Do this negation as unsigned arithmetic; that's
1248 * what modern BPF engines do, and it guarantees
1249 * that all possible values can be negated. (Yeah,
1250 * negating 0x80000000, the minimum signed 32-bit
1251 * two's-complement value, results in 0x80000000,
1252 * so it's still negative, but we *should* be doing
1253 * all unsigned arithmetic here, to match what
1254 * modern BPF engines do.)
1255 *
1256 * Express it as 0U - (unsigned value) so that we
1257 * don't get compiler warnings about negating an
1258 * unsigned value and don't get UBSan warnings
1259 * about the result of negating 0x80000000 being
1260 * undefined.
1261 */
1262 s->k = 0U - opt_state->vmap[val[A_ATOM]].const_val;
1263 val[A_ATOM] = K(s->k);
1264 }
1265 else
1266 val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], 0L);
1267 break;
1268
1269 case BPF_ALU|BPF_ADD|BPF_K:
1270 case BPF_ALU|BPF_SUB|BPF_K:
1271 case BPF_ALU|BPF_MUL|BPF_K:
1272 case BPF_ALU|BPF_DIV|BPF_K:
1273 case BPF_ALU|BPF_MOD|BPF_K:
1274 case BPF_ALU|BPF_AND|BPF_K:
1275 case BPF_ALU|BPF_OR|BPF_K:
1276 case BPF_ALU|BPF_XOR|BPF_K:
1277 case BPF_ALU|BPF_LSH|BPF_K:
1278 case BPF_ALU|BPF_RSH|BPF_K:
1279 op = BPF_OP(s->code);
1280 if (alter) {
1281 if (s->k == 0) {
1282 /*
1283 * Optimize operations where the constant
1284 * is zero.
1285 *
1286 * Don't optimize away "sub #0"
1287 * as it may be needed later to
1288 * fixup the generated math code.
1289 *
1290 * Fail if we're dividing by zero or taking
1291 * a modulus by zero.
1292 */
1293 if (op == BPF_ADD ||
1294 op == BPF_LSH || op == BPF_RSH ||
1295 op == BPF_OR || op == BPF_XOR) {
1296 s->code = NOP;
1297 break;
1298 }
1299 if (op == BPF_MUL || op == BPF_AND) {
1300 s->code = BPF_LD|BPF_IMM;
1301 val[A_ATOM] = K(s->k);
1302 break;
1303 }
1304 if (op == BPF_DIV)
1305 opt_error(opt_state,
1306 "division by zero");
1307 if (op == BPF_MOD)
1308 opt_error(opt_state,
1309 "modulus by zero");
1310 }
1311 if (opt_state->vmap[val[A_ATOM]].is_const) {
1312 fold_op(opt_state, s, val[A_ATOM], K(s->k));
1313 val[A_ATOM] = K(s->k);
1314 break;
1315 }
1316 }
1317 val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], K(s->k));
1318 break;
1319
1320 case BPF_ALU|BPF_ADD|BPF_X:
1321 case BPF_ALU|BPF_SUB|BPF_X:
1322 case BPF_ALU|BPF_MUL|BPF_X:
1323 case BPF_ALU|BPF_DIV|BPF_X:
1324 case BPF_ALU|BPF_MOD|BPF_X:
1325 case BPF_ALU|BPF_AND|BPF_X:
1326 case BPF_ALU|BPF_OR|BPF_X:
1327 case BPF_ALU|BPF_XOR|BPF_X:
1328 case BPF_ALU|BPF_LSH|BPF_X:
1329 case BPF_ALU|BPF_RSH|BPF_X:
1330 op = BPF_OP(s->code);
1331 if (alter && opt_state->vmap[val[X_ATOM]].is_const) {
1332 if (opt_state->vmap[val[A_ATOM]].is_const) {
1333 fold_op(opt_state, s, val[A_ATOM], val[X_ATOM]);
1334 val[A_ATOM] = K(s->k);
1335 }
1336 else {
1337 s->code = BPF_ALU|BPF_K|op;
1338 s->k = opt_state->vmap[val[X_ATOM]].const_val;
1339 if ((op == BPF_LSH || op == BPF_RSH) &&
1340 s->k > 31)
1341 opt_error(opt_state,
1342 "shift by more than 31 bits");
1343 /*
1344 * XXX - optimizer loop detection.
1345 */
1346 opt_state->non_branch_movement_performed = 1;
1347 opt_state->done = 0;
1348 val[A_ATOM] =
1349 F(opt_state, s->code, val[A_ATOM], K(s->k));
1350 }
1351 break;
1352 }
1353 /*
1354 * Check if we're doing something to an accumulator
1355 * that is 0, and simplify. This may not seem like
1356 * much of a simplification but it could open up further
1357 * optimizations.
1358 * XXX We could also check for mul by 1, etc.
1359 */
1360 if (alter && opt_state->vmap[val[A_ATOM]].is_const
1361 && opt_state->vmap[val[A_ATOM]].const_val == 0) {
1362 if (op == BPF_ADD || op == BPF_OR || op == BPF_XOR) {
1363 s->code = BPF_MISC|BPF_TXA;
1364 vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1365 break;
1366 }
1367 else if (op == BPF_MUL || op == BPF_DIV || op == BPF_MOD ||
1368 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
1369 s->code = BPF_LD|BPF_IMM;
1370 s->k = 0;
1371 vstore(s, &val[A_ATOM], K(s->k), alter);
1372 break;
1373 }
1374 else if (op == BPF_NEG) {
1375 s->code = NOP;
1376 break;
1377 }
1378 }
1379 val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], val[X_ATOM]);
1380 break;
1381
1382 case BPF_MISC|BPF_TXA:
1383 vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1384 break;
1385
1386 case BPF_LD|BPF_MEM:
1387 v = val[s->k];
1388 if (alter && opt_state->vmap[v].is_const) {
1389 s->code = BPF_LD|BPF_IMM;
1390 s->k = opt_state->vmap[v].const_val;
1391 /*
1392 * XXX - optimizer loop detection.
1393 */
1394 opt_state->non_branch_movement_performed = 1;
1395 opt_state->done = 0;
1396 }
1397 vstore(s, &val[A_ATOM], v, alter);
1398 break;
1399
1400 case BPF_MISC|BPF_TAX:
1401 vstore(s, &val[X_ATOM], val[A_ATOM], alter);
1402 break;
1403
1404 case BPF_LDX|BPF_MEM:
1405 v = val[s->k];
1406 if (alter && opt_state->vmap[v].is_const) {
1407 s->code = BPF_LDX|BPF_IMM;
1408 s->k = opt_state->vmap[v].const_val;
1409 /*
1410 * XXX - optimizer loop detection.
1411 */
1412 opt_state->non_branch_movement_performed = 1;
1413 opt_state->done = 0;
1414 }
1415 vstore(s, &val[X_ATOM], v, alter);
1416 break;
1417
1418 case BPF_ST:
1419 vstore(s, &val[s->k], val[A_ATOM], alter);
1420 break;
1421
1422 case BPF_STX:
1423 vstore(s, &val[s->k], val[X_ATOM], alter);
1424 break;
1425 }
1426 }
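/*
 * Illustrative examples of the "alter" rewrites above (hypothetical
 * statements): "add #0" and "or #0" become NOPs; "and #0" and "mul #0"
 * become "ld #0"; and an indirect "ld [x + 2]" with X known to hold
 * the constant 12 becomes the cheaper absolute load "ld [14]".
 */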
1427
1428 static void
1429 deadstmt(opt_state_t *opt_state, register struct stmt *s, register struct stmt *last[])
1430 {
1431 register int atom;
1432
1433 atom = atomuse(s);
1434 if (atom >= 0) {
1435 if (atom == AX_ATOM) {
1436 last[X_ATOM] = 0;
1437 last[A_ATOM] = 0;
1438 }
1439 else
1440 last[atom] = 0;
1441 }
1442 atom = atomdef(s);
1443 if (atom >= 0) {
1444 if (last[atom]) {
1445 /*
1446 * XXX - optimizer loop detection.
1447 */
1448 opt_state->non_branch_movement_performed = 1;
1449 opt_state->done = 0;
1450 last[atom]->code = NOP;
1451 }
1452 last[atom] = s;
1453 }
1454 }
1455
1456 static void
1457 opt_deadstores(opt_state_t *opt_state, register struct block *b)
1458 {
1459 register struct slist *s;
1460 register int atom;
1461 struct stmt *last[N_ATOMS];
1462
1463 memset((char *)last, 0, sizeof last);
1464
1465 for (s = b->stmts; s != 0; s = s->next)
1466 deadstmt(opt_state, &s->s, last);
1467 deadstmt(opt_state, &b->s, last);
1468
1469 for (atom = 0; atom < N_ATOMS; ++atom)
1470 if (last[atom] && !ATOMELEM(b->out_use, atom)) {
1471 last[atom]->code = NOP;
1472 /*
1473 * XXX - optimizer loop detection.
1474 */
1475 opt_state->non_branch_movement_performed = 1;
1476 opt_state->done = 0;
1477 }
1478 }
1479
1480 static void
1481 opt_blk(opt_state_t *opt_state, struct block *b, int do_stmts)
1482 {
1483 struct slist *s;
1484 struct edge *p;
1485 int i;
1486 bpf_u_int32 aval, xval;
1487
1488 #if 0
1489 for (s = b->stmts; s && s->next; s = s->next)
1490 if (BPF_CLASS(s->s.code) == BPF_JMP) {
1491 do_stmts = 0;
1492 break;
1493 }
1494 #endif
1495
1496 /*
1497 * Initialize the atom values.
1498 */
1499 p = b->in_edges;
1500 if (p == 0) {
1501 /*
1502 * We have no predecessors, so everything is undefined
1503 * upon entry to this block.
1504 */
1505 memset((char *)b->val, 0, sizeof(b->val));
1506 } else {
1507 /*
1508 * Inherit values from our predecessors.
1509 *
1510 * First, get the values from the predecessor along the
1511 * first edge leading to this node.
1512 */
1513 memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
1514 /*
1515 * Now look at all the other nodes leading to this node.
1516 * If, for the predecessor along that edge, a register
1517 * has a different value from the one we have (i.e.,
1518 * control paths are merging, and the merging paths
1519 * assign different values to that register), give the
1520 * register the undefined value of 0.
1521 */
1522 while ((p = p->next) != NULL) {
1523 for (i = 0; i < N_ATOMS; ++i)
1524 if (b->val[i] != p->pred->val[i])
1525 b->val[i] = 0;
1526 }
1527 }
1528 aval = b->val[A_ATOM];
1529 xval = b->val[X_ATOM];
1530 for (s = b->stmts; s; s = s->next)
1531 opt_stmt(opt_state, &s->s, b->val, do_stmts);
1532
1533 /*
1534 * This is a special case: if we don't use anything from this
1535 * block, and we load the accumulator or index register with a
1536 * value that is already there, or if this block is a return,
1537 * eliminate all the statements.
1538 *
1539 * XXX - what if it does a store? Presumably that falls under
1540 * the heading of "if we don't use anything from this block",
1541 * i.e., if we use any memory location set to a different
1542 * value by this block, then we use something from this block.
1543 *
1544 * XXX - why does it matter whether we use anything from this
1545 * block? If the accumulator or index register doesn't change
1546 * its value, isn't that OK even if we use that value?
1547 *
1548 * XXX - if we load the accumulator with a different value,
1549 * and the block ends with a conditional branch, we obviously
1550 * can't eliminate it, as the branch depends on that value.
1551 * For the index register, the conditional branch only depends
1552 * on the index register value if the test is against the index
1553 * register value rather than a constant; if nothing uses the
1554 * value we put into the index register, and we're not testing
1555 * against the index register's value, and there aren't any
1556 * other problems that would keep us from eliminating this
1557 * block, can we eliminate it?
1558 */
1559 if (do_stmts &&
1560 ((b->out_use == 0 &&
1561 aval != VAL_UNKNOWN && b->val[A_ATOM] == aval &&
1562 xval != VAL_UNKNOWN && b->val[X_ATOM] == xval) ||
1563 BPF_CLASS(b->s.code) == BPF_RET)) {
1564 if (b->stmts != 0) {
1565 b->stmts = 0;
1566 /*
1567 * XXX - optimizer loop detection.
1568 */
1569 opt_state->non_branch_movement_performed = 1;
1570 opt_state->done = 0;
1571 }
1572 } else {
1573 opt_peep(opt_state, b);
1574 opt_deadstores(opt_state, b);
1575 }
1576 /*
1577 * Set up values for branch optimizer.
1578 */
1579 if (BPF_SRC(b->s.code) == BPF_K)
1580 b->oval = K(b->s.k);
1581 else
1582 b->oval = b->val[X_ATOM];
1583 b->et.code = b->s.code;
1584 b->ef.code = -b->s.code;
1585 }
1586
1587 /*
1588 * Return true if any register that is used on exit from 'succ', has
1589 * an exit value that is different from the corresponding exit value
1590 * from 'b'.
1591 */
1592 static int
1593 use_conflict(struct block *b, struct block *succ)
1594 {
1595 int atom;
1596 atomset use = succ->out_use;
1597
1598 if (use == 0)
1599 return 0;
1600
1601 for (atom = 0; atom < N_ATOMS; ++atom)
1602 if (ATOMELEM(use, atom))
1603 if (b->val[atom] != succ->val[atom])
1604 return 1;
1605 return 0;
1606 }
1607
1608 /*
1609 * Given a block that is the successor of an edge, and an edge that
1610 * dominates that edge, return either a pointer to a child of that
1611 * block (a block to which that block jumps) if that block is a
1612 * candidate to replace the successor of the latter edge or NULL
1613 * if neither of the children of the first block is a candidate.
1614 */
1615 static struct block *
1616 fold_edge(struct block *child, struct edge *ep)
1617 {
1618 int sense;
1619 bpf_u_int32 aval0, aval1, oval0, oval1;
1620 int code = ep->code;
1621
1622 if (code < 0) {
1623 /*
1624 * This edge is a "branch if false" edge.
1625 */
1626 code = -code;
1627 sense = 0;
1628 } else {
1629 /*
1630 * This edge is a "branch if true" edge.
1631 */
1632 sense = 1;
1633 }
1634
1635 /*
1636 * If the opcode for the branch at the end of the block we
1637 * were handed isn't the same as the opcode for the branch
1638 * to which the edge we were handed corresponds, the tests
1639 * for those branches aren't testing the same conditions,
1640 * so the blocks to which the first block branches aren't
1641 * candidates to replace the successor of the edge.
1642 */
1643 if (child->s.code != code)
1644 return 0;
1645
1646 aval0 = child->val[A_ATOM];
1647 oval0 = child->oval;
1648 aval1 = ep->pred->val[A_ATOM];
1649 oval1 = ep->pred->oval;
1650
1651 /*
1652 * If the A register value on exit from the successor block
1653 * isn't the same as the A register value on exit from the
1654 * predecessor of the edge, the blocks to which the first
1655 * block branches aren't candidates to replace the successor
1656 * of the edge.
1657 */
1658 if (aval0 != aval1)
1659 return 0;
1660
1661 if (oval0 == oval1)
1662 /*
1663 * The operands of the branch instructions are
1664 * identical, so the branches are testing the
1665 * same condition, and the result is true if a true
1666 * branch was taken to get here, otherwise false.
1667 */
1668 return sense ? JT(child) : JF(child);
1669
1670 if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
1671 /*
1672 * At this point, we only know the comparison if we
1673 * came down the true branch, and it was an equality
1674 * comparison with a constant.
1675 *
1676 * I.e., if we came down the true branch, and the branch
1677 * was an equality comparison with a constant, we know the
1678 * accumulator contains that constant. If we came down
1679 * the false branch, or the comparison wasn't with a
1680 * constant, we don't know what was in the accumulator.
1681 *
1682 * We rely on the fact that distinct constants have distinct
1683 * value numbers.
1684 */
1685 return JF(child);
1686
1687 return 0;
1688 }
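/*
 * Illustrative example: for a filter such as "tcp or udp", the
 * unoptimized code tests the link-layer type with "jeq #0x800" once
 * per alternative.  If the later test block is reached through the
 * true edge of an earlier "jeq #0x800" on the same A value (same value
 * number, same operand), fold_edge() reports that the outcome is
 * already known, and opt_j() below can retarget the incoming edge
 * straight at that block's true successor, skipping the redundant
 * comparison.
 */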
1689
1690 /*
1691 * If we can make this edge go directly to a child of the edge's current
1692 * successor, do so.
1693 */
1694 static void
1695 opt_j(opt_state_t *opt_state, struct edge *ep)
1696 {
1697 register u_int i, k;
1698 register struct block *target;
1699
1700 /*
1701 * Does this edge go to a block where, if the test
1702 * at the end of it succeeds, it goes to a block
1703 * that's a leaf node of the DAG, i.e. a return
1704 * statement?
1705 * If so, there's nothing to optimize.
1706 */
1707 if (JT(ep->succ) == 0)
1708 return;
1709
1710 /*
1711 * Does this edge go to a block that goes, in turn, to
1712 * the same block regardless of whether the test at the
1713 * end succeeds or fails?
1714 */
1715 if (JT(ep->succ) == JF(ep->succ)) {
1716 /*
1717 * Common branch targets can be eliminated, provided
1718 * there is no data dependency.
1719 *
1720 * Check whether any register used on exit from the
1721 * block to which the successor of this edge goes
1722 * has a value at that point that's different from
1723 * the value it has on exit from the predecessor of
1724 * this edge. If not, the predecessor of this edge
1725 * can just go to the block to which the successor
1726 * of this edge goes, bypassing the successor of this
1727 * edge, as the successor of this edge isn't doing
1728 * any calculations whose results are different
1729 * from what the blocks before it did and isn't
1730 * doing any tests the results of which matter.
1731 */
1732 if (!use_conflict(ep->pred, JT(ep->succ))) {
1733 /*
1734 * No, there isn't.
1735 * Make this edge go to the block to
1736 * which the successor of that edge
1737 * goes.
1738 *
1739 * XXX - optimizer loop detection.
1740 */
1741 opt_state->non_branch_movement_performed = 1;
1742 opt_state->done = 0;
1743 ep->succ = JT(ep->succ);
1744 }
1745 }
1746 /*
1747 * For each edge dominator that matches the successor of this
1748 * edge, promote the edge successor to its grandchild.
1749 *
1750 * XXX We violate the set abstraction here in favor of a reasonably
1751 * efficient loop.
1752 */
1753 top:
1754 for (i = 0; i < opt_state->edgewords; ++i) {
1755 /* i'th word in the bitset of dominators */
1756 register bpf_u_int32 x = ep->edom[i];
1757
1758 while (x != 0) {
1759 /* Find the next dominator in that word and mark it as found */
1760 k = lowest_set_bit(x);
1761 x &=~ ((bpf_u_int32)1 << k);
1762 k += i * BITS_PER_WORD;
1763
1764 target = fold_edge(ep->succ, opt_state->edges[k]);
1765 /*
1766 * We have a candidate to replace the successor
1767 * of ep.
1768 *
1769 * Check that there is no data dependency between
1770 * nodes that will be violated if we move the edge;
1771 * i.e., if any register used on exit from the
1772 * candidate has a value at that point different
1773 * from the value it has when we exit the
1774 * predecessor of that edge, there's a data
1775 * dependency that will be violated.
1776 */
1777 if (target != 0 && !use_conflict(ep->pred, target)) {
1778 /*
1779 * It's safe to replace the successor of
1780 * ep; do so, and note that we've made
1781 * at least one change.
1782 *
1783 * XXX - this is one of the operations that
1784 * happens when the optimizer gets into
1785 * one of those infinite loops.
1786 */
1787 opt_state->done = 0;
1788 ep->succ = target;
1789 if (JT(target) != 0)
1790 /*
1791 * Start over unless we hit a leaf.
1792 */
1793 goto top;
1794 return;
1795 }
1796 }
1797 }
1798 }
1799
1800 /*
1801 * XXX - is this, and and_pullup(), what's described in section 6.1.2
1802 * "Predicate Assertion Propagation" in the BPF+ paper?
1803 *
1804 * Note that this looks at block dominators, not edge dominators.
1805 * Don't think so.
1806 *
1807 * "A or B" compiles into
1808 *
1809 * A
1810 * t / \ f
1811 * / B
1812 * / t / \ f
1813 * \ /
1814 * \ /
1815 * X
1816 *
1817 *
1818 */
1819 static void
1820 or_pullup(opt_state_t *opt_state, struct block *b)
1821 {
1822 bpf_u_int32 val;
1823 int at_top;
1824 struct block *pull;
1825 struct block **diffp, **samep;
1826 struct edge *ep;
1827
1828 ep = b->in_edges;
1829 if (ep == 0)
1830 return;
1831
1832 /*
1833 * Make sure each predecessor loads the same value.
1834 * XXX why?
1835 */
1836 val = ep->pred->val[A_ATOM];
1837 for (ep = ep->next; ep != 0; ep = ep->next)
1838 if (val != ep->pred->val[A_ATOM])
1839 return;
1840
1841 /*
1842 * For the first edge in the list of edges coming into this block,
1843 * see whether the predecessor of that edge comes here via a true
1844 * branch or a false branch.
1845 */
1846 if (JT(b->in_edges->pred) == b)
1847 diffp = &JT(b->in_edges->pred); /* jt */
1848 else
1849 diffp = &JF(b->in_edges->pred); /* jf */
1850
1851 /*
1852 * diffp is a pointer to a pointer to the block.
1853 *
1854 * Go down the false chain looking as far as you can,
1855 * making sure that each jump-compare is doing the
1856 * same as the original block.
1857 *
1858 * If you reach the bottom before you reach a
1859 * different jump-compare, just exit. There's nothing
1860 * to do here. XXX - no, this version is checking for
1861 * the value leaving the block; that's from the BPF+
1862 * pullup routine.
1863 */
1864 at_top = 1;
1865 for (;;) {
1866 /*
1867 * Done if that's not going anywhere XXX
1868 */
1869 if (*diffp == 0)
1870 return;
1871
1872 /*
1873 * Done if that predecessor blah blah blah isn't
1874 * going the same place we're going XXX
1875 *
1876 * Does the true edge of this block point to the same
1877 * location as the true edge of b?
1878 */
1879 if (JT(*diffp) != JT(b))
1880 return;
1881
1882 /*
1883 * Done if this node isn't a dominator of that
1884 * node blah blah blah XXX
1885 *
1886 * Does b dominate diffp?
1887 */
1888 if (!SET_MEMBER((*diffp)->dom, b->id))
1889 return;
1890
1891 /*
1892 * Break out of the loop if that node's value of A
1893 * isn't the value of A above XXX
1894 */
1895 if ((*diffp)->val[A_ATOM] != val)
1896 break;
1897
1898 /*
1899 * Get the JF for that node XXX
1900 * Go down the false path.
1901 */
1902 diffp = &JF(*diffp);
1903 at_top = 0;
1904 }
1905
1906 /*
1907 * Now that we've found a different jump-compare in a chain
1908 * below b, search further down until we find another
1909 * jump-compare that looks at the original value. This
1910 * jump-compare should get pulled up. XXX again we're
1911 * comparing values not jump-compares.
1912 */
1913 samep = &JF(*diffp);
1914 for (;;) {
1915 /*
1916 * Done if that's not going anywhere XXX
1917 */
1918 if (*samep == 0)
1919 return;
1920
1921 /*
1922 * Done if that predecessor blah blah blah isn't
1923 * going the same place we're going XXX
1924 */
1925 if (JT(*samep) != JT(b))
1926 return;
1927
1928 /*
1929 * Done if this node isn't a dominator of that
1930 * node blah blah blah XXX
1931 *
1932 * Does b dominate samep?
1933 */
1934 if (!SET_MEMBER((*samep)->dom, b->id))
1935 return;
1936
1937 /*
1938 * Break out of the loop if that node's value of A
1939 * is the value of A above XXX
1940 */
1941 if ((*samep)->val[A_ATOM] == val)
1942 break;
1943
1944 /* XXX Need to check that there are no data dependencies
1945 between dp0 and dp1. Currently, the code generator
1946 will not produce such dependencies. */
1947 samep = &JF(*samep);
1948 }
1949 #ifdef notdef
1950 /* XXX This doesn't cover everything. */
1951 for (i = 0; i < N_ATOMS; ++i)
1952 if ((*samep)->val[i] != pred->val[i])
1953 return;
1954 #endif
1955 /* Pull up the node. */
1956 pull = *samep;
1957 *samep = JF(pull);
1958 JF(pull) = *diffp;
1959
1960 /*
1961 * At the top of the chain, each predecessor needs to point at the
1962 * pulled up node. Inside the chain, there is only one predecessor
1963 * to worry about.
1964 */
1965 if (at_top) {
1966 for (ep = b->in_edges; ep != 0; ep = ep->next) {
1967 if (JT(ep->pred) == b)
1968 JT(ep->pred) = pull;
1969 else
1970 JF(ep->pred) = pull;
1971 }
1972 }
1973 else
1974 *diffp = pull;
1975
1976 /*
1977 * XXX - this is one of the operations that happens when the
1978 * optimizer gets into one of those infinite loops.
1979 */
1980 opt_state->done = 0;
1981 }
1982
1983 static void
1984 and_pullup(opt_state_t *opt_state, struct block *b)
1985 {
1986 bpf_u_int32 val;
1987 int at_top;
1988 struct block *pull;
1989 struct block **diffp, **samep;
1990 struct edge *ep;
1991
1992 ep = b->in_edges;
1993 if (ep == 0)
1994 return;
1995
1996 /*
1997 * Make sure each predecessor loads the same value.
1998 */
1999 val = ep->pred->val[A_ATOM];
2000 for (ep = ep->next; ep != 0; ep = ep->next)
2001 if (val != ep->pred->val[A_ATOM])
2002 return;
2003
2004 if (JT(b->in_edges->pred) == b)
2005 diffp = &JT(b->in_edges->pred);
2006 else
2007 diffp = &JF(b->in_edges->pred);
2008
2009 at_top = 1;
2010 for (;;) {
2011 if (*diffp == 0)
2012 return;
2013
2014 if (JF(*diffp) != JF(b))
2015 return;
2016
2017 if (!SET_MEMBER((*diffp)->dom, b->id))
2018 return;
2019
2020 if ((*diffp)->val[A_ATOM] != val)
2021 break;
2022
2023 diffp = &JT(*diffp);
2024 at_top = 0;
2025 }
2026 samep = &JT(*diffp);
2027 for (;;) {
2028 if (*samep == 0)
2029 return;
2030
2031 if (JF(*samep) != JF(b))
2032 return;
2033
2034 if (!SET_MEMBER((*samep)->dom, b->id))
2035 return;
2036
2037 if ((*samep)->val[A_ATOM] == val)
2038 break;
2039
2040 /* XXX Need to check that there are no data dependencies
2041 between diffp and samep. Currently, the code generator
2042 will not produce such dependencies. */
2043 samep = &JT(*samep);
2044 }
2045 #ifdef notdef
2046 /* XXX This doesn't cover everything. */
2047 for (i = 0; i < N_ATOMS; ++i)
2048 if ((*samep)->val[i] != pred->val[i])
2049 return;
2050 #endif
2051 /* Pull up the node. */
2052 pull = *samep;
2053 *samep = JT(pull);
2054 JT(pull) = *diffp;
2055
2056 /*
2057 * At the top of the chain, each predecessor needs to point at the
2058 * pulled up node. Inside the chain, there is only one predecessor
2059 * to worry about.
2060 */
2061 if (at_top) {
2062 for (ep = b->in_edges; ep != 0; ep = ep->next) {
2063 if (JT(ep->pred) == b)
2064 JT(ep->pred) = pull;
2065 else
2066 JF(ep->pred) = pull;
2067 }
2068 }
2069 else
2070 *diffp = pull;
2071
2072 /*
2073 * XXX - this is one of the operations that happens when the
2074 * optimizer gets into one of those infinite loops.
2075 */
2076 opt_state->done = 0;
2077 }
2078
2079 static void
2080 opt_blks(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2081 {
2082 int i, maxlevel;
2083 struct block *p;
2084
2085 init_val(opt_state);
2086 maxlevel = ic->root->level;
2087
2088 find_inedges(opt_state, ic->root);
2089 for (i = maxlevel; i >= 0; --i)
2090 for (p = opt_state->levels[i]; p; p = p->link)
2091 opt_blk(opt_state, p, do_stmts);
2092
2093 if (do_stmts)
2094 /*
2095 * No point trying to move branches; it can't possibly
2096 * make a difference at this point.
2097 *
2098 * XXX - this might be after we detect a loop where
2099 * we were just looping infinitely moving branches
2100 * in such a fashion that we went through two or more
2101 * versions of the machine code, eventually returning
2102 * to the first version. (We're really not doing a
2103 * full loop detection, we're just testing for two
2104 * passes in a row where we do nothing but
2105 * move branches.)
2106 */
2107 return;
2108
2109 /*
2110 * Is this what the BPF+ paper describes in sections 6.1.1,
2111 * 6.1.2, and 6.1.3?
2112 */
2113 for (i = 1; i <= maxlevel; ++i) {
2114 for (p = opt_state->levels[i]; p; p = p->link) {
2115 opt_j(opt_state, &p->et);
2116 opt_j(opt_state, &p->ef);
2117 }
2118 }
2119
2120 find_inedges(opt_state, ic->root);
2121 for (i = 1; i <= maxlevel; ++i) {
2122 for (p = opt_state->levels[i]; p; p = p->link) {
2123 or_pullup(opt_state, p);
2124 and_pullup(opt_state, p);
2125 }
2126 }
2127 }
2128
2129 static inline void
2130 link_inedge(struct edge *parent, struct block *child)
2131 {
2132 parent->next = child->in_edges;
2133 child->in_edges = parent;
2134 }
2135
2136 static void
2137 find_inedges(opt_state_t *opt_state, struct block *root)
2138 {
2139 u_int i;
2140 int level;
2141 struct block *b;
2142
2143 for (i = 0; i < opt_state->n_blocks; ++i)
2144 opt_state->blocks[i]->in_edges = 0;
2145
2146 /*
2147 * Traverse the graph, adding each edge to the predecessor
2148 * list of its successors. Skip the leaves (i.e. level 0).
2149 */
2150 for (level = root->level; level > 0; --level) {
2151 for (b = opt_state->levels[level]; b != 0; b = b->link) {
2152 link_inedge(&b->et, JT(b));
2153 link_inedge(&b->ef, JF(b));
2154 }
2155 }
2156 }
2157
2158 static void
2159 opt_root(struct block **b)
2160 {
2161 struct slist *tmp, *s;
2162
2163 s = (*b)->stmts;
2164 (*b)->stmts = 0;
2165 while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
2166 *b = JT(*b);
2167
2168 tmp = (*b)->stmts;
2169 if (tmp != 0)
2170 sappend(s, tmp);
2171 (*b)->stmts = s;
2172
2173 /*
2174 * If the root node is a return, then there is no
2175 * point executing any statements (since the bpf machine
2176 * has no side effects).
2177 */
2178 if (BPF_CLASS((*b)->s.code) == BPF_RET)
2179 (*b)->stmts = 0;
2180 }
2181
2182 static void
2183 opt_loop(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2184 {
2185
2186 #ifdef BDEBUG
2187 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2188 printf("opt_loop(root, %d) begin\n", do_stmts);
2189 opt_dump(opt_state, ic);
2190 }
2191 #endif
2192
2193 /*
2194 * XXX - optimizer loop detection.
2195 */
2196 int loop_count = 0;
2197 for (;;) {
2198 opt_state->done = 1;
2199 /*
2200 * XXX - optimizer loop detection.
2201 */
2202 opt_state->non_branch_movement_performed = 0;
2203 find_levels(opt_state, ic);
2204 find_dom(opt_state, ic->root);
2205 find_closure(opt_state, ic->root);
2206 find_ud(opt_state, ic->root);
2207 find_edom(opt_state, ic->root);
2208 opt_blks(opt_state, ic, do_stmts);
2209 #ifdef BDEBUG
2210 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2211 printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, opt_state->done);
2212 opt_dump(opt_state, ic);
2213 }
2214 #endif
2215
2216 /*
2217 * Was anything done in this optimizer pass?
2218 */
2219 if (opt_state->done) {
2220 /*
2221 * No, so we've reached a fixed point.
2222 * We're done.
2223 */
2224 break;
2225 }
2226
2227 /*
2228 * XXX - was anything done other than branch movement
2229 * in this pass?
2230 */
2231 if (opt_state->non_branch_movement_performed) {
2232 /*
2233 * Yes. Clear any loop-detection counter;
2234 * we're making some form of progress (assuming
2235 * we can't get into a cycle doing *other*
2236 * optimizations...).
2237 */
2238 loop_count = 0;
2239 } else {
2240 /*
2241 * No - increment the counter, and quit if
2242 * it's up to 100.
2243 */
2244 loop_count++;
2245 if (loop_count >= 100) {
2246 /*
2247 * We've done nothing but branch movement
2248 * for 100 passes; we're probably
2249 * in a cycle and will never reach a
2250 * fixed point.
2251 *
2252 * XXX - yes, we really need a non-
2253 * heuristic way of detecting a cycle.
2254 */
2255 opt_state->done = 1;
2256 break;
2257 }
2258 }
2259 }
2260 }
2261
2262 /*
2263 * Optimize the filter code in its dag representation.
2264 * Return 0 on success, -1 on error.
2265 */
2266 int
2267 bpf_optimize(struct icode *ic, char *errbuf)
2268 {
2269 opt_state_t opt_state;
2270
2271 memset(&opt_state, 0, sizeof(opt_state));
2272 opt_state.errbuf = errbuf;
2273 opt_state.non_branch_movement_performed = 0;
2274 if (setjmp(opt_state.top_ctx)) {
2275 opt_cleanup(&opt_state);
2276 return -1;
2277 }
2278 opt_init(&opt_state, ic);
2279 opt_loop(&opt_state, ic, 0);
2280 opt_loop(&opt_state, ic, 1);
2281 intern_blocks(&opt_state, ic);
2282 #ifdef BDEBUG
2283 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2284 printf("after intern_blocks()\n");
2285 opt_dump(&opt_state, ic);
2286 }
2287 #endif
2288 opt_root(&ic->root);
2289 #ifdef BDEBUG
2290 if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2291 printf("after opt_root()\n");
2292 opt_dump(&opt_state, ic);
2293 }
2294 #endif
2295 opt_cleanup(&opt_state);
2296 return 0;
2297 }
2298
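/*
 * Illustrative sketch, added for exposition and not part of the original
 * source: the expected calling sequence around bpf_optimize().  The icode
 * variable, length, and buffer names below are assumptions, not code taken
 * from gencode.c.  The optimizer rewrites the dag in place; on failure the
 * reason is left in the caller-supplied error buffer.
 */
#if 0
	char errbuf[PCAP_ERRBUF_SIZE];
	u_int len;
	struct bpf_insn *insns;

	if (bpf_optimize(&ic, errbuf) == -1)
		return (-1);		/* errbuf holds the reason */
	insns = icode_to_fcode(&ic, ic.root, &len, errbuf);
	if (insns == NULL)
		return (-1);		/* conversion failed, errbuf set */
#endif
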
2299 static void
2300 make_marks(struct icode *ic, struct block *p)
2301 {
2302 if (!isMarked(ic, p)) {
2303 Mark(ic, p);
2304 if (BPF_CLASS(p->s.code) != BPF_RET) {
2305 make_marks(ic, JT(p));
2306 make_marks(ic, JF(p));
2307 }
2308 }
2309 }
2310
2311 /*
2312 * Mark code array such that isMarked(ic, i) is true
2313 * only for nodes that are alive.
2314 */
2315 static void
2316 mark_code(struct icode *ic)
2317 {
2318 ic->cur_mark += 1;
2319 make_marks(ic, ic->root);
2320 }
2321
2322 /*
2323 * True iff the two stmt lists load the same value from the packet into
2324 * the accumulator.
2325 */
2326 static int
2327 eq_slist(struct slist *x, struct slist *y)
2328 {
2329 for (;;) {
2330 while (x && x->s.code == NOP)
2331 x = x->next;
2332 while (y && y->s.code == NOP)
2333 y = y->next;
2334 if (x == 0)
2335 return y == 0;
2336 if (y == 0)
2337 return x == 0;
2338 if (x->s.code != y->s.code || x->s.k != y->s.k)
2339 return 0;
2340 x = x->next;
2341 y = y->next;
2342 }
2343 }
2344
2345 static inline int
2346 eq_blk(struct block *b0, struct block *b1)
2347 {
2348 if (b0->s.code == b1->s.code &&
2349 b0->s.k == b1->s.k &&
2350 b0->et.succ == b1->et.succ &&
2351 b0->ef.succ == b1->ef.succ)
2352 return eq_slist(b0->stmts, b1->stmts);
2353 return 0;
2354 }
2355
2356 static void
2357 intern_blocks(opt_state_t *opt_state, struct icode *ic)
2358 {
2359 struct block *p;
2360 u_int i, j;
2361 int done1; /* don't shadow global */
2362 top:
2363 done1 = 1;
2364 for (i = 0; i < opt_state->n_blocks; ++i)
2365 opt_state->blocks[i]->link = 0;
2366
2367 mark_code(ic);
2368
2369 for (i = opt_state->n_blocks - 1; i != 0; ) {
2370 --i;
2371 if (!isMarked(ic, opt_state->blocks[i]))
2372 continue;
2373 for (j = i + 1; j < opt_state->n_blocks; ++j) {
2374 if (!isMarked(ic, opt_state->blocks[j]))
2375 continue;
2376 if (eq_blk(opt_state->blocks[i], opt_state->blocks[j])) {
2377 opt_state->blocks[i]->link = opt_state->blocks[j]->link ?
2378 opt_state->blocks[j]->link : opt_state->blocks[j];
2379 break;
2380 }
2381 }
2382 }
2383 for (i = 0; i < opt_state->n_blocks; ++i) {
2384 p = opt_state->blocks[i];
2385 if (JT(p) == 0)
2386 continue;
2387 if (JT(p)->link) {
2388 done1 = 0;
2389 JT(p) = JT(p)->link;
2390 }
2391 if (JF(p)->link) {
2392 done1 = 0;
2393 JF(p) = JF(p)->link;
2394 }
2395 }
2396 if (!done1)
2397 goto top;
2398 }
2399
2400 static void
2401 opt_cleanup(opt_state_t *opt_state)
2402 {
2403 free((void *)opt_state->vnode_base);
2404 free((void *)opt_state->vmap);
2405 free((void *)opt_state->edges);
2406 free((void *)opt_state->space);
2407 free((void *)opt_state->levels);
2408 free((void *)opt_state->blocks);
2409 }
2410
2411 /*
2412 * For optimizer errors.
2413 */
2414 static void PCAP_NORETURN
2415 opt_error(opt_state_t *opt_state, const char *fmt, ...)
2416 {
2417 va_list ap;
2418
2419 if (opt_state->errbuf != NULL) {
2420 va_start(ap, fmt);
2421 (void)vsnprintf(opt_state->errbuf,
2422 PCAP_ERRBUF_SIZE, fmt, ap);
2423 va_end(ap);
2424 }
2425 longjmp(opt_state->top_ctx, 1);
2426 /* NOTREACHED */
2427 #ifdef _AIX
2428 PCAP_UNREACHABLE
2429 #endif /* _AIX */
2430 }
2431
2432 /*
2433 * Return the number of stmts in 's'.
2434 */
2435 static u_int
2436 slength(struct slist *s)
2437 {
2438 u_int n = 0;
2439
2440 for (; s; s = s->next)
2441 if (s->s.code != NOP)
2442 ++n;
2443 return n;
2444 }
2445
2446 /*
2447 * Return the number of nodes reachable by 'p'.
2448 * All nodes should be initially unmarked.
2449 */
2450 static int
2451 count_blocks(struct icode *ic, struct block *p)
2452 {
2453 if (p == 0 || isMarked(ic, p))
2454 return 0;
2455 Mark(ic, p);
2456 return count_blocks(ic, JT(p)) + count_blocks(ic, JF(p)) + 1;
2457 }
2458
2459 /*
2460 * Do a depth first search on the flow graph, numbering the
2461 * basic blocks, and entering them into the 'blocks' array.
2462 */
2463 static void
2464 number_blks_r(opt_state_t *opt_state, struct icode *ic, struct block *p)
2465 {
2466 u_int n;
2467
2468 if (p == 0 || isMarked(ic, p))
2469 return;
2470
2471 Mark(ic, p);
2472 n = opt_state->n_blocks++;
2473 if (opt_state->n_blocks == 0) {
2474 /*
2475 * Overflow.
2476 */
2477 opt_error(opt_state, "filter is too complex to optimize");
2478 }
2479 p->id = n;
2480 opt_state->blocks[n] = p;
2481
2482 number_blks_r(opt_state, ic, JT(p));
2483 number_blks_r(opt_state, ic, JF(p));
2484 }
2485
2486 /*
2487 * Return the number of stmts in the flowgraph reachable by 'p'.
2488 * The nodes should be unmarked before calling.
2489 *
2490 * Note that "stmts" means "instructions", and that this includes
2491 *
2492 * side-effect statements in 'p' (slength(p->stmts));
2493 *
2494 * statements in the true branch from 'p' (count_stmts(JT(p)));
2495 *
2496 * statements in the false branch from 'p' (count_stmts(JF(p)));
2497 *
2498 * the conditional jump itself (1);
2499 *
2500 * an extra long jump if the true branch requires it (p->longjt);
2501 *
2502 * an extra long jump if the false branch requires it (p->longjf).
2503 */
2504 static u_int
2505 count_stmts(struct icode *ic, struct block *p)
2506 {
2507 u_int n;
2508
2509 if (p == 0 || isMarked(ic, p))
2510 return 0;
2511 Mark(ic, p);
2512 n = count_stmts(ic, JT(p)) + count_stmts(ic, JF(p));
2513 return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
2514 }
2515
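/*
 * Worked example, added for exposition and not part of the original
 * source: for a conditional block P with two non-NOP statements and no
 * long jumps whose true and false branches both reach the same RET leaf,
 * the leaf contributes 0 + 1 = 1 the first time it is visited and 0 the
 * second time (it is already marked), so
 * count_stmts(P) = 2 + (1 + 0) + 1 = 4.  With two distinct RET leaves
 * the total would instead be 2 + (1 + 1) + 1 = 5.
 */
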
2516 /*
2517 * Allocate memory. All allocation is done before optimization
2518 * is begun. A linear bound on the size of all data structures is computed
2519 * from the total number of blocks and/or statements.
2520 */
2521 static void
2522 opt_init(opt_state_t *opt_state, struct icode *ic)
2523 {
2524 bpf_u_int32 *p;
2525 int i, n, max_stmts;
2526 u_int product;
2527 size_t block_memsize, edge_memsize;
2528
2529 /*
2530 * First, count the blocks, so we can malloc an array to map
2531 * block number to block. Then, put the blocks into the array.
2532 */
2533 unMarkAll(ic);
2534 n = count_blocks(ic, ic->root);
2535 opt_state->blocks = (struct block **)calloc(n, sizeof(*opt_state->blocks));
2536 if (opt_state->blocks == NULL)
2537 opt_error(opt_state, "malloc");
2538 unMarkAll(ic);
2539 opt_state->n_blocks = 0;
2540 number_blks_r(opt_state, ic, ic->root);
2541
2542 /*
2543 * This "should not happen".
2544 */
2545 if (opt_state->n_blocks == 0)
2546 opt_error(opt_state, "filter has no instructions; please report this as a libpcap issue");
2547
2548 opt_state->n_edges = 2 * opt_state->n_blocks;
2549 if ((opt_state->n_edges / 2) != opt_state->n_blocks) {
2550 /*
2551 * Overflow.
2552 */
2553 opt_error(opt_state, "filter is too complex to optimize");
2554 }
2555 opt_state->edges = (struct edge **)calloc(opt_state->n_edges, sizeof(*opt_state->edges));
2556 if (opt_state->edges == NULL) {
2557 opt_error(opt_state, "malloc");
2558 }
2559
2560 /*
2561 * The number of levels is bounded by the number of nodes.
2562 */
2563 opt_state->levels = (struct block **)calloc(opt_state->n_blocks, sizeof(*opt_state->levels));
2564 if (opt_state->levels == NULL) {
2565 opt_error(opt_state, "malloc");
2566 }
2567
2568 opt_state->edgewords = opt_state->n_edges / BITS_PER_WORD + 1;
2569 opt_state->nodewords = opt_state->n_blocks / BITS_PER_WORD + 1;
2570
2571 /*
2572 * Make sure opt_state->n_blocks * opt_state->nodewords fits
2573 * in a u_int; we use it as a u_int number-of-iterations
2574 * value.
2575 */
2576 product = opt_state->n_blocks * opt_state->nodewords;
2577 if ((product / opt_state->n_blocks) != opt_state->nodewords) {
2578 /*
2579 * XXX - just punt and don't try to optimize?
2580 * In practice, this is unlikely to happen with
2581 * a normal filter.
2582 */
2583 opt_error(opt_state, "filter is too complex to optimize");
2584 }
2585
2586 /*
2587 * Make sure the total memory required for that doesn't
2588 * overflow.
2589 */
2590 block_memsize = (size_t)2 * product * sizeof(*opt_state->space);
2591 if ((block_memsize / product) != 2 * sizeof(*opt_state->space)) {
2592 opt_error(opt_state, "filter is too complex to optimize");
2593 }
2594
2595 /*
2596 * Make sure opt_state->n_edges * opt_state->edgewords fits
2597 * in a u_int; we use it as a u_int number-of-iterations
2598 * value.
2599 */
2600 product = opt_state->n_edges * opt_state->edgewords;
2601 if ((product / opt_state->n_edges) != opt_state->edgewords) {
2602 opt_error(opt_state, "filter is too complex to optimize");
2603 }
2604
2605 /*
2606 * Make sure the total memory required for that doesn't
2607 * overflow.
2608 */
2609 edge_memsize = (size_t)product * sizeof(*opt_state->space);
2610 if (edge_memsize / product != sizeof(*opt_state->space)) {
2611 opt_error(opt_state, "filter is too complex to optimize");
2612 }
2613
2614 /*
2615 * Make sure the total memory required for both of them doesn't
2616 * overflow.
2617 */
2618 if (block_memsize > SIZE_MAX - edge_memsize) {
2619 opt_error(opt_state, "filter is too complex to optimize");
2620 }
2621
2622 /* XXX */
2623 opt_state->space = (bpf_u_int32 *)malloc(block_memsize + edge_memsize);
2624 if (opt_state->space == NULL) {
2625 opt_error(opt_state, "malloc");
2626 }
2627 p = opt_state->space;
2628 opt_state->all_dom_sets = p;
2629 for (i = 0; i < n; ++i) {
2630 opt_state->blocks[i]->dom = p;
2631 p += opt_state->nodewords;
2632 }
2633 opt_state->all_closure_sets = p;
2634 for (i = 0; i < n; ++i) {
2635 opt_state->blocks[i]->closure = p;
2636 p += opt_state->nodewords;
2637 }
2638 opt_state->all_edge_sets = p;
2639 for (i = 0; i < n; ++i) {
2640 register struct block *b = opt_state->blocks[i];
2641
2642 b->et.edom = p;
2643 p += opt_state->edgewords;
2644 b->ef.edom = p;
2645 p += opt_state->edgewords;
2646 b->et.id = i;
2647 opt_state->edges[i] = &b->et;
2648 b->ef.id = opt_state->n_blocks + i;
2649 opt_state->edges[opt_state->n_blocks + i] = &b->ef;
2650 b->et.pred = b;
2651 b->ef.pred = b;
2652 }
2653 max_stmts = 0;
2654 for (i = 0; i < n; ++i)
2655 max_stmts += slength(opt_state->blocks[i]->stmts) + 1;
2656 /*
2657 * We allocate at most 3 value numbers per statement,
2658 * so this is an upper bound on the number of valnodes
2659 * we'll need.
2660 */
2661 opt_state->maxval = 3 * max_stmts;
2662 opt_state->vmap = (struct vmapinfo *)calloc(opt_state->maxval, sizeof(*opt_state->vmap));
2663 if (opt_state->vmap == NULL) {
2664 opt_error(opt_state, "malloc");
2665 }
2666 opt_state->vnode_base = (struct valnode *)calloc(opt_state->maxval, sizeof(*opt_state->vnode_base));
2667 if (opt_state->vnode_base == NULL) {
2668 opt_error(opt_state, "malloc");
2669 }
2670 }
2671
2672 /*
2673 * This is only used when supporting optimizer debugging. It is
2674 * global state, so do *not* do more than one compile in parallel
2675 * and expect it to provide meaningful information.
2676 */
2677 #ifdef BDEBUG
2678 int bids[NBIDS];
2679 #endif
2680
2681 static void PCAP_NORETURN conv_error(conv_state_t *, const char *, ...)
2682 PCAP_PRINTFLIKE(2, 3);
2683
2684 /*
2685 * Returns true if successful. Returns false if a branch has
2686 * an offset that is too large. If so, we have marked that
2687 * branch so that on a subsequent iteration, it will be treated
2688 * properly.
2689 */
2690 static int
2691 convert_code_r(conv_state_t *conv_state, struct icode *ic, struct block *p)
2692 {
2693 struct bpf_insn *dst;
2694 struct slist *src;
2695 u_int slen;
2696 u_int off;
2697 struct slist **offset = NULL;
2698
2699 if (p == 0 || isMarked(ic, p))
2700 return (1);
2701 Mark(ic, p);
2702
2703 if (convert_code_r(conv_state, ic, JF(p)) == 0)
2704 return (0);
2705 if (convert_code_r(conv_state, ic, JT(p)) == 0)
2706 return (0);
2707
2708 slen = slength(p->stmts);
2709 dst = conv_state->ftail -= (slen + 1 + p->longjt + p->longjf);
2710 /* inflate length by any extra jumps */
2711
2712 p->offset = (int)(dst - conv_state->fstart);
2713
2714 /* generate offset[] for convenience */
2715 if (slen) {
2716 offset = (struct slist **)calloc(slen, sizeof(struct slist *));
2717 if (!offset) {
2718 conv_error(conv_state, "not enough core");
2719 /*NOTREACHED*/
2720 }
2721 }
2722 src = p->stmts;
2723 for (off = 0; off < slen && src; off++) {
2724 #if 0
2725 printf("off=%d src=%x\n", off, src);
2726 #endif
2727 offset[off] = src;
2728 src = src->next;
2729 }
2730
2731 off = 0;
2732 for (src = p->stmts; src; src = src->next) {
2733 if (src->s.code == NOP)
2734 continue;
2735 dst->code = (u_short)src->s.code;
2736 dst->k = src->s.k;
2737
2738 /* fill block-local relative jump */
2739 if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
2740 #if 0
2741 if (src->s.jt || src->s.jf) {
2742 free(offset);
2743 conv_error(conv_state, "illegal jmp destination");
2744 /*NOTREACHED*/
2745 }
2746 #endif
2747 goto filled;
2748 }
2749 if (off == slen - 2) /*???*/
2750 goto filled;
2751
2752 {
2753 u_int i;
2754 int jt, jf;
2755 const char ljerr[] = "%s for block-local relative jump: off=%d";
2756
2757 #if 0
2758 printf("code=%x off=%d %x %x\n", src->s.code,
2759 off, src->s.jt, src->s.jf);
2760 #endif
2761
2762 if (!src->s.jt || !src->s.jf) {
2763 free(offset);
2764 conv_error(conv_state, ljerr, "no jmp destination", off);
2765 /*NOTREACHED*/
2766 }
2767
2768 jt = jf = 0;
2769 for (i = 0; i < slen; i++) {
2770 if (offset[i] == src->s.jt) {
2771 if (jt) {
2772 free(offset);
2773 conv_error(conv_state, ljerr, "multiple matches", off);
2774 /*NOTREACHED*/
2775 }
2776
2777 if (i - off - 1 >= 256) {
2778 free(offset);
2779 conv_error(conv_state, ljerr, "out-of-range jump", off);
2780 /*NOTREACHED*/
2781 }
2782 dst->jt = (u_char)(i - off - 1);
2783 jt++;
2784 }
2785 if (offset[i] == src->s.jf) {
2786 if (jf) {
2787 free(offset);
2788 conv_error(conv_state, ljerr, "multiple matches", off);
2789 /*NOTREACHED*/
2790 }
2791 if (i - off - 1 >= 256) {
2792 free(offset);
2793 conv_error(conv_state, ljerr, "out-of-range jump", off);
2794 /*NOTREACHED*/
2795 }
2796 dst->jf = (u_char)(i - off - 1);
2797 jf++;
2798 }
2799 }
2800 if (!jt || !jf) {
2801 free(offset);
2802 conv_error(conv_state, ljerr, "no destination found", off);
2803 /*NOTREACHED*/
2804 }
2805 }
2806 filled:
2807 ++dst;
2808 ++off;
2809 }
2810 if (offset)
2811 free(offset);
2812
2813 #ifdef BDEBUG
2814 if (dst - conv_state->fstart < NBIDS)
2815 bids[dst - conv_state->fstart] = p->id + 1;
2816 #endif
2817 dst->code = (u_short)p->s.code;
2818 dst->k = p->s.k;
2819 if (JT(p)) {
2820 /* number of extra jumps inserted */
2821 u_char extrajmps = 0;
2822 off = JT(p)->offset - (p->offset + slen) - 1;
2823 if (off >= 256) {
2824 /* offset too large for branch, must add a jump */
2825 if (p->longjt == 0) {
2826 /* mark this instruction and retry */
2827 p->longjt++;
2828 return(0);
2829 }
2830 dst->jt = extrajmps;
2831 extrajmps++;
2832 dst[extrajmps].code = BPF_JMP|BPF_JA;
2833 dst[extrajmps].k = off - extrajmps;
2834 }
2835 else
2836 dst->jt = (u_char)off;
2837 off = JF(p)->offset - (p->offset + slen) - 1;
2838 if (off >= 256) {
2839 /* offset too large for branch, must add a jump */
2840 if (p->longjf == 0) {
2841 /* mark this instruction and retry */
2842 p->longjf++;
2843 return(0);
2844 }
2845 /* branch if F to following jump */
2846 /* if two jumps are inserted, F goes to second one */
2847 dst->jf = extrajmps;
2848 extrajmps++;
2849 dst[extrajmps].code = BPF_JMP|BPF_JA;
2850 dst[extrajmps].k = off - extrajmps;
2851 }
2852 else
2853 dst->jf = (u_char)off;
2854 }
2855 return (1);
2856 }
2857
2858
2859 /*
2860 * Convert flowgraph intermediate representation to the
2861 * BPF array representation. Set *lenp to the number of instructions.
2862 *
2863 * This routine does *NOT* leak the memory pointed to by fp. It *must
2864 * not* do free(fp) before returning fp; doing so would make no sense,
2865 * as the BPF array pointed to by the return value of icode_to_fcode()
2866 * must be valid - it's being returned for use in a bpf_program structure.
2867 *
2868 * If it appears that icode_to_fcode() is leaking, the problem is that
2869 * the program using pcap_compile() is failing to free the memory in
2870 * the BPF program when it's done - the leak is in the program, not in
2871 * the routine that happens to be allocating the memory. (By analogy, if
2872 * a program calls fopen() without ever calling fclose() on the FILE *,
2873 * it will leak the FILE structure; the leak is not in fopen(), it's in
2874 * the program.) Change the program to use pcap_freecode() when it's
2875 * done with the filter program. See the pcap man page.
2876 */
2877 struct bpf_insn *
2878 icode_to_fcode(struct icode *ic, struct block *root, u_int *lenp,
2879 char *errbuf)
2880 {
2881 u_int n;
2882 struct bpf_insn *fp;
2883 conv_state_t conv_state;
2884
2885 conv_state.fstart = NULL;
2886 conv_state.errbuf = errbuf;
2887 if (setjmp(conv_state.top_ctx) != 0) {
2888 free(conv_state.fstart);
2889 return NULL;
2890 }
2891
2892 /*
2893 * Loop doing convert_code_r() until no branches remain
2894 * with too-large offsets.
2895 */
2896 for (;;) {
2897 unMarkAll(ic);
2898 n = *lenp = count_stmts(ic, root);
2899
2900 fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
2901 if (fp == NULL) {
2902 (void)snprintf(errbuf, PCAP_ERRBUF_SIZE,
2903 "malloc");
2904 return NULL;
2905 }
2906 memset((char *)fp, 0, sizeof(*fp) * n);
2907 conv_state.fstart = fp;
2908 conv_state.ftail = fp + n;
2909
2910 unMarkAll(ic);
2911 if (convert_code_r(&conv_state, ic, root))
2912 break;
2913 free(fp);
2914 }
2915
2916 return fp;
2917 }
2918
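/*
 * Illustrative sketch, added for exposition and not part of the original
 * source: the ownership contract described above, as seen from a program
 * using the public libpcap API.  The filter expression and error handling
 * here are arbitrary examples.
 */
#if 0
	struct bpf_program prog;

	if (pcap_compile(p, &prog, "ip and tcp", 1,
	    PCAP_NETMASK_UNKNOWN) == -1) {
		fprintf(stderr, "%s\n", pcap_geterr(p));
		return (-1);
	}
	if (pcap_setfilter(p, &prog) == -1)
		fprintf(stderr, "%s\n", pcap_geterr(p));
	/* The program, not libpcap, frees the generated code. */
	pcap_freecode(&prog);
#endif
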
2919 /*
2920 * For icode_to_fcode() errors.
2921 */
2922 static void PCAP_NORETURN
2923 conv_error(conv_state_t *conv_state, const char *fmt, ...)
2924 {
2925 va_list ap;
2926
2927 va_start(ap, fmt);
2928 (void)vsnprintf(conv_state->errbuf,
2929 PCAP_ERRBUF_SIZE, fmt, ap);
2930 va_end(ap);
2931 longjmp(conv_state->top_ctx, 1);
2932 /* NOTREACHED */
2933 #ifdef _AIX
2934 PCAP_UNREACHABLE
2935 #endif /* _AIX */
2936 }
2937
2938 /*
2939 * Make a copy of a BPF program and put it in the "fcode" member of
2940 * a "pcap_t".
2941 *
2942 * If we fail to allocate memory for the copy, fill in the "errbuf"
2943 * member of the "pcap_t" with an error message, and return -1;
2944 * otherwise, return 0.
2945 */
2946 int
2947 install_bpf_program(pcap_t *p, struct bpf_program *fp)
2948 {
2949 size_t prog_size;
2950
2951 /*
2952 * Validate the program.
2953 */
2954 if (!pcap_validate_filter(fp->bf_insns, fp->bf_len)) {
2955 snprintf(p->errbuf, sizeof(p->errbuf),
2956 "BPF program is not valid");
2957 return (-1);
2958 }
2959
2960 /*
2961 * Free up any already installed program.
2962 */
2963 pcap_freecode(&p->fcode);
2964
2965 prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
2966 p->fcode.bf_len = fp->bf_len;
2967 p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
2968 if (p->fcode.bf_insns == NULL) {
2969 pcap_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
2970 errno, "malloc");
2971 return (-1);
2972 }
2973 memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
2974 return (0);
2975 }
2976
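/*
 * Illustrative sketch, added for exposition and not part of the original
 * source: how a capture module's setfilter routine might use
 * install_bpf_program() to keep a validated copy of the filter for
 * userland filtering.  The routine name is hypothetical.
 */
#if 0
static int
example_setfilter(pcap_t *p, struct bpf_program *fp)
{
	/* Validate and copy the program; on failure p->errbuf is set. */
	if (install_bpf_program(p, fp) < 0)
		return (-1);

	/* The copy in p->fcode can now be used to filter in userland. */
	return (0);
}
#endif
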
2977 #ifdef BDEBUG
2978 static void
2979 dot_dump_node(struct icode *ic, struct block *block, struct bpf_program *prog,
2980 FILE *out)
2981 {
2982 int icount, noffset;
2983 int i;
2984
2985 if (block == NULL || isMarked(ic, block))
2986 return;
2987 Mark(ic, block);
2988
2989 icount = slength(block->stmts) + 1 + block->longjt + block->longjf;
2990 noffset = min(block->offset + icount, (int)prog->bf_len);
2991
2992 fprintf(out, "\tblock%u [shape=ellipse, id=\"block-%u\" label=\"BLOCK%u\\n", block->id, block->id, block->id);
2993 for (i = block->offset; i < noffset; i++) {
2994 fprintf(out, "\\n%s", bpf_image(prog->bf_insns + i, i));
2995 }
2996 fprintf(out, "\" tooltip=\"");
2997 for (i = 0; i < BPF_MEMWORDS; i++)
2998 if (block->val[i] != VAL_UNKNOWN)
2999 fprintf(out, "val[%d]=%d ", i, block->val[i]);
3000 fprintf(out, "val[A]=%d ", block->val[A_ATOM]);
3001 fprintf(out, "val[X]=%d", block->val[X_ATOM]);
3002 fprintf(out, "\"");
3003 if (JT(block) == NULL)
3004 fprintf(out, ", peripheries=2");
3005 fprintf(out, "];\n");
3006
3007 dot_dump_node(ic, JT(block), prog, out);
3008 dot_dump_node(ic, JF(block), prog, out);
3009 }
3010
3011 static void
3012 dot_dump_edge(struct icode *ic, struct block *block, FILE *out)
3013 {
3014 if (block == NULL || isMarked(ic, block))
3015 return;
3016 Mark(ic, block);
3017
3018 if (JT(block)) {
3019 fprintf(out, "\t\"block%u\":se -> \"block%u\":n [label=\"T\"]; \n",
3020 block->id, JT(block)->id);
3021 fprintf(out, "\t\"block%u\":sw -> \"block%u\":n [label=\"F\"]; \n",
3022 block->id, JF(block)->id);
3023 }
3024 dot_dump_edge(ic, JT(block), out);
3025 dot_dump_edge(ic, JF(block), out);
3026 }
3027
3028 /* Output the block CFG using the graphviz/DOT language.
3029 * In the CFG, each block's code, the value index for each register at EXIT,
3030 * and the jump relationships are shown.
3031 *
3032 * example DOT for BPF `ip src host 1.1.1.1' is:
3033 digraph BPF {
3034 block0 [shape=ellipse, id="block-0" label="BLOCK0\n\n(000) ldh [12]\n(001) jeq #0x800 jt 2 jf 5" tooltip="val[A]=0 val[X]=0"];
3035 block1 [shape=ellipse, id="block-1" label="BLOCK1\n\n(002) ld [26]\n(003) jeq #0x1010101 jt 4 jf 5" tooltip="val[A]=0 val[X]=0"];
3036 block2 [shape=ellipse, id="block-2" label="BLOCK2\n\n(004) ret #68" tooltip="val[A]=0 val[X]=0", peripheries=2];
3037 block3 [shape=ellipse, id="block-3" label="BLOCK3\n\n(005) ret #0" tooltip="val[A]=0 val[X]=0", peripheries=2];
3038 "block0":se -> "block1":n [label="T"];
3039 "block0":sw -> "block3":n [label="F"];
3040 "block1":se -> "block2":n [label="T"];
3041 "block1":sw -> "block3":n [label="F"];
3042 }
3043 *
3044 * After installing graphviz from https://www.graphviz.org/, save the output as bpf.dot
3045 * and run `dot -Tpng -O bpf.dot' to draw the graph.
3046 */
3047 static int
3048 dot_dump(struct icode *ic, char *errbuf)
3049 {
3050 struct bpf_program f;
3051 FILE *out = stdout;
3052
3053 memset(bids, 0, sizeof bids);
3054 f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3055 if (f.bf_insns == NULL)
3056 return -1;
3057
3058 fprintf(out, "digraph BPF {\n");
3059 unMarkAll(ic);
3060 dot_dump_node(ic, ic->root, &f, out);
3061 unMarkAll(ic);
3062 dot_dump_edge(ic, ic->root, out);
3063 fprintf(out, "}\n");
3064
3065 free((char *)f.bf_insns);
3066 return 0;
3067 }
3068
3069 static int
3070 plain_dump(struct icode *ic, char *errbuf)
3071 {
3072 struct bpf_program f;
3073
3074 memset(bids, 0, sizeof bids);
3075 f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3076 if (f.bf_insns == NULL)
3077 return -1;
3078 bpf_dump(&f, 1);
3079 putchar('\n');
3080 free((char *)f.bf_insns);
3081 return 0;
3082 }
3083
3084 static void
3085 opt_dump(opt_state_t *opt_state, struct icode *ic)
3086 {
3087 int status;
3088 char errbuf[PCAP_ERRBUF_SIZE];
3089
3090 /*
3091 * If the CFG, in DOT format, is requested, output it rather than
3092 * the code that would be generated from that graph.
3093 */
3094 if (pcap_print_dot_graph)
3095 status = dot_dump(ic, errbuf);
3096 else
3097 status = plain_dump(ic, errbuf);
3098 if (status == -1)
3099 opt_error(opt_state, "opt_dump: icode_to_fcode failed: %s", errbuf);
3100 }
3101 #endif