/*
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Optimization module for BPF code intermediate representation.
 */
#include <pcap-types.h>

#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <setjmp.h>
#include <string.h>
#include <limits.h> /* for SIZE_MAX */
#include <errno.h>

#include "pcap-int.h"

#include "gencode.h"
#include "optimize.h"
#include "diag-control.h"

#ifdef HAVE_OS_PROTO_H
#include "os-proto.h"
#endif
#ifdef BDEBUG
/*
 * The internal "debug printout" flag for the filter expression optimizer.
 * The code to print that stuff is present only if BDEBUG is defined, so
 * the flag, and the routine to set it, are defined only if BDEBUG is
 * defined.
 */
static int pcap_optimizer_debug;
/*
 * Routine to set that flag.
 *
 * This is intended for libpcap developers, not for general use.
 * If you want to set these in a program, you'll have to declare this
 * routine yourself, with the appropriate DLL import attribute on Windows;
 * it's not declared in any header file, and won't be declared in any
 * header file provided by libpcap.
 */
PCAP_API void pcap_set_optimizer_debug(int value);

PCAP_API_DEF void
pcap_set_optimizer_debug(int value)
{
	pcap_optimizer_debug = value;
}
/*
 * The internal "print dot graph" flag for the filter expression optimizer.
 * The code to print that stuff is present only if BDEBUG is defined, so
 * the flag, and the routine to set it, are defined only if BDEBUG is
 * defined.
 */
static int pcap_print_dot_graph;

/*
 * Routine to set that flag.
 *
 * This is intended for libpcap developers, not for general use.
 * If you want to set these in a program, you'll have to declare this
 * routine yourself, with the appropriate DLL import attribute on Windows;
 * it's not declared in any header file, and won't be declared in any
 * header file provided by libpcap.
 */
PCAP_API void pcap_set_print_dot_graph(int value);

PCAP_API_DEF void
pcap_set_print_dot_graph(int value)
{
	pcap_print_dot_graph = value;
}
#endif
/*
 * lowest_set_bit().
 *
 * Takes a 32-bit integer as an argument.
 *
 * If handed a non-zero value, returns the index of the lowest set bit,
 * counting upwards from zero.
 *
 * If handed zero, the results are platform- and compiler-dependent.
 * Keep it out of the light, don't give it any water, don't feed it
 * after midnight, and don't pass zero to it.
 *
 * This is the same as the count of trailing zeroes in the word.
 */
#if PCAP_IS_AT_LEAST_GNUC_VERSION(3,4)
  /*
   * GCC 3.4 and later; we have __builtin_ctz().
   */
  #define lowest_set_bit(mask) ((u_int)__builtin_ctz(mask))
#elif defined(_MSC_VER)
  /*
   * Visual Studio; we support only 2005 and later, so use
   * _BitScanForward().
   */
#include <intrin.h>

#ifndef __clang__
#pragma intrinsic(_BitScanForward)
#endif

static __forceinline u_int
lowest_set_bit(int mask)
{
	unsigned long bit;

	/*
	 * Don't sign-extend mask if long is longer than int.
	 * (It's currently not, in MSVC, even on 64-bit platforms, but....)
	 */
	if (_BitScanForward(&bit, (unsigned int)mask) == 0)
		abort();	/* mask is zero */
	return (u_int)bit;
}
#elif (defined(MSDOS) && defined(__WATCOMC__)) || defined(STRINGS_H_DECLARES_FFS)
  /*
   * MS-DOS with Watcom C, which has <strings.h> and declares ffs() there,
   * or some other platform (UN*X conforming to a sufficiently recent version
   * of the Single UNIX Specification).
   */
  #include <strings.h>
  #define lowest_set_bit(mask)	(u_int)((ffs((mask)) - 1))
#elif (defined(MSDOS) && defined(__DJGPP__)) || defined(__hpux)
  /*
   * MS-DOS with DJGPP or HP-UX 11i v3, which declare ffs() in <string.h>,
   * which we've already included.  Place this branch after the <strings.h>
   * branch, in case a later release of HP-UX makes the declaration available
   * via the standard header.
   */
  #define lowest_set_bit(mask)	((u_int)(ffs((mask)) - 1))
#else
/*
 * None of the above.
 * Use a perfect-hash-function-based function.
 */
static u_int
lowest_set_bit(int mask)
{
	unsigned int v = (unsigned int)mask;

	static const u_int MultiplyDeBruijnBitPosition[32] = {
		0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
		31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
	};

	/*
	 * We strip off all but the lowermost set bit (v & -v),
	 * and perform a minimal perfect hash on it to look up the
	 * number of low-order zero bits in a table.
	 *
	 * See:
	 *
	 *	http://7ooo.mooo.com/text/ComputingTrailingZerosHOWTO.pdf
	 *
	 *	http://supertech.csail.mit.edu/papers/debruijn.pdf
	 */
	return (MultiplyDeBruijnBitPosition[((v & -v) * 0x077CB531U) >> 27]);
}
#endif
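/*
 * Editorial note, not part of the original source: a worked example of the
 * perfect hash above.  For mask = 8, only bit 3 is set, so v & -v == 8;
 * 8 * 0x077CB531U == 0x3BE5A988; shifting right by 27 keeps the top five
 * bits, 7; and MultiplyDeBruijnBitPosition[7] == 3, the index of the
 * lowest set bit.
 */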
/*
 * Represents a deleted instruction.
 */
#define NOP -1

/*
 * Register numbers for use-def values.
 * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
 * location.  A_ATOM is the accumulator and X_ATOM is the index
 * register.
 */
#define A_ATOM BPF_MEMWORDS
#define X_ATOM (BPF_MEMWORDS+1)

/*
 * This define is used to represent *both* the accumulator and
 * x register in use-def computations.
 * Currently, the use-def code assumes only one definition per instruction.
 */
#define AX_ATOM N_ATOMS
/*
 * These data structures are used in a Cocke and Schwartz style
 * value numbering scheme.  Since the flowgraph is acyclic,
 * exit values can be propagated from a node's predecessors
 * provided it is uniquely defined.
 */
struct valnode {
	int code;
	bpf_u_int32 v0, v1;
	int val;		/* the value number */
	struct valnode *next;
};

/* Integer constants mapped with the load immediate opcode. */
#define K(i) F(opt_state, BPF_LD|BPF_IMM|BPF_W, i, 0U)

struct vmapinfo {
	int is_const;
	bpf_u_int32 const_val;
};
typedef bpf_u_int32 *uset;

typedef struct {
	/*
	 * Place to longjmp to on an error.
	 */
	jmp_buf top_ctx;

	/*
	 * The buffer into which to put error message.
	 */
	char *errbuf;

	/*
	 * A flag to indicate that further optimization is needed.
	 * Iterative passes are continued until a given pass yields no
	 * code simplification or branch movement.
	 */
	int done;

	/*
	 * XXX - detect loops that do nothing but repeated AND/OR pullups
	 * and edge moves.
	 * If 100 passes in a row do nothing but that, treat it as a
	 * sign that we're stuck in a cycle in which each pass just
	 * shuffles the code and we eventually get back to the original
	 * configuration.
	 *
	 * XXX - we need a non-heuristic way of detecting, or preventing,
	 * such a cycle.
	 */
	int non_branch_movement_performed;

	u_int n_blocks;		/* number of blocks in the CFG; guaranteed to be > 0, as it's a RET instruction at a minimum */
	struct block **blocks;
	u_int n_edges;		/* twice n_blocks, so guaranteed to be > 0 */
	struct edge **edges;

	/*
	 * A bit vector set representation of the dominators.
	 * We round up the set size to the next power of two.
	 */
	u_int nodewords;	/* number of 32-bit words for a bit vector of "number of nodes" bits; guaranteed to be > 0 */
	u_int edgewords;	/* number of 32-bit words for a bit vector of "number of edges" bits; guaranteed to be > 0 */
	struct block **levels;
	bpf_u_int32 *space;
#define BITS_PER_WORD (8*sizeof(bpf_u_int32))

/*
 * True if a is in uset {p}
 */
#define SET_MEMBER(p, a) \
((p)[(unsigned)(a) / BITS_PER_WORD] & ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD)))

/*
 * Add 'a' to uset p.
 */
#define SET_INSERT(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] |= ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * Delete 'a' from uset p.
 */
#define SET_DELETE(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] &= ~((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * a := a intersect b
 * n must be guaranteed to be > 0
 */
#define SET_INTERSECT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register u_int _n = n;\
	do *_x++ &= *_y++; while (--_n != 0);\
}

/*
 * a := a - b
 * n must be guaranteed to be > 0
 */
#define SET_SUBTRACT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register u_int _n = n;\
	do *_x++ &=~ *_y++; while (--_n != 0);\
}

/*
 * a := a union b
 * n must be guaranteed to be > 0
 */
#define SET_UNION(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register u_int _n = n;\
	do *_x++ |= *_y++; while (--_n != 0);\
}
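/*
 * Editorial note, not part of the original source: with 32-bit words,
 * element 37 of a uset lives in word 37 / 32 == 1 at bit 37 % 32 == 5,
 * so SET_INSERT(p, 37) performs p[1] |= (bpf_u_int32)1 << 5, and
 * SET_MEMBER(p, 37) tests that same bit.
 */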
	uset all_dom_sets;
	uset all_closure_sets;
	uset all_edge_sets;

#define MODULUS 213
	struct valnode *hashtbl[MODULUS];
	bpf_u_int32 curval;
	bpf_u_int32 maxval;

	struct vmapinfo *vmap;
	struct valnode *vnode_base;
	struct valnode *next_vnode;
} opt_state_t;
typedef struct {
	/*
	 * Place to longjmp to on an error.
	 */
	jmp_buf top_ctx;

	/*
	 * The buffer into which to put error message.
	 */
	char *errbuf;

	/*
	 * Some pointers used to convert the basic block form of the code,
	 * into the array form that BPF requires.  'fstart' will point to
	 * the malloc'd array while 'ftail' is used during the recursive
	 * traversal.
	 */
	struct bpf_insn *fstart;
	struct bpf_insn *ftail;
} conv_state_t;
static void opt_init(opt_state_t *, struct icode *);
static void opt_cleanup(opt_state_t *);
static void PCAP_NORETURN opt_error(opt_state_t *, const char *, ...)
    PCAP_PRINTFLIKE(2, 3);

static void intern_blocks(opt_state_t *, struct icode *);

static void find_inedges(opt_state_t *, struct block *);
#ifdef BDEBUG
static void opt_dump(opt_state_t *, struct icode *);
#endif

#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif
static void
find_levels_r(opt_state_t *opt_state, struct icode *ic, struct block *b)
{
	int level;

	if (isMarked(ic, b))
		return;

	Mark(ic, b);
	b->link = 0;

	if (JT(b)) {
		find_levels_r(opt_state, ic, JT(b));
		find_levels_r(opt_state, ic, JF(b));
		level = MAX(JT(b)->level, JF(b)->level) + 1;
	} else
		level = 0;
	b->level = level;
	b->link = opt_state->levels[level];
	opt_state->levels[level] = b;
}
/*
 * Level graph.  The levels go from 0 at the leaves to
 * N_LEVELS at the root.  The opt_state->levels[] array points to the
 * first node of the level list, whose elements are linked
 * with the 'link' field of the struct block.
 */
static void
find_levels(opt_state_t *opt_state, struct icode *ic)
{
	memset((char *)opt_state->levels, 0, opt_state->n_blocks * sizeof(*opt_state->levels));
	unMarkAll(ic);
	find_levels_r(opt_state, ic, ic->root);
}
/*
 * Find dominator relationships.
 * Assumes graph has been leveled.
 */
static void
find_dom(opt_state_t *opt_state, struct block *root)
{
	u_int i;
	int level;
	struct block *b;
	bpf_u_int32 *x;

	/*
	 * Initialize sets to contain all nodes.
	 */
	x = opt_state->all_dom_sets;
	/*
	 * In opt_init(), we've made sure the product doesn't overflow.
	 */
	i = opt_state->n_blocks * opt_state->nodewords;
	while (i != 0) {
		--i;
		*x++ = 0xFFFFFFFFU;
	}
	/* Root starts off empty. */
	for (i = opt_state->nodewords; i != 0;) {
		--i;
		root->dom[i] = 0;
	}

	/* root->level is the highest level number found. */
	for (level = root->level; level >= 0; --level) {
		for (b = opt_state->levels[level]; b; b = b->link) {
			SET_INSERT(b->dom, b->id);
			if (JT(b) == 0)
				continue;
			SET_INTERSECT(JT(b)->dom, b->dom, opt_state->nodewords);
			SET_INTERSECT(JF(b)->dom, b->dom, opt_state->nodewords);
		}
	}
}
static void
propedom(opt_state_t *opt_state, struct edge *ep)
{
	SET_INSERT(ep->edom, ep->id);
	if (ep->succ) {
		SET_INTERSECT(ep->succ->et.edom, ep->edom, opt_state->edgewords);
		SET_INTERSECT(ep->succ->ef.edom, ep->edom, opt_state->edgewords);
	}
}
/*
 * Compute edge dominators.
 * Assumes graph has been leveled and predecessors established.
 */
static void
find_edom(opt_state_t *opt_state, struct block *root)
{
	u_int i;
	uset x;
	int level;
	struct block *b;

	x = opt_state->all_edge_sets;
	/*
	 * In opt_init(), we've made sure the product doesn't overflow.
	 */
	for (i = opt_state->n_edges * opt_state->edgewords; i != 0; ) {
		--i;
		x[i] = 0xFFFFFFFFU;
	}

	/* root->level is the highest level number found. */
	memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
	memset(root->ef.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
	for (level = root->level; level >= 0; --level) {
		for (b = opt_state->levels[level]; b != 0; b = b->link) {
			propedom(opt_state, &b->et);
			propedom(opt_state, &b->ef);
		}
	}
}
/*
 * Find the backwards transitive closure of the flow graph.  These sets
 * are backwards in the sense that we find the set of nodes that reach
 * a given node, not the set of nodes that can be reached by a node.
 *
 * Assumes graph has been leveled.
 */
static void
find_closure(opt_state_t *opt_state, struct block *root)
{
	int level;
	struct block *b;

	/*
	 * Initialize sets to contain no nodes.
	 */
	memset((char *)opt_state->all_closure_sets, 0,
	    opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->all_closure_sets));

	/* root->level is the highest level number found. */
	for (level = root->level; level >= 0; --level) {
		for (b = opt_state->levels[level]; b; b = b->link) {
			SET_INSERT(b->closure, b->id);
			if (JT(b) == 0)
				continue;
			SET_UNION(JT(b)->closure, b->closure, opt_state->nodewords);
			SET_UNION(JF(b)->closure, b->closure, opt_state->nodewords);
		}
	}
}
/*
 * Return the register number that is used by s.
 *
 * Returns A_ATOM if A is used, X_ATOM if X is used, AX_ATOM if both A and X
 * are used, the scratch memory location's number if a scratch memory
 * location is used (e.g., 0 for M[0]), or -1 if none of those are used.
 *
 * The implementation should probably change to an array access.
 */
static int
atomuse(struct stmt *s)
{
	register int c = s->code;

	if (c == NOP)
		return -1;

	switch (BPF_CLASS(c)) {

	case BPF_RET:
		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;

	case BPF_LD:
	case BPF_LDX:
		/*
		 * As there are fewer than 2^31 memory locations,
		 * s->k should be convertible to int without problems.
		 */
		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
			(BPF_MODE(c) == BPF_MEM) ? (int)s->k : -1;

	case BPF_ST:
		return A_ATOM;

	case BPF_STX:
		return X_ATOM;

	case BPF_JMP:
	case BPF_ALU:
		if (BPF_SRC(c) == BPF_X)
			return AX_ATOM;
		return A_ATOM;

	case BPF_MISC:
		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
	}
	abort();
	/* NOTREACHED */
}
/*
 * Return the register number that is defined by 's'.  We assume that
 * a single stmt cannot define more than one register.  If no register
 * is defined, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomdef(struct stmt *s)
{
	if (s->code == NOP)
		return -1;

	switch (BPF_CLASS(s->code)) {

	case BPF_LD:
	case BPF_ALU:
		return A_ATOM;

	case BPF_LDX:
		return X_ATOM;

	case BPF_ST:
	case BPF_STX:
		return s->k;

	case BPF_MISC:
		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
	}
	return -1;
}
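/*
 * Editorial note, not part of the original source: for "ld [14]"
 * (BPF_LD|BPF_ABS|BPF_H), atomuse() returns -1 and atomdef() returns
 * A_ATOM; for "tax" (BPF_MISC|BPF_TAX), atomuse() returns A_ATOM and
 * atomdef() returns X_ATOM; for "st M[2]", atomuse() returns A_ATOM and
 * atomdef() returns 2, the scratch memory location's number.
 */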
/*
 * Compute the sets of registers used, defined, and killed by 'b'.
 *
 * "Used" means that a statement in 'b' uses the register before any
 * statement in 'b' defines it, i.e. it uses the value left in
 * that register by a predecessor block of this block.
 * "Defined" means that a statement in 'b' defines it.
 * "Killed" means that a statement in 'b' defines it before any
 * statement in 'b' uses it, i.e. it kills the value left in that
 * register by a predecessor block of this block.
 */
static void
compute_local_ud(struct block *b)
{
	struct slist *s;
	atomset def = 0, use = 0, killed = 0;
	int atom;

	for (s = b->stmts; s; s = s->next) {
		if (s->s.code == NOP)
			continue;
		atom = atomuse(&s->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
			else
				abort();
		}
		atom = atomdef(&s->s);
		if (atom >= 0) {
			if (!ATOMELEM(use, atom))
				killed |= ATOMMASK(atom);
			def |= ATOMMASK(atom);
		}
	}
	if (BPF_CLASS(b->s.code) == BPF_JMP) {
		/*
		 * XXX - what about RET?
		 */
		atom = atomuse(&b->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
			else
				abort();
		}
	}

	b->def = def;
	b->kill = killed;
	b->in_use |= use;
}
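/*
 * Editorial note, not part of the original source: for a block whose
 * statements are "tax; ld [0]", the "tax" uses A before anything defines
 * it, so A lands in "use"; "tax" defines X before any use of X, so X lands
 * in both "def" and "killed"; the final "ld [0]" defines A, so A ends up
 * in "def" but not in "killed", because it was used first.
 */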
/*
 * Assume graph is already leveled.
 */
static void
find_ud(opt_state_t *opt_state, struct block *root)
{
	int i, maxlevel;
	struct block *p;

	/*
	 * root->level is the highest level number found;
	 * count down from there.
	 */
	maxlevel = root->level;
	for (i = maxlevel; i >= 0; --i)
		for (p = opt_state->levels[i]; p; p = p->link) {
			compute_local_ud(p);
			p->out_use = 0;
		}

	for (i = 1; i <= maxlevel; ++i) {
		for (p = opt_state->levels[i]; p; p = p->link) {
			p->out_use |= JT(p)->in_use | JF(p)->in_use;
			p->in_use |= p->out_use &~ p->kill;
		}
	}
}
static void
init_val(opt_state_t *opt_state)
{
	opt_state->curval = 0;
	opt_state->next_vnode = opt_state->vnode_base;
	memset((char *)opt_state->vmap, 0, opt_state->maxval * sizeof(*opt_state->vmap));
	memset((char *)opt_state->hashtbl, 0, sizeof opt_state->hashtbl);
}
/*
 * Because we really don't have an IR, this stuff is a little messy.
 *
 * This routine looks in the table of existing value numbers for a value
 * generated from an operation with the specified opcode and the
 * specified values.  If it finds it, it returns its value number,
 * otherwise it makes a new entry in the table and returns the
 * value number of that entry.
 */
static bpf_u_int32
F(opt_state_t *opt_state, int code, bpf_u_int32 v0, bpf_u_int32 v1)
{
	u_int hash;
	bpf_u_int32 val;
	struct valnode *p;

	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
	hash %= MODULUS;

	for (p = opt_state->hashtbl[hash]; p; p = p->next)
		if (p->code == code && p->v0 == v0 && p->v1 == v1)
			return p->val;

	/*
	 * Not found.  Allocate a new value, and assign it a new
	 * value number.
	 *
	 * opt_state->curval starts out as 0, which means VAL_UNKNOWN; we
	 * increment it before using it as the new value number, which
	 * means we never assign VAL_UNKNOWN.
	 *
	 * XXX - unless we overflow, but we probably won't have 2^32-1
	 * values; we treat 32 bits as effectively infinite.
	 */
	val = ++opt_state->curval;
	if (BPF_MODE(code) == BPF_IMM &&
	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
		opt_state->vmap[val].const_val = v0;
		opt_state->vmap[val].is_const = 1;
	}
	p = opt_state->next_vnode++;
	p->val = val;
	p->code = code;
	p->v0 = v0;
	p->v1 = v1;
	p->next = opt_state->hashtbl[hash];
	opt_state->hashtbl[hash] = p;

	return val;
}
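/*
 * Editorial note, not part of the original source: two occurrences of
 * "ld #20" hash to the same (code, v0, v1) triple, so the second lookup
 * finds the entry made by the first and returns the same value number.
 * That is what lets opt_stmt() and opt_peep() recognize that a register
 * already holds the value a later load would produce.
 */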
static inline void
vstore(struct stmt *s, bpf_u_int32 *valp, bpf_u_int32 newval, int alter)
{
	if (alter && newval != VAL_UNKNOWN && *valp == newval)
		s->code = NOP;
	else
		*valp = newval;
}
/*
 * Do constant-folding on binary operators.
 * (Unary operators are handled elsewhere.)
 */
static void
fold_op(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 v0, bpf_u_int32 v1)
{
	bpf_u_int32 a, b;

	a = opt_state->vmap[v0].const_val;
	b = opt_state->vmap[v1].const_val;

	switch (BPF_OP(s->code)) {
	case BPF_ADD:
		a += b;
		break;

	case BPF_SUB:
		a -= b;
		break;

	case BPF_MUL:
		a *= b;
		break;

	case BPF_DIV:
		if (b == 0)
			opt_error(opt_state, "division by zero");
		a /= b;
		break;

	case BPF_MOD:
		if (b == 0)
			opt_error(opt_state, "modulus by zero");
		a %= b;
		break;

	case BPF_AND:
		a &= b;
		break;

	case BPF_OR:
		a |= b;
		break;

	case BPF_XOR:
		a ^= b;
		break;

	case BPF_LSH:
		/*
		 * A left shift of more than the width of the type
		 * is undefined in C; we'll just treat it as shifting
		 * all the bits out.
		 *
		 * XXX - the BPF interpreter doesn't check for this,
		 * so its behavior is dependent on the behavior of
		 * the processor on which it's running.  There are
		 * processors on which it shifts all the bits out
		 * and processors on which it does no shift.
		 */
		if (b < 32)
			a <<= b;
		else
			a = 0;
		break;

	case BPF_RSH:
		/*
		 * A right shift of more than the width of the type
		 * is undefined in C; we'll just treat it as shifting
		 * all the bits out.
		 *
		 * XXX - the BPF interpreter doesn't check for this,
		 * so its behavior is dependent on the behavior of
		 * the processor on which it's running.  There are
		 * processors on which it shifts all the bits out
		 * and processors on which it does no shift.
		 */
		if (b < 32)
			a >>= b;
		else
			a = 0;
		break;

	default:
		abort();
	}
	s->k = a;
	s->code = BPF_LD|BPF_IMM;
	/*
	 * XXX - optimizer loop detection.
	 */
	opt_state->non_branch_movement_performed = 1;
	opt_state->done = 0;
}
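/*
 * Editorial note, not part of the original source: if the accumulator's
 * value number maps to the constant 6 and the statement is "mul #3",
 * fold_op() computes 18 and rewrites the statement as "ld #18", so the
 * multiply never runs at match time.
 */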
static inline struct slist *
this_op(struct slist *s)
{
	while (s != 0 && s->s.code == NOP)
		s = s->next;
	return s;
}
static void
opt_not(struct block *b)
{
	struct block *tmp = JT(b);

	JT(b) = JF(b);
	JF(b) = tmp;
}
static void
opt_peep(opt_state_t *opt_state, struct block *b)
{
	struct slist *s;
	struct slist *next, *last;
	bpf_u_int32 val;

	s = b->stmts;
	if (s == 0)
		return;

	last = s;
	for (/*empty*/; /*empty*/; s = next) {
		/*
		 * Skip over nops.
		 */
		s = this_op(s);
		if (s == 0)
			break;	/* nothing left in the block */

		/*
		 * Find the next real instruction after that one
		 * (skipping nops).
		 */
		next = this_op(s->next);
		if (next == 0)
			break;	/* no next instruction */
		last = next;

		/*
		 * st  M[k]	-->	st  M[k]
		 * ldx M[k]		tax
		 */
		if (s->s.code == BPF_ST &&
		    next->s.code == (BPF_LDX|BPF_MEM) &&
		    s->s.k == next->s.k) {
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
			next->s.code = BPF_MISC|BPF_TAX;
		}
		/*
		 * ld  #k	-->	ldx #k
		 * tax			txa
		 */
		if (s->s.code == (BPF_LD|BPF_IMM) &&
		    next->s.code == (BPF_MISC|BPF_TAX)) {
			s->s.code = BPF_LDX|BPF_IMM;
			next->s.code = BPF_MISC|BPF_TXA;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
		/*
		 * This is an ugly special case, but it happens
		 * when you say tcp[k] or udp[k] where k is a constant.
		 */
		if (s->s.code == (BPF_LD|BPF_IMM)) {
			struct slist *add, *tax, *ild;

			/*
			 * Check that X isn't used on exit from this
			 * block (which the optimizer might cause).
			 * We know the code generator won't generate
			 * any local dependencies.
			 */
			if (ATOMELEM(b->out_use, X_ATOM))
				continue;

			/*
			 * Check that the instruction following the ldi
			 * is an addx, or it's an ldxms with an addx
			 * following it (with 0 or more nops between the
			 * ldxms and addx).
			 */
			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
				add = next;
			else
				add = this_op(next->next);
			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
				continue;

			/*
			 * Check that a tax follows that (with 0 or more
			 * nops between them).
			 */
			tax = this_op(add->next);
			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
				continue;

			/*
			 * Check that an ild follows that (with 0 or more
			 * nops between them).
			 */
			ild = this_op(tax->next);
			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
			    BPF_MODE(ild->s.code) != BPF_IND)
				continue;
			/*
			 * We want to turn this sequence:
			 *
			 * (004) ldi     #0x2		{s}
			 * (005) ldxms   [14]		{next}  -- optional
			 * (006) addx			{add}
			 * (007) tax			{tax}
			 * (008) ild     [x+0]		{ild}
			 *
			 * into this sequence:
			 *
			 * (004) nop
			 * (005) ldxms   [14]
			 * (006) nop
			 * (007) nop
			 * (008) ild     [x+2]
			 *
			 * XXX We need to check that X is not
			 * subsequently used, because we want to change
			 * what'll be in it after this sequence.
			 *
			 * We know we can eliminate the accumulator
			 * modifications earlier in the sequence since
			 * it is defined by the last stmt of this sequence
			 * (i.e., the last statement of the sequence loads
			 * a value into the accumulator, so we can eliminate
			 * earlier operations on the accumulator).
			 */
			ild->s.k += s->s.k;
			s->s.code = NOP;
			add->s.code = NOP;
			tax->s.code = NOP;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
	}
	/*
	 * If the comparison at the end of a block is an equality
	 * comparison against a constant, and nobody uses the value
	 * we leave in the A register at the end of a block, and
	 * the operation preceding the comparison is an arithmetic
	 * operation, we can sometimes optimize it away.
	 */
	if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
		/*
		 * We can optimize away certain subtractions of the
		 * X register.
		 */
		if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
			val = b->val[X_ATOM];
			if (opt_state->vmap[val].is_const) {
				/*
				 * If we have a subtract to do a comparison,
				 * and the X register is a known constant,
				 * we can merge this value into the
				 * comparison:
				 *
				 * sub x  ->	nop
				 * jeq #y	jeq #(x+y)
				 */
				b->s.k += opt_state->vmap[val].const_val;
				last->s.code = NOP;
				/*
				 * XXX - optimizer loop detection.
				 */
				opt_state->non_branch_movement_performed = 1;
				opt_state->done = 0;
			} else if (b->s.k == 0) {
				/*
				 * If the X register isn't a constant,
				 * and the comparison in the test is
				 * against 0, we can compare with the
				 * X register, instead:
				 *
				 * sub x  ->	nop
				 * jeq #0	jeq x
				 */
				last->s.code = NOP;
				b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
				/*
				 * XXX - optimizer loop detection.
				 */
				opt_state->non_branch_movement_performed = 1;
				opt_state->done = 0;
			}
		}
		/*
		 * Likewise, a constant subtract can be simplified:
		 *
		 * sub #x ->	nop
		 * jeq #y ->	jeq #(x+y)
		 */
		else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
			last->s.code = NOP;
			b->s.k += last->s.k;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
		/*
		 * And, similarly, a constant AND can be simplified
		 * if we're testing against 0, i.e.:
		 *
		 * and #k	nop
		 * jeq #0  ->	jset #k
		 */
		else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
		    b->s.k == 0) {
			b->s.k = last->s.k;
			b->s.code = BPF_JMP|BPF_K|BPF_JSET;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
			last->s.code = NOP;
			opt_not(b);
		}
	}
	/*
	 * jset #0        ->   never
	 * jset #ffffffff ->   always
	 */
	if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
		if (b->s.k == 0)
			JT(b) = JF(b);
		if (b->s.k == 0xffffffffU)
			JF(b) = JT(b);
	}
	/*
	 * If we're comparing against the index register, and the index
	 * register is a known constant, we can just compare against that
	 * constant.
	 */
	val = b->val[X_ATOM];
	if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
		bpf_u_int32 v = opt_state->vmap[val].const_val;
		b->s.code &= ~BPF_X;
		b->s.k = v;
	}
	/*
	 * If the accumulator is a known constant, we can compute the
	 * comparison result.
	 */
	val = b->val[A_ATOM];
	if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
		bpf_u_int32 v = opt_state->vmap[val].const_val;
		switch (BPF_OP(b->s.code)) {

		case BPF_JEQ:
			v = v == b->s.k;
			break;

		case BPF_JGT:
			v = v > b->s.k;
			break;

		case BPF_JGE:
			v = v >= b->s.k;
			break;

		case BPF_JSET:
			v &= b->s.k;
			break;

		default:
			abort();
		}
		if (JF(b) != JT(b)) {
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
		if (v)
			JF(b) = JT(b);
		else
			JT(b) = JF(b);
	}
}
/*
 * Compute the symbolic value of the expression of 's', and update
 * anything it defines in the value table 'val'.  If 'alter' is true,
 * do various optimizations.  This code would be cleaner if symbolic
 * evaluation and code transformations weren't folded together.
 */
static void
opt_stmt(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 val[], int alter)
{
	int op;
	bpf_u_int32 v;

	switch (s->code) {

	case BPF_LD|BPF_ABS|BPF_W:
	case BPF_LD|BPF_ABS|BPF_H:
	case BPF_LD|BPF_ABS|BPF_B:
		v = F(opt_state, s->code, s->k, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IND|BPF_W:
	case BPF_LD|BPF_IND|BPF_H:
	case BPF_LD|BPF_IND|BPF_B:
		v = val[X_ATOM];
		if (alter && opt_state->vmap[v].is_const) {
			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
			s->k += opt_state->vmap[v].const_val;
			v = F(opt_state, s->code, s->k, 0L);
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
		else
			v = F(opt_state, s->code, s->k, v);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_LEN:
		v = F(opt_state, s->code, 0L, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_MSH|BPF_B:
		v = F(opt_state, s->code, s->k, 0L);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ALU|BPF_NEG:
		if (alter && opt_state->vmap[val[A_ATOM]].is_const) {
			s->code = BPF_LD|BPF_IMM;
			/*
			 * Do this negation as unsigned arithmetic; that's
			 * what modern BPF engines do, and it guarantees
			 * that all possible values can be negated.  (Yeah,
			 * negating 0x80000000, the minimum signed 32-bit
			 * two's-complement value, results in 0x80000000,
			 * so it's still negative, but we *should* be doing
			 * all unsigned arithmetic here, to match what
			 * modern BPF engines do.)
			 *
			 * Express it as 0U - (unsigned value) so that we
			 * don't get compiler warnings about negating an
			 * unsigned value and don't get UBSan warnings
			 * about the result of negating 0x80000000 being
			 * undefined.
			 */
			s->k = 0U - opt_state->vmap[val[A_ATOM]].const_val;
			val[A_ATOM] = K(s->k);
		}
		else
			val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], 0L);
		break;

	case BPF_ALU|BPF_ADD|BPF_K:
	case BPF_ALU|BPF_SUB|BPF_K:
	case BPF_ALU|BPF_MUL|BPF_K:
	case BPF_ALU|BPF_DIV|BPF_K:
	case BPF_ALU|BPF_MOD|BPF_K:
	case BPF_ALU|BPF_AND|BPF_K:
	case BPF_ALU|BPF_OR|BPF_K:
	case BPF_ALU|BPF_XOR|BPF_K:
	case BPF_ALU|BPF_LSH|BPF_K:
	case BPF_ALU|BPF_RSH|BPF_K:
		op = BPF_OP(s->code);
		if (alter) {
			if (s->k == 0) {
				/*
				 * Optimize operations where the constant
				 * is zero.
				 *
				 * Don't optimize away "sub #0"
				 * as it may be needed later to
				 * fixup the generated math code.
				 *
				 * Fail if we're dividing by zero or taking
				 * a modulus by zero.
				 */
				if (op == BPF_ADD ||
				    op == BPF_LSH || op == BPF_RSH ||
				    op == BPF_OR || op == BPF_XOR) {
					s->code = NOP;
					break;
				}
				if (op == BPF_MUL || op == BPF_AND) {
					s->code = BPF_LD|BPF_IMM;
					val[A_ATOM] = K(s->k);
					break;
				}
				if (op == BPF_DIV)
					opt_error(opt_state,
					    "division by zero");
				if (op == BPF_MOD)
					opt_error(opt_state,
					    "modulus by zero");
			}
			if (opt_state->vmap[val[A_ATOM]].is_const) {
				fold_op(opt_state, s, val[A_ATOM], K(s->k));
				val[A_ATOM] = K(s->k);
				break;
			}
		}
		val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], K(s->k));
		break;

	case BPF_ALU|BPF_ADD|BPF_X:
	case BPF_ALU|BPF_SUB|BPF_X:
	case BPF_ALU|BPF_MUL|BPF_X:
	case BPF_ALU|BPF_DIV|BPF_X:
	case BPF_ALU|BPF_MOD|BPF_X:
	case BPF_ALU|BPF_AND|BPF_X:
	case BPF_ALU|BPF_OR|BPF_X:
	case BPF_ALU|BPF_XOR|BPF_X:
	case BPF_ALU|BPF_LSH|BPF_X:
	case BPF_ALU|BPF_RSH|BPF_X:
		op = BPF_OP(s->code);
		if (alter && opt_state->vmap[val[X_ATOM]].is_const) {
			if (opt_state->vmap[val[A_ATOM]].is_const) {
				fold_op(opt_state, s, val[A_ATOM], val[X_ATOM]);
				val[A_ATOM] = K(s->k);
			}
			else {
				s->code = BPF_ALU|BPF_K|op;
				s->k = opt_state->vmap[val[X_ATOM]].const_val;
				if ((op == BPF_LSH || op == BPF_RSH) &&
				    s->k > 31)
					opt_error(opt_state,
					    "shift by more than 31 bits");
				/*
				 * XXX - optimizer loop detection.
				 */
				opt_state->non_branch_movement_performed = 1;
				opt_state->done = 0;
				val[A_ATOM] =
				    F(opt_state, s->code, val[A_ATOM], K(s->k));
			}
			break;
		}
		/*
		 * Check if we're doing something to an accumulator
		 * that is 0, and simplify.  This may not seem like
		 * much of a simplification but it could open up further
		 * optimizations.
		 * XXX We could also check for mul by 1, etc.
		 */
		if (alter && opt_state->vmap[val[A_ATOM]].is_const
		    && opt_state->vmap[val[A_ATOM]].const_val == 0) {
			if (op == BPF_ADD || op == BPF_OR || op == BPF_XOR) {
				s->code = BPF_MISC|BPF_TXA;
				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
				break;
			}
			else if (op == BPF_MUL || op == BPF_DIV || op == BPF_MOD ||
				 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
				s->code = BPF_LD|BPF_IMM;
				s->k = 0;
				vstore(s, &val[A_ATOM], K(s->k), alter);
				break;
			}
			else if (op == BPF_NEG) {
				s->code = NOP;
				break;
			}
		}
		val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], val[X_ATOM]);
		break;

	case BPF_MISC|BPF_TXA:
		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
		break;

	case BPF_LD|BPF_MEM:
		v = val[s->k];
		if (alter && opt_state->vmap[v].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = opt_state->vmap[v].const_val;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_MISC|BPF_TAX:
		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
		break;

	case BPF_LDX|BPF_MEM:
		v = val[s->k];
		if (alter && opt_state->vmap[v].is_const) {
			s->code = BPF_LDX|BPF_IMM;
			s->k = opt_state->vmap[v].const_val;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ST:
		vstore(s, &val[s->k], val[A_ATOM], alter);
		break;

	case BPF_STX:
		vstore(s, &val[s->k], val[X_ATOM], alter);
		break;
	}
}
static void
deadstmt(opt_state_t *opt_state, register struct stmt *s, register struct stmt *last[])
{
	register int atom;

	atom = atomuse(s);
	if (atom >= 0) {
		if (atom == AX_ATOM) {
			last[X_ATOM] = 0;
			last[A_ATOM] = 0;
		}
		else
			last[atom] = 0;
	}
	atom = atomdef(s);
	if (atom >= 0) {
		if (last[atom]) {
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
			last[atom]->code = NOP;
		}
		last[atom] = s;
	}
}
static void
opt_deadstores(opt_state_t *opt_state, register struct block *b)
{
	register struct slist *s;
	register int atom;
	struct stmt *last[N_ATOMS];

	memset((char *)last, 0, sizeof last);

	for (s = b->stmts; s != 0; s = s->next)
		deadstmt(opt_state, &s->s, last);
	deadstmt(opt_state, &b->s, last);

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
			last[atom]->code = NOP;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
}
static void
opt_blk(opt_state_t *opt_state, struct block *b, int do_stmts)
{
	struct slist *s;
	struct edge *p;
	int i;
	bpf_u_int32 aval, xval;

#if 0
	for (s = b->stmts; s && s->next; s = s->next)
		if (BPF_CLASS(s->s.code) == BPF_JMP) {
			do_stmts = 0;
			break;
		}
#endif

	/*
	 * Initialize the atom values.
	 */
	p = b->in_edges;
	if (p == 0) {
		/*
		 * We have no predecessors, so everything is undefined
		 * upon entry to this block.
		 */
		memset((char *)b->val, 0, sizeof(b->val));
	} else {
		/*
		 * Inherit values from our predecessors.
		 *
		 * First, get the values from the predecessor along the
		 * first edge leading to this node.
		 */
		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
		/*
		 * Now look at all the other nodes leading to this node.
		 * If, for the predecessor along that edge, a register
		 * has a different value from the one we have (i.e.,
		 * control paths are merging, and the merging paths
		 * assign different values to that register), give the
		 * register the undefined value of 0.
		 */
		while ((p = p->next) != NULL) {
			for (i = 0; i < N_ATOMS; ++i)
				if (b->val[i] != p->pred->val[i])
					b->val[i] = 0;
		}
	}
	aval = b->val[A_ATOM];
	xval = b->val[X_ATOM];
	for (s = b->stmts; s; s = s->next)
		opt_stmt(opt_state, &s->s, b->val, do_stmts);

	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator or index register with a
	 * value that is already there, or if this block is a return,
	 * eliminate all the statements.
	 *
	 * XXX - what if it does a store?  Presumably that falls under
	 * the heading of "if we don't use anything from this block",
	 * i.e., if we use any memory location set to a different
	 * value by this block, then we use something from this block.
	 *
	 * XXX - why does it matter whether we use anything from this
	 * block?  If the accumulator or index register doesn't change
	 * its value, isn't that OK even if we use that value?
	 *
	 * XXX - if we load the accumulator with a different value,
	 * and the block ends with a conditional branch, we obviously
	 * can't eliminate it, as the branch depends on that value.
	 * For the index register, the conditional branch only depends
	 * on the index register value if the test is against the index
	 * register value rather than a constant; if nothing uses the
	 * value we put into the index register, and we're not testing
	 * against the index register's value, and there aren't any
	 * other problems that would keep us from eliminating this
	 * block, can we eliminate it?
	 */
	if (do_stmts &&
	    ((b->out_use == 0 &&
	      aval != VAL_UNKNOWN && b->val[A_ATOM] == aval &&
	      xval != VAL_UNKNOWN && b->val[X_ATOM] == xval) ||
	     BPF_CLASS(b->s.code) == BPF_RET)) {
		if (b->stmts != 0) {
			b->stmts = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
	} else {
		opt_peep(opt_state, b);
		opt_deadstores(opt_state, b);
	}

	/*
	 * Set up values for branch optimizer.
	 */
	if (BPF_SRC(b->s.code) == BPF_K)
		b->oval = K(b->s.k);
	else
		b->oval = b->val[X_ATOM];
	b->et.code = b->s.code;
	b->ef.code = -b->s.code;
}
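/*
 * Editorial note, not part of the original source: the negation in
 * "b->ef.code = -b->s.code" is what lets fold_edge() tell the two edges
 * apart: a positive edge code means "branch if true" and a negative one
 * means "branch if false" (see the sign test at the top of fold_edge()).
 */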
/*
 * Return true if any register that is used on exit from 'succ', has
 * an exit value that is different from the corresponding exit value
 * from 'b'.
 */
static int
use_conflict(struct block *b, struct block *succ)
{
	int atom;
	atomset use = succ->out_use;

	if (use == 0)
		return 0;

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (ATOMELEM(use, atom))
			if (b->val[atom] != succ->val[atom])
				return 1;
	return 0;
}
/*
 * Given a block that is the successor of an edge, and an edge that
 * dominates that edge, return either a pointer to a child of that
 * block (a block to which that block jumps) if that block is a
 * candidate to replace the successor of the latter edge or NULL
 * if neither of the children of the first block are candidates.
 */
static struct block *
fold_edge(struct block *child, struct edge *ep)
{
	int sense;
	bpf_u_int32 aval0, aval1, oval0, oval1;
	int code = ep->code;

	if (code < 0) {
		/*
		 * This edge is a "branch if false" edge.
		 */
		code = -code;
		sense = 0;
	} else {
		/*
		 * This edge is a "branch if true" edge.
		 */
		sense = 1;
	}

	/*
	 * If the opcode for the branch at the end of the block we
	 * were handed isn't the same as the opcode for the branch
	 * to which the edge we were handed corresponds, the tests
	 * for those branches aren't testing the same conditions,
	 * so the blocks to which the first block branches aren't
	 * candidates to replace the successor of the edge.
	 */
	if (child->s.code != code)
		return 0;

	aval0 = child->val[A_ATOM];
	oval0 = child->oval;
	aval1 = ep->pred->val[A_ATOM];
	oval1 = ep->pred->oval;

	/*
	 * If the A register value on exit from the successor block
	 * isn't the same as the A register value on exit from the
	 * predecessor of the edge, the blocks to which the first
	 * block branches aren't candidates to replace the successor
	 * of the edge.
	 */
	if (aval0 != aval1)
		return 0;

	if (oval0 == oval1)
		/*
		 * The operands of the branch instructions are
		 * identical, so the branches are testing the
		 * same condition, and the result is true if a true
		 * branch was taken to get here, otherwise false.
		 */
		return sense ? JT(child) : JF(child);

	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
		/*
		 * At this point, we only know the comparison if we
		 * came down the true branch, and it was an equality
		 * comparison with a constant.
		 *
		 * I.e., if we came down the true branch, and the branch
		 * was an equality comparison with a constant, we know the
		 * accumulator contains that constant.  If we came down
		 * the false branch, or the comparison wasn't with a
		 * constant, we don't know what was in the accumulator.
		 *
		 * We rely on the fact that distinct constants have distinct
		 * value numbers.
		 */
		return JF(child);

	return 0;
}
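/*
 * Editorial note, not part of the original source: consider "jeq #6"
 * followed by "jeq #17" testing the same A value.  On the true branch of
 * the first test A is known to be 6, so the second test must fail;
 * fold_edge() returns JF() of the second block, and opt_j() can retarget
 * the incoming edge straight there.
 */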
/*
 * If we can make this edge go directly to a child of the edge's current
 * successor, do so.
 */
static void
opt_j(opt_state_t *opt_state, struct edge *ep)
{
	register u_int i, k;
	register struct block *target;

	/*
	 * Does this edge go to a block where, if the test
	 * at the end of it succeeds, it goes to a block
	 * that's a leaf node of the DAG, i.e. a return
	 * statement?
	 * If so, there's nothing to optimize.
	 */
	if (JT(ep->succ) == 0)
		return;

	/*
	 * Does this edge go to a block that goes, in turn, to
	 * the same block regardless of whether the test at the
	 * end succeeds or fails?
	 */
	if (JT(ep->succ) == JF(ep->succ)) {
		/*
		 * Common branch targets can be eliminated, provided
		 * there is no data dependency.
		 *
		 * Check whether any register used on exit from the
		 * block to which the successor of this edge goes
		 * has a value at that point that's different from
		 * the value it has on exit from the predecessor of
		 * this edge.  If not, the predecessor of this edge
		 * can just go to the block to which the successor
		 * of this edge goes, bypassing the successor of this
		 * edge, as the successor of this edge isn't doing
		 * any calculations whose results are different
		 * from what the blocks before it did and isn't
		 * doing any tests the results of which matter.
		 */
		if (!use_conflict(ep->pred, JT(ep->succ))) {
			/*
			 * Make this edge go to the block to
			 * which the successor of that edge
			 * goes.
			 *
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
			ep->succ = JT(ep->succ);
		}
	}

	/*
	 * For each edge dominator that matches the successor of this
	 * edge, promote the edge successor to its grandchild.
	 *
	 * XXX We violate the set abstraction here in favor of a reasonably
	 * efficient loop.
	 */
 top:
	for (i = 0; i < opt_state->edgewords; ++i) {
		/* i'th word in the bitset of dominators */
		register bpf_u_int32 x = ep->edom[i];

		while (x != 0) {
			/* Find the next dominator in that word and mark it as found */
			k = lowest_set_bit(x);
			x &=~ ((bpf_u_int32)1 << k);
			k += i * BITS_PER_WORD;

			target = fold_edge(ep->succ, opt_state->edges[k]);
			/*
			 * We have a candidate to replace the successor
			 * of ep.
			 *
			 * Check that there is no data dependency between
			 * nodes that will be violated if we move the edge;
			 * i.e., if any register used on exit from the
			 * candidate has a value at that point different
			 * from the value it has when we exit the
			 * predecessor of that edge, there's a data
			 * dependency that will be violated.
			 */
			if (target != 0 && !use_conflict(ep->pred, target)) {
				/*
				 * It's safe to replace the successor of
				 * ep; do so, and note that we've made
				 * at least one change.
				 *
				 * XXX - this is one of the operations that
				 * happens when the optimizer gets into
				 * one of those infinite loops.
				 */
				opt_state->done = 0;
				ep->succ = target;
				if (JT(target) != 0)
					/*
					 * Start over unless we hit a leaf.
					 */
					goto top;
				return;
			}
		}
	}
}
/*
 * XXX - is this, and and_pullup(), what's described in section 6.1.2
 * "Predicate Assertion Propagation" in the BPF+ paper?
 *
 * Note that this looks at block dominators, not edge dominators.
 *
 * "A or B" compiles into a chain of jump-compare blocks that all test
 * the same value, with each block's true branch going to the same target.
 */
static void
or_pullup(opt_state_t *opt_state, struct block *b)
{
	bpf_u_int32 val;
	int at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 * XXX why?
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	/*
	 * For the first edge in the list of edges coming into this block,
	 * see whether the predecessor of that edge comes here via a true
	 * branch or a false branch.
	 */
	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);	/* jt */
	else
		diffp = &JF(b->in_edges->pred);	/* jf */

	/*
	 * diffp is a pointer to a pointer to the block.
	 *
	 * Go down the false chain looking as far as you can,
	 * making sure that each jump-compare is doing the
	 * same as the original block.
	 *
	 * If you reach the bottom before you reach a
	 * different jump-compare, just exit.  There's nothing
	 * to do here.  XXX - no, this version is checking for
	 * the value leaving the block; that's from the BPF+
	 * pullup routine.
	 */
	at_top = 1;
	for (;;) {
		/*
		 * Done if that's not going anywhere XXX
		 */
		if (*diffp == 0)
			return;

		/*
		 * Done if that predecessor blah blah blah isn't
		 * going the same place we're going XXX
		 *
		 * Does the true edge of this block point to the same
		 * location as the true edge of b?
		 */
		if (JT(*diffp) != JT(b))
			return;

		/*
		 * Done if this node isn't a dominator of that
		 * node blah blah blah XXX
		 *
		 * Does b dominate diffp?
		 */
		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		/*
		 * Break out of the loop if that node's value of A
		 * isn't the value of A above XXX
		 */
		if ((*diffp)->val[A_ATOM] != val)
			break;

		/*
		 * Get the JF for that node XXX
		 * Go down the false path.
		 */
		diffp = &JF(*diffp);
		at_top = 0;
	}

	/*
	 * Now that we've found a different jump-compare in a chain
	 * below b, search further down until we find another
	 * jump-compare that looks at the original value.  This
	 * jump-compare should get pulled up.  XXX again we're
	 * comparing values not jump-compares.
	 */
	samep = &JF(*diffp);
	for (;;) {
		/*
		 * Done if that's not going anywhere XXX
		 */
		if (*samep == 0)
			return;

		/*
		 * Done if that predecessor blah blah blah isn't
		 * going the same place we're going XXX
		 */
		if (JT(*samep) != JT(b))
			return;

		/*
		 * Done if this node isn't a dominator of that
		 * node blah blah blah XXX
		 *
		 * Does b dominate samep?
		 */
		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		/*
		 * Break out of the loop if that node's value of A
		 * is the value of A above XXX
		 */
		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between dp0 and dp1.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JF(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JF(pull);
	JF(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	/*
	 * XXX - this is one of the operations that happens when the
	 * optimizer gets into one of those infinite loops.
	 */
	opt_state->done = 0;
}
static void
and_pullup(opt_state_t *opt_state, struct block *b)
{
	bpf_u_int32 val;
	int at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	for (;;) {
		if (*diffp == 0)
			return;

		if (JF(*diffp) != JF(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JT(*diffp);
		at_top = 0;
	}
	samep = &JT(*diffp);
	for (;;) {
		if (*samep == 0)
			return;

		if (JF(*samep) != JF(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between diffp and samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JT(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JT(pull);
	JT(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	/*
	 * XXX - this is one of the operations that happens when the
	 * optimizer gets into one of those infinite loops.
	 */
	opt_state->done = 0;
}
static void
opt_blks(opt_state_t *opt_state, struct icode *ic, int do_stmts)
{
	int i, maxlevel;
	struct block *p;

	init_val(opt_state);
	maxlevel = ic->root->level;

	find_inedges(opt_state, ic->root);
	for (i = maxlevel; i >= 0; --i)
		for (p = opt_state->levels[i]; p; p = p->link)
			opt_blk(opt_state, p, do_stmts);

	if (do_stmts)
		/*
		 * No point trying to move branches; it can't possibly
		 * make a difference at this point.
		 *
		 * XXX - this might be after we detect a loop where
		 * we were just looping infinitely moving branches
		 * in such a fashion that we went through two or more
		 * versions of the machine code, eventually returning
		 * to the first version.  (We're really not doing a
		 * full loop detection, we're just testing for two
		 * passes in a row where we do nothing but
		 * move branches.)
		 */
		return;

	/*
	 * Is this what the BPF+ paper describes in sections 6.1.1,
	 * 6.1.2, and 6.1.3?
	 */
	for (i = 1; i <= maxlevel; ++i) {
		for (p = opt_state->levels[i]; p; p = p->link) {
			opt_j(opt_state, &p->et);
			opt_j(opt_state, &p->ef);
		}
	}

	find_inedges(opt_state, ic->root);
	for (i = 1; i <= maxlevel; ++i) {
		for (p = opt_state->levels[i]; p; p = p->link) {
			or_pullup(opt_state, p);
			and_pullup(opt_state, p);
		}
	}
}
static inline void
link_inedge(struct edge *parent, struct block *child)
{
	parent->next = child->in_edges;
	child->in_edges = parent;
}
static void
find_inedges(opt_state_t *opt_state, struct block *root)
{
	u_int i;
	int level;
	struct block *b;

	for (i = 0; i < opt_state->n_blocks; ++i)
		opt_state->blocks[i]->in_edges = 0;

	/*
	 * Traverse the graph, adding each edge to the predecessor
	 * list of its successors.  Skip the leaves (i.e. level 0).
	 */
	for (level = root->level; level > 0; --level) {
		for (b = opt_state->levels[level]; b != 0; b = b->link) {
			link_inedge(&b->et, JT(b));
			link_inedge(&b->ef, JF(b));
		}
	}
}
static void
opt_root(struct block **b)
{
	struct slist *tmp, *s;

	s = (*b)->stmts;
	(*b)->stmts = 0;
	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
		*b = JT(*b);

	tmp = (*b)->stmts;
	if (tmp != 0)
		sappend(s, tmp);
	(*b)->stmts = s;

	/*
	 * If the root node is a return, then there is no
	 * point executing any statements (since the bpf machine
	 * has no side effects).
	 */
	if (BPF_CLASS((*b)->s.code) == BPF_RET)
		(*b)->stmts = 0;
}
static void
opt_loop(opt_state_t *opt_state, struct icode *ic, int do_stmts)
{
	int loop_count = 0;

#ifdef BDEBUG
	if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
		printf("opt_loop(root, %d) begin\n", do_stmts);
		opt_dump(opt_state, ic);
	}
#endif

	for (;;) {
		/*
		 * XXX - optimizer loop detection.
		 */
		opt_state->done = 1;
		/*
		 * XXX - optimizer loop detection.
		 */
		opt_state->non_branch_movement_performed = 0;
		find_levels(opt_state, ic);
		find_dom(opt_state, ic->root);
		find_closure(opt_state, ic->root);
		find_ud(opt_state, ic->root);
		find_edom(opt_state, ic->root);
		opt_blks(opt_state, ic, do_stmts);
#ifdef BDEBUG
		if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
			printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, opt_state->done);
			opt_dump(opt_state, ic);
		}
#endif

		/*
		 * Was anything done in this optimizer pass?
		 */
		if (opt_state->done) {
			/*
			 * No, so we've reached a fixed point.
			 * We're done.
			 */
			break;
		}

		/*
		 * XXX - was anything done other than branch movement
		 * in this pass?
		 */
		if (opt_state->non_branch_movement_performed) {
			/*
			 * Yes.  Clear any loop-detection counter;
			 * we're making some form of progress (assuming
			 * we can't get into a cycle doing *other*
			 * optimizations...).
			 */
			loop_count = 0;
		} else {
			/*
			 * No - increment the counter, and quit if
			 * it's up to 100.
			 */
			loop_count++;
			if (loop_count >= 100) {
				/*
				 * We've done nothing but branch movement
				 * for 100 passes; we're probably
				 * in a cycle and will never reach a
				 * fixed point.
				 *
				 * XXX - yes, we really need a non-
				 * heuristic way of detecting a cycle.
				 */
				opt_state->done = 1;
				break;
			}
		}
	}
}
/*
 * Optimize the filter code in its dag representation.
 * Return 0 on success, -1 on error.
 */
int
bpf_optimize(struct icode *ic, char *errbuf)
{
	opt_state_t opt_state;

	memset(&opt_state, 0, sizeof(opt_state));
	opt_state.errbuf = errbuf;
	opt_state.non_branch_movement_performed = 0;
	if (setjmp(opt_state.top_ctx)) {
		opt_cleanup(&opt_state);
		return -1;
	}
	opt_init(&opt_state, ic);
	opt_loop(&opt_state, ic, 0);
	opt_loop(&opt_state, ic, 1);
	intern_blocks(&opt_state, ic);
#ifdef BDEBUG
	if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
		printf("after intern_blocks()\n");
		opt_dump(&opt_state, ic);
	}
#endif
	opt_root(&ic->root);
#ifdef BDEBUG
	if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
		printf("after opt_root()\n");
		opt_dump(&opt_state, ic);
	}
#endif
	opt_cleanup(&opt_state);
	return 0;
}
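/*
 * Usage sketch (editorial note, not part of the original source): a caller
 * holding an icode DAG "ic" and a PCAP_ERRBUF_SIZE buffer "errbuf" would do
 *
 *	if (bpf_optimize(&ic, errbuf) == -1)
 *		return -1;
 *
 * On failure, errbuf already holds the message formatted by opt_error().
 */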
static void
make_marks(struct icode *ic, struct block *p)
{
	if (!isMarked(ic, p)) {
		Mark(ic, p);
		if (BPF_CLASS(p->s.code) != BPF_RET) {
			make_marks(ic, JT(p));
			make_marks(ic, JF(p));
		}
	}
}

/*
 * Mark code array such that isMarked(ic->cur_mark, i) is true
 * only for nodes that are alive.
 */
static void
mark_code(struct icode *ic)
{
	ic->cur_mark += 1;
	make_marks(ic, ic->root);
}
/*
 * True iff the two stmt lists load the same value from the packet into
 * the accumulator.
 */
static int
eq_slist(struct slist *x, struct slist *y)
{
	for (;;) {
		while (x && x->s.code == NOP)
			x = x->next;
		while (y && y->s.code == NOP)
			y = y->next;
		if (x == 0)
			return y == 0;
		if (y == 0)
			return x == 0;
		if (x->s.code != y->s.code || x->s.k != y->s.k)
			return 0;
		x = x->next;
		y = y->next;
	}
}
static inline int
eq_blk(struct block *b0, struct block *b1)
{
	if (b0->s.code == b1->s.code &&
	    b0->s.k == b1->s.k &&
	    b0->et.succ == b1->et.succ &&
	    b0->ef.succ == b1->ef.succ)
		return eq_slist(b0->stmts, b1->stmts);
	return 0;
}
static void
intern_blocks(opt_state_t *opt_state, struct icode *ic)
{
	struct block *p;
	u_int i, j;
	int done1;	/* don't shadow global */
 top:
	done1 = 1;
	for (i = 0; i < opt_state->n_blocks; ++i)
		opt_state->blocks[i]->link = 0;

	mark_code(ic);

	for (i = opt_state->n_blocks - 1; i != 0; ) {
		--i;
		if (!isMarked(ic, opt_state->blocks[i]))
			continue;
		for (j = i + 1; j < opt_state->n_blocks; ++j) {
			if (!isMarked(ic, opt_state->blocks[j]))
				continue;
			if (eq_blk(opt_state->blocks[i], opt_state->blocks[j])) {
				opt_state->blocks[i]->link = opt_state->blocks[j]->link ?
					opt_state->blocks[j]->link : opt_state->blocks[j];
				break;
			}
		}
	}
	for (i = 0; i < opt_state->n_blocks; ++i) {
		p = opt_state->blocks[i];
		if (JT(p) == 0)
			continue;
		if (JT(p)->link) {
			done1 = 0;
			JT(p) = JT(p)->link;
		}
		if (JF(p)->link) {
			done1 = 0;
			JF(p) = JF(p)->link;
		}
	}
	if (!done1)
		goto top;
}
static void
opt_cleanup(opt_state_t *opt_state)
{
	free((void *)opt_state->vnode_base);
	free((void *)opt_state->vmap);
	free((void *)opt_state->edges);
	free((void *)opt_state->space);
	free((void *)opt_state->levels);
	free((void *)opt_state->blocks);
}
/*
 * For optimizer errors.
 */
static void PCAP_NORETURN
opt_error(opt_state_t *opt_state, const char *fmt, ...)
{
	va_list ap;

	if (opt_state->errbuf != NULL) {
		va_start(ap, fmt);
		(void)vsnprintf(opt_state->errbuf,
		    PCAP_ERRBUF_SIZE, fmt, ap);
		va_end(ap);
	}
	longjmp(opt_state->top_ctx, 1);
	/* NOTREACHED */
}
/*
 * Return the number of stmts in 's'.
 */
static u_int
slength(struct slist *s)
{
	u_int n = 0;

	for (; s; s = s->next)
		if (s->s.code != NOP)
			++n;
	return n;
}
/*
 * Return the number of nodes reachable by 'p'.
 * All nodes should be initially unmarked.
 */
static int
count_blocks(struct icode *ic, struct block *p)
{
	if (p == 0 || isMarked(ic, p))
		return 0;
	Mark(ic, p);
	return count_blocks(ic, JT(p)) + count_blocks(ic, JF(p)) + 1;
}
/*
 * Do a depth first search on the flow graph, numbering the
 * basic blocks, and entering them into the 'blocks' array.
 */
static void
number_blks_r(opt_state_t *opt_state, struct icode *ic, struct block *p)
{
	u_int n;

	if (p == 0 || isMarked(ic, p))
		return;

	Mark(ic, p);
	n = opt_state->n_blocks++;
	if (opt_state->n_blocks == 0) {
		/*
		 * Overflow.
		 */
		opt_error(opt_state, "filter is too complex to optimize");
	}
	p->id = n;
	opt_state->blocks[n] = p;

	number_blks_r(opt_state, ic, JT(p));
	number_blks_r(opt_state, ic, JF(p));
}
/*
 * Return the number of stmts in the flowgraph reachable by 'p'.
 * The nodes should be unmarked before calling.
 *
 * Note that "stmts" means "instructions", and that this includes
 *
 *	side-effect statements in 'p' (slength(p->stmts));
 *
 *	statements in the true branch from 'p' (count_stmts(JT(p)));
 *
 *	statements in the false branch from 'p' (count_stmts(JF(p)));
 *
 *	the conditional jump itself (1);
 *
 *	an extra long jump if the true branch requires it (p->longjt);
 *
 *	an extra long jump if the false branch requires it (p->longjf).
 */
static u_int
count_stmts(struct icode *ic, struct block *p)
{
	u_int n;

	if (p == 0 || isMarked(ic, p))
		return 0;
	Mark(ic, p);
	n = count_stmts(ic, JT(p)) + count_stmts(ic, JF(p));
	return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
}
/*
 * Allocate memory.  All allocation is done before optimization
 * is begun.  A linear bound on the size of all data structures is computed
 * from the total number of blocks and/or statements.
 */
static void
opt_init(opt_state_t *opt_state, struct icode *ic)
{
	bpf_u_int32 *p;
	int i, n, max_stmts;
	u_int product;
	size_t block_memsize, edge_memsize;

	/*
	 * First, count the blocks, so we can malloc an array to map
	 * block number to block.  Then, put the blocks into the array.
	 */
	unMarkAll(ic);
	n = count_blocks(ic, ic->root);
	opt_state->blocks = (struct block **)calloc(n, sizeof(*opt_state->blocks));
	if (opt_state->blocks == NULL)
		opt_error(opt_state, "malloc");
	unMarkAll(ic);
	opt_state->n_blocks = 0;
	number_blks_r(opt_state, ic, ic->root);

	/*
	 * This "should not happen".
	 */
	if (opt_state->n_blocks == 0)
		opt_error(opt_state, "filter has no instructions; please report this as a libpcap issue");

	opt_state->n_edges = 2 * opt_state->n_blocks;
	if ((opt_state->n_edges / 2) != opt_state->n_blocks) {
		/*
		 * Overflow.
		 */
		opt_error(opt_state, "filter is too complex to optimize");
	}
	opt_state->edges = (struct edge **)calloc(opt_state->n_edges, sizeof(*opt_state->edges));
	if (opt_state->edges == NULL) {
		opt_error(opt_state, "malloc");
	}

	/*
	 * The number of levels is bounded by the number of nodes.
	 */
	opt_state->levels = (struct block **)calloc(opt_state->n_blocks, sizeof(*opt_state->levels));
	if (opt_state->levels == NULL) {
		opt_error(opt_state, "malloc");
	}

	opt_state->edgewords = opt_state->n_edges / BITS_PER_WORD + 1;
	opt_state->nodewords = opt_state->n_blocks / BITS_PER_WORD + 1;
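	/*
	 * Editorial note, not part of the original source: with 40 blocks
	 * and 32-bit words, nodewords = 40 / 32 + 1 = 2, i.e. 64 bits per
	 * node set; the "+ 1" also keeps the bit vectors non-empty when
	 * there are fewer than 32 blocks or edges.
	 */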
2572 * Make sure opt_state->n_blocks * opt_state->nodewords fits
2573 * in a u_int; we use it as a u_int number-of-iterations
2576 product
= opt_state
->n_blocks
* opt_state
->nodewords
;
2577 if ((product
/ opt_state
->n_blocks
) != opt_state
->nodewords
) {
2579 * XXX - just punt and don't try to optimize?
2580 * In practice, this is unlikely to happen with
2583 opt_error(opt_state
, "filter is too complex to optimize");
2587 * Make sure the total memory required for that doesn't
2590 block_memsize
= (size_t)2 * product
* sizeof(*opt_state
->space
);
2591 if ((block_memsize
/ product
) != 2 * sizeof(*opt_state
->space
)) {
2592 opt_error(opt_state
, "filter is too complex to optimize");
2596 * Make sure opt_state->n_edges * opt_state->edgewords fits
2597 * in a u_int; we use it as a u_int number-of-iterations
2600 product
= opt_state
->n_edges
* opt_state
->edgewords
;
2601 if ((product
/ opt_state
->n_edges
) != opt_state
->edgewords
) {
2602 opt_error(opt_state
, "filter is too complex to optimize");
2606 * Make sure the total memory required for that doesn't
2609 edge_memsize
= (size_t)product
* sizeof(*opt_state
->space
);
2610 if (edge_memsize
/ product
!= sizeof(*opt_state
->space
)) {
2611 opt_error(opt_state
, "filter is too complex to optimize");
2615 * Make sure the total memory required for both of them doesn't
2618 if (block_memsize
> SIZE_MAX
- edge_memsize
) {
2619 opt_error(opt_state
, "filter is too complex to optimize");
	opt_state->space = (bpf_u_int32 *)malloc(block_memsize + edge_memsize);
	if (opt_state->space == NULL) {
		opt_error(opt_state, "malloc");
	}
	p = opt_state->space;
	opt_state->all_dom_sets = p;
	for (i = 0; i < n; ++i) {
		opt_state->blocks[i]->dom = p;
		p += opt_state->nodewords;
	}
	opt_state->all_closure_sets = p;
	for (i = 0; i < n; ++i) {
		opt_state->blocks[i]->closure = p;
		p += opt_state->nodewords;
	}
	opt_state->all_edge_sets = p;
	for (i = 0; i < n; ++i) {
		register struct block *b = opt_state->blocks[i];

		b->et.edom = p;
		p += opt_state->edgewords;
		b->ef.edom = p;
		p += opt_state->edgewords;
		b->et.id = i;
		opt_state->edges[i] = &b->et;
		b->ef.id = opt_state->n_blocks + i;
		opt_state->edges[opt_state->n_blocks + i] = &b->ef;
		b->et.pred = b;
		b->ef.pred = b;
	}
	max_stmts = 0;
	for (i = 0; i < n; ++i)
		max_stmts += slength(opt_state->blocks[i]->stmts) + 1;
	/*
	 * We allocate at most 3 value numbers per statement,
	 * so this is an upper bound on the number of valnodes
	 * we'll need.
	 */
	opt_state->maxval = 3 * max_stmts;
	opt_state->vmap = (struct vmapinfo *)calloc(opt_state->maxval, sizeof(*opt_state->vmap));
	if (opt_state->vmap == NULL) {
		opt_error(opt_state, "malloc");
	}
	opt_state->vnode_base = (struct valnode *)calloc(opt_state->maxval, sizeof(*opt_state->vnode_base));
	if (opt_state->vnode_base == NULL) {
		opt_error(opt_state, "malloc");
	}
}
/*
 * This is only used when supporting optimizer debugging.  It is
 * global state, so do *not* do more than one compile in parallel
 * and expect it to provide meaningful information.
 */
#ifdef BDEBUG
int bids[NBIDS];
#endif

static void PCAP_NORETURN conv_error(conv_state_t *, const char *, ...)
    PCAP_PRINTFLIKE(2, 3);
/*
 * Returns true if successful.  Returns false if a branch has
 * an offset that is too large.  If so, we have marked that
 * branch so that on a subsequent iteration, it will be treated
 * properly.
 */
static int
convert_code_r(conv_state_t *conv_state, struct icode *ic, struct block *p)
{
	struct bpf_insn *dst;
	struct slist *src;
	u_int slen;
	u_int off;
	struct slist **offset = NULL;
	if (p == 0 || isMarked(ic, p))
		return (1);
	Mark(ic, p);

	if (convert_code_r(conv_state, ic, JF(p)) == 0)
		return (0);
	if (convert_code_r(conv_state, ic, JT(p)) == 0)
		return (0);

	slen = slength(p->stmts);
	dst = conv_state->ftail -= (slen + 1 + p->longjt + p->longjf);
		/* inflate length by any extra jumps */

	p->offset = (int)(dst - conv_state->fstart);

	/* generate offset[] for convenience */
	if (slen) {
		offset = (struct slist **)calloc(slen, sizeof(struct slist *));
		if (!offset)
			conv_error(conv_state, "not enough core");
	}
	src = p->stmts;
	for (off = 0; off < slen && src; off++) {
#if 0
		printf("off=%d src=%x\n", off, src);
#endif
		offset[off] = src;
		src = src->next;
	}
	off = 0;
	for (src = p->stmts; src; src = src->next) {
		if (src->s.code == NOP)
			continue;
		dst->code = (u_short)src->s.code;
		dst->k = src->s.k;

		/* fill block-local relative jump */
		if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
#if 0
			if (src->s.jt || src->s.jf) {
				free(offset);
				conv_error(conv_state, "illegal jmp destination");
				/*NOTREACHED*/
			}
#endif
			goto filled;
		}
		if (off == slen - 2)	/*???*/
			goto filled;
	    {
		u_int i;
		int jt, jf;
		const char ljerr[] = "%s for block-local relative jump: off=%d";

#if 0
		printf("code=%x off=%d %x %x\n", src->s.code,
			off, src->s.jt, src->s.jf);
#endif

		if (!src->s.jt || !src->s.jf) {
			free(offset);
			conv_error(conv_state, ljerr, "no jmp destination", off);
			/*NOTREACHED*/
		}

		jt = jf = 0;
		for (i = 0; i < slen; i++) {
			if (offset[i] == src->s.jt) {
				if (jt) {
					free(offset);
					conv_error(conv_state, ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}

				if (i - off - 1 >= 256) {
					free(offset);
					conv_error(conv_state, ljerr, "out-of-range jump", off);
					/*NOTREACHED*/
				}
				dst->jt = (u_char)(i - off - 1);
				jt++;
			}
			if (offset[i] == src->s.jf) {
				if (jf) {
					free(offset);
					conv_error(conv_state, ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}
				if (i - off - 1 >= 256) {
					free(offset);
					conv_error(conv_state, ljerr, "out-of-range jump", off);
					/*NOTREACHED*/
				}
				dst->jf = (u_char)(i - off - 1);
				jf++;
			}
		}
		if (!jt || !jf) {
			free(offset);
			conv_error(conv_state, ljerr, "no destination found", off);
			/*NOTREACHED*/
		}
	    }
filled:
		++dst;
		off++;
	}
	if (offset)
		free(offset);
#ifdef BDEBUG
	if (dst - conv_state->fstart < NBIDS)
		bids[dst - conv_state->fstart] = p->id + 1;
#endif
	dst->code = (u_short)p->s.code;
	dst->k = p->s.k;
	if (JT(p)) {
		/* number of extra jumps inserted */
		u_char extrajmps = 0;
		off = JT(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
			/* offset too large for branch, must add a jump */
			if (p->longjt == 0) {
				/* mark this instruction and retry */
				p->longjt++;
				return (0);
			}
			dst->jt = extrajmps;
			extrajmps++;
			dst[extrajmps].code = BPF_JMP|BPF_JA;
			dst[extrajmps].k = off - extrajmps;
		}
		else
			dst->jt = (u_char)off;
		off = JF(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
			/* offset too large for branch, must add a jump */
			if (p->longjf == 0) {
				/* mark this instruction and retry */
				p->longjf++;
				return (0);
			}
			/* branch if F to following jump */
			/* if two jumps are inserted, F goes to second one */
			dst->jf = extrajmps;
			extrajmps++;
			dst[extrajmps].code = BPF_JMP|BPF_JA;
			dst[extrajmps].k = off - extrajmps;
		}
		else
			dst->jf = (u_char)off;
	}
	return (1);
}
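
/*
 * Illustrative example (made-up offsets): if a conditional branch's
 * true target lies 300 instructions past the end of the block (too far
 * for the 8-bit jt field), the first pass sets p->longjt and returns 0;
 * on the retry, the branch is emitted as
 *
 *	jxx ..., jt 0, jf ...	; true case falls into the long jump
 *	ja 299			; 300 - 1, adjusted for the inserted insn
 *
 * rather than the impossible "jt 300".
 */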
/*
 * Convert flowgraph intermediate representation to the
 * BPF array representation.  Set *lenp to the number of instructions.
 *
 * This routine does *NOT* leak the memory pointed to by fp.  It *must
 * not* do free(fp) before returning fp; doing so would make no sense,
 * as the BPF array pointed to by the return value of icode_to_fcode()
 * must be valid - it's being returned for use in a bpf_program structure.
 *
 * If it appears that icode_to_fcode() is leaking, the problem is that
 * the program using pcap_compile() is failing to free the memory in
 * the BPF program when it's done - the leak is in the program, not in
 * the routine that happens to be allocating the memory.  (By analogy, if
 * a program calls fopen() without ever calling fclose() on the FILE *,
 * it will leak the FILE structure; the leak is not in fopen(), it's in
 * the program.)  Change the program to use pcap_freecode() when it's
 * done with the filter program.  See the pcap man page.
 */
struct bpf_insn *
icode_to_fcode(struct icode *ic, struct block *root, u_int *lenp,
    char *errbuf)
{
	u_int n;
	struct bpf_insn *fp;
	conv_state_t conv_state;

	conv_state.fstart = NULL;
	conv_state.errbuf = errbuf;
	if (setjmp(conv_state.top_ctx) != 0) {
		free(conv_state.fstart);
		return NULL;
	}

	/*
	 * Loop doing convert_code_r() until no branches remain
	 * with too-large offsets.
	 */
	for (;;) {
	    unMarkAll(ic);
	    n = *lenp = count_stmts(ic, root);

	    fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
	    if (fp == NULL) {
		(void)snprintf(errbuf, PCAP_ERRBUF_SIZE,
		    "malloc");
		return NULL;
	    }
	    memset((char *)fp, 0, sizeof(*fp) * n);
	    conv_state.fstart = fp;
	    conv_state.ftail = fp + n;

	    unMarkAll(ic);
	    if (convert_code_r(&conv_state, ic, root))
		break;
	    free(fp);
	}

	return fp;
}
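
/*
 * Sketch of the correct caller-side memory handling the comment above
 * describes, using only public pcap APIs (illustrative, not libpcap
 * code):
 */
#if 0
#include <pcap.h>

static int
apply_filter(pcap_t *pd, const char *expr)
{
	struct bpf_program prog;

	/* pcap_compile() ultimately calls icode_to_fcode(); the
	 * resulting instruction array hangs off prog.bf_insns. */
	if (pcap_compile(pd, &prog, expr, 1, PCAP_NETMASK_UNKNOWN) == -1)
		return (-1);
	if (pcap_setfilter(pd, &prog) == -1) {
		pcap_freecode(&prog);
		return (-1);
	}
	/* the "missing free" lives here, in the caller, not in
	 * icode_to_fcode() */
	pcap_freecode(&prog);
	return (0);
}
#endif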
/*
 * For icode_to_fcode() errors.
 */
static void PCAP_NORETURN
conv_error(conv_state_t *conv_state, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void)vsnprintf(conv_state->errbuf,
	    PCAP_ERRBUF_SIZE, fmt, ap);
	va_end(ap);
	longjmp(conv_state->top_ctx, 1);
	/* NOTREACHED */
}
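
/*
 * conv_error() pairs with the setjmp() in icode_to_fcode() to unwind
 * out of the recursive converter without threading error returns
 * through every level.  A minimal stand-alone sketch of that pattern
 * (illustrative names, not libpcap code):
 */
#if 0
#include <setjmp.h>
#include <stdio.h>

static jmp_buf top_ctx;

static void
fail(const char *msg)
{
	fprintf(stderr, "%s\n", msg);
	longjmp(top_ctx, 1);	/* jump back to the setjmp() below */
}

static int
convert(void)
{
	if (setjmp(top_ctx) != 0)
		return -1;	/* arrived here via longjmp() */
	fail("deep error");	/* never returns */
	return 0;
}
#endif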
/*
 * Make a copy of a BPF program and put it in the "fcode" member of
 * a "pcap_t".
 *
 * If we fail to allocate memory for the copy, fill in the "errbuf"
 * member of the "pcap_t" with an error message, and return -1;
 * otherwise, return 0.
 */
int
install_bpf_program(pcap_t *p, struct bpf_program *fp)
{
	size_t prog_size;
	/*
	 * Validate the program.
	 */
	if (!pcap_validate_filter(fp->bf_insns, fp->bf_len)) {
		snprintf(p->errbuf, sizeof(p->errbuf),
			"BPF program is not valid");
		return (-1);
	}

	/*
	 * Free up any already installed program.
	 */
	pcap_freecode(&p->fcode);

	prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
	p->fcode.bf_len = fp->bf_len;
	p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
	if (p->fcode.bf_insns == NULL) {
		pcap_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
		    errno, "malloc");
		return (-1);
	}
	memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
	return (0);
}
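
/*
 * Sketch of how a capture module's setfilter operation might use
 * install_bpf_program() when the kernel can't filter for it
 * (illustrative; op names and structure layouts vary by module):
 */
#if 0
static int
my_setfilter(pcap_t *p, struct bpf_program *fp)
{
	/* Validate and copy the program into p->fcode;
	 * install_bpf_program() fills in p->errbuf on failure. */
	if (install_bpf_program(p, fp) < 0)
		return (-1);
	/* ... from here on, packets are filtered in userland
	 * against p->fcode ... */
	return (0);
}
#endif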
#ifdef BDEBUG
static void
dot_dump_node(struct icode *ic, struct block *block, struct bpf_program *prog,
    FILE *out)
{
	int icount, noffset;
	int i;
	if (block == NULL || isMarked(ic, block))
		return;
	Mark(ic, block);

	icount = slength(block->stmts) + 1 + block->longjt + block->longjf;
	noffset = min(block->offset + icount, (int)prog->bf_len);

	fprintf(out, "\tblock%u [shape=ellipse, id=\"block-%u\" label=\"BLOCK%u\\n", block->id, block->id, block->id);
	for (i = block->offset; i < noffset; i++) {
		fprintf(out, "\\n%s", bpf_image(prog->bf_insns + i, i));
	}
	fprintf(out, "\" tooltip=\"");
	for (i = 0; i < BPF_MEMWORDS; i++)
		if (block->val[i] != VAL_UNKNOWN)
			fprintf(out, "val[%d]=%d ", i, block->val[i]);
	fprintf(out, "val[A]=%d ", block->val[A_ATOM]);
	fprintf(out, "val[X]=%d", block->val[X_ATOM]);
	fprintf(out, "\"");
	if (JT(block) == NULL)
		fprintf(out, ", peripheries=2");
	fprintf(out, "];\n");

	dot_dump_node(ic, JT(block), prog, out);
	dot_dump_node(ic, JF(block), prog, out);
}
static void
dot_dump_edge(struct icode *ic, struct block *block, FILE *out)
{
	if (block == NULL || isMarked(ic, block))
		return;
	Mark(ic, block);

	if (JT(block)) {
		fprintf(out, "\t\"block%u\":se -> \"block%u\":n [label=\"T\"];\n",
			block->id, JT(block)->id);
		fprintf(out, "\t\"block%u\":sw -> \"block%u\":n [label=\"F\"];\n",
			block->id, JF(block)->id);
	}
	dot_dump_edge(ic, JT(block), out);
	dot_dump_edge(ic, JF(block), out);
}
/* Output the block CFG using graphviz/DOT language
 * In the CFG, each block's code, the value index for each register at
 * EXIT, and the jump relationships are shown.
 *
 * example DOT for BPF `ip src host 1.1.1.1' is:
    digraph BPF {
	block0 [shape=ellipse, id="block-0" label="BLOCK0\n\n(000) ldh [12]\n(001) jeq #0x800 jt 2 jf 5" tooltip="val[A]=0 val[X]=0"];
	block1 [shape=ellipse, id="block-1" label="BLOCK1\n\n(002) ld [26]\n(003) jeq #0x1010101 jt 4 jf 5" tooltip="val[A]=0 val[X]=0"];
	block2 [shape=ellipse, id="block-2" label="BLOCK2\n\n(004) ret #68" tooltip="val[A]=0 val[X]=0", peripheries=2];
	block3 [shape=ellipse, id="block-3" label="BLOCK3\n\n(005) ret #0" tooltip="val[A]=0 val[X]=0", peripheries=2];
	"block0":se -> "block1":n [label="T"];
	"block0":sw -> "block3":n [label="F"];
	"block1":se -> "block2":n [label="T"];
	"block1":sw -> "block3":n [label="F"];
    }
 *
 * After installing graphviz (https://www.graphviz.org/), save the output
 * as bpf.dot and run `dot -Tpng -O bpf.dot' to draw the graph.
 */
static int
dot_dump(struct icode *ic, char *errbuf)
{
	struct bpf_program f;
	FILE *out = stdout;

	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
	if (f.bf_insns == NULL)
		return -1;

	fprintf(out, "digraph BPF {\n");
	unMarkAll(ic);
	dot_dump_node(ic, ic->root, &f, out);
	unMarkAll(ic);
	dot_dump_edge(ic, ic->root, out);
	fprintf(out, "}\n");

	free((char *)f.bf_insns);
	return 0;
}
static int
plain_dump(struct icode *ic, char *errbuf)
{
	struct bpf_program f;

	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
	if (f.bf_insns == NULL)
		return -1;
	bpf_dump(&f, 1);
	putchar('\n');
	free((char *)f.bf_insns);
	return 0;
}
static void
opt_dump(opt_state_t *opt_state, struct icode *ic)
{
	int status;
	char errbuf[PCAP_ERRBUF_SIZE];

	/*
	 * If the CFG, in DOT format, is requested, output it rather than
	 * the code that would be generated from that graph.
	 */
	if (pcap_print_dot_graph)
		status = dot_dump(ic, errbuf);
	else
		status = plain_dump(ic, errbuf);
	if (status == -1)
		opt_error(opt_state, "opt_dump: icode_to_fcode failed: %s", errbuf);
}
#endif