/*
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Optimization module for BPF code intermediate representation.
 */
#include <pcap-types.h>

#include <limits.h> /* for SIZE_MAX */

#include "diag-control.h"

#ifdef HAVE_OS_PROTO_H
#include "os-proto.h"
#endif
/*
 * The internal "debug printout" flag for the filter expression optimizer.
 * The code to print that stuff is present only if BDEBUG is defined, so
 * the flag, and the routine to set it, are defined only if BDEBUG is
 * defined.
 */
static int pcap_optimizer_debug;
/*
 * Routine to set that flag.
 *
 * This is intended for libpcap developers, not for general use.
 * If you want to set these in a program, you'll have to declare this
 * routine yourself, with the appropriate DLL import attribute on Windows;
 * it's not declared in any header file, and won't be declared in any
 * header file provided by libpcap.
 */
PCAP_API void pcap_set_optimizer_debug(int value);

PCAP_API_DEF void
pcap_set_optimizer_debug(int value)
{
	pcap_optimizer_debug = value;
}
/*
 * The internal "print dot graph" flag for the filter expression optimizer.
 * The code to print that stuff is present only if BDEBUG is defined, so
 * the flag, and the routine to set it, are defined only if BDEBUG is
 * defined.
 */
static int pcap_print_dot_graph;
/*
 * Routine to set that flag.
 *
 * This is intended for libpcap developers, not for general use.
 * If you want to set these in a program, you'll have to declare this
 * routine yourself, with the appropriate DLL import attribute on Windows;
 * it's not declared in any header file, and won't be declared in any
 * header file provided by libpcap.
 */
PCAP_API void pcap_set_print_dot_graph(int value);

PCAP_API_DEF void
pcap_set_print_dot_graph(int value)
{
	pcap_print_dot_graph = value;
}
/*
 * Takes a 32-bit integer as an argument.
 *
 * If handed a non-zero value, returns the index of the lowest set bit,
 * counting upwards from zero.
 *
 * If handed zero, the results are platform- and compiler-dependent.
 * Keep it out of the light, don't give it any water, don't feed it
 * after midnight, and don't pass zero to it.
 *
 * This is the same as the count of trailing zeroes in the word.
 */
#if PCAP_IS_AT_LEAST_GNUC_VERSION(3,4)
  /*
   * GCC 3.4 and later; we have __builtin_ctz().
   */
  #define lowest_set_bit(mask) ((u_int)__builtin_ctz(mask))
#elif defined(_MSC_VER)
  /*
   * Visual Studio; we support only 2005 and later, so use
   * _BitScanForward().
   */
#include <intrin.h>

#ifndef __clang__
#pragma intrinsic(_BitScanForward)
#endif

static __forceinline u_int
lowest_set_bit(int mask)
{
	unsigned long bit;

	/*
	 * Don't sign-extend mask if long is longer than int.
	 * (It's currently not, in MSVC, even on 64-bit platforms, but....)
	 */
	if (_BitScanForward(&bit, (unsigned int)mask) == 0)
		abort();	/* mask is zero */
	return (u_int)bit;
}
#else
/*
 * None of the above.
 * Use a perfect-hash-function-based function.
 */
static u_int
lowest_set_bit(int mask)
{
	unsigned int v = (unsigned int)mask;

	static const u_int MultiplyDeBruijnBitPosition[32] = {
		0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
		31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
	};

	/*
	 * We strip off all but the lowermost set bit (v & -v),
	 * and perform a minimal perfect hash on it to look up the
	 * number of low-order zero bits in a table.
	 *
	 * See:
	 *	http://7ooo.mooo.com/text/ComputingTrailingZerosHOWTO.pdf
	 *	http://supertech.csail.mit.edu/papers/debruijn.pdf
	 */
	return (MultiplyDeBruijnBitPosition[((v & -v) * 0x077CB531U) >> 27]);
}
#endif
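/*
 * A quick sanity check for the implementations above (editorial sketch,
 * not part of libpcap): for every single-bit word, the result must be
 * that bit's position, and for any other non-zero word it must be the
 * count of trailing zeroes, e.g.
 *
 *	u_int i;
 *
 *	for (i = 0; i < 32; i++)
 *		assert(lowest_set_bit(1U << i) == i);
 *	assert(lowest_set_bit(0x28) == 3);	(0x28 is 101000 in binary)
 *
 * Remember that passing zero is undefined for all of the variants.
 */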
/*
 * Represents a deleted instruction.
 */
#define NOP -1

/*
 * Register numbers for use-def values.
 * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
 * location.  A_ATOM is the accumulator and X_ATOM is the index
 * register.
 */
#define A_ATOM BPF_MEMWORDS
#define X_ATOM (BPF_MEMWORDS+1)

/*
 * This define is used to represent *both* the accumulator and
 * x register in use-def computations.
 * Currently, the use-def code assumes only one definition per instruction.
 */
#define AX_ATOM N_ATOMS
/*
 * These data structures are used in a Cocke and Schwartz style
 * value numbering scheme.  Since the flowgraph is acyclic,
 * exit values can be propagated from a node's predecessors
 * provided it is uniquely defined.
 */
struct valnode {
	int code;
	bpf_u_int32 v0, v1;
	int val;		/* the value number */
	struct valnode *next;
};

/* Integer constants mapped with the load immediate opcode. */
#define K(i) F(opt_state, BPF_LD|BPF_IMM|BPF_W, i, 0U)

#define VAL_UNKNOWN 0

struct vmapinfo {
	int is_const;
	bpf_u_int32 const_val;
};
typedef struct {
	/*
	 * Place to longjmp to on an error.
	 */
	jmp_buf top_ctx;

	/*
	 * The buffer into which to put error message.
	 */
	char *errbuf;

	/*
	 * A flag to indicate that further optimization is needed.
	 * Iterative passes are continued until a given pass yields no
	 * code simplification or branch movement.
	 */
	int done;

	/*
	 * XXX - detect loops that do nothing but repeated AND/OR pullups
	 * and edge moves.
	 * If 100 passes in a row do nothing but that, treat that as a
	 * sign that we're in a loop that just shuffles in a cycle in
	 * which each pass just shuffles the code and we eventually
	 * get back to the original configuration.
	 *
	 * XXX - we need a non-heuristic way of detecting, or preventing,
	 * such a cycle.
	 */
	int non_branch_movement_performed;

	u_int n_blocks;		/* number of blocks in the CFG; guaranteed to be > 0, as it's a RET instruction at a minimum */
	struct block **blocks;
	u_int n_edges;		/* twice n_blocks, so guaranteed to be > 0 */
	struct edge **edges;

	/*
	 * A bit vector set representation of the dominators.
	 * We round up the set size to the next power of two.
	 */
	u_int nodewords;	/* number of 32-bit words for a bit vector of "number of nodes" bits; guaranteed to be > 0 */
	u_int edgewords;	/* number of 32-bit words for a bit vector of "number of edges" bits; guaranteed to be > 0 */
	struct block **levels;
	bpf_u_int32 *space;
#define BITS_PER_WORD (8*sizeof(bpf_u_int32))

/*
 * True if 'a' is in uset {p}.
 */
#define SET_MEMBER(p, a) \
((p)[(unsigned)(a) / BITS_PER_WORD] & ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD)))

/*
 * Add 'a' to uset p.
 */
#define SET_INSERT(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] |= ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * Delete 'a' from uset p.
 */
#define SET_DELETE(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] &= ~((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * a := a intersect b
 * n must be guaranteed to be > 0
 */
#define SET_INTERSECT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register u_int _n = n;\
	do *_x++ &= *_y++; while (--_n != 0);\
}

/*
 * a := a - b
 * n must be guaranteed to be > 0
 */
#define SET_SUBTRACT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register u_int _n = n;\
	do *_x++ &=~ *_y++; while (--_n != 0);\
}

/*
 * a := a union b
 * n must be guaranteed to be > 0
 */
#define SET_UNION(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register u_int _n = n;\
	do *_x++ |= *_y++; while (--_n != 0);\
}
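/*
 * Usage sketch for the uset macros (editorial illustration, not part of
 * libpcap): with n == 2, a two-word set holds members 0..63.
 *
 *	bpf_u_int32 s1[2] = { 0, 0 }, s2[2] = { 0, 0 };
 *
 *	SET_INSERT(s1, 3);
 *	SET_INSERT(s1, 40);
 *	SET_INSERT(s2, 40);
 *	SET_INTERSECT(s1, s2, 2);	(s1 now holds only member 40)
 *	if (SET_MEMBER(s1, 40))		(true)
 *		...
 *
 * Note that SET_INTERSECT, SET_SUBTRACT and SET_UNION all require n > 0,
 * as they use do-while loops.
 */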
	uset all_dom_sets;
	uset all_closure_sets;
	uset all_edge_sets;

#define MODULUS 213
	struct valnode *hashtbl[MODULUS];
	bpf_u_int32 curval;
	bpf_u_int32 maxval;

	struct vmapinfo *vmap;
	struct valnode *vnode_base;
	struct valnode *next_vnode;
} opt_state_t;
typedef struct {
	/*
	 * Place to longjmp to on an error.
	 */
	jmp_buf top_ctx;

	/*
	 * The buffer into which to put error message.
	 */
	char *errbuf;

	/*
	 * Some pointers used to convert the basic block form of the code,
	 * into the array form that BPF requires.  'fstart' will point to
	 * the malloc'd array while 'ftail' is used during the recursive
	 * traversal.
	 */
	struct bpf_insn *fstart;
	struct bpf_insn *ftail;
} conv_state_t;
static void opt_init(opt_state_t *, struct icode *);
static void opt_cleanup(opt_state_t *);
static void PCAP_NORETURN opt_error(opt_state_t *, const char *, ...)
    PCAP_PRINTFLIKE(2, 3);

static void intern_blocks(opt_state_t *, struct icode *);

static void find_inedges(opt_state_t *, struct block *);

static void opt_dump(opt_state_t *, struct icode *);

#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif
static void
find_levels_r(opt_state_t *opt_state, struct icode *ic, struct block *b)
{
	int level;

	if (isMarked(ic, b))
		return;

	Mark(ic, b);
	b->link = 0;

	if (JT(b)) {
		find_levels_r(opt_state, ic, JT(b));
		find_levels_r(opt_state, ic, JF(b));
		level = MAX(JT(b)->level, JF(b)->level) + 1;
	} else
		level = 0;
	b->level = level;
	b->link = opt_state->levels[level];
	opt_state->levels[level] = b;
}
/*
 * Level graph.  The levels go from 0 at the leaves to
 * N_LEVELS at the root.  The opt_state->levels[] array points to the
 * first node of the level list, whose elements are linked
 * with the 'link' field of the struct block.
 */
static void
find_levels(opt_state_t *opt_state, struct icode *ic)
{
	memset((char *)opt_state->levels, 0, opt_state->n_blocks * sizeof(*opt_state->levels));
	unMarkAll(ic);
	find_levels_r(opt_state, ic, ic->root);
}
/*
 * Find dominator relationships.
 * Assumes graph has been leveled.
 */
static void
find_dom(opt_state_t *opt_state, struct block *root)
{
	u_int i;
	int level;
	struct block *b;
	bpf_u_int32 *x;

	/*
	 * Initialize sets to contain all nodes.
	 */
	x = opt_state->all_dom_sets;
	/*
	 * In opt_init(), we've made sure the product doesn't overflow.
	 */
	i = opt_state->n_blocks * opt_state->nodewords;
	while (i != 0) {
		--i;
		*x++ = 0xFFFFFFFFU;
	}
	/* Root starts off empty. */
	for (i = opt_state->nodewords; i != 0;) {
		--i;
		root->dom[i] = 0;
	}

	/* root->level is the highest level number found. */
	for (level = root->level; level >= 0; --level) {
		for (b = opt_state->levels[level]; b; b = b->link) {
			SET_INSERT(b->dom, b->id);
			if (JT(b) == 0)
				continue;
			SET_INTERSECT(JT(b)->dom, b->dom, opt_state->nodewords);
			SET_INTERSECT(JF(b)->dom, b->dom, opt_state->nodewords);
		}
	}
}
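/*
 * Editorial illustration (not from the original source): on the diamond
 * CFG
 *
 *	    r
 *	   / \
 *	  a   b
 *	   \ /
 *	    m
 *
 * the level-ordered pass intersects r's set {r} into a and b, giving
 * them {r,a} and {r,b}; intersecting both of those into m leaves m's
 * set as {r,m}, so neither a nor b dominates the merge point, as
 * expected.
 */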
static void
propedom(opt_state_t *opt_state, struct edge *ep)
{
	SET_INSERT(ep->edom, ep->id);
	if (ep->succ) {
		SET_INTERSECT(ep->succ->et.edom, ep->edom, opt_state->edgewords);
		SET_INTERSECT(ep->succ->ef.edom, ep->edom, opt_state->edgewords);
	}
}
/*
 * Compute edge dominators.
 * Assumes graph has been leveled and predecessors established.
 */
static void
find_edom(opt_state_t *opt_state, struct block *root)
{
	u_int i;
	uset x;
	int level;
	struct block *b;

	x = opt_state->all_edge_sets;
	/*
	 * In opt_init(), we've made sure the product doesn't overflow.
	 */
	for (i = opt_state->n_edges * opt_state->edgewords; i != 0; ) {
		--i;
		x[i] = 0xFFFFFFFFU;
	}

	/* root->level is the highest level number found. */
	memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
	memset(root->ef.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
	for (level = root->level; level >= 0; --level) {
		for (b = opt_state->levels[level]; b != 0; b = b->link) {
			propedom(opt_state, &b->et);
			propedom(opt_state, &b->ef);
		}
	}
}
/*
 * Find the backwards transitive closure of the flow graph.  These sets
 * are backwards in the sense that we find the set of nodes that reach
 * a given node, not the set of nodes that can be reached by a node.
 *
 * Assumes graph has been leveled.
 */
static void
find_closure(opt_state_t *opt_state, struct block *root)
{
	int level;
	struct block *b;

	/*
	 * Initialize sets to contain no nodes.
	 */
	memset((char *)opt_state->all_closure_sets, 0,
	    opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->all_closure_sets));

	/* root->level is the highest level number found. */
	for (level = root->level; level >= 0; --level) {
		for (b = opt_state->levels[level]; b; b = b->link) {
			SET_INSERT(b->closure, b->id);
			if (JT(b) == 0)
				continue;
			SET_UNION(JT(b)->closure, b->closure, opt_state->nodewords);
			SET_UNION(JF(b)->closure, b->closure, opt_state->nodewords);
		}
	}
}
/*
 * Return the register number that is used by s.
 *
 * Returns A_ATOM if A is used, X_ATOM if X is used, AX_ATOM if both A and X
 * are used, the scratch memory location's number if a scratch memory
 * location is used (e.g., 0 for M[0]), or -1 if none of those are used.
 *
 * The implementation should probably change to an array access.
 */
static int
atomuse(struct stmt *s)
{
	register int c = s->code;

	if (c == NOP)
		return -1;

	switch (BPF_CLASS(c)) {

	case BPF_RET:
		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;

	case BPF_LD:
	case BPF_LDX:
		/*
		 * As there are fewer than 2^31 memory locations,
		 * s->k should be convertible to int without problems.
		 */
		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
			(BPF_MODE(c) == BPF_MEM) ? (int)s->k : -1;

	case BPF_ST:
		return A_ATOM;

	case BPF_STX:
		return X_ATOM;

	case BPF_JMP:
	case BPF_ALU:
		if (BPF_SRC(c) == BPF_X)
			return AX_ATOM;
		return A_ATOM;

	case BPF_MISC:
		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
	}
	abort();
	/* NOTREACHED */
}
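/*
 * Editorial examples (not from the original source) of what atomuse()
 * reports:
 *
 *	ld [x + 10]	(BPF_LD|BPF_IND)	-> X_ATOM
 *	ld M[3]		(BPF_LD|BPF_MEM)	-> 3 (scratch location)
 *	jeq #5		(BPF_JMP, BPF_K src)	-> A_ATOM
 *	add x		(BPF_ALU, BPF_X src)	-> AX_ATOM (both A and X)
 *	txa		(BPF_MISC|BPF_TXA)	-> X_ATOM
 */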
/*
 * Return the register number that is defined by 's'.  We assume that
 * a single stmt cannot define more than one register.  If no register
 * is defined, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomdef(struct stmt *s)
{
	if (s->code == NOP)
		return -1;

	switch (BPF_CLASS(s->code)) {

	case BPF_LD:
	case BPF_ALU:
		return A_ATOM;

	case BPF_LDX:
		return X_ATOM;

	case BPF_ST:
	case BPF_STX:
		return s->k;

	case BPF_MISC:
		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
	}
	return -1;
}
/*
 * Compute the sets of registers used, defined, and killed by 'b'.
 *
 * "Used" means that a statement in 'b' uses the register before any
 * statement in 'b' defines it, i.e. it uses the value left in
 * that register by a predecessor block of this block.
 * "Defined" means that a statement in 'b' defines it.
 * "Killed" means that a statement in 'b' defines it before any
 * statement in 'b' uses it, i.e. it kills the value left in that
 * register by a predecessor block of this block.
 */
static void
compute_local_ud(struct block *b)
{
	struct slist *s;
	atomset def = 0, use = 0, killed = 0;
	int atom;

	for (s = b->stmts; s; s = s->next) {
		if (s->s.code == NOP)
			continue;
		atom = atomuse(&s->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
			else
				abort();
		}
		atom = atomdef(&s->s);
		if (atom >= 0) {
			if (!ATOMELEM(use, atom))
				killed |= ATOMMASK(atom);
			def |= ATOMMASK(atom);
		}
	}
	if (BPF_CLASS(b->s.code) == BPF_JMP) {
		/*
		 * XXX - what about RET?
		 */
		atom = atomuse(&b->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
			else
				abort();
		}
	}

	b->def = def;
	b->kill = killed;
	b->in_use = use;
}
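/*
 * Worked example (editorial, not from the original source): for a block
 * containing
 *
 *	ld M[0]		- loads scratch 0 into A
 *	st M[1]		- stores A into scratch 1
 *	tax		- copies A into X
 *
 * we get use = {M[0]} (read before any local definition), def = {A,
 * M[1], X}, and killed = {A, M[1], X} (each defined before any local
 * use), so only M[0]'s inbound value matters to this block.
 */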
/*
 * Assume graph is already leveled.
 */
static void
find_ud(opt_state_t *opt_state, struct block *root)
{
	int i, maxlevel;
	struct block *p;

	/*
	 * root->level is the highest level number found;
	 * count down from there.
	 */
	maxlevel = root->level;
	for (i = maxlevel; i >= 0; --i)
		for (p = opt_state->levels[i]; p; p = p->link) {
			compute_local_ud(p);
			p->out_use = 0;
		}

	for (i = 1; i <= maxlevel; ++i) {
		for (p = opt_state->levels[i]; p; p = p->link) {
			p->out_use |= JT(p)->in_use | JF(p)->in_use;
			p->in_use |= p->out_use &~ p->kill;
		}
	}
}
static void
init_val(opt_state_t *opt_state)
{
	opt_state->curval = 0;
	opt_state->next_vnode = opt_state->vnode_base;
	memset((char *)opt_state->vmap, 0, opt_state->maxval * sizeof(*opt_state->vmap));
	memset((char *)opt_state->hashtbl, 0, sizeof opt_state->hashtbl);
}
/*
 * Because we really don't have an IR, this stuff is a little messy.
 *
 * This routine looks in the table of existing value numbers for a value
 * generated by an operation with the specified opcode and the specified
 * values.  If it finds it, it returns its value number, otherwise it
 * makes a new entry in the table and returns the value number of that
 * entry.
 */
static bpf_u_int32
F(opt_state_t *opt_state, int code, bpf_u_int32 v0, bpf_u_int32 v1)
{
	u_int hash;
	bpf_u_int32 val;
	struct valnode *p;

	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
	hash %= MODULUS;

	for (p = opt_state->hashtbl[hash]; p; p = p->next)
		if (p->code == code && p->v0 == v0 && p->v1 == v1)
			return p->val;

	/*
	 * Not found.  Allocate a new value, and assign it a new
	 * value number.
	 *
	 * opt_state->curval starts out as 0, which means VAL_UNKNOWN; we
	 * increment it before using it as the new value number, which
	 * means we never assign VAL_UNKNOWN.
	 *
	 * XXX - unless we overflow, but we probably won't have 2^32-1
	 * values; we treat 32 bits as effectively infinite.
	 */
	val = ++opt_state->curval;
	if (BPF_MODE(code) == BPF_IMM &&
	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
		opt_state->vmap[val].const_val = v0;
		opt_state->vmap[val].is_const = 1;
	}
	p = opt_state->next_vnode++;
	p->val = val;
	p->code = code;
	p->v0 = v0;
	p->v1 = v1;
	p->next = opt_state->hashtbl[hash];
	opt_state->hashtbl[hash] = p;

	return val;
}
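/*
 * Editorial illustration (not from the original source): two separate
 * "ld [14]" statements present the same (code, v0, v1) triple, so F()
 * returns the same value number for both, which is how opt_stmt() and
 * opt_peep() recognize recomputations of a value that's already in a
 * register.  A constant load also records is_const/const_val in vmap[],
 * which is what the constant-folding paths below key off.
 */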
static void
vstore(struct stmt *s, bpf_u_int32 *valp, bpf_u_int32 newval, int alter)
{
	if (alter && newval != VAL_UNKNOWN && *valp == newval)
		s->code = NOP;
	else
		*valp = newval;
}
/*
 * Do constant-folding on binary operators.
 * (Unary operators are handled elsewhere.)
 */
static void
fold_op(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 v0, bpf_u_int32 v1)
{
	bpf_u_int32 a, b;

	a = opt_state->vmap[v0].const_val;
	b = opt_state->vmap[v1].const_val;

	switch (BPF_OP(s->code)) {
	case BPF_ADD:
		a += b;
		break;

	case BPF_SUB:
		a -= b;
		break;

	case BPF_MUL:
		a *= b;
		break;

	case BPF_DIV:
		if (b == 0)
			opt_error(opt_state, "division by zero");
		a /= b;
		break;

	case BPF_MOD:
		if (b == 0)
			opt_error(opt_state, "modulus by zero");
		a %= b;
		break;

	case BPF_AND:
		a &= b;
		break;

	case BPF_OR:
		a |= b;
		break;

	case BPF_XOR:
		a ^= b;
		break;

	case BPF_LSH:
		/*
		 * A left shift of more than the width of the type
		 * is undefined in C; we'll just treat it as shifting
		 * all the bits out.
		 *
		 * XXX - the BPF interpreter doesn't check for this,
		 * so its behavior is dependent on the behavior of
		 * the processor on which it's running.  There are
		 * processors on which it shifts all the bits out
		 * and processors on which it does no shift.
		 */
		if (b < 32)
			a <<= b;
		else
			a = 0;
		break;

	case BPF_RSH:
		/*
		 * A right shift of more than the width of the type
		 * is undefined in C; we'll just treat it as shifting
		 * all the bits out.
		 *
		 * XXX - the BPF interpreter doesn't check for this,
		 * so its behavior is dependent on the behavior of
		 * the processor on which it's running.  There are
		 * processors on which it shifts all the bits out
		 * and processors on which it does no shift.
		 */
		if (b < 32)
			a >>= b;
		else
			a = 0;
		break;

	default:
		abort();
	}
	s->k = a;
	s->code = BPF_LD|BPF_IMM;
	/*
	 * XXX - optimizer loop detection.
	 */
	opt_state->non_branch_movement_performed = 1;
	opt_state->done = 0;
}
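/*
 * Editorial example (not from the original source): if A's value number
 * maps to the constant 6 and the statement is "mul #7", fold_op() is
 * called with those two value numbers, computes 6 * 7, and rewrites the
 * statement in place as "ld #42"; the caller then rebinds val[A_ATOM]
 * to the new constant's value number.
 */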
static inline struct slist *
this_op(struct slist *s)
{
	while (s != 0 && s->s.code == NOP)
		s = s->next;
	return s;
}
static void
opt_not(struct block *b)
{
	struct block *tmp = JT(b);

	JT(b) = JF(b);
	JF(b) = tmp;
}
static void
opt_peep(opt_state_t *opt_state, struct block *b)
{
	struct slist *s;
	struct slist *next, *last;
	bpf_u_int32 val;

	s = b->stmts;
	if (s == 0)
		return;

	last = s;
	for (/*empty*/; /*empty*/; s = next) {
		/*
		 * Skip over nops.
		 */
		s = this_op(s);
		if (s == 0)
			break;	/* nothing left in the block */

		/*
		 * Find the next real instruction after that one
		 * (skipping nops).
		 */
		next = this_op(s->next);
		if (next == 0)
			break;	/* no next instruction */
		last = next;

		/*
		 * st  M[k]	-->	st  M[k]
		 * ldx M[k]		tax
		 */
		if (s->s.code == BPF_ST &&
		    next->s.code == (BPF_LDX|BPF_MEM) &&
		    s->s.k == next->s.k) {
			opt_state->done = 0;
			next->s.code = BPF_MISC|BPF_TAX;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		/*
		 * ld  #k	-->	ldx  #k
		 * tax			txa
		 */
		if (s->s.code == (BPF_LD|BPF_IMM) &&
		    next->s.code == (BPF_MISC|BPF_TAX)) {
			s->s.code = BPF_LDX|BPF_IMM;
			next->s.code = BPF_MISC|BPF_TXA;
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		/*
		 * This is an ugly special case, but it happens
		 * when you say tcp[k] or udp[k] where k is a constant.
		 */
		if (s->s.code == (BPF_LD|BPF_IMM)) {
			struct slist *add, *tax, *ild;

			/*
			 * Check that X isn't used on exit from this
			 * block (which the optimizer might cause).
			 * We know the code generator won't generate
			 * any local dependencies.
			 */
			if (ATOMELEM(b->out_use, X_ATOM))
				continue;

			/*
			 * Check that the instruction following the ldi
			 * is an addx, or it's an ldxms with an addx
			 * following it (with 0 or more nops between the
			 * ldxms and addx).
			 */
			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
				add = next;
			else
				add = this_op(next->next);
			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
				continue;

			/*
			 * Check that a tax follows that (with 0 or more
			 * nops between them).
			 */
			tax = this_op(add->next);
			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
				continue;

			/*
			 * Check that an ild follows that (with 0 or more
			 * nops between them).
			 */
			ild = this_op(tax->next);
			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
			    BPF_MODE(ild->s.code) != BPF_IND)
				continue;
			/*
			 * We want to turn this sequence:
			 *
			 * (004) ldi     #0x2		{s}
			 * (005) ldxms   [14]		{next}  -- optional
			 * (006) addx			{add}
			 * (007) tax
			 * (008) ild     [x+0]		{ild}
			 *
			 * into this sequence:
			 *
			 * (004) nop
			 * (005) ldxms   [14]
			 * (006) nop
			 * (007) nop
			 * (008) ild     [x+2]
			 *
			 * XXX We need to check that X is not
			 * subsequently used, because we want to change
			 * what'll be in it after this sequence.
			 *
			 * We know we can eliminate the accumulator
			 * modifications earlier in the sequence since
			 * it is defined by the last stmt of this sequence
			 * (i.e., the last statement of the sequence loads
			 * a value into the accumulator, so we can eliminate
			 * earlier operations on the accumulator).
			 */
			ild->s.k += s->s.k;
			s->s.code = NOP;
			add->s.code = NOP;
			tax->s.code = NOP;
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
	}
	/*
	 * If the comparison at the end of a block is an equality
	 * comparison against a constant, and nobody uses the value
	 * we leave in the A register at the end of a block, and
	 * the operation preceding the comparison is an arithmetic
	 * operation, we can sometimes optimize it away.
	 */
	if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
		/*
		 * We can optimize away certain subtractions of the
		 * X register.
		 */
		if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
			val = b->val[X_ATOM];
			if (opt_state->vmap[val].is_const) {
				/*
				 * If we have a subtract to do a comparison,
				 * and the X register is a known constant,
				 * we can merge this value into the
				 * comparison:
				 *
				 * sub x	->	nop
				 * jeq #y		jeq #(x+y)
				 */
				b->s.k += opt_state->vmap[val].const_val;
				last->s.code = NOP;
				opt_state->done = 0;
				/*
				 * XXX - optimizer loop detection.
				 */
				opt_state->non_branch_movement_performed = 1;
			} else if (b->s.k == 0) {
				/*
				 * If the X register isn't a constant,
				 * and the comparison in the test is
				 * against 0, we can compare with the
				 * X register, instead:
				 *
				 * sub x	->	nop
				 * jeq #0		jeq x
				 */
				last->s.code = NOP;
				b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
				opt_state->done = 0;
				/*
				 * XXX - optimizer loop detection.
				 */
				opt_state->non_branch_movement_performed = 1;
			}
		}
		/*
		 * Likewise, a constant subtract can be simplified:
		 *
		 * sub #x	->	nop
		 * jeq #y	->	jeq #(x+y)
		 */
		else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
			last->s.code = NOP;
			b->s.k += last->s.k;
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		/*
		 * And, similarly, a constant AND can be simplified
		 * if we're testing against 0, i.e.:
		 *
		 * and #k	nop
		 * jeq #0  ->	jset #k
		 */
		else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
		    b->s.k == 0) {
			b->s.k = last->s.k;
			b->s.code = BPF_JMP|BPF_K|BPF_JSET;
			last->s.code = NOP;
			opt_state->done = 0;
			opt_not(b);
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
	}
	/*
	 * jset #0        ->   never
	 * jset #ffffffff ->   always
	 */
	if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
		if (b->s.k == 0)
			JT(b) = JF(b);
		if (b->s.k == 0xffffffffU)
			JF(b) = JT(b);
	}
	/*
	 * If we're comparing against the index register, and the index
	 * register is a known constant, we can just compare against that
	 * constant.
	 */
	val = b->val[X_ATOM];
	if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
		bpf_u_int32 v = opt_state->vmap[val].const_val;

		b->s.code &= ~BPF_X;
		b->s.k = v;
	}
	/*
	 * If the accumulator is a known constant, we can compute the
	 * comparison result.
	 */
	val = b->val[A_ATOM];
	if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
		bpf_u_int32 v = opt_state->vmap[val].const_val;

		switch (BPF_OP(b->s.code)) {

		case BPF_JEQ:
			v = v == b->s.k;
			break;

		case BPF_JGT:
			v = v > b->s.k;
			break;

		case BPF_JGE:
			v = v >= b->s.k;
			break;

		case BPF_JSET:
			v &= b->s.k;
			break;

		default:
			abort();
		}
		if (JF(b) != JT(b)) {
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		if (v)
			JF(b) = JT(b);
		else
			JT(b) = JF(b);
	}
}
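/*
 * Editorial example (not from the original source): if value numbering
 * has proven A == 2 and the block ends with "jgt #1", the switch above
 * computes v = (2 > 1) = 1, so the false edge is redirected to the true
 * target; the test becomes unconditional and later passes can delete it.
 */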
/*
 * Compute the symbolic value of expression 's', and update
 * anything it defines in the value table 'val'.  If 'alter' is true,
 * do various optimizations.  This code would be cleaner if symbolic
 * evaluation and code transformations weren't folded together.
 */
static void
opt_stmt(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 val[], int alter)
{
	int op;
	bpf_u_int32 v;

	switch (s->code) {

	case BPF_LD|BPF_ABS|BPF_W:
	case BPF_LD|BPF_ABS|BPF_H:
	case BPF_LD|BPF_ABS|BPF_B:
		v = F(opt_state, s->code, s->k, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;
|BPF_IND
|BPF_W
:
1189 case BPF_LD
|BPF_IND
|BPF_H
:
1190 case BPF_LD
|BPF_IND
|BPF_B
:
1192 if (alter
&& opt_state
->vmap
[v
].is_const
) {
1193 s
->code
= BPF_LD
|BPF_ABS
|BPF_SIZE(s
->code
);
1194 s
->k
+= opt_state
->vmap
[v
].const_val
;
1195 v
= F(opt_state
, s
->code
, s
->k
, 0L);
1197 * XXX - optimizer loop detection.
1199 opt_state
->non_branch_movement_performed
= 1;
1200 opt_state
->done
= 0;
1203 v
= F(opt_state
, s
->code
, s
->k
, v
);
1204 vstore(s
, &val
[A_ATOM
], v
, alter
);
	case BPF_LD|BPF_LEN:
		v = F(opt_state, s->code, 0L, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_MSH|BPF_B:
		v = F(opt_state, s->code, s->k, 0L);
		vstore(s, &val[X_ATOM], v, alter);
		break;
	case BPF_ALU|BPF_NEG:
		if (alter && opt_state->vmap[val[A_ATOM]].is_const) {
			s->code = BPF_LD|BPF_IMM;
			/*
			 * Do this negation as unsigned arithmetic; that's
			 * what modern BPF engines do, and it guarantees
			 * that all possible values can be negated.  (Yeah,
			 * negating 0x80000000, the minimum signed 32-bit
			 * two's-complement value, results in 0x80000000,
			 * so it's still negative, but we *should* be doing
			 * all unsigned arithmetic here, to match what
			 * modern BPF engines do.)
			 *
			 * Express it as 0U - (unsigned value) so that we
			 * don't get compiler warnings about negating an
			 * unsigned value and don't get UBSan warnings
			 * about the result of negating 0x80000000 being
			 * undefined.
			 */
			s->k = 0U - opt_state->vmap[val[A_ATOM]].const_val;
			val[A_ATOM] = K(s->k);
		}
		else
			val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], 0L);
		break;
	case BPF_ALU|BPF_ADD|BPF_K:
	case BPF_ALU|BPF_SUB|BPF_K:
	case BPF_ALU|BPF_MUL|BPF_K:
	case BPF_ALU|BPF_DIV|BPF_K:
	case BPF_ALU|BPF_MOD|BPF_K:
	case BPF_ALU|BPF_AND|BPF_K:
	case BPF_ALU|BPF_OR|BPF_K:
	case BPF_ALU|BPF_XOR|BPF_K:
	case BPF_ALU|BPF_LSH|BPF_K:
	case BPF_ALU|BPF_RSH|BPF_K:
		op = BPF_OP(s->code);
		if (alter) {
			if (s->k == 0) {
				/*
				 * Optimize operations where the constant
				 * is zero.
				 *
				 * Don't optimize away "sub #0"
				 * as it may be needed later to
				 * fixup the generated math code.
				 *
				 * Fail if we're dividing by zero or taking
				 * a modulus by zero.
				 */
				if (op == BPF_ADD ||
				    op == BPF_LSH || op == BPF_RSH ||
				    op == BPF_OR || op == BPF_XOR) {
					s->code = NOP;
					break;
				}
				if (op == BPF_MUL || op == BPF_AND) {
					s->code = BPF_LD|BPF_IMM;
					val[A_ATOM] = K(s->k);
					break;
				}
				if (op == BPF_DIV)
					opt_error(opt_state,
					    "division by zero");
				if (op == BPF_MOD)
					opt_error(opt_state,
					    "modulus by zero");
			}
			if (opt_state->vmap[val[A_ATOM]].is_const) {
				fold_op(opt_state, s, val[A_ATOM], K(s->k));
				val[A_ATOM] = K(s->k);
				break;
			}
		}
		val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], K(s->k));
		break;
	case BPF_ALU|BPF_ADD|BPF_X:
	case BPF_ALU|BPF_SUB|BPF_X:
	case BPF_ALU|BPF_MUL|BPF_X:
	case BPF_ALU|BPF_DIV|BPF_X:
	case BPF_ALU|BPF_MOD|BPF_X:
	case BPF_ALU|BPF_AND|BPF_X:
	case BPF_ALU|BPF_OR|BPF_X:
	case BPF_ALU|BPF_XOR|BPF_X:
	case BPF_ALU|BPF_LSH|BPF_X:
	case BPF_ALU|BPF_RSH|BPF_X:
		op = BPF_OP(s->code);
		if (alter && opt_state->vmap[val[X_ATOM]].is_const) {
			if (opt_state->vmap[val[A_ATOM]].is_const) {
				fold_op(opt_state, s, val[A_ATOM], val[X_ATOM]);
				val[A_ATOM] = K(s->k);
			}
			else {
				s->code = BPF_ALU|BPF_K|op;
				s->k = opt_state->vmap[val[X_ATOM]].const_val;
				if ((op == BPF_LSH || op == BPF_RSH) &&
				    s->k > 31)
					opt_error(opt_state,
					    "shift by more than 31 bits");
				/*
				 * XXX - optimizer loop detection.
				 */
				opt_state->non_branch_movement_performed = 1;
				opt_state->done = 0;
				val[A_ATOM] =
					F(opt_state, s->code, val[A_ATOM], K(s->k));
			}
			break;
		}
		/*
		 * Check if we're doing something to an accumulator
		 * that is 0, and simplify.  This may not seem like
		 * much of a simplification but it could open up further
		 * optimizations.
		 * XXX We could also check for mul by 1, etc.
		 */
		if (alter && opt_state->vmap[val[A_ATOM]].is_const
		    && opt_state->vmap[val[A_ATOM]].const_val == 0) {
			if (op == BPF_ADD || op == BPF_OR || op == BPF_XOR) {
				s->code = BPF_MISC|BPF_TXA;
				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
				break;
			}
			else if (op == BPF_MUL || op == BPF_DIV || op == BPF_MOD ||
				 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
				s->code = BPF_LD|BPF_IMM;
				s->k = 0;
				vstore(s, &val[A_ATOM], K(s->k), alter);
				break;
			}
			else if (op == BPF_NEG) {
				s->code = NOP;
				break;
			}
		}
		val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], val[X_ATOM]);
		break;
	case BPF_MISC|BPF_TXA:
		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
		break;

	case BPF_LD|BPF_MEM:
		v = val[s->k];
		if (alter && opt_state->vmap[v].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = opt_state->vmap[v].const_val;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_MISC|BPF_TAX:
		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
		break;

	case BPF_LDX|BPF_MEM:
		v = val[s->k];
		if (alter && opt_state->vmap[v].is_const) {
			s->code = BPF_LDX|BPF_IMM;
			s->k = opt_state->vmap[v].const_val;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ST:
		vstore(s, &val[s->k], val[A_ATOM], alter);
		break;

	case BPF_STX:
		vstore(s, &val[s->k], val[X_ATOM], alter);
		break;
	}
}
static void
deadstmt(opt_state_t *opt_state, register struct stmt *s, register struct stmt *last[])
{
	register int atom;

	atom = atomuse(s);
	if (atom >= 0) {
		if (atom == AX_ATOM) {
			last[X_ATOM] = 0;
			last[A_ATOM] = 0;
		}
		else
			last[atom] = 0;
	}
	atom = atomdef(s);
	if (atom >= 0) {
		if (last[atom]) {
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
			last[atom]->code = NOP;
		}
		last[atom] = s;
	}
}
static void
opt_deadstores(opt_state_t *opt_state, register struct block *b)
{
	register struct slist *s;
	register int atom;
	struct stmt *last[N_ATOMS];

	memset((char *)last, 0, sizeof last);

	for (s = b->stmts; s != 0; s = s->next)
		deadstmt(opt_state, &s->s, last);
	deadstmt(opt_state, &b->s, last);

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
			last[atom]->code = NOP;
			/*
			 * The store was removed as it's dead,
			 * so the value stored into now has
			 * an unknown value.
			 */
			vstore(0, &b->val[atom], VAL_UNKNOWN, 0);
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
}
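/*
 * Editorial example (not from the original source): in a block whose
 * statements end with
 *
 *	st M[2]
 *	ld #1
 *
 * and whose out_use doesn't include M[2], nothing in this block or any
 * successor reads M[2], so the loop above turns the store into a NOP
 * and marks the scratch location's value unknown.
 */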
static void
opt_blk(opt_state_t *opt_state, struct block *b, int do_stmts)
{
	struct slist *s;
	struct edge *p;
	int i;
	bpf_u_int32 aval, xval;

#if 0
	for (s = b->stmts; s && s->next; s = s->next)
		if (BPF_CLASS(s->s.code) == BPF_JMP) {
			do_stmts = 0;
			break;
		}
#endif

	/*
	 * Initialize the atom values.
	 */
	p = b->in_edges;
	if (p == 0) {
		/*
		 * We have no predecessors, so everything is undefined
		 * upon entry to this block.
		 */
		memset((char *)b->val, 0, sizeof(b->val));
	} else {
		/*
		 * Inherit values from our predecessors.
		 *
		 * First, get the values from the predecessor along the
		 * first edge leading to this node.
		 */
		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
		/*
		 * Now look at all the other nodes leading to this node.
		 * If, for the predecessor along that edge, a register
		 * has a different value from the one we have (i.e.,
		 * control paths are merging, and the merging paths
		 * assign different values to that register), give the
		 * register the undefined value of 0.
		 */
		while ((p = p->next) != NULL) {
			for (i = 0; i < N_ATOMS; ++i)
				if (b->val[i] != p->pred->val[i])
					b->val[i] = 0;
		}
	}
	aval = b->val[A_ATOM];
	xval = b->val[X_ATOM];
	for (s = b->stmts; s; s = s->next)
		opt_stmt(opt_state, &s->s, b->val, do_stmts);

	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator or index register with a
	 * value that is already there, or if this block is a return,
	 * eliminate all the statements.
	 *
	 * XXX - what if it does a store?  Presumably that falls under
	 * the heading of "if we don't use anything from this block",
	 * i.e., if we use any memory location set to a different
	 * value by this block, then we use something from this block.
	 *
	 * XXX - why does it matter whether we use anything from this
	 * block?  If the accumulator or index register doesn't change
	 * its value, isn't that OK even if we use that value?
	 *
	 * XXX - if we load the accumulator with a different value,
	 * and the block ends with a conditional branch, we obviously
	 * can't eliminate it, as the branch depends on that value.
	 * For the index register, the conditional branch only depends
	 * on the index register value if the test is against the index
	 * register value rather than a constant; if nothing uses the
	 * value we put into the index register, and we're not testing
	 * against the index register's value, and there aren't any
	 * other problems that would keep us from eliminating this
	 * block, can we eliminate it?
	 */
	if (do_stmts &&
	    ((b->out_use == 0 &&
	      aval != VAL_UNKNOWN && b->val[A_ATOM] == aval &&
	      xval != VAL_UNKNOWN && b->val[X_ATOM] == xval) ||
	     BPF_CLASS(b->s.code) == BPF_RET)) {
		if (b->stmts != 0) {
			b->stmts = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
		}
	} else {
		opt_peep(opt_state, b);
		opt_deadstores(opt_state, b);
	}
	/*
	 * Set up values for branch optimizer.
	 */
	if (BPF_SRC(b->s.code) == BPF_K)
		b->oval = K(b->s.k);
	else
		b->oval = b->val[X_ATOM];
	b->et.code = b->s.code;
	b->ef.code = -b->s.code;
}
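/*
 * Editorial note (not from the original source): the merge rule above is
 * why a join point loses constant knowledge: if one predecessor exits
 * with A bound to value number 5 and another with value number 9, the
 * merged block gets b->val[A_ATOM] = 0 (VAL_UNKNOWN), so no peephole
 * downstream can treat A as a known constant there.
 */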
/*
 * Return true if any register that is used on exit from 'succ', has
 * an exit value that is different from the corresponding exit value
 * from 'b'.
 */
static int
use_conflict(struct block *b, struct block *succ)
{
	int atom;
	atomset use = succ->out_use;

	if (use == 0)
		return 0;

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (ATOMELEM(use, atom))
			if (b->val[atom] != succ->val[atom])
				return 1;
	return 0;
}
/*
 * Given a block that is the successor of an edge, and an edge that
 * dominates that edge, return either a pointer to a child of that
 * block (a block to which that block jumps) if that block is a
 * candidate to replace the successor of the latter edge or NULL
 * if neither of the children of the first block are candidates.
 */
static struct block *
fold_edge(struct block *child, struct edge *ep)
{
	int sense;
	bpf_u_int32 aval0, aval1, oval0, oval1;
	int code = ep->code;

	if (code < 0) {
		/*
		 * This edge is a "branch if false" edge.
		 */
		code = -code;
		sense = 0;
	} else {
		/*
		 * This edge is a "branch if true" edge.
		 */
		sense = 1;
	}

	/*
	 * If the opcode for the branch at the end of the block we
	 * were handed isn't the same as the opcode for the branch
	 * to which the edge we were handed corresponds, the tests
	 * for those branches aren't testing the same conditions,
	 * so the blocks to which the first block branches aren't
	 * candidates to replace the successor of the edge.
	 */
	if (child->s.code != code)
		return 0;

	aval0 = child->val[A_ATOM];
	oval0 = child->oval;
	aval1 = ep->pred->val[A_ATOM];
	oval1 = ep->pred->oval;

	/*
	 * If the A register value on exit from the successor block
	 * isn't the same as the A register value on exit from the
	 * predecessor of the edge, the blocks to which the first
	 * block branches aren't candidates to replace the successor
	 * of the edge.
	 */
	if (aval0 != aval1)
		return 0;

	if (oval0 == oval1)
		/*
		 * The operands of the branch instructions are
		 * identical, so the branches are testing the
		 * same condition, and the result is true if a true
		 * branch was taken to get here, otherwise false.
		 */
		return sense ? JT(child) : JF(child);

	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
		/*
		 * At this point, we only know the comparison if we
		 * came down the true branch, and it was an equality
		 * comparison with a constant.
		 *
		 * I.e., if we came down the true branch, and the branch
		 * was an equality comparison with a constant, we know the
		 * accumulator contains that constant.  If we came down
		 * the false branch, or the comparison wasn't with a
		 * constant, we don't know what was in the accumulator.
		 *
		 * We rely on the fact that distinct constants have distinct
		 * value numbers.
		 */
		return JF(child);

	return 0;
}
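/*
 * Editorial example (not from the original source): in a filter such as
 * "tcp dst port 80 or tcp dst port 81", suppose an edge reaches a
 * "jeq #81" block down the true edge of a "jeq #80" block.  The A
 * values match, the operand value numbers differ, sense is true and the
 * code is BPF_JMP|BPF_JEQ|BPF_K, so fold_edge() returns JF(child): A is
 * known to be 80, the second comparison can't succeed, and opt_j() can
 * route the edge straight to the false target.
 */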
/*
 * If we can make this edge go directly to a child of the edge's current
 * successor, do so.
 */
static void
opt_j(opt_state_t *opt_state, struct edge *ep)
{
	register u_int i, k;
	register struct block *target;

	/*
	 * Does this edge go to a block where, if the test
	 * at the end of it succeeds, it goes to a block
	 * that's a leaf node of the DAG, i.e. a return
	 * statement?
	 * If so, there's nothing to optimize.
	 */
	if (JT(ep->succ) == 0)
		return;

	/*
	 * Does this edge go to a block that goes, in turn, to
	 * the same block regardless of whether the test at the
	 * end succeeds or fails?
	 */
	if (JT(ep->succ) == JF(ep->succ)) {
		/*
		 * Common branch targets can be eliminated, provided
		 * there is no data dependency.
		 *
		 * Check whether any register used on exit from the
		 * block to which the successor of this edge goes
		 * has a value at that point that's different from
		 * the value it has on exit from the predecessor of
		 * this edge.  If not, the predecessor of this edge
		 * can just go to the block to which the successor
		 * of this edge goes, bypassing the successor of this
		 * edge, as the successor of this edge isn't doing
		 * any calculations whose results are different
		 * from what the blocks before it did and isn't
		 * doing any tests the results of which matter.
		 */
		if (!use_conflict(ep->pred, JT(ep->succ))) {
			/*
			 * No, there isn't.
			 * Make this edge go to the block to
			 * which the successor of that edge
			 * goes.
			 *
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
			opt_state->done = 0;
			ep->succ = JT(ep->succ);
		}
	}
	/*
	 * For each edge dominator that matches the successor of this
	 * edge, promote the edge successor to its grandchild.
	 *
	 * XXX We violate the set abstraction here in favor of a reasonably
	 * efficient loop.
	 */
 top:
	for (i = 0; i < opt_state->edgewords; ++i) {
		/* i'th word in the bitset of dominators */
		register bpf_u_int32 x = ep->edom[i];

		while (x != 0) {
			/* Find the next dominator in that word and mark it as found */
			k = lowest_set_bit(x);
			x &=~ ((bpf_u_int32)1 << k);
			k += i * BITS_PER_WORD;

			target = fold_edge(ep->succ, opt_state->edges[k]);
			/*
			 * We have a candidate to replace the successor
			 * of ep.
			 *
			 * Check that there is no data dependency between
			 * nodes that will be violated if we move the edge;
			 * i.e., if any register used on exit from the
			 * candidate has a value at that point different
			 * from the value it has when we exit the
			 * predecessor of that edge, there's a data
			 * dependency that will be violated.
			 */
			if (target != 0 && !use_conflict(ep->pred, target)) {
				/*
				 * It's safe to replace the successor of
				 * ep; do so, and note that we've made
				 * at least one change.
				 *
				 * XXX - this is one of the operations that
				 * happens when the optimizer gets into
				 * one of those infinite loops.
				 */
				opt_state->done = 0;
				ep->succ = target;
				if (JT(target) != 0)
					/*
					 * Start over unless we hit a leaf.
					 */
					goto top;
				return;
			}
		}
	}
}
/*
 * XXX - is this, and and_pullup(), what's described in section 6.1.2
 * "Predicate Assertion Propagation" in the BPF+ paper?
 *
 * Note that this looks at block dominators, not edge dominators.
 *
 * "A or B" compiles into
 *
 *          A
 *       t / \ f
 *        /   B
 *       / t / \ f
 *      \   /
 *       \ /
 *        X
 */
static void
or_pullup(opt_state_t *opt_state, struct block *b, struct block *root)
{
	bpf_u_int32 val;
	int at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	/*
	 * For the first edge in the list of edges coming into this block,
	 * see whether the predecessor of that edge comes here via a true
	 * branch or a false branch.
	 */
	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);	/* jt */
	else
		diffp = &JF(b->in_edges->pred);	/* jf */

	/*
	 * diffp is a pointer to a pointer to the block.
	 *
	 * Go down the false chain looking as far as you can,
	 * making sure that each jump-compare is doing the
	 * same as the original block.
	 *
	 * If you reach the bottom before you reach a
	 * different jump-compare, just exit.  There's nothing
	 * to do here.  XXX - no, this version is checking for
	 * the value leaving the block; that's from the BPF+
	 * pullup routine.
	 */
	at_top = 1;
	for (;;) {
		/*
		 * Done if that's not going anywhere XXX
		 */
		if (*diffp == 0)
			return;

		/*
		 * Done if that predecessor blah blah blah isn't
		 * going the same place we're going XXX
		 *
		 * Does the true edge of this block point to the same
		 * location as the true edge of b?
		 */
		if (JT(*diffp) != JT(b))
			return;

		/*
		 * Done if this node isn't a dominator of that
		 * node blah blah blah XXX
		 *
		 * Does b dominate diffp?
		 */
		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		/*
		 * Break out of the loop if that node's value of A
		 * isn't the value of A above XXX
		 */
		if ((*diffp)->val[A_ATOM] != val)
			break;

		/*
		 * Get the JF for that node XXX
		 * Go down the false path.
		 */
		diffp = &JF(*diffp);
		at_top = 0;
	}

	/*
	 * Now that we've found a different jump-compare in a chain
	 * below b, search further down until we find another
	 * jump-compare that looks at the original value.  This
	 * jump-compare should get pulled up.  XXX again we're
	 * comparing values not jump-compares.
	 */
	samep = &JF(*diffp);
	for (;;) {
		/*
		 * Done if that's not going anywhere XXX
		 */
		if (*samep == 0)
			return;

		/*
		 * Done if that predecessor blah blah blah isn't
		 * going the same place we're going XXX
		 */
		if (JT(*samep) != JT(b))
			return;

		/*
		 * Done if this node isn't a dominator of that
		 * node blah blah blah XXX
		 *
		 * Does b dominate samep?
		 */
		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		/*
		 * Break out of the loop if that node's value of A
		 * is the value of A above XXX
		 */
		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between dp0 and dp1.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JF(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JF(pull);
	JF(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	/*
	 * XXX - this is one of the operations that happens when the
	 * optimizer gets into one of those infinite loops.
	 */
	opt_state->done = 0;

	/*
	 * Recompute dominator sets as control flow graph has changed.
	 */
	find_dom(opt_state, root);
}
static void
and_pullup(opt_state_t *opt_state, struct block *b, struct block *root)
{
	bpf_u_int32 val;
	int at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	for (;;) {
		if (*diffp == 0)
			return;

		if (JF(*diffp) != JF(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JT(*diffp);
		at_top = 0;
	}
	samep = &JT(*diffp);
	for (;;) {
		if (*samep == 0)
			return;

		if (JF(*samep) != JF(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between diffp and samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JT(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JT(pull);
	JT(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	/*
	 * XXX - this is one of the operations that happens when the
	 * optimizer gets into one of those infinite loops.
	 */
	opt_state->done = 0;

	/*
	 * Recompute dominator sets as control flow graph has changed.
	 */
	find_dom(opt_state, root);
}
static void
opt_blks(opt_state_t *opt_state, struct icode *ic, int do_stmts)
{
	int i, maxlevel;
	struct block *p;

	init_val(opt_state);
	maxlevel = ic->root->level;

	find_inedges(opt_state, ic->root);
	for (i = maxlevel; i >= 0; --i)
		for (p = opt_state->levels[i]; p; p = p->link)
			opt_blk(opt_state, p, do_stmts);

	if (do_stmts)
		/*
		 * No point trying to move branches; it can't possibly
		 * make a difference at this point.
		 *
		 * XXX - this might be after we detect a loop where
		 * we were just looping infinitely moving branches
		 * in such a fashion that we went through two or more
		 * versions of the machine code, eventually returning
		 * to the first version.  (We're really not doing a
		 * full loop detection, we're just testing for two
		 * passes in a row where we do nothing but
		 * move branches.)
		 */
		return;

	/*
	 * Is this what the BPF+ paper describes in sections 6.1.1,
	 * 6.1.2 and 6.1.3?
	 */
	for (i = 1; i <= maxlevel; ++i) {
		for (p = opt_state->levels[i]; p; p = p->link) {
			opt_j(opt_state, &p->et);
			opt_j(opt_state, &p->ef);
		}
	}

	find_inedges(opt_state, ic->root);
	for (i = 1; i <= maxlevel; ++i) {
		for (p = opt_state->levels[i]; p; p = p->link) {
			or_pullup(opt_state, p, ic->root);
			and_pullup(opt_state, p, ic->root);
		}
	}
}
static inline void
link_inedge(struct edge *parent, struct block *child)
{
	parent->next = child->in_edges;
	child->in_edges = parent;
}
static void
find_inedges(opt_state_t *opt_state, struct block *root)
{
	u_int i;
	int level;
	struct block *b;

	for (i = 0; i < opt_state->n_blocks; ++i)
		opt_state->blocks[i]->in_edges = 0;

	/*
	 * Traverse the graph, adding each edge to the predecessor
	 * list of its successors.  Skip the leaves (i.e. level 0).
	 */
	for (level = root->level; level > 0; --level) {
		for (b = opt_state->levels[level]; b != 0; b = b->link) {
			link_inedge(&b->et, JT(b));
			link_inedge(&b->ef, JF(b));
		}
	}
}
static void
opt_root(struct block **b)
{
	struct slist *tmp, *s;

	s = (*b)->stmts;
	(*b)->stmts = 0;
	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
		*b = JT(*b);

	tmp = (*b)->stmts;
	if (tmp != 0)
		sappend(s, tmp);
	(*b)->stmts = s;

	/*
	 * If the root node is a return, then there is no
	 * point executing any statements (since the bpf machine
	 * has no side effects).
	 */
	if (BPF_CLASS((*b)->s.code) == BPF_RET)
		(*b)->stmts = 0;
}
static void
opt_loop(opt_state_t *opt_state, struct icode *ic, int do_stmts)
{
	int loop_count = 0;

	for (;;) {
		if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
			printf("opt_loop(root, %d) begin\n", do_stmts);
			opt_dump(opt_state, ic);
		}

		/*
		 * XXX - optimizer loop detection.
		 */
		opt_state->done = 1;
		opt_state->non_branch_movement_performed = 0;
		find_levels(opt_state, ic);
		find_dom(opt_state, ic->root);
		find_closure(opt_state, ic->root);
		find_ud(opt_state, ic->root);
		find_edom(opt_state, ic->root);
		opt_blks(opt_state, ic, do_stmts);

		if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
			printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, opt_state->done);
			opt_dump(opt_state, ic);
		}

		/*
		 * Was anything done in this optimizer pass?
		 */
		if (opt_state->done) {
			/*
			 * No, so we've reached a fixed point.
			 * We're done.
			 */
			break;
		}

		/*
		 * XXX - was anything done other than branch movement
		 * in this pass?
		 */
		if (opt_state->non_branch_movement_performed) {
			/*
			 * Yes.  Clear any loop-detection counter;
			 * we're making some form of progress (assuming
			 * we can't get into a cycle doing *other*
			 * optimizations...).
			 */
			loop_count = 0;
		} else {
			/*
			 * No - increment the counter, and quit if
			 * it's up to 100.
			 */
			loop_count++;
			if (loop_count >= 100) {
				/*
				 * We've done nothing but branch movement
				 * for 100 passes; we're probably
				 * in a cycle and will never reach a
				 * fixed point.
				 *
				 * XXX - yes, we really need a non-
				 * heuristic way of detecting a cycle.
				 */
				opt_state->done = 1;
				break;
			}
		}
	}
}
/*
 * Optimize the filter code in its dag representation.
 * Return 0 on success, -1 on error.
 */
int
bpf_optimize(struct icode *ic, char *errbuf)
{
	opt_state_t opt_state;

	memset(&opt_state, 0, sizeof(opt_state));
	opt_state.errbuf = errbuf;
	opt_state.non_branch_movement_performed = 0;
	if (setjmp(opt_state.top_ctx)) {
		opt_cleanup(&opt_state);
		return -1;
	}
	opt_init(&opt_state, ic);
	opt_loop(&opt_state, ic, 0);
	opt_loop(&opt_state, ic, 1);
	intern_blocks(&opt_state, ic);

	if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
		printf("after intern_blocks()\n");
		opt_dump(&opt_state, ic);
	}

	opt_root(&ic->root);

	if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
		printf("after opt_root()\n");
		opt_dump(&opt_state, ic);
	}

	opt_cleanup(&opt_state);
	return 0;
}
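/*
 * Call-sequence sketch (editorial; the real caller is pcap_compile() in
 * gencode.c): once the code generator has built the icode DAG,
 *
 *	if (bpf_optimize(&ic, errbuf) == -1)
 *		return -1;	(errbuf now holds the opt_error() message)
 *
 * and the optimized DAG is subsequently flattened into a bpf_insn array
 * for the kernel or userland interpreter.
 */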
static void
make_marks(struct icode *ic, struct block *p)
{
	if (!isMarked(ic, p)) {
		Mark(ic, p);
		if (BPF_CLASS(p->s.code) != BPF_RET) {
			make_marks(ic, JT(p));
			make_marks(ic, JF(p));
		}
	}
}

/*
 * Mark code array such that isMarked(ic->cur_mark, i) is true
 * only for nodes that are alive.
 */
static void
mark_code(struct icode *ic)
{
	ic->cur_mark += 1;
	make_marks(ic, ic->root);
}
/*
 * True iff the two stmt lists load the same value from the packet into
 * the accumulator.
 */
static int
eq_slist(struct slist *x, struct slist *y)
{
	for (;;) {
		while (x && x->s.code == NOP)
			x = x->next;
		while (y && y->s.code == NOP)
			y = y->next;
		if (x == 0)
			return y == 0;
		if (y == 0)
			return x == 0;
		if (x->s.code != y->s.code || x->s.k != y->s.k)
			return 0;
		x = x->next;
		y = y->next;
	}
}
static inline int
eq_blk(struct block *b0, struct block *b1)
{
	if (b0->s.code == b1->s.code &&
	    b0->s.k == b1->s.k &&
	    b0->et.succ == b1->et.succ &&
	    b0->ef.succ == b1->ef.succ)
		return eq_slist(b0->stmts, b1->stmts);
	return 0;
}
static void
intern_blocks(opt_state_t *opt_state, struct icode *ic)
{
	struct block *p;
	u_int i, j;
	int done1;		/* don't shadow global */
 top:
	done1 = 1;
	for (i = 0; i < opt_state->n_blocks; ++i)
		opt_state->blocks[i]->link = 0;

	mark_code(ic);

	for (i = opt_state->n_blocks - 1; i != 0; ) {
		--i;
		if (!isMarked(ic, opt_state->blocks[i]))
			continue;
		for (j = i + 1; j < opt_state->n_blocks; ++j) {
			if (!isMarked(ic, opt_state->blocks[j]))
				continue;
			if (eq_blk(opt_state->blocks[i], opt_state->blocks[j])) {
				opt_state->blocks[i]->link = opt_state->blocks[j]->link ?
					opt_state->blocks[j]->link : opt_state->blocks[j];
				break;
			}
		}
	}
	for (i = 0; i < opt_state->n_blocks; ++i) {
		p = opt_state->blocks[i];
		if (JT(p) == 0)
			continue;
		if (JT(p)->link) {
			done1 = 0;
			JT(p) = JT(p)->link;
		}
		if (JF(p)->link) {
			done1 = 0;
			JF(p) = JF(p)->link;
		}
	}
	if (!done1)
		goto top;
}
static void
opt_cleanup(opt_state_t *opt_state)
{
	free((void *)opt_state->vnode_base);
	free((void *)opt_state->vmap);
	free((void *)opt_state->edges);
	free((void *)opt_state->space);
	free((void *)opt_state->levels);
	free((void *)opt_state->blocks);
}
/*
 * For optimizer errors.
 */
static void PCAP_NORETURN
opt_error(opt_state_t *opt_state, const char *fmt, ...)
{
	va_list ap;

	if (opt_state->errbuf != NULL) {
		va_start(ap, fmt);
		(void)vsnprintf(opt_state->errbuf,
		    PCAP_ERRBUF_SIZE, fmt, ap);
		va_end(ap);
	}
	longjmp(opt_state->top_ctx, 1);
	/* NOTREACHED */
}
/*
 * Return the number of stmts in 's'.
 */
static u_int
slength(struct slist *s)
{
	u_int n = 0;

	for (; s; s = s->next)
		if (s->s.code != NOP)
			++n;
	return n;
}
/*
 * Return the number of nodes reachable by 'p'.
 * All nodes should be initially unmarked.
 */
static int
count_blocks(struct icode *ic, struct block *p)
{
	if (p == 0 || isMarked(ic, p))
		return 0;
	Mark(ic, p);
	return count_blocks(ic, JT(p)) + count_blocks(ic, JF(p)) + 1;
}
/*
 * Do a depth first search on the flow graph, numbering the
 * basic blocks, and entering them into the 'blocks' array.
 */
static void
number_blks_r(opt_state_t *opt_state, struct icode *ic, struct block *p)
{
	u_int n;

	if (p == 0 || isMarked(ic, p))
		return;

	Mark(ic, p);
	n = opt_state->n_blocks++;
	if (opt_state->n_blocks == 0) {
		/*
		 * Overflow.
		 */
		opt_error(opt_state, "filter is too complex to optimize");
	}
	p->id = n;
	opt_state->blocks[n] = p;

	number_blks_r(opt_state, ic, JT(p));
	number_blks_r(opt_state, ic, JF(p));
}
/*
 * Return the number of stmts in the flowgraph reachable by 'p'.
 * The nodes should be unmarked before calling.
 *
 * Note that "stmts" means "instructions", and that this includes
 *
 *	side-effect statements in 'p' (slength(p->stmts));
 *
 *	statements in the true branch from 'p' (count_stmts(JT(p)));
 *
 *	statements in the false branch from 'p' (count_stmts(JF(p)));
 *
 *	the conditional jump itself (1);
 *
 *	an extra long jump if the true branch requires it (p->longjt);
 *
 *	an extra long jump if the false branch requires it (p->longjf).
 */
static u_int
count_stmts(struct icode *ic, struct block *p)
{
	u_int n;

	if (p == 0 || isMarked(ic, p))
		return 0;
	Mark(ic, p);
	n = count_stmts(ic, JT(p)) + count_stmts(ic, JF(p));
	return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
}
2517 * Allocate memory. All allocation is done before optimization
2518 * is begun. A linear bound on the size of all data structures is computed
2519 * from the total number of blocks and/or statements.
2522 opt_init(opt_state_t
*opt_state
, struct icode
*ic
)
2525 int i
, n
, max_stmts
;
2527 size_t block_memsize
, edge_memsize
;
2530 * First, count the blocks, so we can malloc an array to map
2531 * block number to block. Then, put the blocks into the array.
2534 n
= count_blocks(ic
, ic
->root
);
2535 opt_state
->blocks
= (struct block
**)calloc(n
, sizeof(*opt_state
->blocks
));
2536 if (opt_state
->blocks
== NULL
)
2537 opt_error(opt_state
, "malloc");
2539 opt_state
->n_blocks
= 0;
2540 number_blks_r(opt_state
, ic
, ic
->root
);
2543 * This "should not happen".
2545 if (opt_state
->n_blocks
== 0)
2546 opt_error(opt_state
, "filter has no instructions; please report this as a libpcap issue");
2548 opt_state
->n_edges
= 2 * opt_state
->n_blocks
;
2549 if ((opt_state
->n_edges
/ 2) != opt_state
->n_blocks
) {
2553 opt_error(opt_state
, "filter is too complex to optimize");
2555 opt_state
->edges
= (struct edge
**)calloc(opt_state
->n_edges
, sizeof(*opt_state
->edges
));
2556 if (opt_state
->edges
== NULL
) {
2557 opt_error(opt_state
, "malloc");
2561 * The number of levels is bounded by the number of nodes.
2563 opt_state
->levels
= (struct block
**)calloc(opt_state
->n_blocks
, sizeof(*opt_state
->levels
));
2564 if (opt_state
->levels
== NULL
) {
2565 opt_error(opt_state
, "malloc");
2568 opt_state
->edgewords
= opt_state
->n_edges
/ BITS_PER_WORD
+ 1;
2569 opt_state
->nodewords
= opt_state
->n_blocks
/ BITS_PER_WORD
+ 1;
2572 * Make sure opt_state->n_blocks * opt_state->nodewords fits
2573 * in a u_int; we use it as a u_int number-of-iterations
2576 product
= opt_state
->n_blocks
* opt_state
->nodewords
;
2577 if ((product
/ opt_state
->n_blocks
) != opt_state
->nodewords
) {
2579 * XXX - just punt and don't try to optimize?
2580 * In practice, this is unlikely to happen with
2583 opt_error(opt_state
, "filter is too complex to optimize");
2587 * Make sure the total memory required for that doesn't
2590 block_memsize
= (size_t)2 * product
* sizeof(*opt_state
->space
);
2591 if ((block_memsize
/ product
) != 2 * sizeof(*opt_state
->space
)) {
2592 opt_error(opt_state
, "filter is too complex to optimize");
2596 * Make sure opt_state->n_edges * opt_state->edgewords fits
2597 * in a u_int; we use it as a u_int number-of-iterations
2600 product
= opt_state
->n_edges
* opt_state
->edgewords
;
2601 if ((product
/ opt_state
->n_edges
) != opt_state
->edgewords
) {
2602 opt_error(opt_state
, "filter is too complex to optimize");
2606 * Make sure the total memory required for that doesn't
2609 edge_memsize
= (size_t)product
* sizeof(*opt_state
->space
);
2610 if (edge_memsize
/ product
!= sizeof(*opt_state
->space
)) {
2611 opt_error(opt_state
, "filter is too complex to optimize");
2615 * Make sure the total memory required for both of them doesn't
2618 if (block_memsize
> SIZE_MAX
- edge_memsize
) {
2619 opt_error(opt_state
, "filter is too complex to optimize");
2623 opt_state
->space
= (bpf_u_int32
*)malloc(block_memsize
+ edge_memsize
);
2624 if (opt_state
->space
== NULL
) {
2625 opt_error(opt_state
, "malloc");
	p = opt_state->space;
	opt_state->all_dom_sets = p;
	for (i = 0; i < n; ++i) {
		opt_state->blocks[i]->dom = p;
		p += opt_state->nodewords;
	}
	opt_state->all_closure_sets = p;
	for (i = 0; i < n; ++i) {
		opt_state->blocks[i]->closure = p;
		p += opt_state->nodewords;
	}
	opt_state->all_edge_sets = p;
	for (i = 0; i < n; ++i) {
		register struct block *b = opt_state->blocks[i];

		b->et.edom = p;
		p += opt_state->edgewords;
		b->ef.edom = p;
		p += opt_state->edgewords;
		b->et.id = i;
		opt_state->edges[i] = &b->et;
		b->ef.id = opt_state->n_blocks + i;
		opt_state->edges[opt_state->n_blocks + i] = &b->ef;
		b->et.pred = b;
		b->ef.pred = b;
	}
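	/*
	 * At this point the single "space" arena has been carved into three
	 * consecutive regions:
	 *
	 *	all_dom_sets     - n_blocks * nodewords words
	 *	all_closure_sets - n_blocks * nodewords words
	 *	all_edge_sets    - n_edges * edgewords words
	 *
	 * so one malloc()/free() pair covers every bit vector the optimizer
	 * uses.
	 */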
	max_stmts = 0;
	for (i = 0; i < n; ++i)
		max_stmts += slength(opt_state->blocks[i]->stmts) + 1;
	/*
	 * We allocate at most 3 value numbers per statement,
	 * so this is an upper bound on the number of valnodes we'll need.
	 */
	opt_state->maxval = 3 * max_stmts;
	opt_state->vmap = (struct vmapinfo *)calloc(opt_state->maxval, sizeof(*opt_state->vmap));
	if (opt_state->vmap == NULL) {
		opt_error(opt_state, "malloc");
	}
	opt_state->vnode_base = (struct valnode *)calloc(opt_state->maxval, sizeof(*opt_state->vnode_base));
	if (opt_state->vnode_base == NULL) {
		opt_error(opt_state, "malloc");
	}
}
/*
 * This is only used when supporting optimizer debugging.  It is
 * global state, so do *not* do more than one compile in parallel
 * and expect it to provide meaningful information.
 */
#ifdef BDEBUG
int bids[NBIDS];
#endif

static void PCAP_NORETURN conv_error(conv_state_t *, const char *, ...)
    PCAP_PRINTFLIKE(2, 3);
/*
 * Returns true if successful.  Returns false if a branch has
 * an offset that is too large.  If so, we have marked that
 * branch so that on a subsequent iteration, it will be treated
 * properly.
 */
static int
convert_code_r(conv_state_t *conv_state, struct icode *ic, struct block *p)
{
	struct bpf_insn *dst;
	struct slist *src;
	u_int slen;
	u_int off;
	struct slist **offset = NULL;
	if (p == 0 || isMarked(ic, p))
		return (1);
	Mark(ic, p);

	if (convert_code_r(conv_state, ic, JF(p)) == 0)
		return (0);
	if (convert_code_r(conv_state, ic, JT(p)) == 0)
		return (0);
	slen = slength(p->stmts);
	dst = conv_state->ftail -= (slen + 1 + p->longjt + p->longjf);
		/* inflate length by any extra jumps */

	p->offset = (int)(dst - conv_state->fstart);
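	/*
	 * The instruction array is filled in from the tail: ftail was just
	 * pre-decremented by this block's instruction count, so both
	 * successors (converted by the recursive calls above) already have
	 * final offsets by the time this block's branches are resolved.
	 */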
	/* generate offset[] for convenience */
	if (slen) {
		offset = (struct slist **)calloc(slen, sizeof(struct slist *));
		if (!offset) {
			conv_error(conv_state, "not enough core");
			/*NOTREACHED*/
		}
	}
	src = p->stmts;
	for (off = 0; off < slen && src; off++) {
#if 0
		printf("off=%d src=%x\n", off, src);
#endif
		offset[off] = src;
		src = src->next;
	}
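	/*
	 * offset[] now maps a statement's position within this block to
	 * the slist node occupying that slot, so the jump-target search
	 * below can turn pointer equality into an instruction index.
	 */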
	off = 0;
	for (src = p->stmts; src; src = src->next) {
		if (src->s.code == NOP)
			continue;
		dst->code = (u_short)src->s.code;
		dst->k = src->s.k;
		/* fill block-local relative jump */
		if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
#if 0
			if (src->s.jt || src->s.jf) {
				free(offset);
				conv_error(conv_state, "illegal jmp destination");
				/*NOTREACHED*/
			}
#endif
			goto filled;
		}
		if (off == slen - 2)	/*???*/
			goto filled;
	    {
		u_int i;
		int jt, jf;
		const char ljerr[] = "%s for block-local relative jump: off=%d";

#if 0
		printf("code=%x off=%d %x %x\n", src->s.code,
			off, src->s.jt, src->s.jf);
#endif

		if (!src->s.jt || !src->s.jf) {
			free(offset);
			conv_error(conv_state, ljerr, "no jmp destination", off);
			/*NOTREACHED*/
		}
		jt = jf = 0;
		for (i = 0; i < slen; i++) {
			if (offset[i] == src->s.jt) {
				if (jt) {
					free(offset);
					conv_error(conv_state, ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}
				if (i - off - 1 >= 256) {
					free(offset);
					conv_error(conv_state, ljerr, "out-of-range jump", off);
					/*NOTREACHED*/
				}
				dst->jt = (u_char)(i - off - 1);
				jt++;
			}
			if (offset[i] == src->s.jf) {
				if (jf) {
					free(offset);
					conv_error(conv_state, ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}
				if (i - off - 1 >= 256) {
					free(offset);
					conv_error(conv_state, ljerr, "out-of-range jump", off);
					/*NOTREACHED*/
				}
				dst->jf = (u_char)(i - off - 1);
				jf++;
			}
		}
		if (!jt || !jf) {
			free(offset);
			conv_error(conv_state, ljerr, "no destination found", off);
			/*NOTREACHED*/
		}
	    }
filled:
		++dst;
		++off;
	}
	if (offset)
		free(offset);
#ifdef BDEBUG
	if (dst - conv_state->fstart < NBIDS)
		bids[dst - conv_state->fstart] = p->id + 1;
#endif
	dst->code = (u_short)p->s.code;
	dst->k = p->s.k;
	if (JT(p)) {
		/* number of extra jumps inserted */
		u_char extrajmps = 0;

		off = JT(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
			/* offset too large for branch, must add a jump */
			if (p->longjt == 0) {
				/* mark this instruction and retry */
				p->longjt++;
				return (0);
			}
			dst->jt = extrajmps;
			extrajmps++;
			dst[extrajmps].code = BPF_JMP|BPF_JA;
			dst[extrajmps].k = off - extrajmps;
		}
		else
			dst->jt = (u_char)off;
		off = JF(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
			/* offset too large for branch, must add a jump */
			if (p->longjf == 0) {
				/* mark this instruction and retry */
				p->longjf++;
				return (0);
			}
			/* branch if F to following jump */
			/* if two jumps are inserted, F goes to second one */
			dst->jf = extrajmps;
			extrajmps++;
			dst[extrajmps].code = BPF_JMP|BPF_JA;
			dst[extrajmps].k = off - extrajmps;
		}
		else
			dst->jf = (u_char)off;
	}
	return (1);
}
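/*
 * A note on the longjt/longjf protocol above: a conditional branch can
 * encode only an 8-bit offset, so when a target is 256 or more
 * instructions away the block is flagged and convert_code_r() returns 0.
 * icode_to_fcode() then restarts the conversion from scratch; on the
 * retry, count_stmts() has reserved room for the unconditional BPF_JA
 * that covers the long hop.
 */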
/*
 * Convert flowgraph intermediate representation to the
 * BPF array representation.  Set *lenp to the number of instructions.
 *
 * This routine does *NOT* leak the memory pointed to by fp.  It *must
 * not* do free(fp) before returning fp; doing so would make no sense,
 * as the BPF array pointed to by the return value of icode_to_fcode()
 * must be valid - it's being returned for use in a bpf_program structure.
 *
 * If it appears that icode_to_fcode() is leaking, the problem is that
 * the program using pcap_compile() is failing to free the memory in
 * the BPF program when it's done - the leak is in the program, not in
 * the routine that happens to be allocating the memory.  (By analogy, if
 * a program calls fopen() without ever calling fclose() on the FILE *,
 * it will leak the FILE structure; the leak is not in fopen(), it's in
 * the program.)  Change the program to use pcap_freecode() when it's
 * done with the filter program.  See the pcap man page.
 */
struct bpf_insn *
icode_to_fcode(struct icode *ic, struct block *root, u_int *lenp,
    char *errbuf)
{
	u_int n;
	struct bpf_insn *fp;
	conv_state_t conv_state;
	conv_state.fstart = NULL;
	conv_state.errbuf = errbuf;
	if (setjmp(conv_state.top_ctx) != 0) {
		free(conv_state.fstart);
		return NULL;
	}
	/*
	 * Loop doing convert_code_r() until no branches remain
	 * with too-large offsets.
	 */
	for (;;) {
		unMarkAll(ic);
		n = *lenp = count_stmts(ic, root);

		fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
		if (fp == NULL) {
			(void)snprintf(errbuf, PCAP_ERRBUF_SIZE,
			    "malloc");
			return NULL;
		}
		memset((char *)fp, 0, sizeof(*fp) * n);
		conv_state.fstart = fp;
		conv_state.ftail = fp + n;

		unMarkAll(ic);
		if (convert_code_r(&conv_state, ic, root))
			break;
		free(fp);
	}

	return fp;
}
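#if 0
/*
 * Minimal usage sketch, not part of libpcap: how a caller typically
 * consumes the array returned by icode_to_fcode().  The function name
 * "example_build_program" is hypothetical; bpf_program is the standard
 * <pcap/bpf.h> structure, and pcap_freecode() is the documented way for
 * the *caller* to release the instructions when done.
 */
static int
example_build_program(struct icode *ic, struct bpf_program *prog,
    char *errbuf)
{
	prog->bf_insns = icode_to_fcode(ic, ic->root, &prog->bf_len, errbuf);
	if (prog->bf_insns == NULL)
		return (-1);		/* errbuf holds the failure reason */
	/* ... use the program ... */
	pcap_freecode(prog);		/* caller frees, not icode_to_fcode() */
	return (0);
}
#endif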
/* For icode_to_fcode() errors. */
static void PCAP_NORETURN
conv_error(conv_state_t *conv_state, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void)vsnprintf(conv_state->errbuf,
	    PCAP_ERRBUF_SIZE, fmt, ap);
	va_end(ap);
	longjmp(conv_state->top_ctx, 1);
	/* NOTREACHED */
}
/*
 * Make a copy of a BPF program and put it in the "fcode" member of
 * a "pcap_t".
 *
 * If we fail to allocate memory for the copy, fill in the "errbuf"
 * member of the "pcap_t" with an error message, and return -1;
 * otherwise, return 0.
 */
int
pcapint_install_bpf_program(pcap_t *p, struct bpf_program *fp)
{
	size_t prog_size;
	/*
	 * Validate the program.
	 */
	if (!pcapint_validate_filter(fp->bf_insns, fp->bf_len)) {
		snprintf(p->errbuf, sizeof(p->errbuf),
		    "BPF program is not valid");
		return (-1);
	}
	/*
	 * Free up any already installed program.
	 */
	pcap_freecode(&p->fcode);
	prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
	p->fcode.bf_len = fp->bf_len;
	p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
	if (p->fcode.bf_insns == NULL) {
		pcapint_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
		    errno, "malloc");
		return (-1);
	}
	memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
	return (0);
}
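/*
 * The copy above is deliberate: the caller keeps ownership of the
 * bpf_program it passed in, while the pcap_t owns its private copy in
 * p->fcode and releases it later via pcap_freecode().
 */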
#ifdef BDEBUG
static void
dot_dump_node(struct icode *ic, struct block *block, struct bpf_program *prog,
    FILE *out)
{
	int icount, noffset;
	int i;
	if (block == NULL || isMarked(ic, block))
		return;
	Mark(ic, block);
	icount = slength(block->stmts) + 1 + block->longjt + block->longjf;
	noffset = min(block->offset + icount, (int)prog->bf_len);
	fprintf(out, "\tblock%u [shape=ellipse, id=\"block-%u\" label=\"BLOCK%u\\n", block->id, block->id, block->id);
	for (i = block->offset; i < noffset; i++) {
		fprintf(out, "\\n%s", bpf_image(prog->bf_insns + i, i));
	}
	fprintf(out, "\" tooltip=\"");
	for (i = 0; i < BPF_MEMWORDS; i++)
		if (block->val[i] != VAL_UNKNOWN)
			fprintf(out, "val[%d]=%d ", i, block->val[i]);
	fprintf(out, "val[A]=%d ", block->val[A_ATOM]);
	fprintf(out, "val[X]=%d", block->val[X_ATOM]);
	fprintf(out, "\"");
	if (JT(block) == NULL)
		fprintf(out, ", peripheries=2");
	fprintf(out, "];\n");
	dot_dump_node(ic, JT(block), prog, out);
	dot_dump_node(ic, JF(block), prog, out);
}
static void
dot_dump_edge(struct icode *ic, struct block *block, FILE *out)
{
	if (block == NULL || isMarked(ic, block))
		return;
	Mark(ic, block);
	if (JT(block)) {
		fprintf(out, "\t\"block%u\":se -> \"block%u\":n [label=\"T\"];\n",
		    block->id, JT(block)->id);
		fprintf(out, "\t\"block%u\":sw -> \"block%u\":n [label=\"F\"];\n",
		    block->id, JF(block)->id);
	}
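	/*
	 * The ":se"/":sw" and ":n" suffixes are DOT compass-point ports:
	 * the true edge leaves a block's south-east corner, the false edge
	 * its south-west, and both enter the target's north side, keeping
	 * the rendered graph flowing top to bottom.
	 */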
	dot_dump_edge(ic, JT(block), out);
	dot_dump_edge(ic, JF(block), out);
}
/* Output the block CFG using graphviz/DOT language.
 * In the CFG, each block's code, the value index for each register at
 * EXIT, and the jump relationships are shown.
 *
 * example DOT for BPF `ip src host 1.1.1.1' is:
    digraph BPF {
	block0 [shape=ellipse, id="block-0" label="BLOCK0\n\n(000) ldh [12]\n(001) jeq #0x800 jt 2 jf 5" tooltip="val[A]=0 val[X]=0"];
	block1 [shape=ellipse, id="block-1" label="BLOCK1\n\n(002) ld [26]\n(003) jeq #0x1010101 jt 4 jf 5" tooltip="val[A]=0 val[X]=0"];
	block2 [shape=ellipse, id="block-2" label="BLOCK2\n\n(004) ret #68" tooltip="val[A]=0 val[X]=0", peripheries=2];
	block3 [shape=ellipse, id="block-3" label="BLOCK3\n\n(005) ret #0" tooltip="val[A]=0 val[X]=0", peripheries=2];
	"block0":se -> "block1":n [label="T"];
	"block0":sw -> "block3":n [label="F"];
	"block1":se -> "block2":n [label="T"];
	"block1":sw -> "block3":n [label="F"];
    }
 *
 * After installing graphviz from https://www.graphviz.org/, save the output
 * as bpf.dot and run `dot -Tpng -O bpf.dot' to draw the graph.
 */
static int
dot_dump(struct icode *ic, char *errbuf)
{
	struct bpf_program f;
	FILE *out = stdout;
	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
	if (f.bf_insns == NULL)
		return -1;
	fprintf(out, "digraph BPF {\n");
	unMarkAll(ic);
	dot_dump_node(ic, ic->root, &f, out);
	unMarkAll(ic);
	dot_dump_edge(ic, ic->root, out);
	fprintf(out, "}\n");

	free((char *)f.bf_insns);
	return 0;
}
static int
plain_dump(struct icode *ic, char *errbuf)
{
	struct bpf_program f;
	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
	if (f.bf_insns == NULL)
		return -1;
	bpf_dump(&f, 1);
	putchar('\n');
	free((char *)f.bf_insns);
	return 0;
}
static void
opt_dump(opt_state_t *opt_state, struct icode *ic)
{
	int status;
	char errbuf[PCAP_ERRBUF_SIZE];
	/*
	 * If the CFG, in DOT format, is requested, output it rather than
	 * the code that would be generated from that graph.
	 */
	if (pcap_print_dot_graph)
		status = dot_dump(ic, errbuf);
	else
		status = plain_dump(ic, errbuf);
	if (status == -1)
		opt_error(opt_state, "opt_dump: icode_to_fcode failed: %s", errbuf);
}
#endif /* BDEBUG */