/*
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *  Optimization module for BPF code intermediate representation.
 */
#include <pcap-types.h>

#include <limits.h> /* for SIZE_MAX */

#include "diag-control.h"

#ifdef HAVE_OS_PROTO_H
#include "os-proto.h"
#endif
/*
 * The internal "debug printout" flag for the filter expression optimizer.
 * The code to print that stuff is present only if BDEBUG is defined, so
 * the flag, and the routine to set it, are defined only if BDEBUG is
 * defined.
 */
static int pcap_optimizer_debug;

/*
 * Routine to set that flag.
 *
 * This is intended for libpcap developers, not for general use.
 * If you want to set these in a program, you'll have to declare this
 * routine yourself, with the appropriate DLL import attribute on Windows;
 * it's not declared in any header file, and won't be declared in any
 * header file provided by libpcap.
 */
PCAP_API void pcap_set_optimizer_debug(int value);

PCAP_API_DEF void
pcap_set_optimizer_debug(int value)
{
	pcap_optimizer_debug = value;
}
/*
 * The internal "print dot graph" flag for the filter expression optimizer.
 * The code to print that stuff is present only if BDEBUG is defined, so
 * the flag, and the routine to set it, are defined only if BDEBUG is
 * defined.
 */
static int pcap_print_dot_graph;

/*
 * Routine to set that flag.
 *
 * This is intended for libpcap developers, not for general use.
 * If you want to set these in a program, you'll have to declare this
 * routine yourself, with the appropriate DLL import attribute on Windows;
 * it's not declared in any header file, and won't be declared in any
 * header file provided by libpcap.
 */
PCAP_API void pcap_set_print_dot_graph(int value);

PCAP_API_DEF void
pcap_set_print_dot_graph(int value)
{
	pcap_print_dot_graph = value;
}
/*
 * Takes a 32-bit integer as an argument.
 *
 * If handed a non-zero value, returns the index of the lowest set bit,
 * counting upwards from zero.
 *
 * If handed zero, the results are platform- and compiler-dependent.
 * Keep it out of the light, don't give it any water, don't feed it
 * after midnight, and don't pass zero to it.
 *
 * This is the same as the count of trailing zeroes in the word.
 */
#if PCAP_IS_AT_LEAST_GNUC_VERSION(3,4)
  /*
   * GCC 3.4 and later; we have __builtin_ctz().
   */
  #define lowest_set_bit(mask) ((u_int)__builtin_ctz(mask))
#elif defined(_MSC_VER)
  /*
   * Visual Studio; we support only 2005 and later, so use
   * _BitScanForward().
   */
#pragma intrinsic(_BitScanForward)

static __forceinline u_int
lowest_set_bit(int mask)
{
	unsigned long bit;

	/*
	 * Don't sign-extend mask if long is longer than int.
	 * (It's currently not, in MSVC, even on 64-bit platforms, but....)
	 */
	if (_BitScanForward(&bit, (unsigned int)mask) == 0)
		abort();	/* mask is zero */
	return (u_int)bit;
}
#elif defined(STRINGS_H_DECLARES_FFS)
/*
 * A non-Windows OS that has <strings.h> and declares ffs() there (typically
 * UN*X conforming to a sufficiently recent version of the Single UNIX
 * Specification, but also Haiku).
 */
#define lowest_set_bit(mask)	((u_int)(ffs((mask)) - 1))
#elif defined(__hpux)
/*
 * HP-UX 11i v3, which declares ffs() in <string.h>, which we've already
 * included.  Place this branch after the <strings.h> branch, in case a later
 * release of HP-UX makes the declaration available via the standard header.
 */
#define lowest_set_bit(mask)	((u_int)(ffs((mask)) - 1))
#else
/*
 * None of the above; use a perfect-hash-function-based function.
 */
static u_int
lowest_set_bit(int mask)
{
	unsigned int v = (unsigned int)mask;

	static const u_int MultiplyDeBruijnBitPosition[32] = {
		0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
		31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
	};

	/*
	 * We strip off all but the lowermost set bit (v & -v),
	 * and perform a minimal perfect hash on it to look up the
	 * number of low-order zero bits in a table.
	 *
	 * See:
	 *
	 *	http://7ooo.mooo.com/text/ComputingTrailingZerosHOWTO.pdf
	 *
	 *	http://supertech.csail.mit.edu/papers/debruijn.pdf
	 */
	return (MultiplyDeBruijnBitPosition[((v & -v) * 0x077CB531U) >> 27]);
}
#endif
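/*
 * Illustrative example (editorial note, not from the original source):
 * for mask = 0x58 (binary 01011000), v & -v isolates the lowest set bit,
 * 0x08; multiplying by 0x077CB531U and shifting right by 27 yields index
 * 7, and MultiplyDeBruijnBitPosition[7] is 3 - the number of trailing
 * zeroes in 0x58 - so lowest_set_bit(0x58) == 3.
 */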
/*
 * Represents a deleted instruction.
 */
#define NOP -1

/*
 * Register numbers for use-def values.
 * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
 * location.  A_ATOM is the accumulator and X_ATOM is the index
 * register.
 */
#define A_ATOM BPF_MEMWORDS
#define X_ATOM (BPF_MEMWORDS+1)

/*
 * This define is used to represent *both* the accumulator and
 * x register in use-def computations.
 * Currently, the use-def code assumes only one definition per instruction.
 */
#define AX_ATOM N_ATOMS
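/*
 * Illustrative note (editorial, not from the original source): with the
 * standard BPF_MEMWORDS of 16, atoms 0 through 15 name the scratch
 * locations M[0] through M[15], A_ATOM is 16, X_ATOM is 17, and AX_ATOM
 * (== N_ATOMS) is an out-of-range marker meaning "both A and X" in the
 * use-def bookkeeping.
 */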
/*
 * These data structures are used in a Cocke and Schwartz style
 * value numbering scheme.  Since the flowgraph is acyclic,
 * exit values can be propagated from a node's predecessors
 * provided it is uniquely defined.
 */
struct valnode {
	int code;
	bpf_u_int32 v0, v1;
	int val;		/* the value number */
	struct valnode *next;
};

/* Integer constants mapped with the load immediate opcode. */
#define K(i) F(opt_state, BPF_LD|BPF_IMM|BPF_W, i, 0U)

struct vmapinfo {
	int is_const;
	bpf_u_int32 const_val;
};
typedef struct {
	/*
	 * Place to longjmp to on an error.
	 */
	jmp_buf top_ctx;

	/*
	 * The buffer into which to put error message.
	 */
	char *errbuf;

	/*
	 * A flag to indicate that further optimization is needed.
	 * Iterative passes are continued until a given pass yields no
	 * code simplification or branch movement.
	 */
	int done;

	/*
	 * XXX - detect loops that do nothing but repeated AND/OR pullups
	 * and edge moves.
	 * If 100 passes in a row do nothing but that, treat that as a
	 * sign that we're in a loop that just shuffles in a cycle in
	 * which each pass just shuffles the code and we eventually
	 * get back to the original configuration.
	 *
	 * XXX - we need a non-heuristic way of detecting, or preventing,
	 * such a cycle.
	 */
	int non_branch_movement_performed;

	u_int n_blocks;		/* number of blocks in the CFG; guaranteed to be > 0, as it's a RET instruction at a minimum */
	struct block **blocks;
	u_int n_edges;		/* twice n_blocks, so guaranteed to be > 0 */
	struct edge **edges;

	/*
	 * A bit vector set representation of the dominators.
	 * We round up the set size to the next power of two.
	 */
	u_int nodewords;	/* number of 32-bit words for a bit vector of "number of nodes" bits; guaranteed to be > 0 */
	u_int edgewords;	/* number of 32-bit words for a bit vector of "number of edges" bits; guaranteed to be > 0 */
	struct block **levels;
	bpf_u_int32 *space;
#define BITS_PER_WORD	(8*sizeof(bpf_u_int32))

/*
 * True if 'a' is in uset {p}
 */
#define SET_MEMBER(p, a) \
((p)[(unsigned)(a) / BITS_PER_WORD] & ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD)))

/*
 * Add 'a' to uset p.
 */
#define SET_INSERT(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] |= ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * Delete 'a' from uset p.
 */
#define SET_DELETE(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] &= ~((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * a := a intersect b
 * n must be guaranteed to be > 0
 */
#define SET_INTERSECT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register u_int _n = n;\
	do *_x++ &= *_y++; while (--_n != 0);\
}

/*
 * a := a - b
 * n must be guaranteed to be > 0
 */
#define SET_SUBTRACT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register u_int _n = n;\
	do *_x++ &=~ *_y++; while (--_n != 0);\
}

/*
 * a := a union b
 * n must be guaranteed to be > 0
 */
#define SET_UNION(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register u_int _n = n;\
	do *_x++ |= *_y++; while (--_n != 0);\
}
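/*
 * Usage sketch (editorial note, not from the original source): a uset is
 * just an array of bpf_u_int32 words used as a bit vector.  With a
 * two-word set "bpf_u_int32 s[2]", SET_INSERT(s, 37) sets bit 5 of s[1],
 * SET_MEMBER(s, 37) tests that bit, SET_DELETE(s, 37) clears it, and
 * SET_INTERSECT(s, t, 2) ANDs another two-word set t into s word by word.
 */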
	uset all_dom_sets;
	uset all_closure_sets;
	uset all_edge_sets;

	struct valnode *hashtbl[MODULUS];
	bpf_u_int32 curval;
	bpf_u_int32 maxval;

	struct vmapinfo *vmap;
	struct valnode *vnode_base;
	struct valnode *next_vnode;
} opt_state_t;

typedef struct {
	/*
	 * Place to longjmp to on an error.
	 */
	jmp_buf top_ctx;

	/*
	 * The buffer into which to put error message.
	 */
	char *errbuf;

	/*
	 * Some pointers used to convert the basic block form of the code,
	 * into the array form that BPF requires.  'fstart' will point to
	 * the malloc'd array while 'ftail' is used during the recursive
	 * traversal.
	 */
	struct bpf_insn *fstart;
	struct bpf_insn *ftail;
} conv_state_t;
static void opt_init(opt_state_t *, struct icode *);
static void opt_cleanup(opt_state_t *);
static void PCAP_NORETURN opt_error(opt_state_t *, const char *, ...)
    PCAP_PRINTFLIKE(2, 3);
static void intern_blocks(opt_state_t *, struct icode *);
static void find_inedges(opt_state_t *, struct block *);
static void opt_dump(opt_state_t *, struct icode *);

#define MAX(a,b) ((a)>(b)?(a):(b))
static void
find_levels_r(opt_state_t *opt_state, struct icode *ic, struct block *b)

		find_levels_r(opt_state, ic, JT(b));
		find_levels_r(opt_state, ic, JF(b));
		level = MAX(JT(b)->level, JF(b)->level) + 1;

	b->link = opt_state->levels[level];
	opt_state->levels[level] = b;
/*
 * Level graph.  The levels go from 0 at the leaves to
 * N_LEVELS at the root.  The opt_state->levels[] array points to the
 * first node of the level list, whose elements are linked
 * with the 'link' field of the struct block.
 */
static void
find_levels(opt_state_t *opt_state, struct icode *ic)

	memset((char *)opt_state->levels, 0, opt_state->n_blocks * sizeof(*opt_state->levels));
	find_levels_r(opt_state, ic, ic->root);
/*
 * Find dominator relationships.
 * Assumes graph has been leveled.
 */
static void
find_dom(opt_state_t *opt_state, struct block *root)

	/*
	 * Initialize sets to contain all nodes.
	 */
	x = opt_state->all_dom_sets;
	/*
	 * In opt_init(), we've made sure the product doesn't overflow.
	 */
	i = opt_state->n_blocks * opt_state->nodewords;

	/* Root starts off empty. */
	for (i = opt_state->nodewords; i != 0;) {

	/* root->level is the highest level number found. */
	for (level = root->level; level >= 0; --level) {
		for (b = opt_state->levels[level]; b; b = b->link) {
			SET_INSERT(b->dom, b->id);

			SET_INTERSECT(JT(b)->dom, b->dom, opt_state->nodewords);
			SET_INTERSECT(JF(b)->dom, b->dom, opt_state->nodewords);
static void
propedom(opt_state_t *opt_state, struct edge *ep)

	SET_INSERT(ep->edom, ep->id);
	SET_INTERSECT(ep->succ->et.edom, ep->edom, opt_state->edgewords);
	SET_INTERSECT(ep->succ->ef.edom, ep->edom, opt_state->edgewords);
/*
 * Compute edge dominators.
 * Assumes graph has been leveled and predecessors established.
 */
static void
find_edom(opt_state_t *opt_state, struct block *root)

	x = opt_state->all_edge_sets;
	/*
	 * In opt_init(), we've made sure the product doesn't overflow.
	 */
	for (i = opt_state->n_edges * opt_state->edgewords; i != 0; ) {

	/* root->level is the highest level number found. */
	memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
	memset(root->ef.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
	for (level = root->level; level >= 0; --level) {
		for (b = opt_state->levels[level]; b != 0; b = b->link) {
			propedom(opt_state, &b->et);
			propedom(opt_state, &b->ef);
/*
 * Find the backwards transitive closure of the flow graph.  These sets
 * are backwards in the sense that we find the set of nodes that reach
 * a given node, not the set of nodes that can be reached by a node.
 *
 * Assumes graph has been leveled.
 */
static void
find_closure(opt_state_t *opt_state, struct block *root)

	/*
	 * Initialize sets to contain no nodes.
	 */
	memset((char *)opt_state->all_closure_sets, 0,
	    opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->all_closure_sets));

	/* root->level is the highest level number found. */
	for (level = root->level; level >= 0; --level) {
		for (b = opt_state->levels[level]; b; b = b->link) {
			SET_INSERT(b->closure, b->id);

			SET_UNION(JT(b)->closure, b->closure, opt_state->nodewords);
			SET_UNION(JF(b)->closure, b->closure, opt_state->nodewords);
/*
 * Return the register number that is used by s.
 *
 * Returns A_ATOM if A is used, X_ATOM if X is used, AX_ATOM if both A and X
 * are used, the scratch memory location's number if a scratch memory
 * location is used (e.g., 0 for M[0]), or -1 if none of those are used.
 *
 * The implementation should probably change to an array access.
 */
static int
atomuse(struct stmt *s)

	register int c = s->code;

	switch (BPF_CLASS(c)) {

		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
		       (BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;

		/*
		 * As there are fewer than 2^31 memory locations,
		 * s->k should be convertible to int without problems.
		 */
		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
		       (BPF_MODE(c) == BPF_MEM) ? (int)s->k : -1;

		if (BPF_SRC(c) == BPF_X)

		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
/*
 * Return the register number that is defined by 's'.  We assume that
 * a single stmt cannot define more than one register.  If no register
 * is defined, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomdef(struct stmt *s)

	switch (BPF_CLASS(s->code)) {

		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
/*
 * Compute the sets of registers used, defined, and killed by 'b'.
 *
 * "Used" means that a statement in 'b' uses the register before any
 * statement in 'b' defines it, i.e. it uses the value left in
 * that register by a predecessor block of this block.
 * "Defined" means that a statement in 'b' defines it.
 * "Killed" means that a statement in 'b' defines it before any
 * statement in 'b' uses it, i.e. it kills the value left in that
 * register by a predecessor block of this block.
 */
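/*
 * Illustrative example (editorial note, not from the original source):
 * for a block whose statements are "ld [x+0]; st M[2]", X is used (read
 * before any definition in the block), A and M[2] are defined, and both
 * A and M[2] are killed (defined before any use in the block), so the
 * values they held on entry cannot be live across the block.
 */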
static void
compute_local_ud(struct block *b)

	atomset def = 0, use = 0, killed = 0;

	for (s = b->stmts; s; s = s->next) {
		if (s->s.code == NOP)
			continue;
		atom = atomuse(&s->s);
		if (atom == AX_ATOM) {
			if (!ATOMELEM(def, X_ATOM))
				use |= ATOMMASK(X_ATOM);
			if (!ATOMELEM(def, A_ATOM))
				use |= ATOMMASK(A_ATOM);
		}
		else if (atom < N_ATOMS) {
			if (!ATOMELEM(def, atom))
				use |= ATOMMASK(atom);
		}

		atom = atomdef(&s->s);
		if (!ATOMELEM(use, atom))
			killed |= ATOMMASK(atom);
		def |= ATOMMASK(atom);
	}
	if (BPF_CLASS(b->s.code) == BPF_JMP) {
		/*
		 * XXX - what about RET?
		 */
		atom = atomuse(&b->s);
		if (atom == AX_ATOM) {
			if (!ATOMELEM(def, X_ATOM))
				use |= ATOMMASK(X_ATOM);
			if (!ATOMELEM(def, A_ATOM))
				use |= ATOMMASK(A_ATOM);
		}
		else if (atom < N_ATOMS) {
			if (!ATOMELEM(def, atom))
				use |= ATOMMASK(atom);
		}
	}
/*
 * Assume graph is already leveled.
 */
static void
find_ud(opt_state_t *opt_state, struct block *root)

	/*
	 * root->level is the highest level number found;
	 * count down from there.
	 */
	maxlevel = root->level;
	for (i = maxlevel; i >= 0; --i)
		for (p = opt_state->levels[i]; p; p = p->link) {
			compute_local_ud(p);
			p->out_use = 0;
		}

	for (i = 1; i <= maxlevel; ++i) {
		for (p = opt_state->levels[i]; p; p = p->link) {
			p->out_use |= JT(p)->in_use | JF(p)->in_use;
			p->in_use |= p->out_use &~ p->kill;
		}
	}
static void
init_val(opt_state_t *opt_state)

	opt_state->curval = 0;
	opt_state->next_vnode = opt_state->vnode_base;
	memset((char *)opt_state->vmap, 0, opt_state->maxval * sizeof(*opt_state->vmap));
	memset((char *)opt_state->hashtbl, 0, sizeof opt_state->hashtbl);
/*
 * Because we really don't have an IR, this stuff is a little messy.
 *
 * This routine looks in the table of existing value numbers for a value
 * generated from an operation with the specified opcode and
 * the specified values.  If it finds it, it returns its value number,
 * otherwise it makes a new entry in the table and returns the
 * value number of that entry.
 */
static bpf_u_int32
F(opt_state_t *opt_state, int code, bpf_u_int32 v0, bpf_u_int32 v1)

	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);

	for (p = opt_state->hashtbl[hash]; p; p = p->next)
		if (p->code == code && p->v0 == v0 && p->v1 == v1)
			return p->val;

	/*
	 * Not found.  Allocate a new value, and assign it a new
	 * value number.
	 *
	 * opt_state->curval starts out as 0, which means VAL_UNKNOWN; we
	 * increment it before using it as the new value number, which
	 * means we never assign VAL_UNKNOWN.
	 *
	 * XXX - unless we overflow, but we probably won't have 2^32-1
	 * values; we treat 32 bits as effectively infinite.
	 */
	val = ++opt_state->curval;
	if (BPF_MODE(code) == BPF_IMM &&
	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
		opt_state->vmap[val].const_val = v0;
		opt_state->vmap[val].is_const = 1;
	}
	p = opt_state->next_vnode++;

	p->next = opt_state->hashtbl[hash];
	opt_state->hashtbl[hash] = p;
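/*
 * Worked example (editorial note, not from the original source): two
 * separate "ld [12]" statements call F() with the same opcode and k,
 * hash to the same bucket, and therefore receive the same value number;
 * later passes can then see that the second load recomputes a value the
 * accumulator already holds and turn it into a NOP.
 */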
static void
vstore(struct stmt *s, bpf_u_int32 *valp, bpf_u_int32 newval, int alter)

	if (alter && newval != VAL_UNKNOWN && *valp == newval)
		s->code = NOP;
	else
		*valp = newval;
/*
 * Do constant-folding on binary operators.
 * (Unary operators are handled elsewhere.)
 */
static void
fold_op(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 v0, bpf_u_int32 v1)

	a = opt_state->vmap[v0].const_val;
	b = opt_state->vmap[v1].const_val;

	switch (BPF_OP(s->code)) {

		opt_error(opt_state, "division by zero");

		opt_error(opt_state, "modulus by zero");

		/*
		 * A left shift of more than the width of the type
		 * is undefined in C; we'll just treat it as shifting
		 * all the bits out.
		 *
		 * XXX - the BPF interpreter doesn't check for this,
		 * so its behavior is dependent on the behavior of
		 * the processor on which it's running.  There are
		 * processors on which it shifts all the bits out
		 * and processors on which it does no shift.
		 */

		/*
		 * A right shift of more than the width of the type
		 * is undefined in C; we'll just treat it as shifting
		 * all the bits out.
		 *
		 * XXX - the BPF interpreter doesn't check for this,
		 * so its behavior is dependent on the behavior of
		 * the processor on which it's running.  There are
		 * processors on which it shifts all the bits out
		 * and processors on which it does no shift.
		 */

	s->code = BPF_LD|BPF_IMM;
	/*
	 * XXX - optimizer loop detection.
	 */
	opt_state->non_branch_movement_performed = 1;
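/*
 * Illustrative example (editorial note, not from the original source):
 * if the accumulator is known to hold 6 and the statement is an
 * "and #3" (BPF_ALU|BPF_AND|BPF_K), fold_op() computes 6 & 3 = 2 and
 * rewrites the statement as "ld #2", so downstream blocks see a constant
 * instead of an ALU operation.
 */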
static inline struct slist *
this_op(struct slist *s)
{
	while (s != 0 && s->s.code == NOP)
		s = s->next;
	return s;
}

static void
opt_not(struct block *b)
{
	struct block *tmp = JT(b);

	JT(b) = JF(b);
	JF(b) = tmp;
}
static void
opt_peep(opt_state_t *opt_state, struct block *b)

	struct slist *next, *last;

	for (/*empty*/; /*empty*/; s = next) {
			break;	/* nothing left in the block */

		/*
		 * Find the next real instruction after that one
		 * (skipping nops).
		 */
		next = this_op(s->next);
			break;	/* no next instruction */
		/*
		 * st  M[k]	-->	st  M[k]
		 * ldx M[k]		tax
		 */
		if (s->s.code == BPF_ST &&
		    next->s.code == (BPF_LDX|BPF_MEM) &&
		    s->s.k == next->s.k) {
			next->s.code = BPF_MISC|BPF_TAX;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		/*
		 * ld  #k	-->	ldx #k
		 * tax			txa
		 */
		if (s->s.code == (BPF_LD|BPF_IMM) &&
		    next->s.code == (BPF_MISC|BPF_TAX)) {
			s->s.code = BPF_LDX|BPF_IMM;
			next->s.code = BPF_MISC|BPF_TXA;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		/*
		 * This is an ugly special case, but it happens
		 * when you say tcp[k] or udp[k] where k is a constant.
		 */
		if (s->s.code == (BPF_LD|BPF_IMM)) {
			struct slist *add, *tax, *ild;

			/*
			 * Check that X isn't used on exit from this
			 * block (which the optimizer might cause).
			 * We know the code generator won't generate
			 * any local dependencies.
			 */
			if (ATOMELEM(b->out_use, X_ATOM))

			/*
			 * Check that the instruction following the ldi
			 * is an addx, or it's an ldxms with an addx
			 * following it (with 0 or more nops between the
			 * ldxms and the addx).
			 */
			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))

			add = this_op(next->next);
			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))

			/*
			 * Check that a tax follows that (with 0 or more
			 * nops between them).
			 */
			tax = this_op(add->next);
			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))

			/*
			 * Check that an ild follows that (with 0 or more
			 * nops between them).
			 */
			ild = this_op(tax->next);
			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
			    BPF_MODE(ild->s.code) != BPF_IND)

			/*
			 * We want to turn this sequence:
			 *
			 * (005) ldxms [14]		{next}	-- optional
			 * (008) ild   [x+0]		{ild}
			 *
			 * into this sequence:
			 *
			 * XXX We need to check that X is not
			 * subsequently used, because we want to change
			 * what'll be in it after this sequence.
			 *
			 * We know we can eliminate the accumulator
			 * modifications earlier in the sequence since
			 * it is defined by the last stmt of this sequence
			 * (i.e., the last statement of the sequence loads
			 * a value into the accumulator, so we can eliminate
			 * earlier operations on the accumulator).
			 */
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
	/*
	 * If the comparison at the end of a block is an equality
	 * comparison against a constant, and nobody uses the value
	 * we leave in the A register at the end of a block, and
	 * the operation preceding the comparison is an arithmetic
	 * operation, we can sometimes optimize it away.
	 */
	if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
		/*
		 * We can optimize away certain subtractions of the
		 * X register.
		 */
		if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
			val = b->val[X_ATOM];
			if (opt_state->vmap[val].is_const) {
				/*
				 * If we have a subtract to do a comparison,
				 * and the X register is a known constant,
				 * we can merge this value into the
				 * comparison:
				 */
				b->s.k += opt_state->vmap[val].const_val;
				opt_state->done = 0;
				/*
				 * XXX - optimizer loop detection.
				 */
				opt_state->non_branch_movement_performed = 1;
			} else if (b->s.k == 0) {
				/*
				 * If the X register isn't a constant,
				 * and the comparison in the test is
				 * against 0, we can compare with the
				 * X register, instead:
				 */
				b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
				opt_state->done = 0;
				/*
				 * XXX - optimizer loop detection.
				 */
				opt_state->non_branch_movement_performed = 1;
			}
		}
		/*
		 * Likewise, a constant subtract can be simplified:
		 *
		 *	jeq #y	->	jeq #(x+y)
		 */
		else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
			b->s.k += last->s.k;
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		/*
		 * And, similarly, a constant AND can be simplified
		 * if we're testing against 0, i.e.:
		 */
		else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
		    b->s.k == 0) {
			b->s.code = BPF_JMP|BPF_K|BPF_JSET;
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
	}
	/*
	 * jset #ffffffff -> always
	 */
	if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
		if (b->s.k == 0xffffffffU)
			JF(b) = JT(b);
	}
	/*
	 * If we're comparing against the index register, and the index
	 * register is a known constant, we can just compare against that
	 * constant.
	 */
	val = b->val[X_ATOM];
	if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
		bpf_u_int32 v = opt_state->vmap[val].const_val;

		b->s.code &= ~BPF_X;
	}
	/*
	 * If the accumulator is a known constant, we can compute the
	 * comparison result.
	 */
	val = b->val[A_ATOM];
	if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
		bpf_u_int32 v = opt_state->vmap[val].const_val;

		switch (BPF_OP(b->s.code)) {

		if (JF(b) != JT(b)) {
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
	}
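/*
 * Illustrative example (editorial note, not from the original source):
 * if value numbering has shown that the accumulator holds the constant
 * 0x86dd at the end of a block ending in "jeq #0x800", the comparison
 * result is known to be false, so both outgoing edges can be pointed at
 * the false successor and a later pass can delete the dead branch.
 */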
/*
 * Compute the symbolic value of expression of 's', and update
 * anything it defines in the value table 'val'.  If 'alter' is true,
 * do various optimizations.  This code would be cleaner if symbolic
 * evaluation and code transformations weren't folded together.
 */
static void
opt_stmt(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 val[], int alter)
	case BPF_LD|BPF_ABS|BPF_W:
	case BPF_LD|BPF_ABS|BPF_H:
	case BPF_LD|BPF_ABS|BPF_B:
		v = F(opt_state, s->code, s->k, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IND|BPF_W:
	case BPF_LD|BPF_IND|BPF_H:
	case BPF_LD|BPF_IND|BPF_B:
		v = val[X_ATOM];
		if (alter && opt_state->vmap[v].is_const) {
			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
			s->k += opt_state->vmap[v].const_val;
			v = F(opt_state, s->code, s->k, 0L);
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		else
			v = F(opt_state, s->code, s->k, v);
		vstore(s, &val[A_ATOM], v, alter);
		break;
	case BPF_LD|BPF_LEN:
		v = F(opt_state, s->code, 0L, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_MSH|BPF_B:
		v = F(opt_state, s->code, s->k, 0L);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ALU|BPF_NEG:
		if (alter && opt_state->vmap[val[A_ATOM]].is_const) {
			s->code = BPF_LD|BPF_IMM;
			/*
			 * Do this negation as unsigned arithmetic; that's
			 * what modern BPF engines do, and it guarantees
			 * that all possible values can be negated.  (Yeah,
			 * negating 0x80000000, the minimum signed 32-bit
			 * two's-complement value, results in 0x80000000,
			 * so it's still negative, but we *should* be doing
			 * all unsigned arithmetic here, to match what
			 * modern BPF engines do.)
			 *
			 * Express it as 0U - (unsigned value) so that we
			 * don't get compiler warnings about negating an
			 * unsigned value and don't get UBSan warnings
			 * about the result of negating 0x80000000 being
			 * undefined.
			 */
			s->k = 0U - opt_state->vmap[val[A_ATOM]].const_val;
			val[A_ATOM] = K(s->k);
		}
		else
			val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], 0L);
		break;
	case BPF_ALU|BPF_ADD|BPF_K:
	case BPF_ALU|BPF_SUB|BPF_K:
	case BPF_ALU|BPF_MUL|BPF_K:
	case BPF_ALU|BPF_DIV|BPF_K:
	case BPF_ALU|BPF_MOD|BPF_K:
	case BPF_ALU|BPF_AND|BPF_K:
	case BPF_ALU|BPF_OR|BPF_K:
	case BPF_ALU|BPF_XOR|BPF_K:
	case BPF_ALU|BPF_LSH|BPF_K:
	case BPF_ALU|BPF_RSH|BPF_K:
		op = BPF_OP(s->code);
		if (alter) {
			/*
			 * Optimize operations where the constant
			 * is zero.
			 *
			 * Don't optimize away "sub #0"
			 * as it may be needed later to
			 * fixup the generated math code.
			 *
			 * Fail if we're dividing by zero or taking
			 * a modulus by zero.
			 */
			if (op == BPF_ADD ||
			    op == BPF_LSH || op == BPF_RSH ||
			    op == BPF_OR || op == BPF_XOR) {
				break;
			}
			if (op == BPF_MUL || op == BPF_AND) {
				s->code = BPF_LD|BPF_IMM;
				val[A_ATOM] = K(s->k);
				break;
			}
			opt_error(opt_state,
			    "division by zero");
			opt_error(opt_state,
			    "modulus by zero");
			if (opt_state->vmap[val[A_ATOM]].is_const) {
				fold_op(opt_state, s, val[A_ATOM], K(s->k));
				val[A_ATOM] = K(s->k);
				break;
			}
		}
		val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], K(s->k));
		break;
	case BPF_ALU|BPF_ADD|BPF_X:
	case BPF_ALU|BPF_SUB|BPF_X:
	case BPF_ALU|BPF_MUL|BPF_X:
	case BPF_ALU|BPF_DIV|BPF_X:
	case BPF_ALU|BPF_MOD|BPF_X:
	case BPF_ALU|BPF_AND|BPF_X:
	case BPF_ALU|BPF_OR|BPF_X:
	case BPF_ALU|BPF_XOR|BPF_X:
	case BPF_ALU|BPF_LSH|BPF_X:
	case BPF_ALU|BPF_RSH|BPF_X:
		op = BPF_OP(s->code);
		if (alter && opt_state->vmap[val[X_ATOM]].is_const) {
			if (opt_state->vmap[val[A_ATOM]].is_const) {
				fold_op(opt_state, s, val[A_ATOM], val[X_ATOM]);
				val[A_ATOM] = K(s->k);
			}
			else {
				s->code = BPF_ALU|BPF_K|op;
				s->k = opt_state->vmap[val[X_ATOM]].const_val;
				if ((op == BPF_LSH || op == BPF_RSH) &&
				    s->k > 31)
					opt_error(opt_state,
					    "shift by more than 31 bits");
				opt_state->done = 0;
				val[A_ATOM] =
				    F(opt_state, s->code, val[A_ATOM], K(s->k));
				/*
				 * XXX - optimizer loop detection.
				 */
				opt_state->non_branch_movement_performed = 1;
			}
			break;
		}
		/*
		 * Check if we're doing something to an accumulator
		 * that is 0, and simplify.  This may not seem like
		 * much of a simplification but it could open up further
		 * optimizations.
		 * XXX We could also check for mul by 1, etc.
		 */
		if (alter && opt_state->vmap[val[A_ATOM]].is_const
		    && opt_state->vmap[val[A_ATOM]].const_val == 0) {
			if (op == BPF_ADD || op == BPF_OR || op == BPF_XOR) {
				s->code = BPF_MISC|BPF_TXA;
				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
				break;
			}
			else if (op == BPF_MUL || op == BPF_DIV || op == BPF_MOD ||
				 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
				s->code = BPF_LD|BPF_IMM;
				vstore(s, &val[A_ATOM], K(s->k), alter);
				break;
			}
			else if (op == BPF_NEG) {
				break;
			}
		}
		val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], val[X_ATOM]);
		break;
	case BPF_MISC|BPF_TXA:
		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
		break;

	case BPF_LD|BPF_MEM:
		v = val[s->k];
		if (alter && opt_state->vmap[v].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = opt_state->vmap[v].const_val;
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_MISC|BPF_TAX:
		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
		break;

	case BPF_LDX|BPF_MEM:
		v = val[s->k];
		if (alter && opt_state->vmap[v].is_const) {
			s->code = BPF_LDX|BPF_IMM;
			s->k = opt_state->vmap[v].const_val;
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ST:
		vstore(s, &val[s->k], val[A_ATOM], alter);
		break;

	case BPF_STX:
		vstore(s, &val[s->k], val[X_ATOM], alter);
		break;
	}
static void
deadstmt(opt_state_t *opt_state, register struct stmt *s, register struct stmt *last[])

	if (atom == AX_ATOM) {

		opt_state->done = 0;
		last[atom]->code = NOP;
		/*
		 * XXX - optimizer loop detection.
		 */
		opt_state->non_branch_movement_performed = 1;
static void
opt_deadstores(opt_state_t *opt_state, register struct block *b)

	register struct slist *s;
	struct stmt *last[N_ATOMS];

	memset((char *)last, 0, sizeof last);

	for (s = b->stmts; s != 0; s = s->next)
		deadstmt(opt_state, &s->s, last);
	deadstmt(opt_state, &b->s, last);

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
			last[atom]->code = NOP;
			/*
			 * The store was removed as it's dead,
			 * so the value stored into now has
			 * an unknown value.
			 */
			vstore(0, &b->val[atom], VAL_UNKNOWN, 0);
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
static void
opt_blk(opt_state_t *opt_state, struct block *b, int do_stmts)

	bpf_u_int32 aval, xval;

	for (s = b->stmts; s && s->next; s = s->next)
		if (BPF_CLASS(s->s.code) == BPF_JMP) {

	/*
	 * Initialize the atom values.
	 */
	if (p == 0) {
		/*
		 * We have no predecessors, so everything is undefined
		 * upon entry to this block.
		 */
		memset((char *)b->val, 0, sizeof(b->val));
	} else {
		/*
		 * Inherit values from our predecessors.
		 *
		 * First, get the values from the predecessor along the
		 * first edge leading to this node.
		 */
		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
		/*
		 * Now look at all the other nodes leading to this node.
		 * If, for the predecessor along that edge, a register
		 * has a different value from the one we have (i.e.,
		 * control paths are merging, and the merging paths
		 * assign different values to that register), give the
		 * register the undefined value of 0.
		 */
		while ((p = p->next) != NULL) {
			for (i = 0; i < N_ATOMS; ++i)
				if (b->val[i] != p->pred->val[i])
					b->val[i] = 0;
		}
	}
	aval = b->val[A_ATOM];
	xval = b->val[X_ATOM];
	for (s = b->stmts; s; s = s->next)
		opt_stmt(opt_state, &s->s, b->val, do_stmts);

	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator or index register with a
	 * value that is already there, or if this block is a return,
	 * eliminate all the statements.
	 *
	 * XXX - what if it does a store?  Presumably that falls under
	 * the heading of "if we don't use anything from this block",
	 * i.e., if we use any memory location set to a different
	 * value by this block, then we use something from this block.
	 *
	 * XXX - why does it matter whether we use anything from this
	 * block?  If the accumulator or index register doesn't change
	 * its value, isn't that OK even if we use that value?
	 *
	 * XXX - if we load the accumulator with a different value,
	 * and the block ends with a conditional branch, we obviously
	 * can't eliminate it, as the branch depends on that value.
	 * For the index register, the conditional branch only depends
	 * on the index register value if the test is against the index
	 * register value rather than a constant; if nothing uses the
	 * value we put into the index register, and we're not testing
	 * against the index register's value, and there aren't any
	 * other problems that would keep us from eliminating this
	 * block, can we eliminate it?
	 */
	if (do_stmts &&
	    ((b->out_use == 0 &&
	      aval != VAL_UNKNOWN && b->val[A_ATOM] == aval &&
	      xval != VAL_UNKNOWN && b->val[X_ATOM] == xval) ||
	     BPF_CLASS(b->s.code) == BPF_RET)) {
		if (b->stmts != 0) {
			b->stmts = 0;
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
	} else {
		opt_peep(opt_state, b);
		opt_deadstores(opt_state, b);
	}
	/*
	 * Set up values for branch optimizer.
	 */
	if (BPF_SRC(b->s.code) == BPF_K)
		b->oval = K(b->s.k);
	else
		b->oval = b->val[X_ATOM];
	b->et.code = b->s.code;
	b->ef.code = -b->s.code;
/*
 * Return true if any register that is used on exit from 'succ', has
 * an exit value that is different from the corresponding exit value
 * from 'b'.
 */
static int
use_conflict(struct block *b, struct block *succ)

	atomset use = succ->out_use;

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (ATOMELEM(use, atom))
			if (b->val[atom] != succ->val[atom])
				return 1;
	return 0;
/*
 * Given a block that is the successor of an edge, and an edge that
 * dominates that edge, return either a pointer to a child of that
 * block (a block to which that block jumps) if that block is a
 * candidate to replace the successor of the latter edge or NULL
 * if neither of the children of the first block are candidates.
 */
static struct block *
fold_edge(struct block *child, struct edge *ep)

	bpf_u_int32 aval0, aval1, oval0, oval1;
	int code = ep->code;
	int sense;

	if (code < 0) {
		/*
		 * This edge is a "branch if false" edge.
		 */
		code = -code;
		sense = 0;
	} else {
		/*
		 * This edge is a "branch if true" edge.
		 */
		sense = 1;
	}

	/*
	 * If the opcode for the branch at the end of the block we
	 * were handed isn't the same as the opcode for the branch
	 * to which the edge we were handed corresponds, the tests
	 * for those branches aren't testing the same conditions,
	 * so the blocks to which the first block branches aren't
	 * candidates to replace the successor of the edge.
	 */
	if (child->s.code != code)
		return 0;

	aval0 = child->val[A_ATOM];
	oval0 = child->oval;
	aval1 = ep->pred->val[A_ATOM];
	oval1 = ep->pred->oval;

	/*
	 * If the A register value on exit from the successor block
	 * isn't the same as the A register value on exit from the
	 * predecessor of the edge, the blocks to which the first
	 * block branches aren't candidates to replace the successor
	 * of the edge.
	 */
	if (aval0 != aval1)
		return 0;

	if (oval0 == oval1)
		/*
		 * The operands of the branch instructions are
		 * identical, so the branches are testing the
		 * same condition, and the result is true if a true
		 * branch was taken to get here, otherwise false.
		 */
		return sense ? JT(child) : JF(child);

	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
		/*
		 * At this point, we only know the comparison if we
		 * came down the true branch, and it was an equality
		 * comparison with a constant.
		 *
		 * I.e., if we came down the true branch, and the branch
		 * was an equality comparison with a constant, we know the
		 * accumulator contains that constant.  If we came down
		 * the false branch, or the comparison wasn't with a
		 * constant, we don't know what was in the accumulator.
		 *
		 * We rely on the fact that distinct constants have distinct
		 * value numbers.
		 */
		return JF(child);

	return 0;
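/*
 * Illustrative example (editorial note, not from the original source):
 * if an edge comes out of the true branch of a "jeq #0x800" test (so the
 * accumulator is known to equal 0x800 along it), and that edge dominates
 * another block ending in the same "jeq #0x800" test with the same A
 * value, fold_edge() returns that block's true child, letting opt_j()
 * route the edge around the redundant comparison.
 */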
/*
 * If we can make this edge go directly to a child of the edge's current
 * successor, do so.
 */
static void
opt_j(opt_state_t *opt_state, struct edge *ep)

	register u_int i, k;
	register struct block *target;

	/*
	 * Does this edge go to a block where, if the test
	 * at the end of it succeeds, it goes to a block
	 * that's a leaf node of the DAG, i.e. a return
	 * statement?
	 * If so, there's nothing to optimize.
	 */
	if (JT(ep->succ) == 0)
		return;

	/*
	 * Does this edge go to a block that goes, in turn, to
	 * the same block regardless of whether the test at the
	 * end succeeds or fails?
	 */
	if (JT(ep->succ) == JF(ep->succ)) {
		/*
		 * Common branch targets can be eliminated, provided
		 * there is no data dependency.
		 *
		 * Check whether any register used on exit from the
		 * block to which the successor of this edge goes
		 * has a value at that point that's different from
		 * the value it has on exit from the predecessor of
		 * this edge.  If not, the predecessor of this edge
		 * can just go to the block to which the successor
		 * of this edge goes, bypassing the successor of this
		 * edge, as the successor of this edge isn't doing
		 * any calculations whose results are different
		 * from what the blocks before it did and isn't
		 * doing any tests the results of which matter.
		 */
		if (!use_conflict(ep->pred, JT(ep->succ))) {
			/*
			 * Make this edge go to the block to
			 * which the successor of that edge
			 * goes.
			 */
			opt_state->done = 0;
			ep->succ = JT(ep->succ);
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
	}
	/*
	 * For each edge dominator that matches the successor of this
	 * edge, promote the edge successor to its grandchild.
	 *
	 * XXX We violate the set abstraction here in favor of a reasonably
	 * efficient loop.
	 */
 top:
	for (i = 0; i < opt_state->edgewords; ++i) {
		/* i'th word in the bitset of dominators */
		register bpf_u_int32 x = ep->edom[i];

		while (x != 0) {
			/* Find the next dominator in that word and mark it as found */
			k = lowest_set_bit(x);
			x &=~ ((bpf_u_int32)1 << k);
			k += i * BITS_PER_WORD;

			target = fold_edge(ep->succ, opt_state->edges[k]);
			/*
			 * We have a candidate to replace the successor
			 * of ep.
			 *
			 * Check that there is no data dependency between
			 * nodes that will be violated if we move the edge;
			 * i.e., if any register used on exit from the
			 * candidate has a value at that point different
			 * from the value it has when we exit the
			 * predecessor of that edge, there's a data
			 * dependency that will be violated.
			 */
			if (target != 0 && !use_conflict(ep->pred, target)) {
				/*
				 * It's safe to replace the successor of
				 * ep; do so, and note that we've made
				 * at least one change.
				 *
				 * XXX - this is one of the operations that
				 * happens when the optimizer gets into
				 * one of those infinite loops.
				 */
				opt_state->done = 0;
				ep->succ = target;
				if (JT(target) != 0)
					/*
					 * Start over unless we hit a leaf.
					 */
					goto top;
				return;
			}
		}
	}
/*
 * XXX - is this, and and_pullup(), what's described in section 6.1.2
 * "Predicate Assertion Propagation" in the BPF+ paper?
 *
 * Note that this looks at block dominators, not edge dominators.
 *
 * "A or B" compiles into
 */
static void
or_pullup(opt_state_t *opt_state, struct block *b, struct block *root)

	struct block **diffp, **samep;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	/*
	 * For the first edge in the list of edges coming into this block,
	 * see whether the predecessor of that edge comes here via a true
	 * branch or a false branch.
	 */
	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);	/* jt */
	else
		diffp = &JF(b->in_edges->pred);	/* jf */

	/*
	 * diffp is a pointer to a pointer to the block.
	 *
	 * Go down the false chain looking as far as you can,
	 * making sure that each jump-compare is doing the
	 * same as the original block.
	 *
	 * If you reach the bottom before you reach a
	 * different jump-compare, just exit.  There's nothing
	 * to do here.  XXX - no, this version is checking for
	 * the value leaving the block; that's from the BPF+
	 * paper?
	 */
	for (;;) {
		/*
		 * Done if that's not going anywhere XXX
		 */

		/*
		 * Done if that predecessor blah blah blah isn't
		 * going the same place we're going XXX
		 *
		 * Does the true edge of this block point to the same
		 * location as the true edge of b?
		 */
		if (JT(*diffp) != JT(b))
			return;

		/*
		 * Done if this node isn't a dominator of that
		 * node blah blah blah XXX
		 *
		 * Does b dominate diffp?
		 */
		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		/*
		 * Break out of the loop if that node's value of A
		 * isn't the value of A above XXX
		 */
		if ((*diffp)->val[A_ATOM] != val)
			break;

		/*
		 * Get the JF for that node XXX
		 * Go down the false path.
		 */
		diffp = &JF(*diffp);
	}

	/*
	 * Now that we've found a different jump-compare in a chain
	 * below b, search further down until we find another
	 * jump-compare that looks at the original value.  This
	 * jump-compare should get pulled up.  XXX again we're
	 * comparing values not jump-compares.
	 */
	samep = &JF(*diffp);
	for (;;) {
		/*
		 * Done if that's not going anywhere XXX
		 */

		/*
		 * Done if that predecessor blah blah blah isn't
		 * going the same place we're going XXX
		 */
		if (JT(*samep) != JT(b))
			return;

		/*
		 * Done if this node isn't a dominator of that
		 * node blah blah blah XXX
		 *
		 * Does b dominate samep?
		 */
		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		/*
		 * Break out of the loop if that node's value of A
		 * is the value of A above XXX
		 */
		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between dp0 and dp1.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JF(*samep);
	}

	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;

	/* Pull up the node. */
	pull = *samep;
	*samep = JT(pull);
	JT(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	for (ep = b->in_edges; ep != 0; ep = ep->next) {
		if (JT(ep->pred) == b)
			JT(ep->pred) = pull;
		else
			JF(ep->pred) = pull;
	}

	/*
	 * XXX - this is one of the operations that happens when the
	 * optimizer gets into one of those infinite loops.
	 */
	opt_state->done = 0;

	/*
	 * Recompute dominator sets as control flow graph has changed.
	 */
	find_dom(opt_state, root);
static void
and_pullup(opt_state_t *opt_state, struct block *b, struct block *root)

	struct block **diffp, **samep;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	for (;;) {
		if (JF(*diffp) != JF(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JT(*diffp);
	}

	samep = &JT(*diffp);
	for (;;) {
		if (JF(*samep) != JF(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between diffp and samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JT(*samep);
	}

	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;

	/* Pull up the node. */
	pull = *samep;
	*samep = JF(pull);
	JF(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	for (ep = b->in_edges; ep != 0; ep = ep->next) {
		if (JT(ep->pred) == b)
			JT(ep->pred) = pull;
		else
			JF(ep->pred) = pull;
	}

	/*
	 * XXX - this is one of the operations that happens when the
	 * optimizer gets into one of those infinite loops.
	 */
	opt_state->done = 0;

	/*
	 * Recompute dominator sets as control flow graph has changed.
	 */
	find_dom(opt_state, root);
static void
opt_blks(opt_state_t *opt_state, struct icode *ic, int do_stmts)

	init_val(opt_state);
	maxlevel = ic->root->level;

	find_inedges(opt_state, ic->root);
	for (i = maxlevel; i >= 0; --i)
		for (p = opt_state->levels[i]; p; p = p->link)
			opt_blk(opt_state, p, do_stmts);

	if (do_stmts)
		/*
		 * No point trying to move branches; it can't possibly
		 * make a difference at this point.
		 *
		 * XXX - this might be after we detect a loop where
		 * we were just looping infinitely moving branches
		 * in such a fashion that we went through two or more
		 * versions of the machine code, eventually returning
		 * to the first version.  (We're really not doing a
		 * full loop detection, we're just testing for two
		 * passes in a row where we do nothing but
		 * move branches.)
		 */
		return;

	/*
	 * Is this what the BPF+ paper describes in sections 6.1.1,
	 * 6.1.2 and 6.1.3?
	 */
	for (i = 1; i <= maxlevel; ++i) {
		for (p = opt_state->levels[i]; p; p = p->link) {
			opt_j(opt_state, &p->et);
			opt_j(opt_state, &p->ef);
		}
	}

	find_inedges(opt_state, ic->root);
	for (i = 1; i <= maxlevel; ++i) {
		for (p = opt_state->levels[i]; p; p = p->link) {
			or_pullup(opt_state, p, ic->root);
			and_pullup(opt_state, p, ic->root);
		}
	}
static void
link_inedge(struct edge *parent, struct block *child)
{
	parent->next = child->in_edges;
	child->in_edges = parent;
}
static void
find_inedges(opt_state_t *opt_state, struct block *root)

	for (i = 0; i < opt_state->n_blocks; ++i)
		opt_state->blocks[i]->in_edges = 0;

	/*
	 * Traverse the graph, adding each edge to the predecessor
	 * list of its successors.  Skip the leaves (i.e. level 0).
	 */
	for (level = root->level; level > 0; --level) {
		for (b = opt_state->levels[level]; b != 0; b = b->link) {
			link_inedge(&b->et, JT(b));
			link_inedge(&b->ef, JF(b));
		}
	}
static void
opt_root(struct block **b)

	struct slist *tmp, *s;

	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
		*b = JT(*b);

	/*
	 * If the root node is a return, then there is no
	 * point executing any statements (since the bpf machine
	 * has no side effects).
	 */
	if (BPF_CLASS((*b)->s.code) == BPF_RET)
		(*b)->stmts = 0;
static void
opt_loop(opt_state_t *opt_state, struct icode *ic, int do_stmts)

	if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
		printf("opt_loop(root, %d) begin\n", do_stmts);
		opt_dump(opt_state, ic);
	}

	/*
	 * XXX - optimizer loop detection.
	 */
	for (;;) {
		/*
		 * XXX - optimizer loop detection.
		 */
		opt_state->non_branch_movement_performed = 0;
		opt_state->done = 1;
		find_levels(opt_state, ic);
		find_dom(opt_state, ic->root);
		find_closure(opt_state, ic->root);
		find_ud(opt_state, ic->root);
		find_edom(opt_state, ic->root);
		opt_blks(opt_state, ic, do_stmts);

		if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
			printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, opt_state->done);
			opt_dump(opt_state, ic);
		}

		/*
		 * Was anything done in this optimizer pass?
		 */
		if (opt_state->done) {
			/*
			 * No, so we've reached a fixed point.
			 */
			break;
		}

		/*
		 * XXX - was anything done other than branch movement
		 * in this pass?
		 */
		if (opt_state->non_branch_movement_performed) {
			/*
			 * Yes.  Clear any loop-detection counter;
			 * we're making some form of progress (assuming
			 * we can't get into a cycle doing *other*
			 * optimizations...).
			 */
			loop_count = 0;
		} else {
			/*
			 * No - increment the counter, and quit if
			 * it reaches 100.
			 */
			loop_count++;
			if (loop_count >= 100) {
				/*
				 * We've done nothing but branch movement
				 * for 100 passes; we're probably
				 * in a cycle and will never reach a
				 * fixed point.
				 *
				 * XXX - yes, we really need a non-
				 * heuristic way of detecting a cycle.
				 */
				opt_state->done = 1;
				break;
			}
		}
	}
/*
 * Optimize the filter code in its dag representation.
 * Return 0 on success, -1 on error.
 */
int
bpf_optimize(struct icode *ic, char *errbuf)
{
	opt_state_t opt_state;

	memset(&opt_state, 0, sizeof(opt_state));
	opt_state.errbuf = errbuf;
	if (setjmp(opt_state.top_ctx)) {
		opt_cleanup(&opt_state);
		return -1;
	}
	opt_init(&opt_state, ic);
	opt_loop(&opt_state, ic, 0);
	opt_loop(&opt_state, ic, 1);
	intern_blocks(&opt_state, ic);

	if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
		printf("after intern_blocks()\n");
		opt_dump(&opt_state, ic);
	}

	opt_root(&ic->root);

	if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
		printf("after opt_root()\n");
		opt_dump(&opt_state, ic);
	}

	opt_cleanup(&opt_state);
	return 0;
}
static void
make_marks(struct icode *ic, struct block *p)

	if (!isMarked(ic, p)) {
		Mark(ic, p);
		if (BPF_CLASS(p->s.code) != BPF_RET) {
			make_marks(ic, JT(p));
			make_marks(ic, JF(p));
		}
	}

/*
 * Mark code array such that isMarked(ic->cur_mark, i) is true
 * only for nodes that are alive.
 */
static void
mark_code(struct icode *ic)

	make_marks(ic, ic->root);
/*
 * True iff the two stmt lists load the same value from the packet into
 * the accumulator.
 */
static int
eq_slist(struct slist *x, struct slist *y)

	while (x && x->s.code == NOP)
		x = x->next;
	while (y && y->s.code == NOP)
		y = y->next;

	if (x->s.code != y->s.code || x->s.k != y->s.k)
		return 0;

static int
eq_blk(struct block *b0, struct block *b1)

	if (b0->s.code == b1->s.code &&
	    b0->s.k == b1->s.k &&
	    b0->et.succ == b1->et.succ &&
	    b0->ef.succ == b1->ef.succ)
		return eq_slist(b0->stmts, b1->stmts);
	return 0;
static void
intern_blocks(opt_state_t *opt_state, struct icode *ic)

	int done1;		/* don't shadow global */

	for (i = 0; i < opt_state->n_blocks; ++i)
		opt_state->blocks[i]->link = 0;

	for (i = opt_state->n_blocks - 1; i != 0; ) {
		if (!isMarked(ic, opt_state->blocks[i]))
			continue;
		for (j = i + 1; j < opt_state->n_blocks; ++j) {
			if (!isMarked(ic, opt_state->blocks[j]))
				continue;
			if (eq_blk(opt_state->blocks[i], opt_state->blocks[j])) {
				opt_state->blocks[i]->link = opt_state->blocks[j]->link ?
					opt_state->blocks[j]->link : opt_state->blocks[j];
				break;
			}
		}
	}
	for (i = 0; i < opt_state->n_blocks; ++i) {
		p = opt_state->blocks[i];

		JT(p) = JT(p)->link;

		JF(p) = JF(p)->link;
	}
static void
opt_cleanup(opt_state_t *opt_state)
{
	free((void *)opt_state->vnode_base);
	free((void *)opt_state->vmap);
	free((void *)opt_state->edges);
	free((void *)opt_state->space);
	free((void *)opt_state->levels);
	free((void *)opt_state->blocks);
}
/*
 * For optimizer errors.
 */
static void PCAP_NORETURN
opt_error(opt_state_t *opt_state, const char *fmt, ...)
{
	va_list ap;

	if (opt_state->errbuf != NULL) {
		va_start(ap, fmt);
		(void)vsnprintf(opt_state->errbuf,
		    PCAP_ERRBUF_SIZE, fmt, ap);
		va_end(ap);
	}
	longjmp(opt_state->top_ctx, 1);
}
/*
 * Return the number of stmts in 's'.
 */
static u_int
slength(struct slist *s)
{
	u_int n = 0;

	for (; s; s = s->next)
		if (s->s.code != NOP)
			++n;
	return n;
}
/*
 * Return the number of nodes reachable by 'p'.
 * All nodes should be initially unmarked.
 */
static int
count_blocks(struct icode *ic, struct block *p)
{
	if (p == 0 || isMarked(ic, p))
		return 0;
	Mark(ic, p);
	return count_blocks(ic, JT(p)) + count_blocks(ic, JF(p)) + 1;
}
/*
 * Do a depth first search on the flow graph, numbering the
 * basic blocks, and entering them into the 'blocks' array.
 */
static void
number_blks_r(opt_state_t *opt_state, struct icode *ic, struct block *p)

	if (p == 0 || isMarked(ic, p))
		return;

	n = opt_state->n_blocks++;
	if (opt_state->n_blocks == 0) {
		/*
		 * Overflow.
		 */
		opt_error(opt_state, "filter is too complex to optimize");
	}
	opt_state->blocks[n] = p;

	number_blks_r(opt_state, ic, JT(p));
	number_blks_r(opt_state, ic, JF(p));
/*
 * Return the number of stmts in the flowgraph reachable by 'p'.
 * The nodes should be unmarked before calling.
 *
 * Note that "stmts" means "instructions", and that this includes:
 *
 *	side-effect statements in 'p' (slength(p->stmts));
 *
 *	statements in the true branch from 'p' (count_stmts(JT(p)));
 *
 *	statements in the false branch from 'p' (count_stmts(JF(p)));
 *
 *	the conditional jump itself (1);
 *
 *	an extra long jump if the true branch requires it (p->longjt);
 *
 *	an extra long jump if the false branch requires it (p->longjf).
 */
static u_int
count_stmts(struct icode *ic, struct block *p)

	if (p == 0 || isMarked(ic, p))
		return 0;
	n = count_stmts(ic, JT(p)) + count_stmts(ic, JF(p));
	return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
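/*
 * Illustrative example (editorial note, not from the original source):
 * a node with two non-NOP statements, no long jumps, and two distinct
 * leaf children each containing only a return contributes
 * 2 + (1 + 1) + 1 + 0 + 0 = 5 to the total.
 */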
/*
 * Allocate memory.  All allocation is done before optimization
 * is begun.  A linear bound on the size of all data structures is computed
 * from the total number of blocks and/or statements.
 */
static void
opt_init(opt_state_t *opt_state, struct icode *ic)
{
	bpf_u_int32 *p;
	int i, n, max_stmts;
	u_int product;
	size_t block_memsize, edge_memsize;

	/*
	 * First, count the blocks, so we can malloc an array to map
	 * block number to block.  Then, put the blocks into the array.
	 */
	unMarkAll(ic);
	n = count_blocks(ic, ic->root);
	opt_state->blocks = (struct block **)calloc(n, sizeof(*opt_state->blocks));
	if (opt_state->blocks == NULL)
		opt_error(opt_state, "malloc");

	unMarkAll(ic);
	opt_state->n_blocks = 0;
	number_blks_r(opt_state, ic, ic->root);

	/*
	 * This "should not happen".
	 */
	if (opt_state->n_blocks == 0)
		opt_error(opt_state, "filter has no instructions; please report this as a libpcap issue");

	opt_state->n_edges = 2 * opt_state->n_blocks;
	if ((opt_state->n_edges / 2) != opt_state->n_blocks) {
		/*
		 * Overflow.
		 */
		opt_error(opt_state, "filter is too complex to optimize");
	}
	opt_state->edges = (struct edge **)calloc(opt_state->n_edges, sizeof(*opt_state->edges));
	if (opt_state->edges == NULL) {
		opt_error(opt_state, "malloc");
	}

	/*
	 * The number of levels is bounded by the number of nodes.
	 */
	opt_state->levels = (struct block **)calloc(opt_state->n_blocks, sizeof(*opt_state->levels));
	if (opt_state->levels == NULL) {
		opt_error(opt_state, "malloc");
	}

	opt_state->edgewords = opt_state->n_edges / BITS_PER_WORD + 1;
	opt_state->nodewords = opt_state->n_blocks / BITS_PER_WORD + 1;

	/*
	 * Make sure opt_state->n_blocks * opt_state->nodewords fits
	 * in a u_int; we use it as a u_int number-of-iterations
	 * value.
	 */
	product = opt_state->n_blocks * opt_state->nodewords;
	if ((product / opt_state->n_blocks) != opt_state->nodewords) {
		/*
		 * XXX - just punt and don't try to optimize?
		 * In practice, this is unlikely to happen with
		 * a normal filter.
		 */
		opt_error(opt_state, "filter is too complex to optimize");
	}

	/*
	 * Make sure the total memory required for that doesn't
	 * overflow.
	 */
	block_memsize = (size_t)2 * product * sizeof(*opt_state->space);
	if ((block_memsize / product) != 2 * sizeof(*opt_state->space)) {
		opt_error(opt_state, "filter is too complex to optimize");
	}

	/*
	 * Make sure opt_state->n_edges * opt_state->edgewords fits
	 * in a u_int; we use it as a u_int number-of-iterations
	 * value.
	 */
	product = opt_state->n_edges * opt_state->edgewords;
	if ((product / opt_state->n_edges) != opt_state->edgewords) {
		opt_error(opt_state, "filter is too complex to optimize");
	}

	/*
	 * Make sure the total memory required for that doesn't
	 * overflow.
	 */
	edge_memsize = (size_t)product * sizeof(*opt_state->space);
	if (edge_memsize / product != sizeof(*opt_state->space)) {
		opt_error(opt_state, "filter is too complex to optimize");
	}

	/*
	 * Make sure the total memory required for both of them doesn't
	 * overflow.
	 */
	if (block_memsize > SIZE_MAX - edge_memsize) {
		opt_error(opt_state, "filter is too complex to optimize");
	}

	opt_state->space = (bpf_u_int32 *)malloc(block_memsize + edge_memsize);
	if (opt_state->space == NULL) {
		opt_error(opt_state, "malloc");
	}
	p = opt_state->space;
	opt_state->all_dom_sets = p;
	for (i = 0; i < n; ++i) {
		opt_state->blocks[i]->dom = p;
		p += opt_state->nodewords;
	}
	opt_state->all_closure_sets = p;
	for (i = 0; i < n; ++i) {
		opt_state->blocks[i]->closure = p;
		p += opt_state->nodewords;
	}
	opt_state->all_edge_sets = p;
	for (i = 0; i < n; ++i) {
		register struct block *b = opt_state->blocks[i];

		b->et.edom = p;
		p += opt_state->edgewords;
		b->ef.edom = p;
		p += opt_state->edgewords;
		b->et.id = i;
		opt_state->edges[i] = &b->et;
		b->ef.id = opt_state->n_blocks + i;
		opt_state->edges[opt_state->n_blocks + i] = &b->ef;
		b->et.pred = b;
		b->ef.pred = b;
	}
	max_stmts = 0;
	for (i = 0; i < n; ++i)
		max_stmts += slength(opt_state->blocks[i]->stmts) + 1;
	/*
	 * We allocate at most 3 value numbers per statement,
	 * so this is an upper bound on the number of valnodes
	 * we'll need.
	 */
	opt_state->maxval = 3 * max_stmts;
	opt_state->vmap = (struct vmapinfo *)calloc(opt_state->maxval, sizeof(*opt_state->vmap));
	if (opt_state->vmap == NULL) {
		opt_error(opt_state, "malloc");
	}
	opt_state->vnode_base = (struct valnode *)calloc(opt_state->maxval, sizeof(*opt_state->vnode_base));
	if (opt_state->vnode_base == NULL) {
		opt_error(opt_state, "malloc");
	}
}
/*
 * This is only used when supporting optimizer debugging.  It is
 * global state, so do *not* do more than one compile in parallel
 * and expect it to provide meaningful information.
 */
#ifdef BDEBUG
int bids[NBIDS];
#endif

static void PCAP_NORETURN conv_error(conv_state_t *, const char *, ...)
    PCAP_PRINTFLIKE(2, 3);
/*
 * Returns true if successful.  Returns false if a branch has
 * an offset that is too large.  If so, we have marked that
 * branch so that on a subsequent iteration, it will be treated
 * properly.
 */
static int
convert_code_r(conv_state_t *conv_state, struct icode *ic, struct block *p)
{
	struct bpf_insn *dst;
	struct slist *src;
	u_int slen;
	u_int off;
	struct slist **offset = NULL;

	if (p == 0 || isMarked(ic, p))
		return (1);
	Mark(ic, p);

	if (convert_code_r(conv_state, ic, JF(p)) == 0)
		return (0);
	if (convert_code_r(conv_state, ic, JT(p)) == 0)
		return (0);

	slen = slength(p->stmts);
	dst = conv_state->ftail -= (slen + 1 + p->longjt + p->longjf);
		/* inflate length by any extra jumps */

	p->offset = (int)(dst - conv_state->fstart);

	/* generate offset[] for convenience */
	if (slen) {
		offset = (struct slist **)calloc(slen, sizeof(struct slist *));
		if (!offset) {
			conv_error(conv_state, "not enough core");
			/*NOTREACHED*/
		}
	}
	src = p->stmts;
	for (off = 0; off < slen && src; off++) {
#if 0
		printf("off=%d src=%x\n", off, src);
#endif
		offset[off] = src;
		src = src->next;
	}

	off = 0;
	for (src = p->stmts; src; src = src->next) {
		if (src->s.code == NOP)
			continue;
		dst->code = (u_short)src->s.code;
		dst->k = src->s.k;

		/* fill block-local relative jump */
		if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
#if 0
			if (src->s.jt || src->s.jf) {
				free(offset);
				conv_error(conv_state, "illegal jmp destination");
				/*NOTREACHED*/
			}
#endif
			goto filled;
		}
		if (off == slen - 2)	/*???*/
			goto filled;

	    {
		u_int i;
		int jt, jf;
		const char ljerr[] = "%s for block-local relative jump: off=%d";

#if 0
		printf("code=%x off=%d %x %x\n", src->s.code,
			off, src->s.jt, src->s.jf);
#endif

		if (!src->s.jt || !src->s.jf) {
			free(offset);
			conv_error(conv_state, ljerr, "no jmp destination", off);
			/*NOTREACHED*/
		}

		jt = jf = 0;
		for (i = 0; i < slen; i++) {
			if (offset[i] == src->s.jt) {
				if (jt) {
					free(offset);
					conv_error(conv_state, ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}

				if (i - off - 1 >= 256) {
					free(offset);
					conv_error(conv_state, ljerr, "out-of-range jump", off);
					/*NOTREACHED*/
				}
				dst->jt = (u_char)(i - off - 1);
				jt++;
			}
			if (offset[i] == src->s.jf) {
				if (jf) {
					free(offset);
					conv_error(conv_state, ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}
				if (i - off - 1 >= 256) {
					free(offset);
					conv_error(conv_state, ljerr, "out-of-range jump", off);
					/*NOTREACHED*/
				}
				dst->jf = (u_char)(i - off - 1);
				jf++;
			}
		}
		if (!jt || !jf) {
			free(offset);
			conv_error(conv_state, ljerr, "no destination found", off);
			/*NOTREACHED*/
		}
	    }
filled:
		++dst;
		++off;
	}
	if (offset)
		free(offset);

#ifdef BDEBUG
	if (dst - conv_state->fstart < NBIDS)
		bids[dst - conv_state->fstart] = p->id + 1;
#endif
	dst->code = (u_short)p->s.code;
	dst->k = p->s.k;
	if (JT(p)) {
		/* number of extra jumps inserted */
		u_char extrajmps = 0;
		off = JT(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
			/* offset too large for branch, must add a jump */
			if (p->longjt == 0) {
				/* mark this instruction and retry */
				p->longjt++;
				return (0);
			}
			dst->jt = extrajmps;
			extrajmps++;
			dst[extrajmps].code = BPF_JMP|BPF_JA;
			dst[extrajmps].k = off - extrajmps;
		}
		else
			dst->jt = (u_char)off;
		off = JF(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
			/* offset too large for branch, must add a jump */
			if (p->longjf == 0) {
				/* mark this instruction and retry */
				p->longjf++;
				return (0);
			}
			/* branch if F to following jump */
			/* if two jumps are inserted, F goes to second one */
			dst->jf = extrajmps;
			extrajmps++;
			dst[extrajmps].code = BPF_JMP|BPF_JA;
			dst[extrajmps].k = off - extrajmps;
		}
		else
			dst->jf = (u_char)off;
	}
	return (1);
}
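/*
 * Illustrative sketch of the long-jump expansion above (instruction
 * numbers are made up, not taken from a real filter): if a branch target
 * is so far ahead that the relative offset no longer fits in the 8-bit
 * "jt"/"jf" field (i.e. it would exceed 255), the conditional instead
 * branches to an unconditional BPF_JA placed immediately after it, and
 * that jump carries the real target:
 *
 *	(100) jeq  #0x800  jt 101  jf 102	; jt points at the BPF_JA
 *	(101) ja   400				; jumps to the real target
 *	(102) ...				; false branch continues here
 *
 * On the first pass convert_code_r() only discovers the problem: it sets
 * p->longjt (or p->longjf) and returns 0, and icode_to_fcode() re-runs
 * the conversion with room reserved for the extra jump.
 */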
/*
 * Convert flowgraph intermediate representation to the
 * BPF array representation.  Set *lenp to the number of instructions.
 *
 * This routine does *NOT* leak the memory pointed to by fp.  It *must
 * not* do free(fp) before returning fp; doing so would make no sense,
 * as the BPF array pointed to by the return value of icode_to_fcode()
 * must be valid - it's being returned for use in a bpf_program structure.
 *
 * If it appears that icode_to_fcode() is leaking, the problem is that
 * the program using pcap_compile() is failing to free the memory in
 * the BPF program when it's done - the leak is in the program, not in
 * the routine that happens to be allocating the memory.  (By analogy, if
 * a program calls fopen() without ever calling fclose() on the FILE *,
 * it will leak the FILE structure; the leak is not in fopen(), it's in
 * the program.)  Change the program to use pcap_freecode() when it's
 * done with the filter program.  See the pcap man page.
 */
struct bpf_insn *
icode_to_fcode(struct icode *ic, struct block *root, u_int *lenp,
    char *errbuf)
{
	u_int n;
	struct bpf_insn *fp;
	conv_state_t conv_state;

	conv_state.fstart = NULL;
	conv_state.errbuf = errbuf;
	if (setjmp(conv_state.top_ctx) != 0) {
		free(conv_state.fstart);
		return NULL;
	}

	/*
	 * Loop doing convert_code_r() until no branches remain
	 * with too-large offsets.
	 */
	for (;;) {
		unMarkAll(ic);
		n = *lenp = count_stmts(ic, root);

		fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
		if (fp == NULL) {
			(void)snprintf(errbuf, PCAP_ERRBUF_SIZE,
			    "malloc");
			return NULL;
		}
		memset((char *)fp, 0, sizeof(*fp) * n);
		conv_state.fstart = fp;
		conv_state.ftail = fp + n;

		unMarkAll(ic);
		if (convert_code_r(&conv_state, ic, root))
			break;
		free(fp);
	}

	return fp;
}
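/*
 * Hedged usage sketch of the ownership rule described above (a typical
 * caller, not code from this file; the filter string is arbitrary):
 *
 *	struct bpf_program prog;
 *
 *	if (pcap_compile(p, &prog, "ip and tcp", 1, PCAP_NETMASK_UNKNOWN) == -1)
 *		return -1;		// error message is in pcap_geterr(p)
 *	if (pcap_setfilter(p, &prog) == -1) {
 *		pcap_freecode(&prog);	// still the caller's copy to free
 *		return -1;
 *	}
 *	pcap_freecode(&prog);		// the caller, not libpcap, frees it
 *
 * pcap_setfilter() copies the program, so freeing it immediately after
 * installation is safe.
 */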
/*
 * For icode_to_fcode() errors.
 */
static void PCAP_NORETURN
conv_error(conv_state_t *conv_state, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void)vsnprintf(conv_state->errbuf,
	    PCAP_ERRBUF_SIZE, fmt, ap);
	va_end(ap);
	longjmp(conv_state->top_ctx, 1);
	/* NOTREACHED */
}
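/*
 * Error-handling convention, summarized: conv_error() never returns.  It
 * formats the message into the caller-supplied error buffer and then
 * longjmp()s back to the setjmp(conv_state.top_ctx) in icode_to_fcode(),
 * which frees any partially built instruction array and returns NULL.
 * A minimal sketch of the same pattern ("state" is a hypothetical name):
 *
 *	if (setjmp(state.top_ctx) != 0)
 *		return NULL;		// an error was reported via longjmp
 *	...
 *	longjmp(state.top_ctx, 1);	// report an error; does not return
 */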
/*
 * Make a copy of a BPF program and put it in the "fcode" member of
 * a "pcap_t".
 *
 * If we fail to allocate memory for the copy, fill in the "errbuf"
 * member of the "pcap_t" with an error message, and return -1;
 * otherwise, return 0.
 */
int
pcapint_install_bpf_program(pcap_t *p, struct bpf_program *fp)
{
	size_t prog_size;

	/*
	 * Validate the program.
	 */
	if (!pcapint_validate_filter(fp->bf_insns, fp->bf_len)) {
		snprintf(p->errbuf, sizeof(p->errbuf),
			"BPF program is not valid");
		return (-1);
	}

	/*
	 * Free up any already installed program.
	 */
	pcap_freecode(&p->fcode);

	prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
	p->fcode.bf_len = fp->bf_len;
	p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
	if (p->fcode.bf_insns == NULL) {
		pcapint_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
		    errno, "malloc");
		return (-1);
	}
	memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
	return (0);
}
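/*
 * Hedged usage sketch: capture modules whose devices offer no kernel
 * filtering typically implement their setfilter operation by delegating
 * to this routine, so the copied program can be applied in userland when
 * packets are read (illustrative only; "xxx_setfilter" is a hypothetical
 * module function, not part of this file):
 *
 *	static int
 *	xxx_setfilter(pcap_t *p, struct bpf_program *fp)
 *	{
 *		return (pcapint_install_bpf_program(p, fp));
 *	}
 */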
#ifdef BDEBUG
static void
dot_dump_node(struct icode *ic, struct block *block, struct bpf_program *prog,
    FILE *out)
{
	int icount, noffset;
	int i;

	if (block == NULL || isMarked(ic, block))
		return;
	Mark(ic, block);

	icount = slength(block->stmts) + 1 + block->longjt + block->longjf;
	noffset = min(block->offset + icount, (int)prog->bf_len);

	fprintf(out, "\tblock%u [shape=ellipse, id=\"block-%u\" label=\"BLOCK%u\\n", block->id, block->id, block->id);
	for (i = block->offset; i < noffset; i++) {
		fprintf(out, "\\n%s", bpf_image(prog->bf_insns + i, i));
	}
	fprintf(out, "\" tooltip=\"");
	for (i = 0; i < BPF_MEMWORDS; i++)
		if (block->val[i] != VAL_UNKNOWN)
			fprintf(out, "val[%d]=%d ", i, block->val[i]);
	fprintf(out, "val[A]=%d ", block->val[A_ATOM]);
	fprintf(out, "val[X]=%d", block->val[X_ATOM]);
	fprintf(out, "\"");
	if (JT(block) == NULL)
		fprintf(out, ", peripheries=2");
	fprintf(out, "];\n");

	dot_dump_node(ic, JT(block), prog, out);
	dot_dump_node(ic, JF(block), prog, out);
}
static void
dot_dump_edge(struct icode *ic, struct block *block, FILE *out)
{
	if (block == NULL || isMarked(ic, block))
		return;
	Mark(ic, block);

	if (JT(block)) {
		fprintf(out, "\t\"block%u\":se -> \"block%u\":n [label=\"T\"]; \n",
			block->id, JT(block)->id);
		fprintf(out, "\t\"block%u\":sw -> \"block%u\":n [label=\"F\"]; \n",
			block->id, JF(block)->id);
	}
	dot_dump_edge(ic, JT(block), out);
	dot_dump_edge(ic, JF(block), out);
}
/* Output the block CFG using graphviz/DOT language.
 * In the CFG, each block's code, the value index for each register at EXIT,
 * and the jump relationships are shown.
 *
 * example DOT for BPF `ip src host 1.1.1.1' is:
    digraph BPF {
	block0 [shape=ellipse, id="block-0" label="BLOCK0\n\n(000) ldh [12]\n(001) jeq #0x800 jt 2 jf 5" tooltip="val[A]=0 val[X]=0"];
	block1 [shape=ellipse, id="block-1" label="BLOCK1\n\n(002) ld [26]\n(003) jeq #0x1010101 jt 4 jf 5" tooltip="val[A]=0 val[X]=0"];
	block2 [shape=ellipse, id="block-2" label="BLOCK2\n\n(004) ret #68" tooltip="val[A]=0 val[X]=0", peripheries=2];
	block3 [shape=ellipse, id="block-3" label="BLOCK3\n\n(005) ret #0" tooltip="val[A]=0 val[X]=0", peripheries=2];
	"block0":se -> "block1":n [label="T"];
	"block0":sw -> "block3":n [label="F"];
	"block1":se -> "block2":n [label="T"];
	"block1":sw -> "block3":n [label="F"];
    }
 *
 * After installing graphviz from https://www.graphviz.org/, save the output
 * as bpf.dot and run `dot -Tpng -O bpf.dot' to draw the graph.
 */
static int
dot_dump(struct icode *ic, char *errbuf)
{
	struct bpf_program f;
	FILE *out = stdout;

	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
	if (f.bf_insns == NULL)
		return -1;

	fprintf(out, "digraph BPF {\n");
	unMarkAll(ic);
	dot_dump_node(ic, ic->root, &f, out);
	unMarkAll(ic);
	dot_dump_edge(ic, ic->root, out);
	fprintf(out, "}\n");

	free((char *)f.bf_insns);
	return 0;
}
static int
plain_dump(struct icode *ic, char *errbuf)
{
	struct bpf_program f;

	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
	if (f.bf_insns == NULL)
		return -1;
	bpf_dump(&f, 1);
	putchar('\n');
	free((char *)f.bf_insns);
	return 0;
}
static void
opt_dump(opt_state_t *opt_state, struct icode *ic)
{
	int status;
	char errbuf[PCAP_ERRBUF_SIZE];

	/*
	 * If the CFG, in DOT format, is requested, output it rather than
	 * the code that would be generated from that graph.
	 */
	if (pcap_print_dot_graph)
		status = dot_dump(ic, errbuf);
	else
		status = plain_dump(ic, errbuf);
	if (status == -1)
		opt_error(opt_state, "opt_dump: icode_to_fcode failed: %s", errbuf);