/*
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Optimization module for tcpdump intermediate representation.
 */
#ifndef lint
static const char rcsid[] =
    "@(#) $Header: /tcpdump/master/libpcap/optimize.c,v 1.69 2001-11-12 21:57:06 fenner Exp $ (LBL)";
#endif

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <sys/types.h>
#include <sys/time.h>

#include <stdio.h>
#include <stdlib.h>
#include <memory.h>

#include <errno.h>

#include "pcap-int.h"

#include "gencode.h"

#ifdef HAVE_OS_PROTO_H
#include "os-proto.h"
#endif

#ifdef BDEBUG
extern int dflag;
#endif

#define A_ATOM BPF_MEMWORDS
#define X_ATOM (BPF_MEMWORDS+1)

#define NOP -1

/*
 * This define is used to represent *both* the accumulator and
 * x register in use-def computations.
 * Currently, the use-def code assumes only one definition per instruction.
 */
#define AX_ATOM N_ATOMS

/*
 * A flag to indicate that further optimization is needed.
 * Iterative passes are continued until a given pass yields no
 * branch movement.
 */
static int done;

/*
 * A block is marked if and only if its mark equals the current mark.
 * Rather than traverse the code array, marking each item, 'cur_mark' is
 * incremented.  This automatically makes each element unmarked.
 */
static int cur_mark;
#define isMarked(p) ((p)->mark == cur_mark)
#define unMarkAll() cur_mark += 1
#define Mark(p) ((p)->mark = cur_mark)
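/*
 * For illustration: after unMarkAll(), isMarked() is false for every
 * block, so a traversal such as
 *
 *	unMarkAll();
 *	if (!isMarked(b)) {
 *		Mark(b);
 *		...
 *	}
 *
 * visits each block exactly once without an O(n) pass to clear marks.
 */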

static void opt_init(struct block *);
static void opt_cleanup(void);

static void make_marks(struct block *);
static void mark_code(struct block *);

static void intern_blocks(struct block *);

static int eq_slist(struct slist *, struct slist *);

static void find_levels_r(struct block *);

static void find_levels(struct block *);
static void find_dom(struct block *);
static void propedom(struct edge *);
static void find_edom(struct block *);
static void find_closure(struct block *);
static int atomuse(struct stmt *);
static int atomdef(struct stmt *);
static void compute_local_ud(struct block *);
static void find_ud(struct block *);
static void init_val(void);
static int F(int, int, int);
static inline void vstore(struct stmt *, int *, int, int);
static void opt_blk(struct block *, int);
static int use_conflict(struct block *, struct block *);
static void opt_j(struct edge *);
static void or_pullup(struct block *);
static void and_pullup(struct block *);
static void opt_blks(struct block *, int);
static inline void link_inedge(struct edge *, struct block *);
static void find_inedges(struct block *);
static void opt_root(struct block **);
static void opt_loop(struct block *, int);
static void fold_op(struct stmt *, int, int);
static inline struct slist *this_op(struct slist *);
static void opt_not(struct block *);
static void opt_peep(struct block *);
static void opt_stmt(struct stmt *, int[], int);
static void deadstmt(struct stmt *, struct stmt *[]);
static void opt_deadstores(struct block *);
static struct block *fold_edge(struct block *, struct edge *);
static inline int eq_blk(struct block *, struct block *);
static int slength(struct slist *);
static int count_blocks(struct block *);
static void number_blks_r(struct block *);
static int count_stmts(struct block *);
static int convert_code_r(struct block *);
#ifdef BDEBUG
static void opt_dump(struct block *);
#endif

static int n_blocks;
struct block **blocks;
static int n_edges;
struct edge **edges;

/*
 * A bit vector set representation of the dominators.
 * We round up the set size to the next multiple of the word size.
 */
static int nodewords;
static int edgewords;
struct block **levels;
bpf_u_int32 *space;
#define BITS_PER_WORD (8*sizeof(bpf_u_int32))
/*
 * True if a is in uset {p}
 */
#define SET_MEMBER(p, a) \
	((p)[(unsigned)(a) / BITS_PER_WORD] & (1 << ((unsigned)(a) % BITS_PER_WORD)))

/*
 * Add 'a' to uset p.
 */
#define SET_INSERT(p, a) \
	(p)[(unsigned)(a) / BITS_PER_WORD] |= (1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * Delete 'a' from uset p.
 */
#define SET_DELETE(p, a) \
	(p)[(unsigned)(a) / BITS_PER_WORD] &= ~(1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * a := a intersect b
 */
#define SET_INTERSECT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ &= *_y++;\
}

/*
 * a := a - b
 */
#define SET_SUBTRACT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ &=~ *_y++;\
}

/*
 * a := a union b
 */
#define SET_UNION(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ |= *_y++;\
}
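/*
 * For example, with 32-bit words, SET_INSERT(p, 35) sets bit 3 of
 * word 1 (35 / 32 == 1, 35 % 32 == 3) and SET_MEMBER(p, 35) tests that
 * same bit.  Note that the 'n' argument of the three bulk operations
 * above is a count of words, not of bits.
 */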

static uset all_dom_sets;
static uset all_closure_sets;
static uset all_edge_sets;

#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif

static void
find_levels_r(b)
	struct block *b;
{
	int level;

	if (isMarked(b))
		return;

	Mark(b);
	b->link = 0;

	if (JT(b)) {
		find_levels_r(JT(b));
		find_levels_r(JF(b));
		level = MAX(JT(b)->level, JF(b)->level) + 1;
	} else
		level = 0;
	b->level = level;
	b->link = levels[level];
	levels[level] = b;
}

/*
 * Level graph.  The levels go from 0 at the leaves to
 * N_LEVELS at the root.  The levels[] array points to the
 * first node of the level list, whose elements are linked
 * with the 'link' field of the struct block.
 */
static void
find_levels(root)
	struct block *root;
{
	memset((char *)levels, 0, n_blocks * sizeof(*levels));
	unMarkAll();
	find_levels_r(root);
}
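/*
 * As a small example, in the graph
 *
 *	A -> B, A -> C, B -> D, C -> D
 *
 * the leaf D gets level 0, B and C get level 1, and the root A gets
 * level 2; levels[1] then points at B or C, whichever was visited
 * last, with the other linked behind it through 'link'.
 */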

/*
 * Find dominator relationships.
 * Assumes graph has been leveled.
 */
static void
find_dom(root)
	struct block *root;
{
	int i;
	struct block *b;
	bpf_u_int32 *x;

	/*
	 * Initialize sets to contain all nodes.
	 */
	x = all_dom_sets;
	i = n_blocks * nodewords;
	while (--i >= 0)
		*x++ = ~0;
	/* Root starts off empty. */
	for (i = nodewords; --i >= 0;)
		root->dom[i] = 0;

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->dom, b->id);
			if (JT(b) == 0)
				continue;
			SET_INTERSECT(JT(b)->dom, b->dom, nodewords);
			SET_INTERSECT(JF(b)->dom, b->dom, nodewords);
		}
	}
}
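/*
 * For the example graph above (A -> B -> D, A -> C -> D), this
 * computes dom(A) = {A}, dom(B) = {A,B}, dom(C) = {A,C} and
 * dom(D) = {A,D}: intersecting along both branches removes B and C
 * from D's set, since neither lies on every path from the root to D.
 */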

static void
propedom(ep)
	struct edge *ep;
{
	SET_INSERT(ep->edom, ep->id);
	if (ep->succ) {
		SET_INTERSECT(ep->succ->et.edom, ep->edom, edgewords);
		SET_INTERSECT(ep->succ->ef.edom, ep->edom, edgewords);
	}
}

/*
 * Compute edge dominators.
 * Assumes graph has been leveled and predecessors established.
 */
static void
find_edom(root)
	struct block *root;
{
	int i;
	uset x;
	struct block *b;

	x = all_edge_sets;
	for (i = n_edges * edgewords; --i >= 0; )
		x[i] = ~0;

	/* root->level is the highest level number found. */
	memset(root->et.edom, 0, edgewords * sizeof(*(uset)0));
	memset(root->ef.edom, 0, edgewords * sizeof(*(uset)0));
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b != 0; b = b->link) {
			propedom(&b->et);
			propedom(&b->ef);
		}
	}
}

/*
 * Find the backwards transitive closure of the flow graph.  These sets
 * are backwards in the sense that we find the set of nodes that reach
 * a given node, not the set of nodes that can be reached by a node.
 *
 * Assumes graph has been leveled.
 */
static void
find_closure(root)
	struct block *root;
{
	int i;
	struct block *b;

	/*
	 * Initialize sets to contain no nodes.
	 */
	memset((char *)all_closure_sets, 0,
	    n_blocks * nodewords * sizeof(*all_closure_sets));

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->closure, b->id);
			if (JT(b) == 0)
				continue;
			SET_UNION(JT(b)->closure, b->closure, nodewords);
			SET_UNION(JF(b)->closure, b->closure, nodewords);
		}
	}
}
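/*
 * For the same example graph, closure(D) ends up as {A,B,C,D}:
 * where find_dom() intersects, this pass unions, so a node's set
 * accumulates every node from which it can be reached.
 */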

/*
 * Return the register number that is used by s.  If A and X are both
 * used, return AX_ATOM.  If no register is used, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomuse(s)
	struct stmt *s;
{
	register int c = s->code;

	if (c == NOP)
		return -1;

	switch (BPF_CLASS(c)) {

	case BPF_RET:
		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;

	case BPF_LD:
	case BPF_LDX:
		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
			(BPF_MODE(c) == BPF_MEM) ? s->k : -1;

	case BPF_ST:
		return A_ATOM;

	case BPF_STX:
		return X_ATOM;

	case BPF_JMP:
	case BPF_ALU:
		if (BPF_SRC(c) == BPF_X)
			return AX_ATOM;
		return A_ATOM;

	case BPF_MISC:
		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
	}
	abort();
	/* NOTREACHED */
}

/*
 * Return the register number that is defined by 's'.  We assume that
 * a single stmt cannot define more than one register.  If no register
 * is defined, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomdef(s)
	struct stmt *s;
{
	if (s->code == NOP)
		return -1;

	switch (BPF_CLASS(s->code)) {

	case BPF_LD:
	case BPF_ALU:
		return A_ATOM;

	case BPF_LDX:
		return X_ATOM;

	case BPF_ST:
	case BPF_STX:
		return s->k;

	case BPF_MISC:
		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
	}
	return -1;
}

static void
compute_local_ud(b)
	struct block *b;
{
	struct slist *s;
	atomset def = 0, use = 0, kill = 0;
	int atom;

	for (s = b->stmts; s; s = s->next) {
		if (s->s.code == NOP)
			continue;
		atom = atomuse(&s->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
			else
				abort();
		}
		atom = atomdef(&s->s);
		if (atom >= 0) {
			if (!ATOMELEM(use, atom))
				kill |= ATOMMASK(atom);
			def |= ATOMMASK(atom);
		}
	}
	if (!ATOMELEM(def, A_ATOM) && BPF_CLASS(b->s.code) == BPF_JMP)
		use |= ATOMMASK(A_ATOM);

	b->def = def;
	b->kill = kill;
	b->in_use = use;
}
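/*
 * For illustration, a block consisting of
 *
 *	ld  [12]
 *	st  M[0]
 *
 * defines both the accumulator and memory slot 0 before reading
 * either from outside, so both atoms land in 'def' and 'kill' while
 * 'in_use' stays empty.
 */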

/*
 * Assume graph is already leveled.
 */
static void
find_ud(root)
	struct block *root;
{
	int i, maxlevel;
	struct block *p;

	/*
	 * root->level is the highest level number found;
	 * count down from there.
	 */
	maxlevel = root->level;
	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link) {
			compute_local_ud(p);
			p->out_use = 0;
		}

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			p->out_use |= JT(p)->in_use | JF(p)->in_use;
			p->in_use |= p->out_use &~ p->kill;
		}
	}
}

/*
 * These data structures are used in a Cocke and Schwartz style
 * value numbering scheme.  Since the flowgraph is acyclic,
 * exit values can be propagated from a node's predecessors
 * provided it is uniquely defined.
 */
struct valnode {
	int code;
	int v0, v1;
	int val;
	struct valnode *next;
};

#define MODULUS 213
static struct valnode *hashtbl[MODULUS];
static int curval;
static int maxval;

/* Integer constants mapped with the load immediate opcode. */
#define K(i) F(BPF_LD|BPF_IMM|BPF_W, i, 0L)

struct vmapinfo {
	int is_const;
	bpf_int32 const_val;
};

struct vmapinfo *vmap;
struct valnode *vnode_base;
struct valnode *next_vnode;

static void
init_val()
{
	curval = 0;
	next_vnode = vnode_base;
	memset((char *)vmap, 0, maxval * sizeof(*vmap));
	memset((char *)hashtbl, 0, sizeof hashtbl);
}

/* Because we really don't have an IR, this stuff is a little messy. */
static int
F(code, v0, v1)
	int code;
	int v0, v1;
{
	u_int hash;
	int val;
	struct valnode *p;

	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
	hash %= MODULUS;

	for (p = hashtbl[hash]; p; p = p->next)
		if (p->code == code && p->v0 == v0 && p->v1 == v1)
			return p->val;

	val = ++curval;
	if (BPF_MODE(code) == BPF_IMM &&
	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
		vmap[val].const_val = v0;
		vmap[val].is_const = 1;
	}
	p = next_vnode++;
	p->val = val;
	p->code = code;
	p->v0 = v0;
	p->v1 = v1;
	p->next = hashtbl[hash];
	hashtbl[hash] = p;

	return val;
}
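/*
 * For example, every occurrence of "ld [14]" maps to
 * F(BPF_LD|BPF_ABS|BPF_W, 14, 0) and thus to one shared value number,
 * which is what lets later passes recognize a reload of the same
 * packet word as redundant.
 */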

static inline void
vstore(s, valp, newval, alter)
	struct stmt *s;
	int *valp;
	int newval;
	int alter;
{
	if (alter && *valp == newval)
		s->code = NOP;
	else
		*valp = newval;
}

static void
fold_op(s, v0, v1)
	struct stmt *s;
	int v0, v1;
{
	bpf_int32 a, b;

	a = vmap[v0].const_val;
	b = vmap[v1].const_val;

	switch (BPF_OP(s->code)) {
	case BPF_ADD:
		a += b;
		break;

	case BPF_SUB:
		a -= b;
		break;

	case BPF_MUL:
		a *= b;
		break;

	case BPF_DIV:
		if (b == 0)
			bpf_error("division by zero");
		a /= b;
		break;

	case BPF_AND:
		a &= b;
		break;

	case BPF_OR:
		a |= b;
		break;

	case BPF_LSH:
		a <<= b;
		break;

	case BPF_RSH:
		a >>= b;
		break;

	case BPF_NEG:
		a = -a;
		break;

	default:
		abort();
	}
	s->k = a;
	s->code = BPF_LD|BPF_IMM;
	done = 0;
}
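/*
 * So, for instance, when the accumulator is known to hold 6 and the
 * statement is "and #3", fold_op() rewrites the statement in place as
 * "ld #2" and clears 'done' so another pass runs over the result.
 */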

static inline struct slist *
this_op(s)
	struct slist *s;
{
	while (s != 0 && s->s.code == NOP)
		s = s->next;
	return s;
}

static void
opt_not(b)
	struct block *b;
{
	struct block *tmp = JT(b);

	JT(b) = JF(b);
	JF(b) = tmp;
}

static void
opt_peep(b)
	struct block *b;
{
	struct slist *s;
	struct slist *next, *last;
	int val;

	s = b->stmts;
	if (s == 0)
		return;

	last = s;
	while (1) {
		s = this_op(s);
		if (s == 0)
			break;
		next = this_op(s->next);
		if (next == 0)
			break;
		last = next;

		/*
		 * st  M[k]	-->	st  M[k]
		 * ldx M[k]		tax
		 */
		if (s->s.code == BPF_ST &&
		    next->s.code == (BPF_LDX|BPF_MEM) &&
		    s->s.k == next->s.k) {
			done = 0;
			next->s.code = BPF_MISC|BPF_TAX;
		}
		/*
		 * ld  #k	-->	ldx #k
		 * tax			txa
		 */
		if (s->s.code == (BPF_LD|BPF_IMM) &&
		    next->s.code == (BPF_MISC|BPF_TAX)) {
			s->s.code = BPF_LDX|BPF_IMM;
			next->s.code = BPF_MISC|BPF_TXA;
			done = 0;
		}
		/*
		 * This is an ugly special case, but it happens
		 * when you say tcp[k] or udp[k] where k is a constant.
		 */
		if (s->s.code == (BPF_LD|BPF_IMM)) {
			struct slist *add, *tax, *ild;

			/*
			 * Check that X isn't used on exit from this
			 * block (which the optimizer might cause).
			 * We know the code generator won't generate
			 * any local dependencies.
			 */
			if (ATOMELEM(b->out_use, X_ATOM))
				break;

			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
				add = next;
			else
				add = this_op(next->next);
			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
				break;

			tax = this_op(add->next);
			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
				break;

			ild = this_op(tax->next);
			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
			    BPF_MODE(ild->s.code) != BPF_IND)
				break;
			/*
			 * XXX We need to check that X is not
			 * subsequently used.  We know we can eliminate the
			 * accumulator modifications since it is defined
			 * by the last stmt of this sequence.
			 *
			 * We want to turn this sequence:
			 *
			 * (004) ldi     #0x2		{s}
			 * (005) ldxms   [14]		{next}  --  optional
			 * (006) addx			{add}
			 * (007) tax			{tax}
			 * (008) ild     [x+0]		{ild}
			 *
			 * into this sequence:
			 *
			 * (004) nop
			 * (005) ldxms   [14]
			 * (006) nop
			 * (007) nop
			 * (008) ild     [x+2]
			 *
			 */
			ild->s.k += s->s.k;
			s->s.code = NOP;
			add->s.code = NOP;
			tax->s.code = NOP;
			done = 0;
		}
		s = next;
	}
	/*
	 * If we have a subtract to do a comparison, and the X register
	 * is a known constant, we can merge this value into the
	 * comparison.
	 */
	if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
		val = b->val[X_ATOM];
		if (vmap[val].is_const) {
			int op;

			b->s.k += vmap[val].const_val;
			op = BPF_OP(b->s.code);
			if (op == BPF_JGT || op == BPF_JGE) {
				struct block *t = JT(b);
				JT(b) = JF(b);
				JF(b) = t;
				b->s.k += 0x80000000;
			}
			last->s.code = NOP;
			done = 0;
		} else if (b->s.k == 0) {
			/*
			 * sub x  ->	nop
			 * j  #0	j  x
			 */
			last->s.code = NOP;
			b->s.code = BPF_CLASS(b->s.code) | BPF_OP(b->s.code) |
			    BPF_X;
			done = 0;
		}
	}
	/*
	 * Likewise, a constant subtract can be simplified.
	 */
	else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
		int op;

		b->s.k += last->s.k;
		last->s.code = NOP;
		op = BPF_OP(b->s.code);
		if (op == BPF_JGT || op == BPF_JGE) {
			struct block *t = JT(b);
			JT(b) = JF(b);
			JF(b) = t;
			b->s.k += 0x80000000;
		}
		done = 0;
	}
	/*
	 * and #k	nop
	 * jeq #0  ->	jset #k
	 */
	if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM) && b->s.k == 0) {
		b->s.k = last->s.k;
		b->s.code = BPF_JMP|BPF_K|BPF_JSET;
		last->s.code = NOP;
		done = 0;
		opt_not(b);
	}
	/*
	 * If the accumulator is a known constant, we can compute the
	 * comparison result.
	 */
	val = b->val[A_ATOM];
	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
		bpf_int32 v = vmap[val].const_val;
		switch (BPF_OP(b->s.code)) {

		case BPF_JEQ:
			v = v == b->s.k;
			break;

		case BPF_JGT:
			v = (unsigned)v > b->s.k;
			break;

		case BPF_JGE:
			v = (unsigned)v >= b->s.k;
			break;

		case BPF_JSET:
			v &= b->s.k;
			break;

		default:
			abort();
		}
		if (JF(b) != JT(b))
			done = 0;
		if (v)
			JF(b) = JT(b);
		else
			JT(b) = JF(b);
	}
}

/*
 * Compute the symbolic value of the expression of 's', and update
 * anything it defines in the value table 'val'.  If 'alter' is true,
 * do various optimizations.  This code would be cleaner if symbolic
 * evaluation and code transformations weren't folded together.
 */
static void
opt_stmt(s, val, alter)
	struct stmt *s;
	int val[];
	int alter;
{
	int op;
	int v;

	switch (s->code) {

	case BPF_LD|BPF_ABS|BPF_W:
	case BPF_LD|BPF_ABS|BPF_H:
	case BPF_LD|BPF_ABS|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IND|BPF_W:
	case BPF_LD|BPF_IND|BPF_H:
	case BPF_LD|BPF_IND|BPF_B:
		v = val[X_ATOM];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
			s->k += vmap[v].const_val;
			v = F(s->code, s->k, 0L);
			done = 0;
		}
		else
			v = F(s->code, s->k, v);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_LEN:
		v = F(s->code, 0L, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_MSH|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ALU|BPF_NEG:
		if (alter && vmap[val[A_ATOM]].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = -vmap[val[A_ATOM]].const_val;
			val[A_ATOM] = K(s->k);
		}
		else
			val[A_ATOM] = F(s->code, val[A_ATOM], 0L);
		break;

	case BPF_ALU|BPF_ADD|BPF_K:
	case BPF_ALU|BPF_SUB|BPF_K:
	case BPF_ALU|BPF_MUL|BPF_K:
	case BPF_ALU|BPF_DIV|BPF_K:
	case BPF_ALU|BPF_AND|BPF_K:
	case BPF_ALU|BPF_OR|BPF_K:
	case BPF_ALU|BPF_LSH|BPF_K:
	case BPF_ALU|BPF_RSH|BPF_K:
		op = BPF_OP(s->code);
		if (alter) {
			if (s->k == 0) {
				/* don't optimize away "sub #0"
				 * as it may be needed later to
				 * fixup the generated math code */
				if (op == BPF_ADD ||
				    op == BPF_LSH || op == BPF_RSH ||
				    op == BPF_OR) {
					s->code = NOP;
					break;
				}
				if (op == BPF_MUL || op == BPF_AND) {
					s->code = BPF_LD|BPF_IMM;
					val[A_ATOM] = K(s->k);
					break;
				}
			}
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], K(s->k));
				val[A_ATOM] = K(s->k);
				break;
			}
		}
		val[A_ATOM] = F(s->code, val[A_ATOM], K(s->k));
		break;

	case BPF_ALU|BPF_ADD|BPF_X:
	case BPF_ALU|BPF_SUB|BPF_X:
	case BPF_ALU|BPF_MUL|BPF_X:
	case BPF_ALU|BPF_DIV|BPF_X:
	case BPF_ALU|BPF_AND|BPF_X:
	case BPF_ALU|BPF_OR|BPF_X:
	case BPF_ALU|BPF_LSH|BPF_X:
	case BPF_ALU|BPF_RSH|BPF_X:
		op = BPF_OP(s->code);
		if (alter && vmap[val[X_ATOM]].is_const) {
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], val[X_ATOM]);
				val[A_ATOM] = K(s->k);
			}
			else {
				s->code = BPF_ALU|BPF_K|op;
				s->k = vmap[val[X_ATOM]].const_val;
				done = 0;
				val[A_ATOM] =
					F(s->code, val[A_ATOM], K(s->k));
			}
			break;
		}
		/*
		 * Check if we're doing something to an accumulator
		 * that is 0, and simplify.  This may not seem like
		 * much of a simplification but it could open up further
		 * optimizations.
		 * XXX We could also check for mul by 1, and -1, etc.
		 */
		if (alter && vmap[val[A_ATOM]].is_const
		    && vmap[val[A_ATOM]].const_val == 0) {
			if (op == BPF_ADD || op == BPF_OR ||
			    op == BPF_LSH || op == BPF_RSH || op == BPF_SUB) {
				s->code = BPF_MISC|BPF_TXA;
				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
				break;
			}
			else if (op == BPF_MUL || op == BPF_DIV ||
			    op == BPF_AND) {
				s->code = BPF_LD|BPF_IMM;
				s->k = 0;
				vstore(s, &val[A_ATOM], K(s->k), alter);
				break;
			}
			else if (op == BPF_NEG) {
				s->code = NOP;
				break;
			}
		}
		val[A_ATOM] = F(s->code, val[A_ATOM], val[X_ATOM]);
		break;

	case BPF_MISC|BPF_TXA:
		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
		break;

	case BPF_LD|BPF_MEM:
		v = val[s->k];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = vmap[v].const_val;
			done = 0;
		}
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_MISC|BPF_TAX:
		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
		break;

	case BPF_LDX|BPF_MEM:
		v = val[s->k];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LDX|BPF_IMM;
			s->k = vmap[v].const_val;
			done = 0;
		}
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ST:
		vstore(s, &val[s->k], val[A_ATOM], alter);
		break;

	case BPF_STX:
		vstore(s, &val[s->k], val[X_ATOM], alter);
		break;
	}
}

static void
deadstmt(s, last)
	register struct stmt *s;
	register struct stmt *last[];
{
	register int atom;

	atom = atomuse(s);
	if (atom >= 0) {
		if (atom == AX_ATOM) {
			last[X_ATOM] = 0;
			last[A_ATOM] = 0;
		}
		else
			last[atom] = 0;
	}
	atom = atomdef(s);
	if (atom >= 0) {
		if (last[atom]) {
			done = 0;
			last[atom]->code = NOP;
		}
		last[atom] = s;
	}
}

static void
opt_deadstores(b)
	register struct block *b;
{
	register struct slist *s;
	register int atom;
	struct stmt *last[N_ATOMS];

	memset((char *)last, 0, sizeof last);

	for (s = b->stmts; s != 0; s = s->next)
		deadstmt(&s->s, last);
	deadstmt(&b->s, last);

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
			last[atom]->code = NOP;
			done = 0;
		}
}

static void
opt_blk(b, do_stmts)
	struct block *b;
	int do_stmts;
{
	struct slist *s;
	struct edge *p;
	int i;
	bpf_int32 aval;
#if 0
	for (s = b->stmts; s && s->next; s = s->next)
		if (BPF_CLASS(s->s.code) == BPF_JMP) {
			do_stmts = 0;
			break;
		}
#endif

	/*
	 * Initialize the atom values.
	 * If we have no predecessors, everything is undefined.
	 * Otherwise, we inherit our values from our predecessors.
	 * If any register has an ambiguous value (i.e. control paths are
	 * merging), give it the undefined value of 0.
	 */
	p = b->in_edges;
	if (p == 0)
		memset((char *)b->val, 0, sizeof(b->val));
	else {
		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
		while ((p = p->next) != NULL) {
			for (i = 0; i < N_ATOMS; ++i)
				if (b->val[i] != p->pred->val[i])
					b->val[i] = 0;
		}
	}
	aval = b->val[A_ATOM];
	for (s = b->stmts; s; s = s->next)
		opt_stmt(&s->s, b->val, do_stmts);

	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator with a value that is
	 * already there, or if this block is a return,
	 * eliminate all the statements.
	 */
	if (do_stmts &&
	    ((b->out_use == 0 && aval != 0 && b->val[A_ATOM] == aval) ||
	     BPF_CLASS(b->s.code) == BPF_RET)) {
		if (b->stmts != 0) {
			b->stmts = 0;
			done = 0;
		}
	} else {
		opt_peep(b);
		opt_deadstores(b);
	}
	/*
	 * Set up values for branch optimizer.
	 */
	if (BPF_SRC(b->s.code) == BPF_K)
		b->oval = K(b->s.k);
	else
		b->oval = b->val[X_ATOM];
	b->et.code = b->s.code;
	b->ef.code = -b->s.code;
}

/*
 * Return true if any register that is used on exit from 'succ' has
 * an exit value that is different from the corresponding exit value
 * from 'b'.
 */
static int
use_conflict(b, succ)
	struct block *b, *succ;
{
	int atom;
	atomset use = succ->out_use;

	if (use == 0)
		return 0;

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (ATOMELEM(use, atom))
			if (b->val[atom] != succ->val[atom])
				return 1;
	return 0;
}

static struct block *
fold_edge(child, ep)
	struct block *child;
	struct edge *ep;
{
	int sense;
	int aval0, aval1, oval0, oval1;
	int code = ep->code;

	if (code < 0) {
		code = -code;
		sense = 0;
	} else
		sense = 1;

	if (child->s.code != code)
		return 0;

	aval0 = child->val[A_ATOM];
	oval0 = child->oval;
	aval1 = ep->pred->val[A_ATOM];
	oval1 = ep->pred->oval;

	if (aval0 != aval1)
		return 0;

	if (oval0 == oval1)
		/*
		 * The operands are identical, so the
		 * result is true if a true branch was
		 * taken to get here, otherwise false.
		 */
		return sense ? JT(child) : JF(child);

	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
		/*
		 * At this point, we only know the comparison if we
		 * came down the true branch, and it was an equality
		 * comparison with a constant.  We rely on the fact that
		 * distinct constants have distinct value numbers.
		 */
		return JF(child);

	return 0;
}
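/*
 * Concretely: if an ancestor tested "jeq #5" and this edge lies on
 * its true branch, a child repeating "jeq #5" against the same A
 * value must also be true, so fold_edge() returns JT(child).  With a
 * different constant, only the true-branch case of an equality test
 * lets us conclude anything, namely that the child must be false.
 */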

static void
opt_j(ep)
	struct edge *ep;
{
	register int i, k;
	register struct block *target;

	if (JT(ep->succ) == 0)
		return;

	if (JT(ep->succ) == JF(ep->succ)) {
		/*
		 * Common branch targets can be eliminated, provided
		 * there is no data dependency.
		 */
		if (!use_conflict(ep->pred, ep->succ->et.succ)) {
			done = 0;
			ep->succ = JT(ep->succ);
		}
	}
	/*
	 * For each edge dominator that matches the successor of this
	 * edge, promote the edge successor to its grandchild.
	 *
	 * XXX We violate the set abstraction here in favor of a reasonably
	 * efficient loop.
	 */
 top:
	for (i = 0; i < edgewords; ++i) {
		register bpf_u_int32 x = ep->edom[i];

		while (x != 0) {
			k = ffs(x) - 1;
			x &=~ (1 << k);
			k += i * BITS_PER_WORD;

			target = fold_edge(ep->succ, edges[k]);
			/*
			 * Check that there is no data dependency between
			 * nodes that will be violated if we move the edge.
			 */
			if (target != 0 && !use_conflict(ep->pred, target)) {
				done = 0;
				ep->succ = target;
				if (JT(target) != 0)
					/*
					 * Start over unless we hit a leaf.
					 */
					goto top;
				return;
			}
		}
	}
}


static void
or_pullup(b)
	struct block *b;
{
	int val, at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 * XXX why?
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	while (1) {
		if (*diffp == 0)
			return;

		if (JT(*diffp) != JT(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JF(*diffp);
		at_top = 0;
	}
	samep = &JF(*diffp);
	while (1) {
		if (*samep == 0)
			return;

		if (JT(*samep) != JT(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between dp0 and dp1.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JF(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JF(pull);
	JF(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	done = 0;
}

static void
and_pullup(b)
	struct block *b;
{
	int val, at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	while (1) {
		if (*diffp == 0)
			return;

		if (JF(*diffp) != JF(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JT(*diffp);
		at_top = 0;
	}
	samep = &JT(*diffp);
	while (1) {
		if (*samep == 0)
			return;

		if (JF(*samep) != JF(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between diffp and samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JT(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JT(pull);
	JT(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	done = 0;
}

static void
opt_blks(root, do_stmts)
	struct block *root;
	int do_stmts;
{
	int i, maxlevel;
	struct block *p;

	init_val();
	maxlevel = root->level;

	find_inedges(root);
	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link)
			opt_blk(p, do_stmts);

	if (do_stmts)
		/*
		 * No point trying to move branches; it can't possibly
		 * make a difference at this point.
		 */
		return;

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			opt_j(&p->et);
			opt_j(&p->ef);
		}
	}

	find_inedges(root);
	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			or_pullup(p);
			and_pullup(p);
		}
	}
}

static inline void
link_inedge(parent, child)
	struct edge *parent;
	struct block *child;
{
	parent->next = child->in_edges;
	child->in_edges = parent;
}

static void
find_inedges(root)
	struct block *root;
{
	int i;
	struct block *b;

	for (i = 0; i < n_blocks; ++i)
		blocks[i]->in_edges = 0;

	/*
	 * Traverse the graph, adding each edge to the predecessor
	 * list of its successors.  Skip the leaves (i.e. level 0).
	 */
	for (i = root->level; i > 0; --i) {
		for (b = levels[i]; b != 0; b = b->link) {
			link_inedge(&b->et, JT(b));
			link_inedge(&b->ef, JF(b));
		}
	}
}

static void
opt_root(b)
	struct block **b;
{
	struct slist *tmp, *s;

	s = (*b)->stmts;
	(*b)->stmts = 0;
	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
		*b = JT(*b);

	tmp = (*b)->stmts;
	if (tmp != 0)
		sappend(s, tmp);
	(*b)->stmts = s;

	/*
	 * If the root node is a return, then there is no
	 * point executing any statements (since the bpf machine
	 * has no side effects).
	 */
	if (BPF_CLASS((*b)->s.code) == BPF_RET)
		(*b)->stmts = 0;
}

static void
opt_loop(root, do_stmts)
	struct block *root;
	int do_stmts;
{

#ifdef BDEBUG
	if (dflag > 1) {
		printf("opt_loop(root, %d) begin\n", do_stmts);
		opt_dump(root);
	}
#endif
	do {
		done = 1;
		find_levels(root);
		find_dom(root);
		find_closure(root);
		find_ud(root);
		find_edom(root);
		opt_blks(root, do_stmts);
#ifdef BDEBUG
		if (dflag > 1) {
			printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, done);
			opt_dump(root);
		}
#endif
	} while (!done);
}

/*
 * Optimize the filter code in its dag representation.
 */
void
bpf_optimize(rootp)
	struct block **rootp;
{
	struct block *root;

	root = *rootp;

	opt_init(root);
	opt_loop(root, 0);
	opt_loop(root, 1);
	intern_blocks(root);
#ifdef BDEBUG
	if (dflag > 1) {
		printf("after intern_blocks()\n");
		opt_dump(root);
	}
#endif
	opt_root(rootp);
#ifdef BDEBUG
	if (dflag > 1) {
		printf("after opt_root()\n");
		opt_dump(root);
	}
#endif
	opt_cleanup();
}

static void
make_marks(p)
	struct block *p;
{
	if (!isMarked(p)) {
		Mark(p);
		if (BPF_CLASS(p->s.code) != BPF_RET) {
			make_marks(JT(p));
			make_marks(JF(p));
		}
	}
}

/*
 * Mark code array such that isMarked(i) is true
 * only for nodes that are alive.
 */
static void
mark_code(p)
	struct block *p;
{
	cur_mark += 1;
	make_marks(p);
}

/*
 * True iff the two stmt lists load the same value from the packet into
 * the accumulator.
 */
static int
eq_slist(x, y)
	struct slist *x, *y;
{
	while (1) {
		while (x && x->s.code == NOP)
			x = x->next;
		while (y && y->s.code == NOP)
			y = y->next;
		if (x == 0)
			return y == 0;
		if (y == 0)
			return x == 0;
		if (x->s.code != y->s.code || x->s.k != y->s.k)
			return 0;
		x = x->next;
		y = y->next;
	}
}

static inline int
eq_blk(b0, b1)
	struct block *b0, *b1;
{
	if (b0->s.code == b1->s.code &&
	    b0->s.k == b1->s.k &&
	    b0->et.succ == b1->et.succ &&
	    b0->ef.succ == b1->ef.succ)
		return eq_slist(b0->stmts, b1->stmts);
	return 0;
}

static void
intern_blocks(root)
	struct block *root;
{
	struct block *p;
	int i, j;
	int done;
 top:
	done = 1;
	for (i = 0; i < n_blocks; ++i)
		blocks[i]->link = 0;

	mark_code(root);

	for (i = n_blocks - 1; --i >= 0; ) {
		if (!isMarked(blocks[i]))
			continue;
		for (j = i + 1; j < n_blocks; ++j) {
			if (!isMarked(blocks[j]))
				continue;
			if (eq_blk(blocks[i], blocks[j])) {
				blocks[i]->link = blocks[j]->link ?
					blocks[j]->link : blocks[j];
				break;
			}
		}
	}
	for (i = 0; i < n_blocks; ++i) {
		p = blocks[i];
		if (JT(p) == 0)
			continue;
		if (JT(p)->link) {
			done = 0;
			JT(p) = JT(p)->link;
		}
		if (JF(p)->link) {
			done = 0;
			JF(p) = JF(p)->link;
		}
	}
	if (!done)
		goto top;
}

static void
opt_cleanup()
{
	free((void *)vnode_base);
	free((void *)vmap);
	free((void *)edges);
	free((void *)space);
	free((void *)levels);
	free((void *)blocks);
}

/*
 * Return the number of stmts in 's'.
 */
static int
slength(s)
	struct slist *s;
{
	int n = 0;

	for (; s; s = s->next)
		if (s->s.code != NOP)
			++n;
	return n;
}

/*
 * Return the number of nodes reachable by 'p'.
 * All nodes should be initially unmarked.
 */
static int
count_blocks(p)
	struct block *p;
{
	if (p == 0 || isMarked(p))
		return 0;
	Mark(p);
	return count_blocks(JT(p)) + count_blocks(JF(p)) + 1;
}

/*
 * Do a depth first search on the flow graph, numbering the
 * basic blocks, and entering them into the 'blocks' array.
 */
static void
number_blks_r(p)
	struct block *p;
{
	int n;

	if (p == 0 || isMarked(p))
		return;

	Mark(p);
	n = n_blocks++;
	p->id = n;
	blocks[n] = p;

	number_blks_r(JT(p));
	number_blks_r(JF(p));
}

/*
 * Return the number of stmts in the flowgraph reachable by 'p'.
 * The nodes should be unmarked before calling.
 *
 * Note that "stmts" means "instructions", and that this includes
 *
 *	side-effect statements in 'p' (slength(p->stmts));
 *
 *	statements in the true branch from 'p' (count_stmts(JT(p)));
 *
 *	statements in the false branch from 'p' (count_stmts(JF(p)));
 *
 *	the conditional jump itself (1);
 *
 *	an extra long jump if the true branch requires it (p->longjt);
 *
 *	an extra long jump if the false branch requires it (p->longjf).
 */
static int
count_stmts(p)
	struct block *p;
{
	int n;

	if (p == 0 || isMarked(p))
		return 0;
	Mark(p);
	n = count_stmts(JT(p)) + count_stmts(JF(p));
	return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
}

/*
 * Allocate memory.  All allocation is done before optimization
 * is begun.  A linear bound on the size of all data structures is computed
 * from the total number of blocks and/or statements.
 */
static void
opt_init(root)
	struct block *root;
{
	bpf_u_int32 *p;
	int i, n, max_stmts;

	/*
	 * First, count the blocks, so we can malloc an array to map
	 * block number to block.  Then, put the blocks into the array.
	 */
	unMarkAll();
	n = count_blocks(root);
	blocks = (struct block **)malloc(n * sizeof(*blocks));
	unMarkAll();
	n_blocks = 0;
	number_blks_r(root);

	n_edges = 2 * n_blocks;
	edges = (struct edge **)malloc(n_edges * sizeof(*edges));

	/*
	 * The number of levels is bounded by the number of nodes.
	 */
	levels = (struct block **)malloc(n_blocks * sizeof(*levels));

	edgewords = n_edges / (8 * sizeof(bpf_u_int32)) + 1;
	nodewords = n_blocks / (8 * sizeof(bpf_u_int32)) + 1;

	/* XXX */
	space = (bpf_u_int32 *)malloc(2 * n_blocks * nodewords * sizeof(*space)
				 + n_edges * edgewords * sizeof(*space));
	p = space;
	all_dom_sets = p;
	for (i = 0; i < n; ++i) {
		blocks[i]->dom = p;
		p += nodewords;
	}
	all_closure_sets = p;
	for (i = 0; i < n; ++i) {
		blocks[i]->closure = p;
		p += nodewords;
	}
	all_edge_sets = p;
	for (i = 0; i < n; ++i) {
		register struct block *b = blocks[i];

		b->et.edom = p;
		p += edgewords;
		b->ef.edom = p;
		p += edgewords;
		b->et.id = i;
		edges[i] = &b->et;
		b->ef.id = n_blocks + i;
		edges[n_blocks + i] = &b->ef;
		b->et.pred = b;
		b->ef.pred = b;
	}
	max_stmts = 0;
	for (i = 0; i < n; ++i)
		max_stmts += slength(blocks[i]->stmts) + 1;
	/*
	 * We allocate at most 3 value numbers per statement,
	 * so this is an upper bound on the number of valnodes
	 * we'll need.
	 */
	maxval = 3 * max_stmts;
	vmap = (struct vmapinfo *)malloc(maxval * sizeof(*vmap));
	vnode_base = (struct valnode *)malloc(maxval * sizeof(*vnode_base));
}

/*
 * Some pointers used to convert the basic block form of the code
 * into the array form that BPF requires.  'fstart' will point to
 * the malloc'd array, while 'ftail' is used during the recursive traversal.
 */
static struct bpf_insn *fstart;
static struct bpf_insn *ftail;

#ifdef BDEBUG
int bids[1000];
#endif

/*
 * Returns true if successful.  Returns false if a branch has
 * an offset that is too large.  If so, we have marked that
 * branch so that on a subsequent iteration, it will be treated
 * properly.
 */
static int
convert_code_r(p)
	struct block *p;
{
	struct bpf_insn *dst;
	struct slist *src;
	int slen;
	u_int off;
	int extrajmps;		/* number of extra jumps inserted */
	struct slist **offset = NULL;

	if (p == 0 || isMarked(p))
		return (1);
	Mark(p);

	if (convert_code_r(JF(p)) == 0)
		return (0);
	if (convert_code_r(JT(p)) == 0)
		return (0);

	slen = slength(p->stmts);
	dst = ftail -= (slen + 1 + p->longjt + p->longjf);
		/* inflate length by any extra jumps */

	p->offset = dst - fstart;

	/* generate offset[] for convenience */
	if (slen) {
		offset = (struct slist **)calloc(slen, sizeof(struct slist *));
		if (!offset) {
			bpf_error("not enough core");
			/*NOTREACHED*/
		}
	}
	src = p->stmts;
	for (off = 0; off < slen && src; off++) {
#if 0
		printf("off=%d src=%x\n", off, src);
#endif
		offset[off] = src;
		src = src->next;
	}

	off = 0;
	for (src = p->stmts; src; src = src->next) {
		if (src->s.code == NOP)
			continue;
		dst->code = (u_short)src->s.code;
		dst->k = src->s.k;

		/* fill block-local relative jump */
		if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
#if 0
			if (src->s.jt || src->s.jf) {
				bpf_error("illegal jmp destination");
				/*NOTREACHED*/
			}
#endif
			goto filled;
		}
		if (off == slen - 2)	/*???*/
			goto filled;

	    {
		int i;
		int jt, jf;
		char *ljerr = "%s for block-local relative jump: off=%d";

#if 0
		printf("code=%x off=%d %x %x\n", src->s.code,
			off, src->s.jt, src->s.jf);
#endif

		if (!src->s.jt || !src->s.jf) {
			bpf_error(ljerr, "no jmp destination", off);
			/*NOTREACHED*/
		}

		jt = jf = 0;
		for (i = 0; i < slen; i++) {
			if (offset[i] == src->s.jt) {
				if (jt) {
					bpf_error(ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}

				dst->jt = i - off - 1;
				jt++;
			}
			if (offset[i] == src->s.jf) {
				if (jf) {
					bpf_error(ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}
				dst->jf = i - off - 1;
				jf++;
			}
		}
		if (!jt || !jf) {
			bpf_error(ljerr, "no destination found", off);
			/*NOTREACHED*/
		}
	    }
filled:
		++dst;
		++off;
	}
	if (offset)
		free(offset);

#ifdef BDEBUG
	bids[dst - fstart] = p->id + 1;
#endif
	dst->code = (u_short)p->s.code;
	dst->k = p->s.k;
	if (JT(p)) {
		extrajmps = 0;
		off = JT(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
			/* offset too large for branch, must add a jump */
			if (p->longjt == 0) {
				/* mark this instruction and retry */
				p->longjt++;
				return (0);
			}
			/* branch if T to following jump */
			dst->jt = extrajmps;
			extrajmps++;
			dst[extrajmps].code = BPF_JMP|BPF_JA;
			dst[extrajmps].k = off - extrajmps;
		}
		else
			dst->jt = off;
		off = JF(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
			/* offset too large for branch, must add a jump */
			if (p->longjf == 0) {
				/* mark this instruction and retry */
				p->longjf++;
				return (0);
			}
			/* branch if F to following jump */
			/* if two jumps are inserted, F goes to second one */
			dst->jf = extrajmps;
			extrajmps++;
			dst[extrajmps].code = BPF_JMP|BPF_JA;
			dst[extrajmps].k = off - extrajmps;
		}
		else
			dst->jf = off;
	}
	return (1);
}


/*
 * Convert flowgraph intermediate representation to the
 * BPF array representation.  Set *lenp to the number of instructions.
 */
struct bpf_insn *
icode_to_fcode(root, lenp)
	struct block *root;
	int *lenp;
{
	int n;
	struct bpf_insn *fp;

	/*
	 * Loop doing convert_code_r() until no branches remain
	 * with too-large offsets.
	 */
	while (1) {
		unMarkAll();
		n = *lenp = count_stmts(root);

		fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
		memset((char *)fp, 0, sizeof(*fp) * n);
		fstart = fp;
		ftail = fp + n;

		unMarkAll();
		if (convert_code_r(root))
			break;
		free(fp);
	}

	return fp;
}

/*
 * Make a copy of a BPF program and put it in the "fcode" member of
 * a "pcap_t".
 *
 * If we fail to allocate memory for the copy, fill in the "errbuf"
 * member of the "pcap_t" with an error message, and return -1;
 * otherwise, return 0.
 */
int
install_bpf_program(pcap_t *p, struct bpf_program *fp)
{
	size_t prog_size;

	/*
	 * Free up any already installed program.
	 */
	pcap_freecode(&p->fcode);

	prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
	p->fcode.bf_len = fp->bf_len;
	p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
	if (p->fcode.bf_insns == NULL) {
		snprintf(p->errbuf, sizeof(p->errbuf),
		    "malloc: %s", pcap_strerror(errno));
		return (-1);
	}
	memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
	return (0);
}
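/*
 * The expected callers are the pcap_setfilter() implementations of
 * capture modules that run the filter in userland; a minimal sketch
 * (error checking elided, and 'my_setfilter' is a hypothetical
 * device-specific hook, not part of this file):
 *
 *	static int
 *	my_setfilter(pcap_t *p, struct bpf_program *fp)
 *	{
 *		return (install_bpf_program(p, fp));
 *	}
 */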

#ifdef BDEBUG
static void
opt_dump(root)
	struct block *root;
{
	struct bpf_program f;

	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(root, &f.bf_len);
	bpf_dump(&f, 1);
	putchar('\n');
	free((char *)f.bf_insns);
}
#endif