/*
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Optimization module for tcpdump intermediate representation.
 */
#ifndef lint
static const char rcsid[] =
    "@(#) $Header: /tcpdump/master/libpcap/optimize.c,v 1.62 2000-04-27 09:11:12 itojun Exp $ (LBL)";
#endif

#include <sys/types.h>
#include <sys/time.h>

#include <stdio.h>
#include <stdlib.h>
#include <memory.h>

#include "pcap-int.h"

#include "gencode.h"

#include "gnuc.h"
#ifdef HAVE_OS_PROTO_H
#include "os-proto.h"
#endif

#ifdef BDEBUG
extern int dflag;
#endif

#define A_ATOM BPF_MEMWORDS
#define X_ATOM (BPF_MEMWORDS+1)

#define NOP -1

/*
 * This define is used to represent *both* the accumulator and
 * x register in use-def computations.
 * Currently, the use-def code assumes only one definition per instruction.
 */
#define AX_ATOM N_ATOMS

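/*
 * Added orientation note (not from the original source): the optimizer's
 * "atoms" are the BPF scratch memory slots plus the two registers.
 * Assuming BPF_MEMWORDS is 16 (net/bpf.h) and N_ATOMS is BPF_MEMWORDS+2
 * (gencode.h), the numbering works out to:
 *
 *	 0 .. 15	M[0] .. M[15]
 *	16		A_ATOM (accumulator)
 *	17		X_ATOM (index register)
 *	18		AX_ATOM (both registers at once; one past the real
 *			 atoms, so it never lands in an atom set)
 */
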
/*
 * A flag to indicate that further optimization is needed.
 * Iterative passes are continued until a given pass yields no
 * branch movement.
 */
static int done;

/*
 * A block is marked if and only if its mark equals the current mark.
 * Rather than traverse the code array, marking each item, 'cur_mark'
 * is incremented.  This automatically makes each element unmarked.
 */
static int cur_mark;
#define isMarked(p) ((p)->mark == cur_mark)
#define unMarkAll() cur_mark += 1
#define Mark(p) ((p)->mark = cur_mark)
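
/*
 * Illustrative sketch of the generation-count trick above: if cur_mark
 * is 5, exactly the blocks whose 'mark' field equals 5 are marked.
 *
 *	unMarkAll();	// cur_mark becomes 6; no block has mark == 6
 *	Mark(b);	// b->mark = 6, so isMarked(b) is now true
 *	unMarkAll();	// cur_mark becomes 7; isMarked(b) is false again
 *
 * No mark field is ever cleared; bumping the generation invalidates
 * every mark in O(1).
 */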

static void opt_init(struct block *);
static void opt_cleanup(void);

static void make_marks(struct block *);
static void mark_code(struct block *);

static void intern_blocks(struct block *);

static int eq_slist(struct slist *, struct slist *);

static void find_levels_r(struct block *);

static void find_levels(struct block *);
static void find_dom(struct block *);
static void propedom(struct edge *);
static void find_edom(struct block *);
static void find_closure(struct block *);
static int atomuse(struct stmt *);
static int atomdef(struct stmt *);
static void compute_local_ud(struct block *);
static void find_ud(struct block *);
static void init_val(void);
static int F(int, int, int);
static inline void vstore(struct stmt *, int *, int, int);
static void opt_blk(struct block *, int);
static int use_conflict(struct block *, struct block *);
static void opt_j(struct edge *);
static void or_pullup(struct block *);
static void and_pullup(struct block *);
static void opt_blks(struct block *, int);
static inline void link_inedge(struct edge *, struct block *);
static void find_inedges(struct block *);
static void opt_root(struct block **);
static void opt_loop(struct block *, int);
static void fold_op(struct stmt *, int, int);
static inline struct slist *this_op(struct slist *);
static void opt_not(struct block *);
static void opt_peep(struct block *);
static void opt_stmt(struct stmt *, int[], int);
static void deadstmt(struct stmt *, struct stmt *[]);
static void opt_deadstores(struct block *);
static struct block *fold_edge(struct block *, struct edge *);
static inline int eq_blk(struct block *, struct block *);
static int slength(struct slist *);
static int count_blocks(struct block *);
static void number_blks_r(struct block *);
static int count_stmts(struct block *);
static int convert_code_r(struct block *);
#ifdef BDEBUG
static void opt_dump(struct block *);
#endif

static int n_blocks;
struct block **blocks;
static int n_edges;
struct edge **edges;

/*
 * A bit vector set representation of the dominators.
 * The set size is rounded up to a whole number of words.
 */
static int nodewords;
static int edgewords;
struct block **levels;
bpf_u_int32 *space;
#define BITS_PER_WORD (8*sizeof(bpf_u_int32))
/*
 * True if 'a' is in uset {p}.
 */
#define SET_MEMBER(p, a) \
((p)[(unsigned)(a) / BITS_PER_WORD] & (1 << ((unsigned)(a) % BITS_PER_WORD)))

/*
 * Add 'a' to uset p.
 */
#define SET_INSERT(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] |= (1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * Delete 'a' from uset p.
 */
#define SET_DELETE(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] &= ~(1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * a := a intersect b
 */
#define SET_INTERSECT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ &= *_y++;\
}

/*
 * a := a - b
 */
#define SET_SUBTRACT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ &=~ *_y++;\
}

/*
 * a := a union b
 */
#define SET_UNION(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ |= *_y++;\
}
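
/*
 * Layout example for the uset macros (illustrative): with 32-bit words
 * (BITS_PER_WORD == 32), member 37 lives in bit 37 % 32 == 5 of word
 * 37 / 32 == 1, so
 *
 *	SET_INSERT(p, 37);		// p[1] |= 1 << 5
 *	if (SET_MEMBER(p, 37)) ...	// tests p[1] & (1 << 5)
 */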

static uset all_dom_sets;
static uset all_closure_sets;
static uset all_edge_sets;

#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif

static void
find_levels_r(b)
	struct block *b;
{
	int level;

	if (isMarked(b))
		return;

	Mark(b);
	b->link = 0;

	if (JT(b)) {
		find_levels_r(JT(b));
		find_levels_r(JF(b));
		level = MAX(JT(b)->level, JF(b)->level) + 1;
	} else
		level = 0;
	b->level = level;
	b->link = levels[level];
	levels[level] = b;
}

/*
 * Level graph.  The levels go from 0 at the leaves to
 * N_LEVELS at the root.  The levels[] array points to the
 * first node of the level list, whose elements are linked
 * with the 'link' field of the struct block.
 */
static void
find_levels(root)
	struct block *root;
{
	memset((char *)levels, 0, n_blocks * sizeof(*levels));
	unMarkAll();
	find_levels_r(root);
}
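
/*
 * Example (illustrative): for the diamond-shaped graph
 *
 *		b0
 *	       /  \
 *	      b1   b2
 *	       \  /
 *	       ret
 *
 * the leaf 'ret' is assigned level 0, b1 and b2 level 1, and the root
 * b0 level 2.  levels[1] then heads a list containing b1 and b2 (in
 * traversal-dependent order), chained through their 'link' fields.
 */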

/*
 * Find dominator relationships.
 * Assumes graph has been leveled.
 */
static void
find_dom(root)
	struct block *root;
{
	int i;
	struct block *b;
	bpf_u_int32 *x;

	/*
	 * Initialize sets to contain all nodes.
	 */
	x = all_dom_sets;
	i = n_blocks * nodewords;
	while (--i >= 0)
		*x++ = ~0;
	/* Root starts off empty. */
	for (i = nodewords; --i >= 0;)
		root->dom[i] = 0;

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->dom, b->id);
			if (JT(b) == 0)
				continue;
			SET_INTERSECT(JT(b)->dom, b->dom, nodewords);
			SET_INTERSECT(JF(b)->dom, b->dom, nodewords);
		}
	}
}
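
/*
 * This is the usual iterative dominator computation, simplified by the
 * graph being acyclic and leveled: every set starts as "all blocks",
 * the root keeps only itself, and each block intersects its own set
 * into both successors while walking the levels top-down.  In the
 * diamond example above (illustrative), dom(ret) works out to
 * {b0, ret}: neither b1 nor b2 alone lies on every path to 'ret'.
 */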

static void
propedom(ep)
	struct edge *ep;
{
	SET_INSERT(ep->edom, ep->id);
	if (ep->succ) {
		SET_INTERSECT(ep->succ->et.edom, ep->edom, edgewords);
		SET_INTERSECT(ep->succ->ef.edom, ep->edom, edgewords);
	}
}

/*
 * Compute edge dominators.
 * Assumes graph has been leveled and predecessors established.
 */
static void
find_edom(root)
	struct block *root;
{
	int i;
	uset x;
	struct block *b;

	x = all_edge_sets;
	for (i = n_edges * edgewords; --i >= 0; )
		x[i] = ~0;

	/* root->level is the highest level number found. */
	memset(root->et.edom, 0, edgewords * sizeof(*(uset)0));
	memset(root->ef.edom, 0, edgewords * sizeof(*(uset)0));
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b != 0; b = b->link) {
			propedom(&b->et);
			propedom(&b->ef);
		}
	}
}

/*
 * Find the backwards transitive closure of the flow graph.  These sets
 * are backwards in the sense that we find the set of nodes that reach
 * a given node, not the set of nodes that can be reached by a node.
 *
 * Assumes graph has been leveled.
 */
static void
find_closure(root)
	struct block *root;
{
	int i;
	struct block *b;

	/*
	 * Initialize sets to contain no nodes.
	 */
	memset((char *)all_closure_sets, 0,
	      n_blocks * nodewords * sizeof(*all_closure_sets));

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->closure, b->id);
			if (JT(b) == 0)
				continue;
			SET_UNION(JT(b)->closure, b->closure, nodewords);
			SET_UNION(JF(b)->closure, b->closure, nodewords);
		}
	}
}
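
/*
 * Illustrative example: in the diamond graph above, closure(ret) is
 * {b0, b1, b2, ret} -- every block from which 'ret' is reachable --
 * while closure(b0) is just {b0}.
 */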

/*
 * Return the register number that is used by s.  If A and X are both
 * used, return AX_ATOM.  If no register is used, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomuse(s)
	struct stmt *s;
{
	register int c = s->code;

	if (c == NOP)
		return -1;

	switch (BPF_CLASS(c)) {

	case BPF_RET:
		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;

	case BPF_LD:
	case BPF_LDX:
		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
			(BPF_MODE(c) == BPF_MEM) ? s->k : -1;

	case BPF_ST:
		return A_ATOM;

	case BPF_STX:
		return X_ATOM;

	case BPF_JMP:
	case BPF_ALU:
		if (BPF_SRC(c) == BPF_X)
			return AX_ATOM;
		return A_ATOM;

	case BPF_MISC:
		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
	}
	abort();
	/* NOTREACHED */
}

/*
 * Return the register number that is defined by 's'.  We assume that
 * a single stmt cannot define more than one register.  If no register
 * is defined, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomdef(s)
	struct stmt *s;
{
	if (s->code == NOP)
		return -1;

	switch (BPF_CLASS(s->code)) {

	case BPF_LD:
	case BPF_ALU:
		return A_ATOM;

	case BPF_LDX:
		return X_ATOM;

	case BPF_ST:
	case BPF_STX:
		return s->k;

	case BPF_MISC:
		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
	}
	return -1;
}

static void
compute_local_ud(b)
	struct block *b;
{
	struct slist *s;
	atomset def = 0, use = 0, kill = 0;
	int atom;

	for (s = b->stmts; s; s = s->next) {
		if (s->s.code == NOP)
			continue;
		atom = atomuse(&s->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
			else
				abort();
		}
		atom = atomdef(&s->s);
		if (atom >= 0) {
			if (!ATOMELEM(use, atom))
				kill |= ATOMMASK(atom);
			def |= ATOMMASK(atom);
		}
	}
	if (!ATOMELEM(def, A_ATOM) && BPF_CLASS(b->s.code) == BPF_JMP)
		use |= ATOMMASK(A_ATOM);

	b->def = def;
	b->kill = kill;
	b->in_use = use;
}
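
/*
 * Worked example (illustrative): for a block containing
 *
 *	ld [14]		defines A
 *	st M[2]		uses A, defines M[2]
 *	ldx M[7]	uses M[7], defines X
 *
 * we get use = {M[7]} (A is defined before it is used; M[2] is defined
 * before any use, so it lands in kill), def = {A, M[2], X} and
 * kill = {A, M[2], X}.
 */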

/*
 * Assume graph is already leveled.
 */
static void
find_ud(root)
	struct block *root;
{
	int i, maxlevel;
	struct block *p;

	/*
	 * root->level is the highest level number found;
	 * count down from there.
	 */
	maxlevel = root->level;
	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link) {
			compute_local_ud(p);
			p->out_use = 0;
		}

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			p->out_use |= JT(p)->in_use | JF(p)->in_use;
			p->in_use |= p->out_use &~ p->kill;
		}
	}
}

/*
 * These data structures are used in a Cocke and Schwartz style
 * value numbering scheme.  Since the flowgraph is acyclic,
 * exit values can be propagated from a node's predecessors
 * provided it is uniquely defined.
 */
struct valnode {
	int code;
	int v0, v1;
	int val;
	struct valnode *next;
};

#define MODULUS 213
static struct valnode *hashtbl[MODULUS];
static int curval;
static int maxval;

/* Integer constants mapped with the load immediate opcode. */
#define K(i) F(BPF_LD|BPF_IMM|BPF_W, i, 0L)

struct vmapinfo {
	int is_const;
	bpf_int32 const_val;
};

struct vmapinfo *vmap;
struct valnode *vnode_base;
struct valnode *next_vnode;

static void
init_val()
{
	curval = 0;
	next_vnode = vnode_base;
	memset((char *)vmap, 0, maxval * sizeof(*vmap));
	memset((char *)hashtbl, 0, sizeof hashtbl);
}

/* Because we really don't have an IR, this stuff is a little messy. */
static int
F(code, v0, v1)
	int code;
	int v0, v1;
{
	u_int hash;
	int val;
	struct valnode *p;

	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
	hash %= MODULUS;

	for (p = hashtbl[hash]; p; p = p->next)
		if (p->code == code && p->v0 == v0 && p->v1 == v1)
			return p->val;

	val = ++curval;
	if (BPF_MODE(code) == BPF_IMM &&
	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
		vmap[val].const_val = v0;
		vmap[val].is_const = 1;
	}
	p = next_vnode++;
	p->val = val;
	p->code = code;
	p->v0 = v0;
	p->v1 = v1;
	p->next = hashtbl[hash];
	hashtbl[hash] = p;

	return val;
}
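
/*
 * Illustrative use of the value table: two loads of the same constant
 * produce the same (code, v0, v1) triple, so the second lookup finds
 * the first entry and both get one value number:
 *
 *	v1 = K(2);	// F(BPF_LD|BPF_IMM|BPF_W, 2, 0): new valnode,
 *			// vmap[v1].is_const set
 *	v2 = K(2);	// found in hashtbl, so v2 == v1
 *
 * Equal value numbers are what let the passes below prove that two
 * registers or memory slots hold the same value.
 */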

static inline void
vstore(s, valp, newval, alter)
	struct stmt *s;
	int *valp;
	int newval;
	int alter;
{
	if (alter && *valp == newval)
		s->code = NOP;
	else
		*valp = newval;
}

static void
fold_op(s, v0, v1)
	struct stmt *s;
	int v0, v1;
{
	bpf_int32 a, b;

	a = vmap[v0].const_val;
	b = vmap[v1].const_val;

	switch (BPF_OP(s->code)) {
	case BPF_ADD:
		a += b;
		break;

	case BPF_SUB:
		a -= b;
		break;

	case BPF_MUL:
		a *= b;
		break;

	case BPF_DIV:
		if (b == 0)
			bpf_error("division by zero");
		a /= b;
		break;

	case BPF_AND:
		a &= b;
		break;

	case BPF_OR:
		a |= b;
		break;

	case BPF_LSH:
		a <<= b;
		break;

	case BPF_RSH:
		a >>= b;
		break;

	case BPF_NEG:
		a = -a;
		break;

	default:
		abort();
	}
	s->k = a;
	s->code = BPF_LD|BPF_IMM;
	done = 0;
}
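
/*
 * Example (illustrative): if the accumulator's value number denotes
 * the constant 2 and the statement is "add #3", opt_stmt() calls
 * fold_op(s, val[A_ATOM], K(3)); the switch above computes 2 + 3 and
 * rewrites the statement in place as "ld #5".
 */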

static inline struct slist *
this_op(s)
	struct slist *s;
{
	while (s != 0 && s->s.code == NOP)
		s = s->next;
	return s;
}

static void
opt_not(b)
	struct block *b;
{
	struct block *tmp = JT(b);

	JT(b) = JF(b);
	JF(b) = tmp;
}

static void
opt_peep(b)
	struct block *b;
{
	struct slist *s;
	struct slist *next, *last;
	int val;

	s = b->stmts;
	if (s == 0)
		return;

	last = s;
	while (1) {
		s = this_op(s);
		if (s == 0)
			break;
		next = this_op(s->next);
		if (next == 0)
			break;
		last = next;

		/*
		 * st  M[k]	-->	st  M[k]
		 * ldx M[k]		tax
		 */
		if (s->s.code == BPF_ST &&
		    next->s.code == (BPF_LDX|BPF_MEM) &&
		    s->s.k == next->s.k) {
			done = 0;
			next->s.code = BPF_MISC|BPF_TAX;
		}
		/*
		 * ld  #k	-->	ldx #k
		 * tax			txa
		 */
		if (s->s.code == (BPF_LD|BPF_IMM) &&
		    next->s.code == (BPF_MISC|BPF_TAX)) {
			s->s.code = BPF_LDX|BPF_IMM;
			next->s.code = BPF_MISC|BPF_TXA;
			done = 0;
		}
		/*
		 * This is an ugly special case, but it happens
		 * when you say tcp[k] or udp[k] where k is a constant.
		 */
		if (s->s.code == (BPF_LD|BPF_IMM)) {
			struct slist *add, *tax, *ild;

			/*
			 * Check that X isn't used on exit from this
			 * block (which the optimizer might cause).
			 * We know the code generator won't generate
			 * any local dependencies.
			 */
			if (ATOMELEM(b->out_use, X_ATOM))
				break;

			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
				add = next;
			else
				add = this_op(next->next);
			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
				break;

			tax = this_op(add->next);
			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
				break;

			ild = this_op(tax->next);
			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
			    BPF_MODE(ild->s.code) != BPF_IND)
				break;
			/*
			 * XXX We need to check that X is not
			 * subsequently used.  We know we can eliminate the
			 * accumulator modifications since it is defined
			 * by the last stmt of this sequence.
			 *
			 * We want to turn this sequence:
			 *
			 * (004) ldi     #0x2		{s}
			 * (005) ldxms   [14]		{next}  -- optional
			 * (006) addx			{add}
			 * (007) tax			{tax}
			 * (008) ild     [x+0]		{ild}
			 *
			 * into this sequence:
			 *
			 * (004) nop
			 * (005) ldxms   [14]
			 * (006) nop
			 * (007) nop
			 * (008) ild     [x+2]
			 *
			 */
			ild->s.k += s->s.k;
			s->s.code = NOP;
			add->s.code = NOP;
			tax->s.code = NOP;
			done = 0;
		}
		s = next;
	}
	/*
	 * If we have a subtract to do a comparison, and the X register
	 * is a known constant, we can merge this value into the
	 * comparison.
	 */
	if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
		val = b->val[X_ATOM];
		if (vmap[val].is_const) {
			int op;

			b->s.k += vmap[val].const_val;
			op = BPF_OP(b->s.code);
			if (op == BPF_JGT || op == BPF_JGE) {
				struct block *t = JT(b);
				JT(b) = JF(b);
				JF(b) = t;
				b->s.k += 0x80000000;
			}
			last->s.code = NOP;
			done = 0;
		} else if (b->s.k == 0) {
			/*
			 * sub x  ->	nop
			 * j  #0	j  x
			 */
			last->s.code = NOP;
			b->s.code = BPF_CLASS(b->s.code) | BPF_OP(b->s.code) |
				BPF_X;
			done = 0;
		}
	}
	/*
	 * Likewise, a constant subtract can be simplified.
	 */
	else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K) &&
		 !ATOMELEM(b->out_use, A_ATOM)) {
		int op;

		b->s.k += last->s.k;
		last->s.code = NOP;
		op = BPF_OP(b->s.code);
		if (op == BPF_JGT || op == BPF_JGE) {
			struct block *t = JT(b);
			JT(b) = JF(b);
			JF(b) = t;
			b->s.k += 0x80000000;
		}
		done = 0;
	}
	/*
	 * and #k	nop
	 * jeq #0  ->	jset #k
	 */
	if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM) && b->s.k == 0) {
		b->s.k = last->s.k;
		b->s.code = BPF_JMP|BPF_K|BPF_JSET;
		last->s.code = NOP;
		done = 0;
		opt_not(b);
	}
	/*
	 * If the accumulator is a known constant, we can compute the
	 * comparison result.
	 */
	val = b->val[A_ATOM];
	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
		bpf_int32 v = vmap[val].const_val;
		switch (BPF_OP(b->s.code)) {

		case BPF_JEQ:
			v = v == b->s.k;
			break;

		case BPF_JGT:
			v = (unsigned)v > b->s.k;
			break;

		case BPF_JGE:
			v = (unsigned)v >= b->s.k;
			break;

		case BPF_JSET:
			v &= b->s.k;
			break;

		default:
			abort();
		}
		if (JF(b) != JT(b))
			done = 0;
		if (v)
			JF(b) = JT(b);
		else
			JT(b) = JF(b);
	}
}

/*
 * Compute the symbolic value of the expression of 's', and update
 * anything it defines in the value table 'val'.  If 'alter' is true,
 * do various optimizations.  This code would be cleaner if symbolic
 * evaluation and code transformations weren't folded together.
 */
static void
opt_stmt(s, val, alter)
	struct stmt *s;
	int val[];
	int alter;
{
	int op;
	int v;

	switch (s->code) {

	case BPF_LD|BPF_ABS|BPF_W:
	case BPF_LD|BPF_ABS|BPF_H:
	case BPF_LD|BPF_ABS|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IND|BPF_W:
	case BPF_LD|BPF_IND|BPF_H:
	case BPF_LD|BPF_IND|BPF_B:
		v = val[X_ATOM];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
			s->k += vmap[v].const_val;
			v = F(s->code, s->k, 0L);
			done = 0;
		}
		else
			v = F(s->code, s->k, v);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_LEN:
		v = F(s->code, 0L, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_MSH|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ALU|BPF_NEG:
		if (alter && vmap[val[A_ATOM]].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = -vmap[val[A_ATOM]].const_val;
			val[A_ATOM] = K(s->k);
		}
		else
			val[A_ATOM] = F(s->code, val[A_ATOM], 0L);
		break;

	case BPF_ALU|BPF_ADD|BPF_K:
	case BPF_ALU|BPF_SUB|BPF_K:
	case BPF_ALU|BPF_MUL|BPF_K:
	case BPF_ALU|BPF_DIV|BPF_K:
	case BPF_ALU|BPF_AND|BPF_K:
	case BPF_ALU|BPF_OR|BPF_K:
	case BPF_ALU|BPF_LSH|BPF_K:
	case BPF_ALU|BPF_RSH|BPF_K:
		op = BPF_OP(s->code);
		if (alter) {
			if (s->k == 0) {
				if (op == BPF_ADD || op == BPF_SUB ||
				    op == BPF_LSH || op == BPF_RSH ||
				    op == BPF_OR) {
					s->code = NOP;
					break;
				}
				if (op == BPF_MUL || op == BPF_AND) {
					s->code = BPF_LD|BPF_IMM;
					val[A_ATOM] = K(s->k);
					break;
				}
			}
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], K(s->k));
				val[A_ATOM] = K(s->k);
				break;
			}
		}
		val[A_ATOM] = F(s->code, val[A_ATOM], K(s->k));
		break;

	case BPF_ALU|BPF_ADD|BPF_X:
	case BPF_ALU|BPF_SUB|BPF_X:
	case BPF_ALU|BPF_MUL|BPF_X:
	case BPF_ALU|BPF_DIV|BPF_X:
	case BPF_ALU|BPF_AND|BPF_X:
	case BPF_ALU|BPF_OR|BPF_X:
	case BPF_ALU|BPF_LSH|BPF_X:
	case BPF_ALU|BPF_RSH|BPF_X:
		op = BPF_OP(s->code);
		if (alter && vmap[val[X_ATOM]].is_const) {
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], val[X_ATOM]);
				val[A_ATOM] = K(s->k);
			}
			else {
				s->code = BPF_ALU|BPF_K|op;
				s->k = vmap[val[X_ATOM]].const_val;
				done = 0;
				val[A_ATOM] =
					F(s->code, val[A_ATOM], K(s->k));
			}
			break;
		}
		/*
		 * Check if we're doing something to an accumulator
		 * that is 0, and simplify.  This may not seem like
		 * much of a simplification but it could open up further
		 * optimizations.
		 * XXX We could also check for mul by 1, and -1, etc.
		 */
		if (alter && vmap[val[A_ATOM]].is_const
		    && vmap[val[A_ATOM]].const_val == 0) {
			if (op == BPF_ADD || op == BPF_OR ||
			    op == BPF_LSH || op == BPF_RSH || op == BPF_SUB) {
				s->code = BPF_MISC|BPF_TXA;
				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
				break;
			}
			else if (op == BPF_MUL || op == BPF_DIV ||
				 op == BPF_AND) {
				s->code = BPF_LD|BPF_IMM;
				s->k = 0;
				vstore(s, &val[A_ATOM], K(s->k), alter);
				break;
			}
			else if (op == BPF_NEG) {
				s->code = NOP;
				break;
			}
		}
		val[A_ATOM] = F(s->code, val[A_ATOM], val[X_ATOM]);
		break;

	case BPF_MISC|BPF_TXA:
		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
		break;

	case BPF_LD|BPF_MEM:
		v = val[s->k];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = vmap[v].const_val;
			done = 0;
		}
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_MISC|BPF_TAX:
		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
		break;

	case BPF_LDX|BPF_MEM:
		v = val[s->k];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LDX|BPF_IMM;
			s->k = vmap[v].const_val;
			done = 0;
		}
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ST:
		vstore(s, &val[s->k], val[A_ATOM], alter);
		break;

	case BPF_STX:
		vstore(s, &val[s->k], val[X_ATOM], alter);
		break;
	}
}

static void
deadstmt(s, last)
	register struct stmt *s;
	register struct stmt *last[];
{
	register int atom;

	atom = atomuse(s);
	if (atom >= 0) {
		if (atom == AX_ATOM) {
			last[X_ATOM] = 0;
			last[A_ATOM] = 0;
		}
		else
			last[atom] = 0;
	}
	atom = atomdef(s);
	if (atom >= 0) {
		if (last[atom]) {
			done = 0;
			last[atom]->code = NOP;
		}
		last[atom] = s;
	}
}

static void
opt_deadstores(b)
	register struct block *b;
{
	register struct slist *s;
	register int atom;
	struct stmt *last[N_ATOMS];

	memset((char *)last, 0, sizeof last);

	for (s = b->stmts; s != 0; s = s->next)
		deadstmt(&s->s, last);
	deadstmt(&b->s, last);

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
			last[atom]->code = NOP;
			done = 0;
		}
}

static void
opt_blk(b, do_stmts)
	struct block *b;
	int do_stmts;
{
	struct slist *s;
	struct edge *p;
	int i;
	bpf_int32 aval;

#if 0
	for (s = b->stmts; s && s->next; s = s->next)
		if (BPF_CLASS(s->s.code) == BPF_JMP) {
			do_stmts = 0;
			break;
		}
#endif

	/*
	 * Initialize the atom values.
	 * If we have no predecessors, everything is undefined.
	 * Otherwise, we inherit our values from our predecessors.
	 * If any register has an ambiguous value (i.e. control paths are
	 * merging) give it the undefined value of 0.
	 */
	p = b->in_edges;
	if (p == 0)
		memset((char *)b->val, 0, sizeof(b->val));
	else {
		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
		while ((p = p->next) != NULL) {
			for (i = 0; i < N_ATOMS; ++i)
				if (b->val[i] != p->pred->val[i])
					b->val[i] = 0;
		}
	}
	aval = b->val[A_ATOM];
	for (s = b->stmts; s; s = s->next)
		opt_stmt(&s->s, b->val, do_stmts);

	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator with a value that is
	 * already there, or if this block is a return,
	 * eliminate all the statements.
	 */
	if (do_stmts &&
	    ((b->out_use == 0 && aval != 0 && b->val[A_ATOM] == aval) ||
	     BPF_CLASS(b->s.code) == BPF_RET)) {
		if (b->stmts != 0) {
			b->stmts = 0;
			done = 0;
		}
	} else {
		opt_peep(b);
		opt_deadstores(b);
	}
	/*
	 * Set up values for branch optimizer.
	 */
	if (BPF_SRC(b->s.code) == BPF_K)
		b->oval = K(b->s.k);
	else
		b->oval = b->val[X_ATOM];
	b->et.code = b->s.code;
	b->ef.code = -b->s.code;
}

/*
 * Return true if any register that is used on exit from 'succ' has
 * an exit value that is different from the corresponding exit value
 * from 'b'.
 */
static int
use_conflict(b, succ)
	struct block *b, *succ;
{
	int atom;
	atomset use = succ->out_use;

	if (use == 0)
		return 0;

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (ATOMELEM(use, atom))
			if (b->val[atom] != succ->val[atom])
				return 1;
	return 0;
}

static struct block *
fold_edge(child, ep)
	struct block *child;
	struct edge *ep;
{
	int sense;
	int aval0, aval1, oval0, oval1;
	int code = ep->code;

	if (code < 0) {
		code = -code;
		sense = 0;
	} else
		sense = 1;

	if (child->s.code != code)
		return 0;

	aval0 = child->val[A_ATOM];
	oval0 = child->oval;
	aval1 = ep->pred->val[A_ATOM];
	oval1 = ep->pred->oval;

	if (aval0 != aval1)
		return 0;

	if (oval0 == oval1)
		/*
		 * The operands are identical, so the
		 * result is true if a true branch was
		 * taken to get here, otherwise false.
		 */
		return sense ? JT(child) : JF(child);

	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
		/*
		 * At this point, we only know the comparison if we
		 * came down the true branch, and it was an equality
		 * comparison with a constant.  We rely on the fact that
		 * distinct constants have distinct value numbers.
		 */
		return JF(child);

	return 0;
}
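
/*
 * Illustrative case: the same test appearing twice on a path,
 *
 *	b1: jeq #0x800 --true--> ... --> b2: jeq #0x800
 *
 * If every path to b2 goes through b1's true edge (that edge dominates
 * the edge into b2) and the A and operand value numbers match, then
 * fold_edge(b2, <b1's true edge>) returns JT(b2): the second test must
 * also come out true, so opt_j() can route the incoming edge straight
 * to JT(b2).
 */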

static void
opt_j(ep)
	struct edge *ep;
{
	register int i, k;
	register struct block *target;

	if (JT(ep->succ) == 0)
		return;

	if (JT(ep->succ) == JF(ep->succ)) {
		/*
		 * Common branch targets can be eliminated, provided
		 * there is no data dependency.
		 */
		if (!use_conflict(ep->pred, ep->succ->et.succ)) {
			done = 0;
			ep->succ = JT(ep->succ);
		}
	}
	/*
	 * For each edge dominator that matches the successor of this
	 * edge, promote the edge successor to its grandchild.
	 *
	 * XXX We violate the set abstraction here in favor of a
	 * reasonably efficient loop.
	 */
 top:
	for (i = 0; i < edgewords; ++i) {
		register bpf_u_int32 x = ep->edom[i];

		while (x != 0) {
			k = ffs(x) - 1;
			x &=~ (1 << k);
			k += i * BITS_PER_WORD;

			target = fold_edge(ep->succ, edges[k]);
			/*
			 * Check that there is no data dependency between
			 * nodes that will be violated if we move the edge.
			 */
			if (target != 0 && !use_conflict(ep->pred, target)) {
				done = 0;
				ep->succ = target;
				if (JT(target) != 0)
					/*
					 * Start over unless we hit a leaf.
					 */
					goto top;
				return;
			}
		}
	}
}


static void
or_pullup(b)
	struct block *b;
{
	int val, at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 * XXX why?
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	while (1) {
		if (*diffp == 0)
			return;

		if (JT(*diffp) != JT(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JF(*diffp);
		at_top = 0;
	}
	samep = &JF(*diffp);
	while (1) {
		if (*samep == 0)
			return;

		if (JT(*samep) != JT(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between *diffp and *samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JF(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JF(pull);
	JF(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	done = 0;
}

static void
and_pullup(b)
	struct block *b;
{
	int val, at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	while (1) {
		if (*diffp == 0)
			return;

		if (JF(*diffp) != JF(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JT(*diffp);
		at_top = 0;
	}
	samep = &JT(*diffp);
	while (1) {
		if (*samep == 0)
			return;

		if (JF(*samep) != JF(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between diffp and samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JT(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JT(pull);
	JT(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	done = 0;
}

static void
opt_blks(root, do_stmts)
	struct block *root;
	int do_stmts;
{
	int i, maxlevel;
	struct block *p;

	init_val();
	maxlevel = root->level;
	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link)
			opt_blk(p, do_stmts);

	if (do_stmts)
		/*
		 * No point trying to move branches; it can't possibly
		 * make a difference at this point.
		 */
		return;

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			opt_j(&p->et);
			opt_j(&p->ef);
		}
	}
	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			or_pullup(p);
			and_pullup(p);
		}
	}
}

static inline void
link_inedge(parent, child)
	struct edge *parent;
	struct block *child;
{
	parent->next = child->in_edges;
	child->in_edges = parent;
}

static void
find_inedges(root)
	struct block *root;
{
	int i;
	struct block *b;

	for (i = 0; i < n_blocks; ++i)
		blocks[i]->in_edges = 0;

	/*
	 * Traverse the graph, adding each edge to the predecessor
	 * list of its successors.  Skip the leaves (i.e. level 0).
	 */
	for (i = root->level; i > 0; --i) {
		for (b = levels[i]; b != 0; b = b->link) {
			link_inedge(&b->et, JT(b));
			link_inedge(&b->ef, JF(b));
		}
	}
}

static void
opt_root(b)
	struct block **b;
{
	struct slist *tmp, *s;

	s = (*b)->stmts;
	(*b)->stmts = 0;
	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
		*b = JT(*b);

	tmp = (*b)->stmts;
	if (tmp != 0)
		sappend(s, tmp);
	(*b)->stmts = s;

	/*
	 * If the root node is a return, then there is no
	 * point executing any statements (since the bpf machine
	 * has no side effects).
	 */
	if (BPF_CLASS((*b)->s.code) == BPF_RET)
		(*b)->stmts = 0;
}

static void
opt_loop(root, do_stmts)
	struct block *root;
	int do_stmts;
{

#ifdef BDEBUG
	if (dflag > 1)
		opt_dump(root);
#endif
	do {
		done = 1;
		find_levels(root);
		find_dom(root);
		find_closure(root);
		find_inedges(root);
		find_ud(root);
		find_edom(root);
		opt_blks(root, do_stmts);
#ifdef BDEBUG
		if (dflag > 1)
			opt_dump(root);
#endif
	} while (!done);
}

/*
 * Optimize the filter code in its dag representation.
 */
void
bpf_optimize(rootp)
	struct block **rootp;
{
	struct block *root;

	root = *rootp;

	opt_init(root);
	opt_loop(root, 0);
	opt_loop(root, 1);
	intern_blocks(root);
	opt_root(rootp);
	opt_cleanup();
}

static void
make_marks(p)
	struct block *p;
{
	if (!isMarked(p)) {
		Mark(p);
		if (BPF_CLASS(p->s.code) != BPF_RET) {
			make_marks(JT(p));
			make_marks(JF(p));
		}
	}
}

/*
 * Mark code array such that isMarked(i) is true
 * only for nodes that are alive.
 */
static void
mark_code(p)
	struct block *p;
{
	cur_mark += 1;
	make_marks(p);
}

/*
 * True iff the two stmt lists load the same value from the packet into
 * the accumulator.
 */
static int
eq_slist(x, y)
	struct slist *x, *y;
{
	while (1) {
		while (x && x->s.code == NOP)
			x = x->next;
		while (y && y->s.code == NOP)
			y = y->next;
		if (x == 0)
			return y == 0;
		if (y == 0)
			return x == 0;
		if (x->s.code != y->s.code || x->s.k != y->s.k)
			return 0;
		x = x->next;
		y = y->next;
	}
}

static inline int
eq_blk(b0, b1)
	struct block *b0, *b1;
{
	if (b0->s.code == b1->s.code &&
	    b0->s.k == b1->s.k &&
	    b0->et.succ == b1->et.succ &&
	    b0->ef.succ == b1->ef.succ)
		return eq_slist(b0->stmts, b1->stmts);
	return 0;
}

static void
intern_blocks(root)
	struct block *root;
{
	struct block *p;
	int i, j;
	int done;
 top:
	done = 1;
	for (i = 0; i < n_blocks; ++i)
		blocks[i]->link = 0;

	mark_code(root);

	for (i = n_blocks - 1; --i >= 0; ) {
		if (!isMarked(blocks[i]))
			continue;
		for (j = i + 1; j < n_blocks; ++j) {
			if (!isMarked(blocks[j]))
				continue;
			if (eq_blk(blocks[i], blocks[j])) {
				blocks[i]->link = blocks[j]->link ?
					blocks[j]->link : blocks[j];
				break;
			}
		}
	}
	for (i = 0; i < n_blocks; ++i) {
		p = blocks[i];
		if (JT(p) == 0)
			continue;
		if (JT(p)->link) {
			done = 0;
			JT(p) = JT(p)->link;
		}
		if (JF(p)->link) {
			done = 0;
			JF(p) = JF(p)->link;
		}
	}
	if (!done)
		goto top;
}

static void
opt_cleanup()
{
	free((void *)vnode_base);
	free((void *)vmap);
	free((void *)edges);
	free((void *)space);
	free((void *)levels);
	free((void *)blocks);
}

/*
 * Return the number of stmts in 's'.
 */
static int
slength(s)
	struct slist *s;
{
	int n = 0;

	for (; s; s = s->next)
		if (s->s.code != NOP)
			++n;
	return n;
}

/*
 * Return the number of nodes reachable by 'p'.
 * All nodes should be initially unmarked.
 */
static int
count_blocks(p)
	struct block *p;
{
	if (p == 0 || isMarked(p))
		return 0;
	Mark(p);
	return count_blocks(JT(p)) + count_blocks(JF(p)) + 1;
}

/*
 * Do a depth first search on the flow graph, numbering the
 * basic blocks, and entering them into the 'blocks' array.
 */
static void
number_blks_r(p)
	struct block *p;
{
	int n;

	if (p == 0 || isMarked(p))
		return;

	Mark(p);
	n = n_blocks++;
	p->id = n;
	blocks[n] = p;

	number_blks_r(JT(p));
	number_blks_r(JF(p));
}

/*
 * Return the number of stmts in the flowgraph reachable by 'p'.
 * The nodes should be unmarked before calling.
 */
static int
count_stmts(p)
	struct block *p;
{
	int n;

	if (p == 0 || isMarked(p))
		return 0;
	Mark(p);
	n = count_stmts(JT(p)) + count_stmts(JF(p));
	return slength(p->stmts) + n + 1;
}

/*
 * Allocate memory.  All allocation is done before optimization
 * is begun.  A linear bound on the size of all data structures is computed
 * from the total number of blocks and/or statements.
 */
static void
opt_init(root)
	struct block *root;
{
	bpf_u_int32 *p;
	int i, n, max_stmts;

	/*
	 * First, count the blocks, so we can malloc an array to map
	 * block number to block.  Then, put the blocks into the array.
	 */
	unMarkAll();
	n = count_blocks(root);
	blocks = (struct block **)malloc(n * sizeof(*blocks));
	unMarkAll();
	n_blocks = 0;
	number_blks_r(root);

	n_edges = 2 * n_blocks;
	edges = (struct edge **)malloc(n_edges * sizeof(*edges));

	/*
	 * The number of levels is bounded by the number of nodes.
	 */
	levels = (struct block **)malloc(n_blocks * sizeof(*levels));

	edgewords = n_edges / (8 * sizeof(bpf_u_int32)) + 1;
	nodewords = n_blocks / (8 * sizeof(bpf_u_int32)) + 1;

	/* XXX */
	space = (bpf_u_int32 *)malloc(2 * n_blocks * nodewords * sizeof(*space)
				 + n_edges * edgewords * sizeof(*space));
	p = space;
	all_dom_sets = p;
	for (i = 0; i < n; ++i) {
		blocks[i]->dom = p;
		p += nodewords;
	}
	all_closure_sets = p;
	for (i = 0; i < n; ++i) {
		blocks[i]->closure = p;
		p += nodewords;
	}
	all_edge_sets = p;
	for (i = 0; i < n; ++i) {
		register struct block *b = blocks[i];

		b->et.edom = p;
		p += edgewords;
		b->ef.edom = p;
		p += edgewords;
		b->et.id = i;
		edges[i] = &b->et;
		b->ef.id = n_blocks + i;
		edges[n_blocks + i] = &b->ef;
		b->et.pred = b;
		b->ef.pred = b;
	}
	max_stmts = 0;
	for (i = 0; i < n; ++i)
		max_stmts += slength(blocks[i]->stmts) + 1;
	/*
	 * We allocate at most 3 value numbers per statement,
	 * so this is an upper bound on the number of valnodes
	 * we'll need.
	 */
	maxval = 3 * max_stmts;
	vmap = (struct vmapinfo *)malloc(maxval * sizeof(*vmap));
	vnode_base = (struct valnode *)malloc(maxval * sizeof(*vnode_base));
}

/*
 * Some pointers used to convert the basic block form of the code
 * into the array form that BPF requires.  'fstart' will point to
 * the malloc'd array while 'ftail' is used during the recursive traversal.
 */
static struct bpf_insn *fstart;
static struct bpf_insn *ftail;

#ifdef BDEBUG
int bids[1000];
#endif

/*
 * Returns true if successful.  Returns false if a branch has
 * an offset that is too large.  If so, we have marked that
 * branch so that on a subsequent iteration, it will be treated
 * properly.
 */
static int
convert_code_r(p)
	struct block *p;
{
	struct bpf_insn *dst;
	struct slist *src;
	int slen;
	u_int off;
	int extrajmps;		/* number of extra jumps inserted */
	struct slist **offset = NULL;

	if (p == 0 || isMarked(p))
		return (1);
	Mark(p);

	if (convert_code_r(JF(p)) == 0)
		return (0);
	if (convert_code_r(JT(p)) == 0)
		return (0);

	slen = slength(p->stmts);
	dst = ftail -= (slen + 1 + p->longjt + p->longjf);
		/* inflate length by any extra jumps */

	p->offset = dst - fstart;

	/* generate offset[] for convenience */
	if (slen) {
		offset = (struct slist **)calloc(sizeof(struct slist *), slen);
		if (!offset) {
			bpf_error("not enough core");
			/*NOTREACHED*/
		}
	}
	src = p->stmts;
	for (off = 0; off < slen && src; off++) {
#if 0
		printf("off=%d src=%x\n", off, src);
#endif
		offset[off] = src;
		src = src->next;
	}

	off = 0;
	for (src = p->stmts; src; src = src->next) {
		if (src->s.code == NOP)
			continue;
		dst->code = (u_short)src->s.code;
		dst->k = src->s.k;

		/* fill block-local relative jump */
		if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
#if 0
			if (src->s.jt || src->s.jf) {
				bpf_error("illegal jmp destination");
				/*NOTREACHED*/
			}
#endif
			goto filled;
		}
		if (off == slen - 2)	/*???*/
			goto filled;

	    {
		int i;
		int jt, jf;
		char *ljerr = "%s for block-local relative jump: off=%d";

#if 0
		printf("code=%x off=%d %x %x\n", src->s.code,
			off, src->s.jt, src->s.jf);
#endif

		if (!src->s.jt || !src->s.jf) {
			bpf_error(ljerr, "no jmp destination", off);
			/*NOTREACHED*/
		}

		jt = jf = 0;
		for (i = 0; i < slen; i++) {
			if (offset[i] == src->s.jt) {
				if (jt) {
					bpf_error(ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}

				dst->jt = i - off - 1;
				jt++;
			}
			if (offset[i] == src->s.jf) {
				if (jf) {
					bpf_error(ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}
				dst->jf = i - off - 1;
				jf++;
			}
		}
		if (!jt || !jf) {
			bpf_error(ljerr, "no destination found", off);
			/*NOTREACHED*/
		}
	    }
filled:
		++dst;
		++off;
	}
	if (offset)
		free(offset);

#ifdef BDEBUG
	bids[dst - fstart] = p->id + 1;
#endif
	dst->code = (u_short)p->s.code;
	dst->k = p->s.k;
	if (JT(p)) {
		extrajmps = 0;
		off = JT(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
			/* offset too large for branch, must add a jump */
			if (p->longjt == 0) {
				/* mark this instruction and retry */
				p->longjt++;
				return (0);
			}
			/* branch if T to following jump */
			dst->jt = extrajmps;
			extrajmps++;
			dst[extrajmps].code = BPF_JMP|BPF_JA;
			dst[extrajmps].k = off - extrajmps;
		}
		else
			dst->jt = off;
		off = JF(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
			/* offset too large for branch, must add a jump */
			if (p->longjf == 0) {
				/* mark this instruction and retry */
				p->longjf++;
				return (0);
			}
			/* branch if F to following jump */
			/* if two jumps are inserted, F goes to second one */
			dst->jf = extrajmps;
			extrajmps++;
			dst[extrajmps].code = BPF_JMP|BPF_JA;
			dst[extrajmps].k = off - extrajmps;
		}
		else
			dst->jf = off;
	}
	return (1);
}
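
/*
 * Sketch of the long-jump fixup above (illustrative): the jt/jf fields
 * of a BPF conditional are 8 bits wide, so a target 256 or more
 * instructions away cannot be branched to directly.  The branch is
 * instead pointed at an unconditional jump emitted right after it:
 *
 *	jeq #k, 0, ...		; "true" falls through into the ja
 *	ja  <far target>
 *
 * p->longjt/p->longjf record that the extra slot is needed, and the
 * caller re-runs the conversion with room reserved for it.
 */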


/*
 * Convert flowgraph intermediate representation to the
 * BPF array representation.  Set *lenp to the number of instructions.
 */
struct bpf_insn *
icode_to_fcode(root, lenp)
	struct block *root;
	int *lenp;
{
	int n;
	struct bpf_insn *fp;

	/*
	 * Loop doing convert_code_r() until no branches remain
	 * with too-large offsets.
	 */
	while (1) {
		unMarkAll();
		n = *lenp = count_stmts(root);

		fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
		memset((char *)fp, 0, sizeof(*fp) * n);
		fstart = fp;
		ftail = fp + n;

		unMarkAll();
		if (convert_code_r(root))
			break;
		free(fp);
	}

	return fp;
}

#ifdef BDEBUG
static void
opt_dump(root)
	struct block *root;
{
	struct bpf_program f;

	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(root, &f.bf_len);
	bpf_dump(&f, 1);
	putchar('\n');
	free((char *)f.bf_insns);
}
#endif