};
typedef struct {
+ /*
+ * Place to longjmp to on an error.
+ */
+ jmp_buf top_ctx;
+
+ /*
+ * The buffer into which to put the error message.
+ */
+ char *errbuf;
+
/*
* A flag to indicate that further optimization is needed.
* Iterative passes are continued until a given pass yields no
} opt_state_t;
typedef struct {
+ /*
+ * Place to longjmp to on an error.
+ */
+ jmp_buf top_ctx;
+
+ /*
+ * The buffer into which to put the error message.
+ */
+ char *errbuf;
+
/*
* Some pointers used to convert the basic block form of the code,
* into the array form that BPF requires. 'fstart' will point to
struct bpf_insn *ftail;
} conv_state_t;
-static void opt_init(compiler_state_t *, opt_state_t *, struct icode *);
+static void opt_init(opt_state_t *, struct icode *);
static void opt_cleanup(opt_state_t *);
-static void PCAP_NORETURN opt_error(compiler_state_t *, opt_state_t *, const char *, ...)
- PCAP_PRINTFLIKE(3, 4);
+static void PCAP_NORETURN opt_error(opt_state_t *, const char *, ...)
+ PCAP_PRINTFLIKE(2, 3);
static void intern_blocks(opt_state_t *, struct icode *);
* (Unary operators are handled elsewhere.)
*/
static void
-fold_op(compiler_state_t *cstate, opt_state_t *opt_state,
- struct stmt *s, int v0, int v1)
+fold_op(opt_state_t *opt_state, struct stmt *s, int v0, int v1)
{
bpf_u_int32 a, b;
case BPF_DIV:
if (b == 0)
- opt_error(cstate, opt_state, "division by zero");
+ opt_error(opt_state, "division by zero");
a /= b;
break;
case BPF_MOD:
if (b == 0)
- opt_error(cstate, opt_state, "modulus by zero");
+ opt_error(opt_state, "modulus by zero");
a %= b;
break;
* evaluation and code transformations weren't folded together.
*/
static void
-opt_stmt(compiler_state_t *cstate, opt_state_t *opt_state,
- struct stmt *s, int val[], int alter)
+opt_stmt(opt_state_t *opt_state, struct stmt *s, int val[], int alter)
{
int op;
int v;
break;
}
if (op == BPF_DIV)
- opt_error(cstate, opt_state,
+ opt_error(opt_state,
"division by zero");
if (op == BPF_MOD)
- opt_error(cstate, opt_state,
+ opt_error(opt_state,
"modulus by zero");
}
if (opt_state->vmap[val[A_ATOM]].is_const) {
- fold_op(cstate, opt_state, s, val[A_ATOM], K(s->k));
+ fold_op(opt_state, s, val[A_ATOM], K(s->k));
val[A_ATOM] = K(s->k);
break;
}
op = BPF_OP(s->code);
if (alter && opt_state->vmap[val[X_ATOM]].is_const) {
if (opt_state->vmap[val[A_ATOM]].is_const) {
- fold_op(cstate, opt_state, s, val[A_ATOM], val[X_ATOM]);
+ fold_op(opt_state, s, val[A_ATOM], val[X_ATOM]);
val[A_ATOM] = K(s->k);
}
else {
*/
if ((op == BPF_LSH || op == BPF_RSH) &&
(s->k < 0 || s->k > 31))
- opt_error(cstate, opt_state,
+ opt_error(opt_state,
"shift by more than 31 bits");
opt_state->done = 0;
val[A_ATOM] =
}
static void
-opt_blk(compiler_state_t *cstate, opt_state_t *opt_state,
- struct block *b, int do_stmts)
+opt_blk(opt_state_t *opt_state, struct block *b, int do_stmts)
{
struct slist *s;
struct edge *p;
aval = b->val[A_ATOM];
xval = b->val[X_ATOM];
for (s = b->stmts; s; s = s->next)
- opt_stmt(cstate, opt_state, &s->s, b->val, do_stmts);
+ opt_stmt(opt_state, &s->s, b->val, do_stmts);
/*
* This is a special case: if we don't use anything from this
}
static void
-opt_blks(compiler_state_t *cstate, opt_state_t *opt_state, struct icode *ic,
- int do_stmts)
+opt_blks(opt_state_t *opt_state, struct icode *ic, int do_stmts)
{
int i, maxlevel;
struct block *p;
find_inedges(opt_state, ic->root);
for (i = maxlevel; i >= 0; --i)
for (p = opt_state->levels[i]; p; p = p->link)
- opt_blk(cstate, opt_state, p, do_stmts);
+ opt_blk(opt_state, p, do_stmts);
if (do_stmts)
/*
}
static void
-opt_loop(compiler_state_t *cstate, opt_state_t *opt_state, struct icode *ic,
- int do_stmts)
+opt_loop(opt_state_t *opt_state, struct icode *ic, int do_stmts)
{
#ifdef BDEBUG
find_closure(opt_state, ic->root);
find_ud(opt_state, ic->root);
find_edom(opt_state, ic->root);
- opt_blks(cstate, opt_state, ic, do_stmts);
+ opt_blks(opt_state, ic, do_stmts);
#ifdef BDEBUG
if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, opt_state->done);
/*
* Optimize the filter code in its dag representation.
+ * Return 0 on success, -1 on error.
*/
-void
-bpf_optimize(compiler_state_t *cstate, struct icode *ic)
+int
+bpf_optimize(struct icode *ic, char *errbuf)
{
opt_state_t opt_state;
- opt_init(cstate, &opt_state, ic);
- opt_loop(cstate, &opt_state, ic, 0);
- opt_loop(cstate, &opt_state, ic, 1);
+ /*
+ * Zero the state first, so all pointers are NULL if
+ * opt_cleanup() runs before they have been filled in.
+ */
+ memset(&opt_state, 0, sizeof(opt_state));
+ opt_state.errbuf = errbuf;
+ /*
+ * opt_error() longjmps back here on any error; clean up
+ * whatever has been allocated and report failure.
+ */
+ if (setjmp(opt_state.top_ctx)) {
+ opt_cleanup(&opt_state);
+ return -1;
+ }
+ opt_init(&opt_state, ic);
+ opt_loop(&opt_state, ic, 0);
+ opt_loop(&opt_state, ic, 1);
intern_blocks(&opt_state, ic);
#ifdef BDEBUG
if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
}
#endif
opt_cleanup(&opt_state);
+ return 0;
}
static void
}
/*
- * Like bpf_error(), but also cleans up the optimizer state.
+ * For optimizer errors.
*/
static void PCAP_NORETURN
-opt_error(compiler_state_t *cstate, opt_state_t *opt_state, const char *fmt, ...)
+opt_error(opt_state_t *opt_state, const char *fmt, ...)
{
va_list ap;
- opt_cleanup(opt_state);
- va_start(ap, fmt);
- bpf_vset_error(cstate, fmt, ap);
- va_end(ap);
- bpf_abort_compilation(cstate);
+ /*
+ * Format the message into the caller-supplied buffer, if any,
+ * then longjmp back to the setjmp() in bpf_optimize(), which
+ * does the opt_cleanup() and returns -1.
+ */
+ if (opt_state->errbuf != NULL) {
+ va_start(ap, fmt);
+ (void)pcap_vsnprintf(opt_state->errbuf,
+ PCAP_ERRBUF_SIZE, fmt, ap);
+ va_end(ap);
+ }
+ longjmp(opt_state->top_ctx, 1);
/* NOTREACHED */
}
* from the total number of blocks and/or statements.
*/
static void
-opt_init(compiler_state_t *cstate, opt_state_t *opt_state, struct icode *ic)
+opt_init(opt_state_t *opt_state, struct icode *ic)
{
bpf_u_int32 *p;
int i, n, max_stmts;
n = count_blocks(ic, ic->root);
opt_state->blocks = (struct block **)calloc(n, sizeof(*opt_state->blocks));
if (opt_state->blocks == NULL)
- bpf_error(cstate, "malloc");
+ opt_error(opt_state, "malloc");
unMarkAll(ic);
opt_state->n_blocks = 0;
number_blks_r(opt_state, ic, ic->root);
opt_state->edges = (struct edge **)calloc(opt_state->n_edges, sizeof(*opt_state->edges));
if (opt_state->edges == NULL) {
free(opt_state->blocks);
- bpf_error(cstate, "malloc");
+ opt_error(opt_state, "malloc");
}
/*
if (opt_state->levels == NULL) {
free(opt_state->edges);
free(opt_state->blocks);
- bpf_error(cstate, "malloc");
+ opt_error(opt_state, "malloc");
}
opt_state->edgewords = opt_state->n_edges / (8 * sizeof(bpf_u_int32)) + 1;
free(opt_state->levels);
free(opt_state->edges);
free(opt_state->blocks);
- bpf_error(cstate, "malloc");
+ opt_error(opt_state, "malloc");
}
p = opt_state->space;
opt_state->all_dom_sets = p;
free(opt_state->levels);
free(opt_state->edges);
free(opt_state->blocks);
- bpf_error(cstate, "malloc");
+ opt_error(opt_state, "malloc");
}
opt_state->vnode_base = (struct valnode *)calloc(opt_state->maxval, sizeof(*opt_state->vnode_base));
if (opt_state->vnode_base == NULL) {
free(opt_state->levels);
free(opt_state->edges);
free(opt_state->blocks);
- bpf_error(cstate, "malloc");
+ opt_error(opt_state, "malloc");
}
}
int bids[NBIDS];
#endif
-static void PCAP_NORETURN conv_error(compiler_state_t *, conv_state_t *, const char *, ...)
- PCAP_PRINTFLIKE(3, 4);
+static void PCAP_NORETURN conv_error(conv_state_t *, const char *, ...)
+ PCAP_PRINTFLIKE(2, 3);
/*
* Returns true if successful. Returns false if a branch has
* properly.
*/
static int
-convert_code_r(compiler_state_t *cstate, conv_state_t *conv_state,
- struct icode *ic, struct block *p)
+convert_code_r(conv_state_t *conv_state, struct icode *ic, struct block *p)
{
struct bpf_insn *dst;
struct slist *src;
return (1);
Mark(ic, p);
- if (convert_code_r(cstate, conv_state, ic, JF(p)) == 0)
+ if (convert_code_r(conv_state, ic, JF(p)) == 0)
return (0);
- if (convert_code_r(cstate, conv_state, ic, JT(p)) == 0)
+ if (convert_code_r(conv_state, ic, JT(p)) == 0)
return (0);
slen = slength(p->stmts);
if (slen) {
offset = (struct slist **)calloc(slen, sizeof(struct slist *));
if (!offset) {
- conv_error(cstate, conv_state, "not enough core");
+ conv_error(conv_state, "not enough core");
/*NOTREACHED*/
}
}
#if 0
if (src->s.jt || src->s.jf) {
free(offset);
- conv_error(cstate, conv_state, "illegal jmp destination");
+ conv_error(conv_state, "illegal jmp destination");
/*NOTREACHED*/
}
#endif
if (!src->s.jt || !src->s.jf) {
free(offset);
- conv_error(cstate, conv_state, ljerr, "no jmp destination", off);
+ conv_error(conv_state, ljerr, "no jmp destination", off);
/*NOTREACHED*/
}
if (offset[i] == src->s.jt) {
if (jt) {
free(offset);
- conv_error(cstate, conv_state, ljerr, "multiple matches", off);
+ conv_error(conv_state, ljerr, "multiple matches", off);
/*NOTREACHED*/
}
if (i - off - 1 >= 256) {
free(offset);
- conv_error(cstate, conv_state, ljerr, "out-of-range jump", off);
+ conv_error(conv_state, ljerr, "out-of-range jump", off);
/*NOTREACHED*/
}
dst->jt = (u_char)(i - off - 1);
if (offset[i] == src->s.jf) {
if (jf) {
free(offset);
- conv_error(cstate, conv_state, ljerr, "multiple matches", off);
+ conv_error(conv_state, ljerr, "multiple matches", off);
/*NOTREACHED*/
}
if (i - off - 1 >= 256) {
free(offset);
- conv_error(cstate, conv_state, ljerr, "out-of-range jump", off);
+ conv_error(conv_state, ljerr, "out-of-range jump", off);
/*NOTREACHED*/
}
dst->jf = (u_char)(i - off - 1);
}
if (!jt || !jf) {
free(offset);
- conv_error(cstate, conv_state, ljerr, "no destination found", off);
+ conv_error(conv_state, ljerr, "no destination found", off);
/*NOTREACHED*/
}
}
}
/* branch if T to following jump */
if (extrajmps >= 256) {
- conv_error(cstate, conv_state, "too many extra jumps");
+ conv_error(conv_state, "too many extra jumps");
/*NOTREACHED*/
}
dst->jt = (u_char)extrajmps;
/* branch if F to following jump */
/* if two jumps are inserted, F goes to second one */
if (extrajmps >= 256) {
- conv_error(cstate, conv_state, "too many extra jumps");
+ conv_error(conv_state, "too many extra jumps");
/*NOTREACHED*/
}
dst->jf = (u_char)extrajmps;
* done with the filter program. See the pcap man page.
*/
struct bpf_insn *
-icode_to_fcode(compiler_state_t *cstate, struct icode *ic,
- struct block *root, u_int *lenp)
+icode_to_fcode(struct icode *ic, struct block *root, u_int *lenp,
+ char *errbuf)
{
u_int n;
struct bpf_insn *fp;
conv_state_t conv_state;
+ conv_state.fstart = NULL;
+ conv_state.errbuf = errbuf;
+ if (setjmp(conv_state.top_ctx) != 0) {
+ free(conv_state.fstart);
+ return NULL;
+ }
+
/*
* Loop doing convert_code_r() until no branches remain
* with too-large offsets.
n = *lenp = count_stmts(ic, root);
fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
- if (fp == NULL)
- bpf_error(cstate, "malloc");
+ if (fp == NULL) {
+ (void)pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
+ "malloc");
+ free(fp);
+ return NULL;
+ }
memset((char *)fp, 0, sizeof(*fp) * n);
conv_state.fstart = fp;
conv_state.ftail = fp + n;
unMarkAll(ic);
- if (convert_code_r(cstate, &conv_state, ic, root))
+ if (convert_code_r(&conv_state, ic, root))
break;
free(fp);
}
}
/*
- * Like bpf_error(), but also frees the array into which we're putting
- * the generated BPF code.
+ * For icode_to_fcode() errors.
*/
static void PCAP_NORETURN
-conv_error(compiler_state_t *cstate, conv_state_t *conv_state, const char *fmt, ...)
+conv_error(conv_state_t *conv_state, const char *fmt, ...)
{
va_list ap;
- free(conv_state->fstart);
va_start(ap, fmt);
- bpf_vset_error(cstate, fmt, ap);
+ (void)pcap_vsnprintf(conv_state->errbuf,
+ PCAP_ERRBUF_SIZE, fmt, ap);
va_end(ap);
- bpf_abort_compilation(cstate);
+ /*
+ * Jump back to the setjmp() in icode_to_fcode(), which frees
+ * conv_state->fstart and returns NULL to its caller.
+ */
+ longjmp(conv_state->top_ctx, 1);
/* NOTREACHED */
}
memset(bids, 0, sizeof bids);
f.bf_insns = icode_to_fcode(cstate, ic, ic->root, &f.bf_len);
+ if (f.bf_insns == NULL)
+ return;
fprintf(out, "digraph BPF {\n");
unMarkAll(ic);
memset(bids, 0, sizeof bids);
f.bf_insns = icode_to_fcode(cstate, ic, ic->root, &f.bf_len);
+ if (f.bf_insns == NULL)
+ return;
bpf_dump(&f, 1);
putchar('\n');
free((char *)f.bf_insns);