* Optimization module for BPF code intermediate representation.
*/
-#ifdef HAVE_CONFIG_H
#include <config.h>
-#endif
#include <pcap-types.h>
#include <memory.h>
#include <setjmp.h>
#include <string.h>
-
+#include <limits.h> /* for SIZE_MAX */
#include <errno.h>
#include "pcap-int.h"
#include "gencode.h"
#include "optimize.h"
+#include "diag-control.h"
#ifdef HAVE_OS_PROTO_H
#include "os-proto.h"
#define lowest_set_bit(mask) ((u_int)__builtin_ctz(mask))
#elif defined(_MSC_VER)
/*
- * Visual Studio; we support only 2005 and later, so use
+ * Visual Studio; we support only 2015 and later, so use
* _BitScanForward().
*/
#include <intrin.h>
abort(); /* mask is zero */
return (u_int)bit;
}
-#elif defined(MSDOS) && defined(__DJGPP__)
- /*
- * MS-DOS with DJGPP, which declares ffs() in <string.h>, which
- * we've already included.
- */
- #define lowest_set_bit(mask) ((u_int)(ffs((mask)) - 1))
-#elif (defined(MSDOS) && defined(__WATCOMC__)) || defined(STRINGS_H_DECLARES_FFS)
+#else
/*
- * MS-DOS with Watcom C, which has <strings.h> and declares ffs() there,
- * or some other platform (UN*X conforming to a sufficient recent version
- * of the Single UNIX Specification).
+ * POSIX.1-2001 says ffs() is in <strings.h>. Every supported non-Windows OS
+ * (including Linux with musl libc and uclibc-ng) has the header and (except
+ * HP-UX) declares the function there. HP-UX declares the function in
+ * <string.h>, which has already been included.
*/
#include <strings.h>
- #define lowest_set_bit(mask) (u_int)((ffs((mask)) - 1))
-#else
-/*
- * None of the above.
- * Use a perfect-hash-function-based function.
- */
-static u_int
-lowest_set_bit(int mask)
-{
- unsigned int v = (unsigned int)mask;
-
- static const u_int MultiplyDeBruijnBitPosition[32] = {
- 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
- 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
- };
-
- /*
- * We strip off all but the lowermost set bit (v & ~v),
- * and perform a minimal perfect hash on it to look up the
- * number of low-order zero bits in a table.
- *
- * See:
- *
- * http://7ooo.mooo.com/text/ComputingTrailingZerosHOWTO.pdf
- *
- * http://supertech.csail.mit.edu/papers/debruijn.pdf
- */
- return (MultiplyDeBruijnBitPosition[((v & -v) * 0x077CB531U) >> 27]);
-}
+ #define lowest_set_bit(mask) ((u_int)(ffs((mask)) - 1))
#endif
/*
#define AX_ATOM N_ATOMS
/*
- * These data structures are used in a Cocke and Shwarz style
+ * These data structures are used in a Cocke and Schwartz style
* value numbering scheme. Since the flowgraph is acyclic,
* exit values can be propagated from a node's predecessors
* provided it is uniquely defined.
static void opt_dump(opt_state_t *, struct icode *);
#endif
-#ifndef MAX
-#define MAX(a,b) ((a)>(b)?(a):(b))
-#endif
-
static void
find_levels_r(opt_state_t *opt_state, struct icode *ic, struct block *b)
{
if (JT(b)) {
find_levels_r(opt_state, ic, JT(b));
find_levels_r(opt_state, ic, JF(b));
- level = MAX(JT(b)->level, JF(b)->level) + 1;
+ level = max(JT(b)->level, JF(b)->level) + 1;
} else
level = 0;
b->level = level;
*/
x = opt_state->all_dom_sets;
/*
- * These are both guaranteed to be > 0, so the product is
- * guaranteed to be > 0.
- *
- * XXX - but what if it overflows?
+ * In opt_init(), we've made sure the product doesn't overflow.
*/
i = opt_state->n_blocks * opt_state->nodewords;
- do
+ while (i != 0) {
+ --i;
*x++ = 0xFFFFFFFFU;
- while (--i != 0);
+ }
/* Root starts off empty. */
- i = opt_state->nodewords;
- do
+ for (i = opt_state->nodewords; i != 0;) {
+ --i;
root->dom[i] = 0;
- while (--i != 0);
+ }
/* root->level is the highest level number found. */
for (level = root->level; level >= 0; --level) {
x = opt_state->all_edge_sets;
/*
- * These are both guaranteed to be > 0, so the product is
- * guaranteed to be > 0.
- *
- * XXX - but what if it overflows?
+ * In opt_init(), we've made sure the product doesn't overflow.
*/
- i = opt_state->n_edges * opt_state->edgewords;
- do
+ for (i = opt_state->n_edges * opt_state->edgewords; i != 0; ) {
+ --i;
x[i] = 0xFFFFFFFFU;
- while (--i != 0);
+ }
/* root->level is the highest level number found. */
memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
case BPF_LDX:
/*
* As there are fewer than 2^31 memory locations,
- * s->k should be convertable to int without problems.
+ * s->k should be convertible to int without problems.
*/
return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
(BPF_MODE(c) == BPF_MEM) ? (int)s->k : -1;
}
s->k = a;
s->code = BPF_LD|BPF_IMM;
+ opt_state->done = 0;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
}
static inline struct slist *
if (s->s.code == BPF_ST &&
next->s.code == (BPF_LDX|BPF_MEM) &&
s->s.k == next->s.k) {
+ opt_state->done = 0;
+ next->s.code = BPF_MISC|BPF_TAX;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
- next->s.code = BPF_MISC|BPF_TAX;
}
/*
* ld #k --> ldx #k
next->s.code == (BPF_MISC|BPF_TAX)) {
s->s.code = BPF_LDX|BPF_IMM;
next->s.code = BPF_MISC|BPF_TXA;
+ opt_state->done = 0;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
}
/*
* This is an ugly special case, but it happens
s->s.code = NOP;
add->s.code = NOP;
tax->s.code = NOP;
+ opt_state->done = 0;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
}
}
/*
*/
b->s.k += opt_state->vmap[val].const_val;
last->s.code = NOP;
+ opt_state->done = 0;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
} else if (b->s.k == 0) {
/*
* If the X register isn't a constant,
*/
last->s.code = NOP;
b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
+ opt_state->done = 0;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
}
}
/*
else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
last->s.code = NOP;
b->s.k += last->s.k;
+ opt_state->done = 0;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
}
/*
* And, similarly, a constant AND can be simplified
b->s.k = last->s.k;
b->s.code = BPF_JMP|BPF_K|BPF_JSET;
last->s.code = NOP;
+ opt_state->done = 0;
+ opt_not(b);
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
- opt_not(b);
}
}
/*
abort();
}
if (JF(b) != JT(b)) {
+ opt_state->done = 0;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
}
if (v)
JF(b) = JT(b);
s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
s->k += opt_state->vmap[v].const_val;
v = F(opt_state, s->code, s->k, 0L);
+ opt_state->done = 0;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
}
else
v = F(opt_state, s->code, s->k, v);
s->k > 31)
opt_error(opt_state,
"shift by more than 31 bits");
+ opt_state->done = 0;
+ val[A_ATOM] =
+ F(opt_state, s->code, val[A_ATOM], K(s->k));
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
- val[A_ATOM] =
- F(opt_state, s->code, val[A_ATOM], K(s->k));
}
break;
}
if (alter && opt_state->vmap[v].is_const) {
s->code = BPF_LD|BPF_IMM;
s->k = opt_state->vmap[v].const_val;
+ opt_state->done = 0;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
}
vstore(s, &val[A_ATOM], v, alter);
break;
if (alter && opt_state->vmap[v].is_const) {
s->code = BPF_LDX|BPF_IMM;
s->k = opt_state->vmap[v].const_val;
+ opt_state->done = 0;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
}
vstore(s, &val[X_ATOM], v, alter);
break;
atom = atomdef(s);
if (atom >= 0) {
if (last[atom]) {
+ opt_state->done = 0;
+ last[atom]->code = NOP;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
- last[atom]->code = NOP;
}
last[atom] = s;
}
for (atom = 0; atom < N_ATOMS; ++atom)
if (last[atom] && !ATOMELEM(b->out_use, atom)) {
last[atom]->code = NOP;
+ /*
+ * The store was removed as it's dead,
+ * so the value stored into now has
+ * an unknown value.
+ */
+ vstore(0, &b->val[atom], VAL_UNKNOWN, 0);
+ opt_state->done = 0;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
}
}
BPF_CLASS(b->s.code) == BPF_RET)) {
if (b->stmts != 0) {
b->stmts = 0;
+ opt_state->done = 0;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 1;
- opt_state->done = 0;
}
} else {
opt_peep(opt_state, b);
* Make this edge go to the block to
* which the successor of that edge
* goes.
- *
- * XXX - optimizer loop detection.
*/
- opt_state->non_branch_movement_performed = 1;
opt_state->done = 0;
ep->succ = JT(ep->succ);
+ /*
+ * XXX - optimizer loop detection.
+ */
+ opt_state->non_branch_movement_performed = 1;
}
}
/*
*
*/
static void
-or_pullup(opt_state_t *opt_state, struct block *b)
+or_pullup(opt_state_t *opt_state, struct block *b, struct block *root)
{
bpf_u_int32 val;
int at_top;
* optimizer gets into one of those infinite loops.
*/
opt_state->done = 0;
+
+ /*
+ * Recompute dominator sets as control flow graph has changed.
+ */
+ find_dom(opt_state, root);
}
static void
-and_pullup(opt_state_t *opt_state, struct block *b)
+and_pullup(opt_state_t *opt_state, struct block *b, struct block *root)
{
bpf_u_int32 val;
int at_top;
* optimizer gets into one of those infinite loops.
*/
opt_state->done = 0;
+
+ /*
+ * Recompute dominator sets as control flow graph has changed.
+ */
+ find_dom(opt_state, root);
}
static void
* versions of the machine code, eventually returning
* to the first version. (We're really not doing a
* full loop detection, we're just testing for two
- * passes in a row where where we do nothing but
+ * passes in a row where we do nothing but
* move branches.)
*/
return;
find_inedges(opt_state, ic->root);
for (i = 1; i <= maxlevel; ++i) {
for (p = opt_state->levels[i]; p; p = p->link) {
- or_pullup(opt_state, p);
- and_pullup(opt_state, p);
+ or_pullup(opt_state, p, ic->root);
+ and_pullup(opt_state, p, ic->root);
}
}
}
static void
find_inedges(opt_state_t *opt_state, struct block *root)
{
+ u_int i;
+ int level;
struct block *b;
- for (u_int i = 0; i < opt_state->n_blocks; ++i)
+ for (i = 0; i < opt_state->n_blocks; ++i)
opt_state->blocks[i]->in_edges = 0;
/*
* Traverse the graph, adding each edge to the predecessor
* list of its successors. Skip the leaves (i.e. level 0).
*/
- for (int i = root->level; i > 0; --i) {
- for (b = opt_state->levels[i]; b != 0; b = b->link) {
+ for (level = root->level; level > 0; --level) {
+ for (b = opt_state->levels[level]; b != 0; b = b->link) {
link_inedge(&b->et, JT(b));
link_inedge(&b->ef, JF(b));
}
#ifdef BDEBUG
if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
- printf("opt_loop(root, %d) begin\n", do_stmts);
+ printf("%s(root, %d) begin\n", __func__, do_stmts);
opt_dump(opt_state, ic);
}
#endif
*/
int loop_count = 0;
for (;;) {
- opt_state->done = 1;
/*
* XXX - optimizer loop detection.
*/
opt_state->non_branch_movement_performed = 0;
+ opt_state->done = 1;
find_levels(opt_state, ic);
find_dom(opt_state, ic->root);
find_closure(opt_state, ic->root);
opt_blks(opt_state, ic, do_stmts);
#ifdef BDEBUG
if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
- printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, opt_state->done);
+ printf("%s(root, %d) bottom, done=%d\n", __func__, do_stmts, opt_state->done);
opt_dump(opt_state, ic);
}
#endif
memset(&opt_state, 0, sizeof(opt_state));
opt_state.errbuf = errbuf;
- opt_state.non_branch_movement_performed = 0;
if (setjmp(opt_state.top_ctx)) {
opt_cleanup(&opt_state);
return -1;
}
longjmp(opt_state->top_ctx, 1);
/* NOTREACHED */
+#ifdef _AIX
+ PCAP_UNREACHABLE
+#endif /* _AIX */
}
/*
static void
number_blks_r(opt_state_t *opt_state, struct icode *ic, struct block *p)
{
- int n;
+ u_int n;
if (p == 0 || isMarked(ic, p))
return;
Mark(ic, p);
n = opt_state->n_blocks++;
+ if (opt_state->n_blocks == 0) {
+ /*
+ * Overflow.
+ */
+ opt_error(opt_state, "filter is too complex to optimize");
+ }
p->id = n;
opt_state->blocks[n] = p;
{
bpf_u_int32 *p;
int i, n, max_stmts;
+ u_int product;
+ size_t block_memsize, edge_memsize;
/*
* First, count the blocks, so we can malloc an array to map
opt_state->n_blocks = 0;
number_blks_r(opt_state, ic, ic->root);
+ /*
+ * This "should not happen".
+ */
+ if (opt_state->n_blocks == 0)
+ opt_error(opt_state, "filter has no instructions; please report this as a libpcap issue");
+
opt_state->n_edges = 2 * opt_state->n_blocks;
+ if ((opt_state->n_edges / 2) != opt_state->n_blocks) {
+ /*
+ * Overflow.
+ */
+ opt_error(opt_state, "filter is too complex to optimize");
+ }
opt_state->edges = (struct edge **)calloc(opt_state->n_edges, sizeof(*opt_state->edges));
if (opt_state->edges == NULL) {
opt_error(opt_state, "malloc");
opt_state->edgewords = opt_state->n_edges / BITS_PER_WORD + 1;
opt_state->nodewords = opt_state->n_blocks / BITS_PER_WORD + 1;
+ /*
+ * Make sure opt_state->n_blocks * opt_state->nodewords fits
+ * in a u_int; we use it as a u_int number-of-iterations
+ * value.
+ */
+ product = opt_state->n_blocks * opt_state->nodewords;
+ if ((product / opt_state->n_blocks) != opt_state->nodewords) {
+ /*
+ * XXX - just punt and don't try to optimize?
+ * In practice, this is unlikely to happen with
+ * a normal filter.
+ */
+ opt_error(opt_state, "filter is too complex to optimize");
+ }
+
+ /*
+ * Make sure the total memory required for that doesn't
+ * overflow.
+ */
+ block_memsize = (size_t)2 * product * sizeof(*opt_state->space);
+ if ((block_memsize / product) != 2 * sizeof(*opt_state->space)) {
+ opt_error(opt_state, "filter is too complex to optimize");
+ }
+
+ /*
+ * Make sure opt_state->n_edges * opt_state->edgewords fits
+ * in a u_int; we use it as a u_int number-of-iterations
+ * value.
+ */
+ product = opt_state->n_edges * opt_state->edgewords;
+ if ((product / opt_state->n_edges) != opt_state->edgewords) {
+ opt_error(opt_state, "filter is too complex to optimize");
+ }
+
+ /*
+ * Make sure the total memory required for that doesn't
+ * overflow.
+ */
+ edge_memsize = (size_t)product * sizeof(*opt_state->space);
+ if (edge_memsize / product != sizeof(*opt_state->space)) {
+ opt_error(opt_state, "filter is too complex to optimize");
+ }
+
+ /*
+ * Make sure the total memory required for both of them doesn't
+ * overflow.
+ */
+ if (block_memsize > SIZE_MAX - edge_memsize) {
+ opt_error(opt_state, "filter is too complex to optimize");
+ }
+
/* XXX */
- opt_state->space = (bpf_u_int32 *)malloc(2 * opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->space)
- + opt_state->n_edges * opt_state->edgewords * sizeof(*opt_state->space));
+ opt_state->space = (bpf_u_int32 *)malloc(block_memsize + edge_memsize);
if (opt_state->space == NULL) {
opt_error(opt_state, "malloc");
}
struct slist *src;
u_int slen;
u_int off;
- u_int extrajmps; /* number of extra jumps inserted */
struct slist **offset = NULL;
if (p == 0 || isMarked(ic, p))
dst->code = (u_short)p->s.code;
dst->k = p->s.k;
if (JT(p)) {
- extrajmps = 0;
+ /* number of extra jumps inserted */
+ u_char extrajmps = 0;
off = JT(p)->offset - (p->offset + slen) - 1;
if (off >= 256) {
/* offset too large for branch, must add a jump */
p->longjt++;
return(0);
}
- /* branch if T to following jump */
- if (extrajmps >= 256) {
- conv_error(conv_state, "too many extra jumps");
- /*NOTREACHED*/
- }
- dst->jt = (u_char)extrajmps;
+ dst->jt = extrajmps;
extrajmps++;
dst[extrajmps].code = BPF_JMP|BPF_JA;
dst[extrajmps].k = off - extrajmps;
}
/* branch if F to following jump */
/* if two jumps are inserted, F goes to second one */
- if (extrajmps >= 256) {
- conv_error(conv_state, "too many extra jumps");
- /*NOTREACHED*/
- }
- dst->jf = (u_char)extrajmps;
+ dst->jf = extrajmps;
extrajmps++;
dst[extrajmps].code = BPF_JMP|BPF_JA;
dst[extrajmps].k = off - extrajmps;
if (fp == NULL) {
(void)snprintf(errbuf, PCAP_ERRBUF_SIZE,
"malloc");
- free(fp);
return NULL;
}
memset((char *)fp, 0, sizeof(*fp) * n);
va_end(ap);
longjmp(conv_state->top_ctx, 1);
/* NOTREACHED */
+#ifdef _AIX
+ PCAP_UNREACHABLE
+#endif /* _AIX */
}
/*
* otherwise, return 0.
*/
int
-install_bpf_program(pcap_t *p, struct bpf_program *fp)
+pcapint_install_bpf_program(pcap_t *p, struct bpf_program *fp)
{
size_t prog_size;
/*
* Validate the program.
*/
- if (!pcap_validate_filter(fp->bf_insns, fp->bf_len)) {
+ if (!pcapint_validate_filter(fp->bf_insns, fp->bf_len)) {
snprintf(p->errbuf, sizeof(p->errbuf),
"BPF program is not valid");
return (-1);
p->fcode.bf_len = fp->bf_len;
p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
if (p->fcode.bf_insns == NULL) {
- pcap_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
+ pcapint_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
errno, "malloc");
return (-1);
}
icount = slength(block->stmts) + 1 + block->longjt + block->longjf;
noffset = min(block->offset + icount, (int)prog->bf_len);
- fprintf(out, "\tblock%d [shape=ellipse, id=\"block-%d\" label=\"BLOCK%d\\n", block->id, block->id, block->id);
+ fprintf(out, "\tblock%u [shape=ellipse, id=\"block-%u\" label=\"BLOCK%u\\n", block->id, block->id, block->id);
for (i = block->offset; i < noffset; i++) {
fprintf(out, "\\n%s", bpf_image(prog->bf_insns + i, i));
}
Mark(ic, block);
if (JT(block)) {
- fprintf(out, "\t\"block%d\":se -> \"block%d\":n [label=\"T\"]; \n",
+ fprintf(out, "\t\"block%u\":se -> \"block%u\":n [label=\"T\"]; \n",
block->id, JT(block)->id);
- fprintf(out, "\t\"block%d\":sw -> \"block%d\":n [label=\"F\"]; \n",
+ fprintf(out, "\t\"block%u\":sw -> \"block%u\":n [label=\"F\"]; \n",
block->id, JF(block)->id);
}
dot_dump_edge(ic, JT(block), out);
*
* example DOT for BPF `ip src host 1.1.1.1' is:
digraph BPF {
- block0 [shape=ellipse, id="block-0" label="BLOCK0\n\n(000) ldh [12]\n(001) jeq #0x800 jt 2 jf 5" tooltip="val[A]=0 val[X]=0"];
- block1 [shape=ellipse, id="block-1" label="BLOCK1\n\n(002) ld [26]\n(003) jeq #0x1010101 jt 4 jf 5" tooltip="val[A]=0 val[X]=0"];
- block2 [shape=ellipse, id="block-2" label="BLOCK2\n\n(004) ret #68" tooltip="val[A]=0 val[X]=0", peripheries=2];
- block3 [shape=ellipse, id="block-3" label="BLOCK3\n\n(005) ret #0" tooltip="val[A]=0 val[X]=0", peripheries=2];
- "block0":se -> "block1":n [label="T"];
- "block0":sw -> "block3":n [label="F"];
- "block1":se -> "block2":n [label="T"];
- "block1":sw -> "block3":n [label="F"];
+ block0 [shape=ellipse, id="block-0" label="BLOCK0\n\n(000) ldh [12]\n(001) jeq #0x800 jt 2 jf 5" tooltip="val[A]=0 val[X]=0"];
+ block1 [shape=ellipse, id="block-1" label="BLOCK1\n\n(002) ld [26]\n(003) jeq #0x1010101 jt 4 jf 5" tooltip="val[A]=0 val[X]=0"];
+ block2 [shape=ellipse, id="block-2" label="BLOCK2\n\n(004) ret #68" tooltip="val[A]=0 val[X]=0", peripheries=2];
+ block3 [shape=ellipse, id="block-3" label="BLOCK3\n\n(005) ret #0" tooltip="val[A]=0 val[X]=0", peripheries=2];
+ "block0":se -> "block1":n [label="T"];
+ "block0":sw -> "block3":n [label="F"];
+ "block1":se -> "block2":n [label="T"];
+ "block1":sw -> "block3":n [label="F"];
}
*
 * After installing Graphviz from https://www.graphviz.org/, save the DOT output as bpf.dot
else
status = plain_dump(ic, errbuf);
if (status == -1)
- opt_error(opt_state, "opt_dump: icode_to_fcode failed: %s", errbuf);
+ opt_error(opt_state, "%s: icode_to_fcode failed: %s", __func__, errbuf);
}
#endif