/*
* Convert a token value to a string; use "fmt" if not found.
*/
-const char *
+static const char *
tok2strbuf(const struct tok *lp, const char *fmt,
- u_int v, char *buf, size_t bufsize)
+ const u_int v, char *buf, const size_t bufsize)
{
if (lp != NULL) {
while (lp->s != NULL) {
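
A minimal usage sketch of the lookup this loop performs. The table
contents, the "type" variable, and the buffer are illustrative, not part
of this patch; "struct tok" is the usual { value, string } pair table,
terminated by a NULL string:

    static const struct tok icmp_type_values[] = {
        { 0, "echo reply" },
        { 8, "echo request" },
        { 0, NULL }                 /* NULL string ends the table */
    };

    char tokbuf[128];
    /* prints the matching string, or "fmt" rendered with v on a miss */
    printf("%s", tok2strbuf(icmp_type_values, "unknown-%u", type,
                            tokbuf, sizeof(tokbuf)));
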
* in round-robin fashion.
*/
const char *
-tok2str(const struct tok *lp, const char *fmt,
- u_int v)
+tok2str(const struct tok *lp, const char *fmt, const u_int v)
{
static char buf[4][TOKBUFSIZE];
static int idx = 0;
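
Because the result rotates through four static buffers, up to four
tok2str() results can safely coexist in one format call before a buffer
is reused. A sketch ("port_values", "sport", and "dport" are
illustrative):

    /* each call hands back a different one of the four buffers */
    printf("%s > %s",
           tok2str(port_values, "%u", sport),
           tok2str(port_values, "%u", dport));
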
*/
static char *
bittok2str_internal(const struct tok *lp, const char *fmt,
- u_int v, const char *sep)
+ const u_int v, const char *sep)
{
static char buf[1024+1]; /* our string buffer */
char *bufp = buf;
* this is useful for parsing bitfields; the output strings are not separated.
*/
char *
-bittok2str_nosep(const struct tok *lp, const char *fmt,
- u_int v)
+bittok2str_nosep(const struct tok *lp, const char *fmt, const u_int v)
{
return (bittok2str_internal(lp, fmt, v, ""));
}
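
A sketch of the no-separator variant in the style of TCP flag output;
the table and the "flags" value are illustrative:

    static const struct tok tcp_flag_values[] = {
        { 0x01, "F" }, { 0x02, "S" }, { 0x04, "R" },
        { 0x08, "P" }, { 0x10, "." }, { 0x20, "U" },
        { 0, NULL }
    };

    /* flags == 0x12 (SYN|ACK) prints as "[S.]" */
    printf("[%s]", bittok2str_nosep(tcp_flag_values, "none", flags));
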
* this is useful for parsing bitfields; the output strings are comma-separated.
*/
char *
-bittok2str(const struct tok *lp, const char *fmt,
- u_int v)
+bittok2str(const struct tok *lp, const char *fmt, const u_int v)
{
return (bittok2str_internal(lp, fmt, v, ", "));
}
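
The comma-separated variant joins each matched bit's token with ", ",
in table order. An illustrative table and value:

    static const struct tok ip_flag_values[] = {
        { 0x2000, "MF" },
        { 0x4000, "DF" },
        { 0, NULL }
    };

    /* a value of 0x6000 prints as "flags [MF, DF]" */
    printf("flags [%s]", bittok2str(ip_flag_values, "none", ipflags));
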
* correct for bounds-checking.
*/
const char *
-tok2strary_internal(const char **lp, int n, const char *fmt,
- int v)
+tok2strary_internal(const char **lp, int n, const char *fmt, const int v)
{
static char buf[TOKBUFSIZE];
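
The "n" argument lets the lookup bounds-check "v" against the array
length, so the natural call pattern is a wrapper that derives "n" from
the array itself. A sketch, assuming such a wrapper (the macro name and
table are illustrative):

    #define TOK2STRARY(a, f, v) \
        tok2strary_internal((a), sizeof(a) / sizeof((a)[0]), (f), (v))

    static const char *dir_strings[] = { "in", "out", "unknown" };
    printf("%s", TOK2STRARY(dir_strings, "bogus-direction-%d", dir));
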
*/
int
-mask2plen(uint32_t mask)
+mask2plen(const uint32_t mask)
{
- uint32_t bitmasks[33] = {
+ const uint32_t bitmasks[33] = {
0x00000000,
0x80000000, 0xc0000000, 0xe0000000, 0xf0000000,
0xf8000000, 0xfc000000, 0xfe000000, 0xff000000,
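
mask2plen() scans this 33-entry table for an exact match and returns
the matching index, which is the prefix length; assuming the usual
fall-through return for an unmatched (non-contiguous) mask, that case
yields -1. Worked examples:

    mask2plen(0x00000000);  /* ->  0 (0.0.0.0)          */
    mask2plen(0xffffff00);  /* -> 24 (255.255.255.0)    */
    mask2plen(0xff00ff00);  /* -> -1, not a prefix mask */
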
*/
static int
fetch_token(netdissect_options *ndo, const u_char *pptr, u_int idx, u_int len,
- u_char *tbuf, size_t tbuflen)
+ u_char *tbuf, size_t tbuflen)
{
size_t toklen = 0;
u_char c;
(defined(__s390__) || defined(__s390x__) || defined(__zarch__)) || \
defined(__vax__)
/*
- * The procesor natively handles unaligned loads, so just use memcpy()
+ * The processor natively handles unaligned loads, so just use memcpy()
* and memcmp(), to enable those optimizations.
*
* XXX - are those all the x86 tests we need?
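
A sketch of the access pattern this comment is about: reading a
possibly unaligned 32-bit value through memcpy(), which the compiler
collapses into a single load on the architectures listed above. The
helper name is illustrative:

    #include <string.h>
    #include <stdint.h>

    static uint32_t
    load_unaligned_32(const unsigned char *p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof(v));   /* one plain load on these CPUs */
        return v;
    }
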