X-Git-Url: https://git.tcpdump.org/tcpdump/blobdiff_plain/3c8f3e13b03380742c24070f8a7b56fe12c6b8ee..a8abce5c5e2dce2ba6dbccd5d3829da104b80f9c:/extract.h

diff --git a/extract.h b/extract.h
index 7e2fad1e..64c9d724 100644
--- a/extract.h
+++ b/extract.h
@@ -19,12 +19,14 @@
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  */
 
+#include <string.h>
+
 /*
  * For 8-bit values; needed to fetch a one-byte value.  Byte order
  * isn't relevant, and alignment isn't an issue.
  */
-#define EXTRACT_8BITS(p)	(*(p))
-#define EXTRACT_INT8(p)	((int8_t)(*(p)))
+#define EXTRACT_U_1(p)	((uint8_t)(*(p)))
+#define EXTRACT_S_1(p)	((int8_t)(*(p)))
 
 /*
  * Inline functions or macros to extract possibly-unaligned big-endian
@@ -82,54 +84,65 @@
  * XXX - do we need to test for both 32-bit and 64-bit versions of those
  * architectures in all cases?
  */
-static inline uint16_t UNALIGNED_OK
-EXTRACT_BE_16BITS(const void *p)
+UNALIGNED_OK static inline uint16_t
+EXTRACT_BE_U_2(const void *p)
 {
 	return ((uint16_t)ntohs(*(const uint16_t *)(p)));
 }
 
-static inline int16_t UNALIGNED_OK
-EXTRACT_BE_INT16(const void *p)
+UNALIGNED_OK static inline int16_t
+EXTRACT_BE_S_2(const void *p)
 {
 	return ((int16_t)ntohs(*(const int16_t *)(p)));
 }
 
-static inline uint32_t UNALIGNED_OK
-EXTRACT_BE_32BITS(const void *p)
+UNALIGNED_OK static inline uint32_t
+EXTRACT_BE_U_4(const void *p)
 {
 	return ((uint32_t)ntohl(*(const uint32_t *)(p)));
 }
 
-static inline int32_t UNALIGNED_OK
-EXTRACT_BE_INT32(const void *p)
+UNALIGNED_OK static inline int32_t
+EXTRACT_BE_S_4(const void *p)
 {
 	return ((int32_t)ntohl(*(const int32_t *)(p)));
 }
 
-static inline uint64_t UNALIGNED_OK
-EXTRACT_BE_64BITS(const void *p)
+UNALIGNED_OK static inline uint64_t
+EXTRACT_BE_U_8(const void *p)
 {
 	return ((uint64_t)(((uint64_t)ntohl(*((const uint32_t *)(p) + 0))) << 32 |
 		((uint64_t)ntohl(*((const uint32_t *)(p) + 1))) << 0));
 }
 
-static inline int64_t UNALIGNED_OK
-EXTRACT_BE_INT64(const void *p)
+UNALIGNED_OK static inline int64_t
+EXTRACT_BE_S_8(const void *p)
 {
 	return ((int64_t)(((int64_t)ntohl(*((const uint32_t *)(p) + 0))) << 32 |
 		((uint64_t)ntohl(*((const uint32_t *)(p) + 1))) << 0));
 }
 
-#elif defined(__GNUC__) && defined(HAVE___ATTRIBUTE__) && \
+
+/*
+ * Extract an IPv4 address, which is in network byte order, and not
+ * necessarily aligned, and provide the result in host byte order.
+ */
+UNALIGNED_OK static inline uint32_t
+EXTRACT_IPV4_TO_HOST_ORDER(const void *p)
+{
+	return ((uint32_t)ntohl(*(const uint32_t *)(p)));
+}
+#elif ND_IS_AT_LEAST_GNUC_VERSION(2,0) && \
     (defined(__alpha) || defined(__alpha__) || \
      defined(__mips) || defined(__mips__))
 /*
  * This is MIPS or Alpha, which don't natively handle unaligned loads,
  * but which have instructions that can help when doing unaligned
- * loads, and this is a GCC-compatible compiler and we have __attribute__,
- * which we assume that mean we have __attribute__((packed)), which
- * we can use to convince the compiler to generate those instructions.
+ * loads, and this is GCC 2.0 or later or a compiler that claims to
+ * be GCC 2.0 or later, which we assume means we have
+ * __attribute__((packed)), which we can use to convince the compiler
+ * to generate those instructions.
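[Editorial aside, not part of the commit: a minimal standalone sketch of the cast-and-byte-swap idiom the UNALIGNED_OK branch earlier in this hunk relies on. A misaligned dereference such as *(const uint16_t *)(p) is undefined behavior in portable C; it is acceptable there only because that branch is compiled solely on architectures known to tolerate unaligned loads, which is what the surrounding #if encodes. The buffer and variable names below are hypothetical.]

#include <arpa/inet.h>	/* ntohs() */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* 0x1234, big-endian, at a deliberately odd (misaligned) offset. */
	static const uint8_t buf[] = { 0xff, 0x12, 0x34 };

	/* On x86-64 or ARM64 this compiles to a plain load plus a byte
	 * swap; on a strict-alignment CPU it could trap, hence the
	 * #elif chain in the header. */
	uint16_t v = (uint16_t)ntohs(*(const uint16_t *)(buf + 1));

	printf("0x%04x\n", v);	/* prints 0x1234 */
	return (0);
}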
* * Declare packed structures containing a uint16_t and a uint32_t, * cast the pointer to point to one of those, and fetch through it; @@ -192,42 +205,52 @@ typedef struct { } __attribute__((packed)) unaligned_int32_t; UNALIGNED_OK static inline uint16_t -EXTRACT_BE_16BITS(const void *p) +EXTRACT_BE_U_2(const void *p) { return ((uint16_t)ntohs(((const unaligned_uint16_t *)(p))->val)); } UNALIGNED_OK static inline int16_t -EXTRACT_BE_INT16(const void *p) +EXTRACT_BE_S_2(const void *p) { return ((int16_t)ntohs(((const unaligned_int16_t *)(p))->val)); } UNALIGNED_OK static inline uint32_t -EXTRACT_BE_32BITS(const void *p) +EXTRACT_BE_U_4(const void *p) { return ((uint32_t)ntohl(((const unaligned_uint32_t *)(p))->val)); } UNALIGNED_OK static inline int32_t -EXTRACT_BE_INT32(const void *p) +EXTRACT_BE_S_4(const void *p) { return ((int32_t)ntohl(((const unaligned_int32_t *)(p))->val)); } UNALIGNED_OK static inline uint64_t -EXTRACT_BE_64BITS(const void *p) +EXTRACT_BE_U_8(const void *p) { return ((uint64_t)(((uint64_t)ntohl(((const unaligned_uint32_t *)(p) + 0)->val)) << 32 | ((uint64_t)ntohl(((const unaligned_uint32_t *)(p) + 1)->val)) << 0)); } UNALIGNED_OK static inline int64_t -EXTRACT_BE_INT64(const void *p) +EXTRACT_BE_S_8(const void *p) { return ((int64_t)(((uint64_t)ntohl(((const unaligned_uint32_t *)(p) + 0)->val)) << 32 | ((uint64_t)ntohl(((const unaligned_uint32_t *)(p) + 1)->val)) << 0)); } + +/* + * Extract an IPv4 address, which is in network byte order, and not + * necessarily aligned, and provide the result in host byte order. + */ +UNALIGNED_OK static inline uint32_t +EXTRACT_IPV4_TO_HOST_ORDER(const void *p) +{ + return ((uint32_t)ntohl(((const unaligned_uint32_t *)(p))->val)); +} #else /* * This architecture doesn't natively support unaligned loads, and either @@ -237,23 +260,23 @@ EXTRACT_BE_INT64(const void *p) * quantities the hard way - fetch the bytes one at a time and * assemble them. 
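[Editorial aside, not part of the commit: the packed-struct trick the typedefs above implement, in self-contained form. This assumes GCC or Clang: __attribute__((packed)) reduces the structure's alignment requirement to 1, so the compiler may not assume ->val is 4-byte aligned and must emit alignment-safe load sequences (for example lwl/lwr on MIPS, or ldq_u-based sequences on Alpha). The function name is hypothetical.]

#include <arpa/inet.h>	/* ntohl() */
#include <stdint.h>

typedef struct {
	uint32_t	val;
} __attribute__((packed)) unaligned_uint32_t;

static uint32_t
fetch_be_u_4(const void *p)
{
	/* Because the struct is packed, this fetch is safe at any
	 * address, and the result is then swapped to host order. */
	return ((uint32_t)ntohl(((const unaligned_uint32_t *)p)->val));
}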
 */
-#define EXTRACT_BE_16BITS(p) \
+#define EXTRACT_BE_U_2(p) \
 	((uint16_t)(((uint16_t)(*((const uint8_t *)(p) + 0)) << 8) | \
 	            ((uint16_t)(*((const uint8_t *)(p) + 1)) << 0)))
-#define EXTRACT_BE_INT16(p) \
+#define EXTRACT_BE_S_2(p) \
 	((int16_t)(((uint16_t)(*((const uint8_t *)(p) + 0)) << 8) | \
 	           ((uint16_t)(*((const uint8_t *)(p) + 1)) << 0)))
-#define EXTRACT_BE_32BITS(p) \
+#define EXTRACT_BE_U_4(p) \
 	((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 24) | \
 	            ((uint32_t)(*((const uint8_t *)(p) + 1)) << 16) | \
 	            ((uint32_t)(*((const uint8_t *)(p) + 2)) << 8) | \
 	            ((uint32_t)(*((const uint8_t *)(p) + 3)) << 0)))
-#define EXTRACT_BE_INT32(p) \
+#define EXTRACT_BE_S_4(p) \
 	((int32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 24) | \
 	           ((uint32_t)(*((const uint8_t *)(p) + 1)) << 16) | \
 	           ((uint32_t)(*((const uint8_t *)(p) + 2)) << 8) | \
 	           ((uint32_t)(*((const uint8_t *)(p) + 3)) << 0)))
-#define EXTRACT_BE_64BITS(p) \
+#define EXTRACT_BE_U_8(p) \
 	((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 56) | \
 	            ((uint64_t)(*((const uint8_t *)(p) + 1)) << 48) | \
 	            ((uint64_t)(*((const uint8_t *)(p) + 2)) << 40) | \
@@ -262,7 +285,7 @@ EXTRACT_BE_INT64(const void *p)
 	            ((uint64_t)(*((const uint8_t *)(p) + 5)) << 16) | \
 	            ((uint64_t)(*((const uint8_t *)(p) + 6)) << 8) | \
 	            ((uint64_t)(*((const uint8_t *)(p) + 7)) << 0)))
-#define EXTRACT_BE_INT64(p) \
+#define EXTRACT_BE_S_8(p) \
 	((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 56) | \
 	           ((uint64_t)(*((const uint8_t *)(p) + 1)) << 48) | \
 	           ((uint64_t)(*((const uint8_t *)(p) + 2)) << 40) | \
@@ -271,14 +294,93 @@ EXTRACT_BE_INT64(const void *p)
 	           ((uint64_t)(*((const uint8_t *)(p) + 5)) << 16) | \
 	           ((uint64_t)(*((const uint8_t *)(p) + 6)) << 8) | \
 	           ((uint64_t)(*((const uint8_t *)(p) + 7)) << 0)))
+
+/*
+ * Extract an IPv4 address, which is in network byte order, and not
+ * necessarily aligned, and provide the result in host byte order.
+ */
+#define EXTRACT_IPV4_TO_HOST_ORDER(p) \
+	((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 24) | \
+	            ((uint32_t)(*((const uint8_t *)(p) + 1)) << 16) | \
+	            ((uint32_t)(*((const uint8_t *)(p) + 2)) << 8) | \
+	            ((uint32_t)(*((const uint8_t *)(p) + 3)) << 0)))
 #endif /* unaligned access checks */
 
-#define EXTRACT_BE_24BITS(p) \
+/*
+ * Extract numerical values in *host* byte order.  (Some metadata
+ * headers are in the byte order of the host that wrote the file,
+ * and libpcap translates them to the byte order of the host
+ * reading the file.  This means that if a program on that host
+ * reads the file with libpcap and writes to a new file, the new
+ * file will be written in the byte order of the host writing the
+ * file.  Thus, the magic number in pcap files and the byte-order
+ * magic in pcapng files can be used to determine the byte order
+ * in those metadata headers.)
+ *
+ * XXX - on platforms that can do unaligned accesses, just cast and
+ * dereference the pointer.
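[Editorial aside, not part of the commit: the EXTRACT_HE_* functions added in the next hunk go through UNALIGNED_MEMCPY, tcpdump's thin wrapper around memcpy() defined elsewhere. A fixed-size memcpy() is defined for any alignment, and compilers routinely lower it to a single load where the hardware allows, so it addresses the XXX above without undefined behavior. A minimal sketch with plain memcpy(); the function name is hypothetical.]

#include <stdint.h>
#include <string.h>

static uint32_t
fetch_he_u_4(const void *p)
{
	uint32_t val;

	/* Defined behavior at any alignment; no byte swap, because
	 * the value is already in host byte order. */
	memcpy(&val, p, sizeof(val));
	return (val);
}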
+ */ +static inline uint16_t +EXTRACT_HE_U_2(const void *p) +{ + uint16_t val; + + UNALIGNED_MEMCPY(&val, p, sizeof(uint16_t)); + return val; +} + +static inline int16_t +EXTRACT_HE_S_2(const void *p) +{ + int16_t val; + + UNALIGNED_MEMCPY(&val, p, sizeof(int16_t)); + return val; +} + +static inline uint32_t +EXTRACT_HE_U_4(const void *p) +{ + uint32_t val; + + UNALIGNED_MEMCPY(&val, p, sizeof(uint32_t)); + return val; +} + +static inline int32_t +EXTRACT_HE_S_4(const void *p) +{ + int32_t val; + + UNALIGNED_MEMCPY(&val, p, sizeof(int32_t)); + return val; +} + +/* + * Extract an IPv4 address, which is in network byte order, and which + * is not necessarily aligned on a 4-byte boundary, and provide the + * result in network byte order. + * + * This works the same way regardless of the host's byte order. + */ +static inline uint32_t +EXTRACT_IPV4_TO_NETWORK_ORDER(const void *p) +{ + uint32_t addr; + + UNALIGNED_MEMCPY(&addr, p, sizeof(uint32_t)); + return addr; +} + +/* + * Non-power-of-2 sizes. + */ +#define EXTRACT_BE_U_3(p) \ ((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 16) | \ ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \ ((uint32_t)(*((const uint8_t *)(p) + 2)) << 0))) -#define EXTRACT_BE_INT24(p) \ +#define EXTRACT_BE_S_3(p) \ (((*((const uint8_t *)(p) + 0)) & 0x80) ? \ ((int32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 16) | \ ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \ @@ -288,14 +390,14 @@ EXTRACT_BE_INT64(const void *p) ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \ ((uint32_t)(*((const uint8_t *)(p) + 2)) << 0)))) -#define EXTRACT_BE_40BITS(p) \ +#define EXTRACT_BE_U_5(p) \ ((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 32) | \ ((uint64_t)(*((const uint8_t *)(p) + 1)) << 24) | \ ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \ ((uint64_t)(*((const uint8_t *)(p) + 3)) << 8) | \ ((uint64_t)(*((const uint8_t *)(p) + 4)) << 0))) -#define EXTRACT_BE_INT40(p) \ +#define EXTRACT_BE_S_5(p) \ (((*((const uint8_t *)(p) + 0)) & 0x80) ? \ ((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 32) | \ ((uint64_t)(*((const uint8_t *)(p) + 1)) << 24) | \ @@ -309,7 +411,7 @@ EXTRACT_BE_INT64(const void *p) ((uint64_t)(*((const uint8_t *)(p) + 3)) << 8) | \ ((uint64_t)(*((const uint8_t *)(p) + 4)) << 0)))) -#define EXTRACT_BE_48BITS(p) \ +#define EXTRACT_BE_U_6(p) \ ((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 40) | \ ((uint64_t)(*((const uint8_t *)(p) + 1)) << 32) | \ ((uint64_t)(*((const uint8_t *)(p) + 2)) << 24) | \ @@ -317,9 +419,9 @@ EXTRACT_BE_INT64(const void *p) ((uint64_t)(*((const uint8_t *)(p) + 4)) << 8) | \ ((uint64_t)(*((const uint8_t *)(p) + 5)) << 0))) -#define EXTRACT_BE_INT48(p) \ +#define EXTRACT_BE_S_6(p) \ (((*((const uint8_t *)(p) + 0)) & 0x80) ? 
\ - ((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 40) | \ + ((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 40) | \ ((uint64_t)(*((const uint8_t *)(p) + 1)) << 32) | \ ((uint64_t)(*((const uint8_t *)(p) + 2)) << 24) | \ ((uint64_t)(*((const uint8_t *)(p) + 3)) << 16) | \ @@ -333,7 +435,7 @@ EXTRACT_BE_INT64(const void *p) ((uint64_t)(*((const uint8_t *)(p) + 4)) << 8) | \ ((uint64_t)(*((const uint8_t *)(p) + 5)) << 0)))) -#define EXTRACT_BE_56BITS(p) \ +#define EXTRACT_BE_U_7(p) \ ((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 48) | \ ((uint64_t)(*((const uint8_t *)(p) + 1)) << 40) | \ ((uint64_t)(*((const uint8_t *)(p) + 2)) << 32) | \ @@ -342,7 +444,7 @@ EXTRACT_BE_INT64(const void *p) ((uint64_t)(*((const uint8_t *)(p) + 5)) << 8) | \ ((uint64_t)(*((const uint8_t *)(p) + 6)) << 0))) -#define EXTRACT_BE_INT56(p) \ +#define EXTRACT_BE_S_7(p) \ (((*((const uint8_t *)(p) + 0)) & 0x80) ? \ ((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 48) | \ ((uint64_t)(*((const uint8_t *)(p) + 1)) << 40) | \ @@ -364,19 +466,31 @@ EXTRACT_BE_INT64(const void *p) * Macros to extract possibly-unaligned little-endian integral values. * XXX - do loads on little-endian machines that support unaligned loads? */ -#define EXTRACT_LE_16BITS(p) \ +#define EXTRACT_LE_U_2(p) \ ((uint16_t)(((uint16_t)(*((const uint8_t *)(p) + 1)) << 8) | \ ((uint16_t)(*((const uint8_t *)(p) + 0)) << 0))) -#define EXTRACT_LE_32BITS(p) \ +#define EXTRACT_LE_S_2(p) \ + ((int16_t)(((uint16_t)(*((const uint8_t *)(p) + 1)) << 8) | \ + ((uint16_t)(*((const uint8_t *)(p) + 0)) << 0))) +#define EXTRACT_LE_U_4(p) \ ((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 3)) << 24) | \ ((uint32_t)(*((const uint8_t *)(p) + 2)) << 16) | \ ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \ ((uint32_t)(*((const uint8_t *)(p) + 0)) << 0))) -#define EXTRACT_LE_24BITS(p) \ +#define EXTRACT_LE_S_4(p) \ + ((int32_t)(((uint32_t)(*((const uint8_t *)(p) + 3)) << 24) | \ + ((uint32_t)(*((const uint8_t *)(p) + 2)) << 16) | \ + ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \ + ((uint32_t)(*((const uint8_t *)(p) + 0)) << 0))) +#define EXTRACT_LE_U_3(p) \ ((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 2)) << 16) | \ ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \ ((uint32_t)(*((const uint8_t *)(p) + 0)) << 0))) -#define EXTRACT_LE_64BITS(p) \ +#define EXTRACT_LE_S_3(p) \ + ((int32_t)(((uint32_t)(*((const uint8_t *)(p) + 2)) << 16) | \ + ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \ + ((uint32_t)(*((const uint8_t *)(p) + 0)) << 0))) +#define EXTRACT_LE_U_8(p) \ ((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 7)) << 56) | \ ((uint64_t)(*((const uint8_t *)(p) + 6)) << 48) | \ ((uint64_t)(*((const uint8_t *)(p) + 5)) << 40) | \ @@ -385,33 +499,42 @@ EXTRACT_BE_INT64(const void *p) ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \ ((uint64_t)(*((const uint8_t *)(p) + 1)) << 8) | \ ((uint64_t)(*((const uint8_t *)(p) + 0)) << 0))) +#define EXTRACT_LE_S_8(p) \ + ((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 7)) << 56) | \ + ((uint64_t)(*((const uint8_t *)(p) + 6)) << 48) | \ + ((uint64_t)(*((const uint8_t *)(p) + 5)) << 40) | \ + ((uint64_t)(*((const uint8_t *)(p) + 4)) << 32) | \ + ((uint64_t)(*((const uint8_t *)(p) + 3)) << 24) | \ + ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \ + ((uint64_t)(*((const uint8_t *)(p) + 1)) << 8) | \ + ((uint64_t)(*((const uint8_t *)(p) + 0)) << 0))) /* * Macros to check the presence of the values in question. 
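[Editorial aside, not part of the commit: the signed odd-width macros above, including the EXTRACT_BE_S_6 negative branch this diff corrects to cast through int64_t, sign-extend by hand: test the top bit of the most significant byte and fill the missing high bits with ones. One way to write the same idiom in standalone form; the helper name is hypothetical and does not replicate the macros verbatim.]

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: fetch a 3-byte big-endian signed value. */
static int32_t
be_s_3(const uint8_t *p)
{
	uint32_t u = ((uint32_t)p[0] << 16) |
	             ((uint32_t)p[1] << 8) |
	             ((uint32_t)p[2] << 0);

	/* Bit 23 set means negative: fill bits 31..24 with ones. */
	return ((p[0] & 0x80) ? (int32_t)(0xFF000000U | u) : (int32_t)u);
}

int
main(void)
{
	static const uint8_t bytes[] = { 0xff, 0xff, 0xfe };

	printf("%d\n", be_s_3(bytes));	/* prints -2 */
	return (0);
}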
*/ -#define ND_TTEST_1(p) ND_TTEST2(*(p), 1) -#define ND_TCHECK_1(p) ND_TCHECK2(*(p), 1) +#define ND_TTEST_1(p) ND_TTEST_LEN((p), 1) +#define ND_TCHECK_1(p) ND_TCHECK_LEN((p), 1) -#define ND_TTEST_2(p) ND_TTEST2(*(p), 2) -#define ND_TCHECK_2(p) ND_TCHECK2(*(p), 2) +#define ND_TTEST_2(p) ND_TTEST_LEN((p), 2) +#define ND_TCHECK_2(p) ND_TCHECK_LEN((p), 2) -#define ND_TTEST_3(p) ND_TTEST2(*(p), 3) -#define ND_TCHECK_3(p) ND_TCHECK2(*(p), 3) +#define ND_TTEST_3(p) ND_TTEST_LEN((p), 3) +#define ND_TCHECK_3(p) ND_TCHECK_LEN((p), 3) -#define ND_TTEST_4(p) ND_TTEST2(*(p), 4) -#define ND_TCHECK_4(p) ND_TCHECK2(*(p), 4) +#define ND_TTEST_4(p) ND_TTEST_LEN((p), 4) +#define ND_TCHECK_4(p) ND_TCHECK_LEN((p), 4) -#define ND_TTEST_5(p) ND_TTEST2(*(p), 5) -#define ND_TCHECK_5(p) ND_TCHECK2(*(p), 5) +#define ND_TTEST_5(p) ND_TTEST_LEN((p), 5) +#define ND_TCHECK_5(p) ND_TCHECK_LEN((p), 5) -#define ND_TTEST_6(p) ND_TTEST2(*(p), 6) -#define ND_TCHECK_6(p) ND_TCHECK2(*(p), 6) +#define ND_TTEST_6(p) ND_TTEST_LEN((p), 6) +#define ND_TCHECK_6(p) ND_TCHECK_LEN((p), 6) -#define ND_TTEST_7(p) ND_TTEST2(*(p), 7) -#define ND_TCHECK_7(p) ND_TCHECK2(*(p), 7) +#define ND_TTEST_7(p) ND_TTEST_LEN((p), 7) +#define ND_TCHECK_7(p) ND_TCHECK_LEN((p), 7) -#define ND_TTEST_8(p) ND_TTEST2(*(p), 8) -#define ND_TCHECK_8(p) ND_TCHECK2(*(p), 8) +#define ND_TTEST_8(p) ND_TTEST_LEN((p), 8) +#define ND_TCHECK_8(p) ND_TCHECK_LEN((p), 8) -#define ND_TTEST_16(p) ND_TTEST2(*(p), 16) -#define ND_TCHECK_16(p) ND_TCHECK2(*(p), 16) +#define ND_TTEST_16(p) ND_TTEST_LEN((p), 16) +#define ND_TCHECK_16(p) ND_TCHECK_LEN((p), 16)
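[Editorial aside, not part of the commit: a sketch of how the renamed pieces fit together in a dissector, with a length check via ND_TTEST_LEN() followed by the new fixed-width accessors. print_foo(), its record layout, and the "[|foo]" truncation marker are hypothetical; the double-parenthesis ND_PRINT form and the ndo implicitly used by the test macros follow tcpdump conventions of this period. The ND_TCHECK_* variants wrap the same test but jump to a local trunc label on failure instead.]

#include "netdissect.h"
#include "extract.h"

static void
print_foo(netdissect_options *ndo, const u_char *cp)
{
	uint8_t version;
	uint16_t flags;
	uint32_t seq;

	/* Hypothetical layout: 1-byte version, 2-byte little-endian
	 * flags, 4-byte big-endian sequence number. */
	if (!ND_TTEST_LEN(cp, 7)) {	/* are all 7 bytes captured? */
		ND_PRINT((ndo, " [|foo]"));
		return;
	}
	version = EXTRACT_U_1(cp);	/* 1 byte; byte order irrelevant */
	flags = EXTRACT_LE_U_2(cp + 1);	/* little-endian on the wire */
	seq = EXTRACT_BE_U_4(cp + 3);	/* network byte order */
	ND_PRINT((ndo, " foo v%u, flags 0x%04x, seq %u",
	    version, flags, seq));
}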