/*
 * Copyright (c) 1992, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
/*
 * For 8-bit values; needed to fetch a one-byte value.  Byte order
 * isn't relevant, and alignment isn't an issue.
 */
#define EXTRACT_U_1(p)	((uint8_t)(*(p)))
#define EXTRACT_S_1(p)	((int8_t)(*(p)))
32 * Inline functions or macros to extract possibly-unaligned big-endian
35 #include "funcattrs.h"
38 * If we have versions of GCC or Clang that support an __attribute__
39 * to say "if we're building with unsigned behavior sanitization,
40 * don't complain about undefined behavior in this function", we
41 * label these functions with that attribute - we *know* it's undefined
42 * in the C standard, but we *also* know it does what we want with
43 * the ISA we're targeting and the compiler we're using.
45 * For GCC 4.9.0 and later, we use __attribute__((no_sanitize_undefined));
46 * pre-5.0 GCC doesn't have __has_attribute, and I'm not sure whether
47 * GCC or Clang first had __attribute__((no_sanitize(XXX)).
49 * For Clang, we check for __attribute__((no_sanitize(XXX)) with
50 * __has_attribute, as there are versions of Clang that support
51 * __attribute__((no_sanitize("undefined")) but don't support
52 * __attribute__((no_sanitize_undefined)).
54 * We define this here, rather than in funcattrs.h, because we
55 * only want it used here, we don't want it to be broadly used.
56 * (Any printer will get this defined, but this should at least
57 * make it harder for people to find.)
59 #if defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 409)
60 #define UNALIGNED_OK __attribute__((no_sanitize_undefined))
61 #elif __has_attribute(no_sanitize)
62 #define UNALIGNED_OK __attribute__((no_sanitize("undefined")))
67 #if (defined(__i386__) || defined(_M_IX86) || defined(__X86__) || defined(__x86_64__) || defined(_M_X64)) || \
68 (defined(__m68k__) && (!defined(__mc68000__) && !defined(__mc68010__))) || \
69 (defined(__ppc__) || defined(__ppc64__) || defined(_M_PPC) || defined(_ARCH_PPC) || defined(_ARCH_PPC64)) || \
70 (defined(__s390__) || defined(__s390x__) || defined(__zarch__))
72 * The processor natively handles unaligned loads, so we can just
73 * cast the pointer and fetch through it.
75 * XXX - are those all the x86 tests we need?
76 * XXX - are those the only 68k tests we need not to generated
77 * unaligned accesses if the target is the 68000 or 68010?
78 * XXX - are there any tests we don't need, because some definitions are for
79 * compilers that also predefine the GCC symbols?
80 * XXX - do we need to test for both 32-bit and 64-bit versions of those
81 * architectures in all cases?
83 UNALIGNED_OK
static inline uint16_t
84 EXTRACT_BE_U_2(const void *p
)
86 return ((uint16_t)ntohs(*(const uint16_t *)(p
)));
89 UNALIGNED_OK
static inline int16_t
90 EXTRACT_BE_S_2(const void *p
)
92 return ((int16_t)ntohs(*(const int16_t *)(p
)));
95 UNALIGNED_OK
static inline uint32_t
96 EXTRACT_BE_U_4(const void *p
)
98 return ((uint32_t)ntohl(*(const uint32_t *)(p
)));
101 UNALIGNED_OK
static inline int32_t
102 EXTRACT_BE_S_4(const void *p
)
104 return ((int32_t)ntohl(*(const int32_t *)(p
)));
107 UNALIGNED_OK
static inline uint64_t
108 EXTRACT_BE_U_8(const void *p
)
110 return ((uint64_t)(((uint64_t)ntohl(*((const uint32_t *)(p
) + 0))) << 32 |
111 ((uint64_t)ntohl(*((const uint32_t *)(p
) + 1))) << 0));
115 UNALIGNED_OK
static inline int64_t
116 EXTRACT_BE_S_8(const void *p
)
118 return ((int64_t)(((int64_t)ntohl(*((const uint32_t *)(p
) + 0))) << 32 |
119 ((uint64_t)ntohl(*((const uint32_t *)(p
) + 1))) << 0));
124 * Extract an IPv4 address, which is in network byte order, and not
125 * necessarily aligned, and provide the result in host byte order.
127 UNALIGNED_OK
static inline uint32_t
128 EXTRACT_IPV4_TO_HOST_ORDER(const void *p
)
130 return ((uint32_t)ntohl(*(const uint32_t *)(p
)));
132 #elif ND_IS_AT_LEAST_GNUC_VERSION(2,0) && \
133 (defined(__alpha) || defined(__alpha__) || \
134 defined(__mips) || defined(__mips__))
136 * This is MIPS or Alpha, which don't natively handle unaligned loads,
137 * but which have instructions that can help when doing unaligned
138 * loads, and this is GCC 2.0 or later or a compiler that claims to
139 * be GCC 2.0 or later, which we assume that mean we have
140 * __attribute__((packed)), which we can use to convince the compiler
141 * to generate those instructions.
143 * Declare packed structures containing a uint16_t and a uint32_t,
144 * cast the pointer to point to one of those, and fetch through it;
145 * the GCC manual doesn't appear to explicitly say that
146 * __attribute__((packed)) causes the compiler to generate unaligned-safe
147 * code, but it apppears to do so.
149 * We do this in case the compiler can generate code using those
150 * instructions to do an unaligned load and pass stuff to "ntohs()" or
151 * "ntohl()", which might be better than than the code to fetch the
152 * bytes one at a time and assemble them. (That might not be the
153 * case on a little-endian platform, such as DEC's MIPS machines and
154 * Alpha machines, where "ntohs()" and "ntohl()" might not be done
157 * We do this only for specific architectures because, for example,
158 * at least some versions of GCC, when compiling for 64-bit SPARC,
159 * generate code that assumes alignment if we do this.
161 * XXX - add other architectures and compilers as possible and
164 * HP's C compiler, indicated by __HP_cc being defined, supports
165 * "#pragma unaligned N" in version A.05.50 and later, where "N"
166 * specifies a number of bytes at which the typedef on the next
167 * line is aligned, e.g.
170 * typedef uint16_t unaligned_uint16_t;
172 * to define unaligned_uint16_t as a 16-bit unaligned data type.
173 * This could be presumably used, in sufficiently recent versions of
174 * the compiler, with macros similar to those below. This would be
175 * useful only if that compiler could generate better code for PA-RISC
176 * or Itanium than would be generated by a bunch of shifts-and-ORs.
178 * DEC C, indicated by __DECC being defined, has, at least on Alpha,
179 * an __unaligned qualifier that can be applied to pointers to get the
180 * compiler to generate code that does unaligned loads and stores when
181 * dereferencing the pointer in question.
183 * XXX - what if the native C compiler doesn't support
184 * __attribute__((packed))? How can we get it to generate unaligned
185 * accesses for *specific* items?
189 } __attribute__((packed
)) unaligned_uint16_t
;
193 } __attribute__((packed
)) unaligned_int16_t
;
197 } __attribute__((packed
)) unaligned_uint32_t
;
201 } __attribute__((packed
)) unaligned_int32_t
;
203 UNALIGNED_OK
static inline uint16_t
204 EXTRACT_BE_U_2(const void *p
)
206 return ((uint16_t)ntohs(((const unaligned_uint16_t
*)(p
))->val
));
209 UNALIGNED_OK
static inline int16_t
210 EXTRACT_BE_S_2(const void *p
)
212 return ((int16_t)ntohs(((const unaligned_int16_t
*)(p
))->val
));
215 UNALIGNED_OK
static inline uint32_t
216 EXTRACT_BE_U_4(const void *p
)
218 return ((uint32_t)ntohl(((const unaligned_uint32_t
*)(p
))->val
));
221 UNALIGNED_OK
static inline int32_t
222 EXTRACT_BE_S_4(const void *p
)
224 return ((int32_t)ntohl(((const unaligned_int32_t
*)(p
))->val
));
227 UNALIGNED_OK
static inline uint64_t
228 EXTRACT_BE_U_8(const void *p
)
230 return ((uint64_t)(((uint64_t)ntohl(((const unaligned_uint32_t
*)(p
) + 0)->val
)) << 32 |
231 ((uint64_t)ntohl(((const unaligned_uint32_t
*)(p
) + 1)->val
)) << 0));
234 UNALIGNED_OK
static inline int64_t
235 EXTRACT_BE_S_8(const void *p
)
237 return ((int64_t)(((uint64_t)ntohl(((const unaligned_uint32_t
*)(p
) + 0)->val
)) << 32 |
238 ((uint64_t)ntohl(((const unaligned_uint32_t
*)(p
) + 1)->val
)) << 0));
242 * Extract an IPv4 address, which is in network byte order, and not
243 * necessarily aligned, and provide the result in host byte order.
245 UNALIGNED_OK
static inline uint32_t
246 EXTRACT_IPV4_TO_HOST_ORDER(const void *p
)
248 return ((uint32_t)ntohl(((const unaligned_uint32_t
*)(p
))->val
));
252 * This architecture doesn't natively support unaligned loads, and either
253 * this isn't a GCC-compatible compiler, we don't have __attribute__,
254 * or we do but we don't know of any better way with this instruction
255 * set to do unaligned loads, so do unaligned loads of big-endian
256 * quantities the hard way - fetch the bytes one at a time and
259 * XXX - ARM is a special case. ARMv1 through ARMv5 didn't suppory
260 * unaligned loads; ARMv6 and later support it *but* have a bit in
261 * the system control register that the OS can set and that causes
262 * unaligned loads to fault rather than succeeding.
264 * At least some OSes may set that flag, so we do *not* treat ARM
265 * as supporting unaligned loads. If your OS supports them on ARM,
266 * and you want to use them, please update the tests in the #if above
267 * to check for ARM *and* for your OS.
269 #define EXTRACT_BE_U_2(p) \
270 ((uint16_t)(((uint16_t)(*((const uint8_t *)(p) + 0)) << 8) | \
271 ((uint16_t)(*((const uint8_t *)(p) + 1)) << 0)))
272 #define EXTRACT_BE_S_2(p) \
273 ((int16_t)(((uint16_t)(*((const uint8_t *)(p) + 0)) << 8) | \
274 ((uint16_t)(*((const uint8_t *)(p) + 1)) << 0)))
275 #define EXTRACT_BE_U_4(p) \
276 ((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 24) | \
277 ((uint32_t)(*((const uint8_t *)(p) + 1)) << 16) | \
278 ((uint32_t)(*((const uint8_t *)(p) + 2)) << 8) | \
279 ((uint32_t)(*((const uint8_t *)(p) + 3)) << 0)))
280 #define EXTRACT_BE_S_4(p) \
281 ((int32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 24) | \
282 ((uint32_t)(*((const uint8_t *)(p) + 1)) << 16) | \
283 ((uint32_t)(*((const uint8_t *)(p) + 2)) << 8) | \
284 ((uint32_t)(*((const uint8_t *)(p) + 3)) << 0)))
285 #define EXTRACT_BE_U_8(p) \
286 ((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 56) | \
287 ((uint64_t)(*((const uint8_t *)(p) + 1)) << 48) | \
288 ((uint64_t)(*((const uint8_t *)(p) + 2)) << 40) | \
289 ((uint64_t)(*((const uint8_t *)(p) + 3)) << 32) | \
290 ((uint64_t)(*((const uint8_t *)(p) + 4)) << 24) | \
291 ((uint64_t)(*((const uint8_t *)(p) + 5)) << 16) | \
292 ((uint64_t)(*((const uint8_t *)(p) + 6)) << 8) | \
293 ((uint64_t)(*((const uint8_t *)(p) + 7)) << 0)))
294 #define EXTRACT_BE_S_8(p) \
295 ((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 56) | \
296 ((uint64_t)(*((const uint8_t *)(p) + 1)) << 48) | \
297 ((uint64_t)(*((const uint8_t *)(p) + 2)) << 40) | \
298 ((uint64_t)(*((const uint8_t *)(p) + 3)) << 32) | \
299 ((uint64_t)(*((const uint8_t *)(p) + 4)) << 24) | \
300 ((uint64_t)(*((const uint8_t *)(p) + 5)) << 16) | \
301 ((uint64_t)(*((const uint8_t *)(p) + 6)) << 8) | \
302 ((uint64_t)(*((const uint8_t *)(p) + 7)) << 0)))
305 * Extract an IPv4 address, which is in network byte order, and not
306 * necessarily aligned, and provide the result in host byte order.
308 #define EXTRACT_IPV4_TO_HOST_ORDER(p) \
309 ((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 24) | \
310 ((uint32_t)(*((const uint8_t *)(p) + 1)) << 16) | \
311 ((uint32_t)(*((const uint8_t *)(p) + 2)) << 8) | \
312 ((uint32_t)(*((const uint8_t *)(p) + 3)) << 0)))
313 #endif /* unaligned access checks */
/*
 * Extract numerical values in *host* byte order.  (Some metadata
 * headers are in the byte order of the host that wrote the file,
 * and libpcap translate them to the byte order of the host
 * reading the file.  This means that if a program on that host
 * reads with libpcap and writes to a new file, the new file will
 * be written in the byte order of the host writing the file.  Thus,
 * the magic number in pcap files and byte-order magic in pcapng
 * files can be used to determine the byte order in those metadata
 * headers.)
 *
 * XXX - on platforms that can do unaligned accesses, just cast and
 * dereference the pointer.
 */
static inline uint16_t
EXTRACT_HE_U_2(const void *p)
{
	uint16_t val;

	UNALIGNED_MEMCPY(&val, p, sizeof(uint16_t));
	return val;
}
/* Fetch a possibly-unaligned signed 16-bit value in host byte order. */
static inline int16_t
EXTRACT_HE_S_2(const void *p)
{
	int16_t val;

	UNALIGNED_MEMCPY(&val, p, sizeof(int16_t));
	return val;
}
/* Fetch a possibly-unaligned unsigned 32-bit value in host byte order. */
static inline uint32_t
EXTRACT_HE_U_4(const void *p)
{
	uint32_t val;

	UNALIGNED_MEMCPY(&val, p, sizeof(uint32_t));
	return val;
}
/* Fetch a possibly-unaligned signed 32-bit value in host byte order. */
static inline int32_t
EXTRACT_HE_S_4(const void *p)
{
	int32_t val;

	UNALIGNED_MEMCPY(&val, p, sizeof(int32_t));
	return val;
}
/*
 * Extract an IPv4 address, which is in network byte order, and which
 * is not necessarily aligned on a 4-byte boundary, and provide the
 * result in network byte order.
 *
 * This works the same way regardless of the host's byte order.
 */
static inline uint32_t
EXTRACT_IPV4_TO_NETWORK_ORDER(const void *p)
{
	uint32_t addr;

	UNALIGNED_MEMCPY(&addr, p, sizeof(uint32_t));
	return addr;
}
/*
 * Non-power-of-2 sizes.
 */
#define EXTRACT_BE_U_3(p) \
	((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 16) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 2)) << 0)))
/*
 * Sign-extending 24-bit big-endian fetch.
 *
 * BUG FIX: the ternary was inverted - it sign-extended values with the
 * high bit CLEAR and left values with the high bit SET unextended.
 * If the top bit of the most significant byte is set, OR in 0xFF000000
 * so the 24-bit negative value becomes a correct 32-bit negative value.
 */
#define EXTRACT_BE_S_3(p) \
	(((*((const uint8_t *)(p) + 0)) & 0x80) ? \
	  ((int32_t)(0xFF000000U | \
	             ((uint32_t)(*((const uint8_t *)(p) + 0)) << 16) | \
	             ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	             ((uint32_t)(*((const uint8_t *)(p) + 2)) << 0))) : \
	  ((int32_t)(((uint32_t)(*((const uint8_t *)(p) + 0)) << 16) | \
	             ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	             ((uint32_t)(*((const uint8_t *)(p) + 2)) << 0))))
/* 40-bit (5-byte) unsigned big-endian fetch. */
#define EXTRACT_BE_U_5(p) \
	((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 32) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 1)) << 24) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 3)) << 8) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 4)) << 0)))
/*
 * Sign-extending 40-bit (5-byte) big-endian fetch.
 *
 * BUG FIX: the ternary was inverted - sign-extension (OR with the top
 * 24 bits set) must apply when the high bit of the first byte IS set.
 */
#define EXTRACT_BE_S_5(p) \
	(((*((const uint8_t *)(p) + 0)) & 0x80) ? \
	  ((int64_t)(INT64_T_CONSTANT(0xFFFFFF0000000000U) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 0)) << 32) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 1)) << 24) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 3)) << 8) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 4)) << 0))) : \
	  ((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 32) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 1)) << 24) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 3)) << 8) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 4)) << 0))))
/* 48-bit (6-byte) unsigned big-endian fetch. */
#define EXTRACT_BE_U_6(p) \
	((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 40) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 1)) << 32) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 2)) << 24) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 3)) << 16) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 4)) << 8) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 5)) << 0)))
/*
 * Sign-extending 48-bit (6-byte) big-endian fetch.
 *
 * BUG FIXES: (1) the ternary was inverted - sign-extension must apply
 * when the high bit of the first byte IS set; (2) the extension mask
 * was 0xFFFFFFFF00000000, which clobbers the top 16 data bits - a
 * 48-bit value needs only the top 16 bits of the 64-bit result filled,
 * i.e. 0xFFFF000000000000.
 */
#define EXTRACT_BE_S_6(p) \
	(((*((const uint8_t *)(p) + 0)) & 0x80) ? \
	  ((int64_t)(INT64_T_CONSTANT(0xFFFF000000000000U) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 0)) << 40) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 1)) << 32) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 2)) << 24) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 3)) << 16) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 4)) << 8) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 5)) << 0))) : \
	  ((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 40) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 1)) << 32) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 2)) << 24) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 3)) << 16) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 4)) << 8) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 5)) << 0))))
/* 56-bit (7-byte) unsigned big-endian fetch. */
#define EXTRACT_BE_U_7(p) \
	((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 48) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 1)) << 40) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 2)) << 32) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 4)) << 16) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 5)) << 8) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 6)) << 0)))
/*
 * Sign-extending 56-bit (7-byte) big-endian fetch.
 *
 * BUG FIXES: (1) the ternary was inverted - sign-extension must apply
 * when the high bit of the first byte IS set; (2) the extension mask
 * was 0xFFFFFFFFFF000000, which overlaps the data bits - a 56-bit
 * value needs only the top 8 bits of the 64-bit result filled,
 * i.e. 0xFF00000000000000.
 */
#define EXTRACT_BE_S_7(p) \
	(((*((const uint8_t *)(p) + 0)) & 0x80) ? \
	  ((int64_t)(INT64_T_CONSTANT(0xFF00000000000000U) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 0)) << 48) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 1)) << 40) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 2)) << 32) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 4)) << 16) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 5)) << 8) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 6)) << 0))) : \
	  ((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 0)) << 48) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 1)) << 40) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 2)) << 32) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 4)) << 16) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 5)) << 8) | \
	             ((uint64_t)(*((const uint8_t *)(p) + 6)) << 0))))
/*
 * Macros to extract possibly-unaligned little-endian integral values.
 * XXX - do loads on little-endian machines that support unaligned loads?
 */
#define EXTRACT_LE_U_2(p) \
	((uint16_t)(((uint16_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	            ((uint16_t)(*((const uint8_t *)(p) + 0)) << 0)))
#define EXTRACT_LE_S_2(p) \
	((int16_t)(((uint16_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	           ((uint16_t)(*((const uint8_t *)(p) + 0)) << 0)))
#define EXTRACT_LE_U_4(p) \
	((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 0)) << 0)))
#define EXTRACT_LE_S_4(p) \
	((int32_t)(((uint32_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	           ((uint32_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	           ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	           ((uint32_t)(*((const uint8_t *)(p) + 0)) << 0)))
#define EXTRACT_LE_U_8(p) \
	((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 7)) << 56) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 6)) << 48) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 5)) << 40) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 4)) << 32) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 0)) << 0)))
#define EXTRACT_LE_S_8(p) \
	((int64_t)(((uint64_t)(*((const uint8_t *)(p) + 7)) << 56) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 6)) << 48) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 5)) << 40) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 4)) << 32) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	           ((uint64_t)(*((const uint8_t *)(p) + 0)) << 0)))
/*
 * Non-power-of-2 sizes.
 */
#define EXTRACT_LE_U_3(p) \
	((uint32_t)(((uint32_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	            ((uint32_t)(*((const uint8_t *)(p) + 0)) << 0)))
/*
 * Sign-extending 24-bit little-endian fetch.
 *
 * BUG FIX: the previous definition merely cast the assembled 24-bit
 * value to int32_t, so negative 24-bit values (high bit of the most
 * significant byte - the *last* byte - set) came out positive,
 * inconsistent with EXTRACT_BE_S_3.  Sign-extend by ORing in
 * 0xFF000000 when that bit is set.
 */
#define EXTRACT_LE_S_3(p) \
	(((*((const uint8_t *)(p) + 2)) & 0x80) ? \
	  ((int32_t)(0xFF000000U | \
	             ((uint32_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	             ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	             ((uint32_t)(*((const uint8_t *)(p) + 0)) << 0))) : \
	  ((int32_t)(((uint32_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	             ((uint32_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	             ((uint32_t)(*((const uint8_t *)(p) + 0)) << 0))))
#define EXTRACT_LE_U_5(p) \
	((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 4)) << 32) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 0)) << 0)))
#define EXTRACT_LE_U_6(p) \
	((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 5)) << 40) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 4)) << 32) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 0)) << 0)))
#define EXTRACT_LE_U_7(p) \
	((uint64_t)(((uint64_t)(*((const uint8_t *)(p) + 6)) << 48) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 5)) << 40) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 4)) << 32) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 3)) << 24) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 2)) << 16) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 1)) << 8) | \
	            ((uint64_t)(*((const uint8_t *)(p) + 0)) << 0)))
/*
 * Macros to check the presence of the values in question.
 * Each expands to the generic length-based bounds check
 * (ND_TTEST_LEN / ND_TCHECK_LEN) with a fixed byte count.
 */
#define ND_TTEST_1(p) ND_TTEST_LEN((p), 1)
#define ND_TCHECK_1(p) ND_TCHECK_LEN((p), 1)

#define ND_TTEST_2(p) ND_TTEST_LEN((p), 2)
#define ND_TCHECK_2(p) ND_TCHECK_LEN((p), 2)

#define ND_TTEST_3(p) ND_TTEST_LEN((p), 3)
#define ND_TCHECK_3(p) ND_TCHECK_LEN((p), 3)

#define ND_TTEST_4(p) ND_TTEST_LEN((p), 4)
#define ND_TCHECK_4(p) ND_TCHECK_LEN((p), 4)

#define ND_TTEST_5(p) ND_TTEST_LEN((p), 5)
#define ND_TCHECK_5(p) ND_TCHECK_LEN((p), 5)

#define ND_TTEST_6(p) ND_TTEST_LEN((p), 6)
#define ND_TCHECK_6(p) ND_TCHECK_LEN((p), 6)

#define ND_TTEST_7(p) ND_TTEST_LEN((p), 7)
#define ND_TCHECK_7(p) ND_TCHECK_LEN((p), 7)

#define ND_TTEST_8(p) ND_TTEST_LEN((p), 8)
#define ND_TCHECK_8(p) ND_TCHECK_LEN((p), 8)

#define ND_TTEST_16(p) ND_TTEST_LEN((p), 16)
#define ND_TCHECK_16(p) ND_TCHECK_LEN((p), 16)