/*
 * Copyright (c) 1992, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.''  Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 * Macros to extract possibly-unaligned big-endian integral values.
 */
#ifdef LBL_ALIGN
/*
 * The processor doesn't natively handle unaligned loads.
 */
#ifdef HAVE___ATTRIBUTE__
/*
 * We have __attribute__; we assume that means we have __attribute__((packed)).
 * Declare packed structures containing a u_int16_t and a u_int32_t,
 * cast the pointer to point to one of those, and fetch through it;
 * the GCC manual doesn't appear to explicitly say that
 * __attribute__((packed)) causes the compiler to generate unaligned-safe
 * code, but it appears to do so.
 *
 * We do this in case the compiler can generate, for this instruction set,
 * better code to do an unaligned load and pass stuff to "ntohs()" or
 * "ntohl()" than the code to fetch the bytes one at a time and
 * assemble them.  (That might not be the case on a little-endian platform,
 * where "ntohs()" and "ntohl()" might not be done inline.)
 */
typedef struct {
	u_int16_t	val;
} __attribute__((packed)) unaligned_u_int16_t;

typedef struct {
	u_int32_t	val;
} __attribute__((packed)) unaligned_u_int32_t;

static inline u_int16_t
EXTRACT_16BITS(const void *p)
{
	return ((u_int16_t)ntohs(((const unaligned_u_int16_t *)(p))->val));
}

static inline u_int32_t
EXTRACT_32BITS(const void *p)
{
	return ((u_int32_t)ntohl(((const unaligned_u_int32_t *)(p))->val));
}

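/*
 * There is no 64-bit counterpart of "ntohl()" in the traditional
 * byte-order API, so the 64-bit extractor below builds the value from
 * two 32-bit big-endian loads: the word at "p" supplies the upper
 * 32 bits and the word at "p + 4" the lower 32 bits.
 */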
static inline u_int64_t
EXTRACT_64BITS(const void *p)
{
	return ((u_int64_t)(((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 0)->val)) << 32 | \
		((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 1)->val)) << 0));
}

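/*
 * For example, given a hypothetical buffer
 *
 *	static const u_int8_t buf[3] = { 0x00, 0x12, 0x34 };
 *
 * EXTRACT_16BITS(buf + 1) yields 0x1234, and the fetch through the
 * packed structure is safe even though "buf + 1" is not 2-byte
 * aligned.
 */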
#else /* HAVE___ATTRIBUTE__ */
/*
 * We don't have __attribute__, so do unaligned loads of big-endian
 * quantities the hard way - fetch the bytes one at a time and
 * assemble them.
 */
#define EXTRACT_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 0) << 8 | \
		     (u_int16_t)*((const u_int8_t *)(p) + 1)))
#define EXTRACT_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 24 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 3)))
#define EXTRACT_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 0) << 56 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 1) << 48 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 2) << 40 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 3) << 32 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 4) << 24 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 5) << 16 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 6) << 8 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 7)))
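/*
 * Worked example: with the bytes { 0x12, 0x34, 0x56, 0x78 } at "p",
 * EXTRACT_32BITS(p) assembles
 *	(0x12 << 24) | (0x34 << 16) | (0x56 << 8) | 0x78 == 0x12345678
 * regardless of the alignment of "p".
 */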
#endif /* HAVE___ATTRIBUTE__ */
#else /* LBL_ALIGN */
/*
 * The processor natively handles unaligned loads, so we can just
 * cast the pointer and fetch through it.
 */
static inline u_int16_t
EXTRACT_16BITS(const void *p)
{
	return ((u_int16_t)ntohs(*(const u_int16_t *)(p)));
}

static inline u_int32_t
EXTRACT_32BITS(const void *p)
{
	return ((u_int32_t)ntohl(*(const u_int32_t *)(p)));
}

static inline u_int64_t
EXTRACT_64BITS(const void *p)
{
	return ((u_int64_t)(((u_int64_t)ntohl(*((const u_int32_t *)(p) + 0))) << 32 | \
		((u_int64_t)ntohl(*((const u_int32_t *)(p) + 1))) << 0));
}

#endif /* LBL_ALIGN */
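/*
 * Typical use, sketched here with a hypothetical packet pointer "bp"
 * (a const u_int8_t *) positioned at a TCP header, whose 16-bit source
 * port and 32-bit sequence number are big-endian on the wire:
 *
 *	u_int16_t sport = EXTRACT_16BITS(bp);
 *	u_int32_t seq = EXTRACT_32BITS(bp + 4);
 *
 * These work whether or not "bp" happens to be suitably aligned.
 */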
#define EXTRACT_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2)))
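/*
 * For example, the three bytes { 0x01, 0x02, 0x03 } at "p" yield
 * EXTRACT_24BITS(p) == 0x010203, returned as a u_int32_t.
 */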
/*
 * Macros to extract possibly-unaligned little-endian integral values.
 * XXX - do loads on little-endian machines that support unaligned loads?
 */
#define EXTRACT_LE_8BITS(p) (*(p))
#define EXTRACT_LE_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int16_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 3) << 24 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 7) << 56 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 6) << 48 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 5) << 40 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 4) << 32 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 3) << 24 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 0)))
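/*
 * Worked example: for the same bytes { 0x12, 0x34, 0x56, 0x78 } at "p",
 * EXTRACT_LE_32BITS(p) assembles
 *	(0x78 << 24) | (0x56 << 16) | (0x34 << 8) | 0x12 == 0x78563412
 * i.e. the byte at the lowest address supplies the least-significant
 * octet.
 */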