/*
 * Copyright (c) 1992, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * @(#) $Header: /tcpdump/master/tcpdump/extract.h,v 1.25 2006-01-30 16:20:07 hannes Exp $ (LBL)
 */

/*
 * Macros to extract possibly-unaligned big-endian integral values.
 */
#ifdef LBL_ALIGN
/*
 * The processor doesn't natively handle unaligned loads.
 */
#ifdef HAVE___ATTRIBUTE__
/*
 * We have __attribute__; we assume that means we have __attribute__((packed)).
 * Declare packed structures containing a u_int16_t and a u_int32_t,
 * cast the pointer to point to one of those, and fetch through it;
 * the GCC manual doesn't appear to explicitly say that
 * __attribute__((packed)) causes the compiler to generate unaligned-safe
 * code, but it appears to do so.
 *
 * We do this in case the compiler can generate, for this instruction set,
 * better code to do an unaligned load and pass stuff to "ntohs()" or
 * "ntohl()" than the code to fetch the bytes one at a time and
 * assemble them.  (That might not be the case on a little-endian platform,
 * where "ntohs()" and "ntohl()" might not be done inline.)
 */
typedef struct {
	u_int16_t	val;
} __attribute__((packed)) unaligned_u_int16_t;

typedef struct {
	u_int32_t	val;
} __attribute__((packed)) unaligned_u_int32_t;

static inline u_int16_t
EXTRACT_16BITS(const void *p)
{
	return ((u_int16_t)ntohs(((const unaligned_u_int16_t *)(p))->val));
}

static inline u_int32_t
EXTRACT_32BITS(const void *p)
{
	return ((u_int32_t)ntohl(((const unaligned_u_int32_t *)(p))->val));
}

static inline u_int64_t
EXTRACT_64BITS(const void *p)
{
	return ((u_int64_t)(((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 0)->val)) << 32 |
	    ((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 1)->val)) << 0));
}
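
/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * "bp" is a hypothetical pointer into a packet buffer; it may be
 * unaligned, and each accessor returns the field in host byte order:
 *
 *	u_int16_t sport = EXTRACT_16BITS(bp);		(bytes 0..1)
 *	u_int32_t seq   = EXTRACT_32BITS(bp + 4);	(bytes 4..7)
 */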
#else /* HAVE___ATTRIBUTE__ */
/*
 * We don't have __attribute__, so do unaligned loads of big-endian
 * quantities the hard way - fetch the bytes one at a time and
 * assemble them.
 */
#define EXTRACT_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 0) << 8 | \
	    (u_int16_t)*((const u_int8_t *)(p) + 1)))
#define EXTRACT_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 24 | \
	    (u_int32_t)*((const u_int8_t *)(p) + 1) << 16 | \
	    (u_int32_t)*((const u_int8_t *)(p) + 2) << 8 | \
	    (u_int32_t)*((const u_int8_t *)(p) + 3)))
#define EXTRACT_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 0) << 56 | \
	    (u_int64_t)*((const u_int8_t *)(p) + 1) << 48 | \
	    (u_int64_t)*((const u_int8_t *)(p) + 2) << 40 | \
	    (u_int64_t)*((const u_int8_t *)(p) + 3) << 32 | \
	    (u_int64_t)*((const u_int8_t *)(p) + 4) << 24 | \
	    (u_int64_t)*((const u_int8_t *)(p) + 5) << 16 | \
	    (u_int64_t)*((const u_int8_t *)(p) + 6) << 8 | \
	    (u_int64_t)*((const u_int8_t *)(p) + 7)))
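
/*
 * Worked example (editor's sketch): for bytes p[0] = 0x12 and p[1] = 0x34,
 * EXTRACT_16BITS(p) computes (0x12 << 8) | 0x34 = 0x1234 regardless of the
 * host's own byte order, since each byte is fetched and shifted
 * individually.
 */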
#endif /* HAVE___ATTRIBUTE__ */
#else /* LBL_ALIGN */
/*
 * The processor natively handles unaligned loads, so we can just
 * cast the pointer and fetch through it.
 */
static inline u_int16_t
EXTRACT_16BITS(const void *p)
{
	return ((u_int16_t)ntohs(*(const u_int16_t *)(p)));
}

static inline u_int32_t
EXTRACT_32BITS(const void *p)
{
	return ((u_int32_t)ntohl(*(const u_int32_t *)(p)));
}

static inline u_int64_t
EXTRACT_64BITS(const void *p)
{
	return ((u_int64_t)(((u_int64_t)ntohl(*((const u_int32_t *)(p) + 0))) << 32 |
	    ((u_int64_t)ntohl(*((const u_int32_t *)(p) + 1))) << 0));
}
#endif /* LBL_ALIGN */
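
/*
 * Note on the 64-bit accessors (editor's sketch): there is no portable
 * "ntohll()", so both variants above fetch two big-endian 32-bit words;
 * the word at (p + 0) supplies bits 63..32 and the word at (p + 4)
 * supplies bits 31..0.  For the on-the-wire bytes
 * 00 00 00 01 00 00 00 02, EXTRACT_64BITS() yields 0x0000000100000002.
 */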
#define EXTRACT_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 16 | \
	    (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
	    (u_int32_t)*((const u_int8_t *)(p) + 2)))
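
/*
 * Example (editor's sketch): for bytes p[0] = 0x01, p[1] = 0x02 and
 * p[2] = 0x03, EXTRACT_24BITS(p) computes
 * (0x01 << 16) | (0x02 << 8) | 0x03 = 0x010203, returned as a u_int32_t
 * with the top byte zero.
 */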
/*
 * Macros to extract possibly-unaligned little-endian integral values.
 * XXX - do loads on little-endian machines that support unaligned loads?
 */
#define EXTRACT_LE_8BITS(p) (*(p))
#define EXTRACT_LE_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 1) << 8 | \
	    (u_int16_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 3) << 24 | \
	    (u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
	    (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
	    (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
	    (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
	    (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 7) << 56 | \
	    (u_int64_t)*((const u_int8_t *)(p) + 6) << 48 | \
	    (u_int64_t)*((const u_int8_t *)(p) + 5) << 40 | \
	    (u_int64_t)*((const u_int8_t *)(p) + 4) << 32 | \
	    (u_int64_t)*((const u_int8_t *)(p) + 3) << 24 | \
	    (u_int64_t)*((const u_int8_t *)(p) + 2) << 16 | \
	    (u_int64_t)*((const u_int8_t *)(p) + 1) << 8 | \
	    (u_int64_t)*((const u_int8_t *)(p) + 0)))
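
/*
 * Worked example (editor's sketch): little-endian fields store the
 * least-significant byte first, so for bytes p[0] = 0x34 and p[1] = 0x12,
 * EXTRACT_LE_16BITS(p) computes (0x12 << 8) | 0x34 = 0x1234.
 */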