+ ((u_int32_t)ntohl(((const unaligned_u_int32_t *)(p))->val))
+#define EXTRACT_64BITS(p) \
+ ((u_int64_t)(((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 0)->val)) << 32 | \
+ ((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 1)->val)) << 0))
+
+#else /* HAVE___ATTRIBUTE__ */
+/*
+ * We don't have __attribute__, so do unaligned loads of big-endian
+ * quantities the hard way - fetch the bytes one at a time and
+ * assemble them.
+ */
+#define EXTRACT_16BITS(p) \
+ ((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 0) << 8 | \
+ (u_int16_t)*((const u_int8_t *)(p) + 1)))
+#define EXTRACT_32BITS(p) \
+ ((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 24 | \
+ (u_int32_t)*((const u_int8_t *)(p) + 1) << 16 | \
+ (u_int32_t)*((const u_int8_t *)(p) + 2) << 8 | \
+ (u_int32_t)*((const u_int8_t *)(p) + 3)))
+#define EXTRACT_64BITS(p) \
+ ((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 0) << 56 | \
+ (u_int64_t)*((const u_int8_t *)(p) + 1) << 48 | \
+ (u_int64_t)*((const u_int8_t *)(p) + 2) << 40 | \
+ (u_int64_t)*((const u_int8_t *)(p) + 3) << 32 | \
+ (u_int64_t)*((const u_int8_t *)(p) + 4) << 24 | \
+ (u_int64_t)*((const u_int8_t *)(p) + 5) << 16 | \
+ (u_int64_t)*((const u_int8_t *)(p) + 6) << 8 | \
+ (u_int64_t)*((const u_int8_t *)(p) + 7)))
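+/*
+ * Illustrative note (a sketch, not part of the original change): with p
+ * pointing at the bytes 0x12 0x34, EXTRACT_16BITS(p) evaluates to
+ * (0x12 << 8) | 0x34 == 0x1234, i.e. the big-endian on-the-wire value in
+ * host byte order, regardless of host endianness or pointer alignment.
+ */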
+#endif /* HAVE___ATTRIBUTE__ */
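+/*
+ * Hypothetical usage sketch (illustration only; "cp" is an assumed name
+ * for a possibly unaligned pointer into the captured packet data):
+ *
+ *	u_int16_t type = EXTRACT_16BITS(cp);
+ *	u_int32_t seq = EXTRACT_32BITS(cp + 2);
+ *
+ * Both results come back in host byte order; which macro definitions are
+ * used depends on LBL_ALIGN and HAVE___ATTRIBUTE__, but the interface is
+ * the same in every case.
+ */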
+#else /* LBL_ALIGN */
+/*
+ * The processor natively handles unaligned loads, so we can just
+ * cast the pointer and fetch through it.
+ */