Copied from http://code.google.com/p/lz4/
Code is licensed under a 2-clause BSD license and copyrighted by Yann Collet.
override CPPFLAGS := -DFRONTEND $(CPPFLAGS)
LIBS += $(PTHREAD_LIBS)
-SUBDIRS = snappy
+SUBDIRS = snappy lz4
include $(top_srcdir)/src/backend/common.mk
--- /dev/null
+#-------------------------------------------------------------------------
+#
+# Makefile--
+# Makefile for common/lz4
+#
+# IDENTIFICATION
+# src/common/lz4/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/common/lz4
+top_builddir = ../../..
+include $(top_builddir)/src/Makefile.global
+
+override CPPFLAGS := -Wno-missing-declarations -Wno-missing-prototypes $(CPPFLAGS)
+override CPPFLAGS := -DFRONTEND $(CPPFLAGS)
+
+OBJS = lz4.o xxhash.o
+OBJS_SRV = $(OBJS:%.o=%_srv.o)
+
+include $(top_srcdir)/src/backend/common.mk
+
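+# The %_srv.o targets build backend (server-side) variants of the same
+# sources, compiled without -DFRONTEND so the objects can also be linked
+# into the backend.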
+%_srv.o: %.c %.o
+ $(CC) $(CFLAGS) $(subst -DFRONTEND,, $(CPPFLAGS)) -c $< -o $@
+
+clean distclean maintainer-clean:
+ rm -f $(OBJS_SRV)
+
+all: $(OBJS_SRV)
--- /dev/null
+/*
+ LZ4 - Fast LZ compression algorithm
+ Copyright (C) 2011-2013, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
+ - LZ4 source repository : http://code.google.com/p/lz4/
+*/
+
+/*
+Note : this source file requires "lz4_encoder.h"
+*/
+
+//**************************************
+// Tuning parameters
+//**************************************
+// MEMORY_USAGE :
+// Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+// Increasing memory usage improves compression ratio
+// Reduced memory usage can improve speed, due to cache effects
+// Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
+#define MEMORY_USAGE 14
+
+// HEAPMODE :
+// Select how the default compression function will allocate memory for its hash table :
+// on the stack (0 : default, fastest), or on the heap (1 : requires memory allocation via malloc()).
+// Default allocation strategy is to use stack (HEAPMODE 0)
+// Note : explicit functions *_stack* and *_heap* are unaffected by this setting
+#define HEAPMODE 0
+
+// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
+// This will provide a small performance boost for big-endian CPUs, but the resulting compressed stream will be incompatible with little-endian CPUs.
+// You can set this option to 1 in situations where data will remain within a closed environment.
+// This option is useless on little-endian CPUs (such as x86).
+//#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1
+
+
+
+//**************************************
+// CPU Feature Detection
+//**************************************
+// 32 or 64 bits ?
+#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
+ || defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) \
+ || defined(__64BIT__) || defined(_LP64) || defined(__LP64__) \
+ || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) ) // Detects 64 bits mode
+# define LZ4_ARCH64 1
+#else
+# define LZ4_ARCH64 0
+#endif
+
+// Little Endian or Big Endian ?
+// Overwrite the #define below if you know your architecture endianness
+#if defined (__GLIBC__)
+# include <endian.h>
+# if (__BYTE_ORDER == __BIG_ENDIAN)
+# define LZ4_BIG_ENDIAN 1
+# endif
+#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
+# define LZ4_BIG_ENDIAN 1
+#elif defined(__sparc) || defined(__sparc__) \
+ || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
+ || defined(__hpux) || defined(__hppa) \
+ || defined(_MIPSEB) || defined(__s390__)
+# define LZ4_BIG_ENDIAN 1
+#else
+// Little Endian assumed. PDP Endian and other very rare endian formats are unsupported.
+#endif
+
+// Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
+// For other CPUs, the compiler will be more cautious, and insert extra code to ensure aligned access is respected.
+// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance.
+#if defined(__ARM_FEATURE_UNALIGNED)
+# define LZ4_FORCE_UNALIGNED_ACCESS 1
+#endif
+
+// Define this parameter if your target system or compiler does not support hardware bit count
+#if defined(_MSC_VER) && defined(_WIN32_WCE) // Visual Studio for Windows CE does not support Hardware bit count
+# define LZ4_FORCE_SW_BITCOUNT
+#endif
+
+
+//**************************************
+// Compiler Options
+//**************************************
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
+/* "restrict" is a known keyword */
+#else
+# define restrict // Disable restrict
+#endif
+
+#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+#ifdef _MSC_VER // Visual Studio
+# include <intrin.h> // For Visual 2005
+# if LZ4_ARCH64 // 64-bit
+# pragma intrinsic(_BitScanForward64) // For Visual 2005
+# pragma intrinsic(_BitScanReverse64) // For Visual 2005
+# else
+# pragma intrinsic(_BitScanForward) // For Visual 2005
+# pragma intrinsic(_BitScanReverse) // For Visual 2005
+# endif
+# pragma warning(disable : 4127) // disable: C4127: conditional expression is constant
+#endif
+
+#ifdef _MSC_VER
+# define lz4_bswap16(x) _byteswap_ushort(x)
+#else
+# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
+#endif
+
+#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
+# define expect(expr,value) (__builtin_expect ((expr),(value)) )
+#else
+# define expect(expr,value) (expr)
+#endif
+
+#define likely(expr) expect((expr) != 0, 1)
+#define unlikely(expr) expect((expr) != 0, 0)
+
+
+//**************************************
+// Includes
+//**************************************
+#include <stdlib.h> // for malloc
+#include <string.h> // for memset
+#include "common/lz4/lz4.h"
+
+
+//**************************************
+// Basic Types
+//**************************************
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+#else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+#endif
+
+#if defined(__GNUC__) && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
+# define _PACKED __attribute__ ((packed))
+#else
+# define _PACKED
+#endif
+
+#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
+# pragma pack(push, 1)
+#endif
+
+typedef struct _U16_S { U16 v; } _PACKED U16_S;
+typedef struct _U32_S { U32 v; } _PACKED U32_S;
+typedef struct _U64_S { U64 v; } _PACKED U64_S;
+
+#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
+# pragma pack(pop)
+#endif
+
+#define A64(x) (((U64_S *)(x))->v)
+#define A32(x) (((U32_S *)(x))->v)
+#define A16(x) (((U16_S *)(x))->v)
+
+
+//**************************************
+// Constants
+//**************************************
+#define HASHTABLESIZE (1 << MEMORY_USAGE)
+
+#define MINMATCH 4
+
+#define COPYLENGTH 8
+#define LASTLITERALS 5
+#define MFLIMIT (COPYLENGTH+MINMATCH)
+#define MINLENGTH (MFLIMIT+1)
+
+#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1))
+#define SKIPSTRENGTH 6 // Increasing this value will make the compression run slower on incompressible data
+
+#define MAXD_LOG 16
+#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+
+#define ML_BITS 4
+#define ML_MASK ((1U<<ML_BITS)-1)
+#define RUN_BITS (8-ML_BITS)
+#define RUN_MASK ((1U<<RUN_BITS)-1)
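+
+// Sequence token layout (for reference) : each sequence starts with a one-byte
+// token whose high RUN_BITS hold the literal run length and whose low ML_BITS
+// hold the match length minus MINMATCH. A field equal to its mask (RUN_MASK or
+// ML_MASK) is extended by additional bytes, each adding up to 255, until a
+// byte < 255 terminates the length.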
+
+
+//**************************************
+// Architecture-specific macros
+//**************************************
+#if LZ4_ARCH64 // 64-bit
+# define STEPSIZE 8
+# define UARCH U64
+# define AARCH A64
+# define LZ4_COPYSTEP(s,d) A64(d) = A64(s); d+=8; s+=8;
+# define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d)
+# define LZ4_SECURECOPY(s,d,e) if (d<e) LZ4_WILDCOPY(s,d,e)
+# define HTYPE U32
+# define INITBASE(base) const BYTE* const base = ip
+#else // 32-bit
+# define STEPSIZE 4
+# define UARCH U32
+# define AARCH A32
+# define LZ4_COPYSTEP(s,d) A32(d) = A32(s); d+=4; s+=4;
+# define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);
+# define LZ4_SECURECOPY LZ4_WILDCOPY
+# define HTYPE const BYTE*
+# define INITBASE(base) const int base = 0
+#endif
+
+#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
+# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
+# define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
+#else // Little Endian
+# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
+# define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
+#endif
+
+
+//**************************************
+// Macros
+//**************************************
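+// Note : LZ4_WILDCOPY copies in COPYLENGTH-byte packets and may write up to
+// COPYLENGTH-1 bytes past 'e' ; the MFLIMIT/LASTLITERALS margins make this
+// overrun safe.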
+#define LZ4_WILDCOPY(s,d,e) do { LZ4_COPYPACKET(s,d) } while (d<e);
+#define LZ4_BLINDCOPY(s,d,l) { BYTE* e=(d)+(l); LZ4_WILDCOPY(s,d,e); d=e; }
+
+
+//****************************
+// Private functions
+//****************************
+#if LZ4_ARCH64
+
+static inline int LZ4_NbCommonBytes (register U64 val)
+{
+#if defined(LZ4_BIG_ENDIAN)
+ #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanReverse64( &r, val );
+ return (int)(r>>3);
+ #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_clzll(val) >> 3);
+ #else
+ int r;
+ if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+ #endif
+#else
+ #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanForward64( &r, val );
+ return (int)(r>>3);
+ #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_ctzll(val) >> 3);
+ #else
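+    // Software fallback : (val & -val) isolates the lowest set bit ; multiplying
+    // by a De Bruijn constant and shifting maps each bit position to a unique
+    // table index, which here stores the position pre-divided into a byte count.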
+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+ return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58];
+ #endif
+#endif
+}
+
+#else
+
+static inline int LZ4_NbCommonBytes (register U32 val)
+{
+#if defined(LZ4_BIG_ENDIAN)
+# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanReverse( &r, val );
+ return (int)(r>>3);
+# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_clz(val) >> 3);
+# else
+ int r;
+ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+ r += (!val);
+ return r;
+# endif
+#else
+# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r;
+ _BitScanForward( &r, val );
+ return (int)(r>>3);
+# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_ctz(val) >> 3);
+# else
+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+# endif
+#endif
+}
+
+#endif
+
+
+
+//******************************
+// Compression functions
+//******************************
+
+/*
+int LZ4_compress_stack(
+ const char* source,
+ char* dest,
+ int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest'
+*/
+#define FUNCTION_NAME LZ4_compress_stack
+#include "lz4_encoder.h"
+
+
+/*
+int LZ4_compress_stack_limitedOutput(
+ const char* source,
+ char* dest,
+ int inputSize,
+ int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+If it cannot achieve it, compression will stop, and result of the function will be zero.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress_stack_limitedOutput
+#define LIMITED_OUTPUT
+#include "lz4_encoder.h"
+
+
+/*
+int LZ4_compress64k_stack(
+ const char* source,
+ char* dest,
+ int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+This function compresses better than LZ4_compress_stack(), on the condition that
+'inputSize' is less than LZ4_64KLIMIT ; otherwise the function will fail.
+Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest', or 0 if compression fails
+*/
+#define FUNCTION_NAME LZ4_compress64k_stack
+#define COMPRESS_64K
+#include "lz4_encoder.h"
+
+
+/*
+int LZ4_compress64k_stack_limitedOutput(
+ const char* source,
+ char* dest,
+ int inputSize,
+ int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+This function compresses better than LZ4_compress_stack_limitedOutput(), on the condition that
+'inputSize' is less than LZ4_64KLIMIT ; otherwise the function will fail.
+If it cannot achieve it, compression will stop, and result of the function will be zero.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress64k_stack_limitedOutput
+#define COMPRESS_64K
+#define LIMITED_OUTPUT
+#include "lz4_encoder.h"
+
+
+/*
+void* LZ4_create();
+int LZ4_free(void* ctx);
+
+Used to allocate and free the hashTable memory
+to be used by the LZ4_compress_heap* family of functions.
+LZ4_create() returns NULL if memory allocation fails.
+*/
+void* LZ4_create() { return malloc(HASHTABLESIZE); }
+int LZ4_free(void* ctx) { free(ctx); return 0; }
+
+
+/*
+int LZ4_compress_heap(
+ void* ctx,
+ const char* source,
+ char* dest,
+ int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+The memory used for compression must be created by LZ4_create() and provided by pointer 'ctx'.
+Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest'
+*/
+#define FUNCTION_NAME LZ4_compress_heap
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"
+
+
+/*
+int LZ4_compress_heap_limitedOutput(
+ void* ctx,
+ const char* source,
+ char* dest,
+ int inputSize,
+ int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+If it cannot achieve it, compression will stop, and result of the function will be zero.
+The memory used for compression must be created by LZ4_create() and provided by pointer 'ctx'.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress_heap_limitedOutput
+#define LIMITED_OUTPUT
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"
+
+
+/*
+int LZ4_compress64k_heap(
+ void* ctx,
+ const char* source,
+ char* dest,
+ int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+The memory used for compression must be created by LZ4_create() and provided by pointer 'ctx'.
+'inputSize' must be less than LZ4_64KLIMIT, or the function will fail.
+Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest'
+*/
+#define FUNCTION_NAME LZ4_compress64k_heap
+#define COMPRESS_64K
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"
+
+
+/*
+int LZ4_compress64k_heap_limitedOutput(
+ void* ctx,
+ const char* source,
+ char* dest,
+ int inputSize,
+ int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+If it cannot achieve it, compression will stop, and result of the function will be zero.
+The memory used for compression must be created by LZ4_create() and provided by pointer 'ctx'.
+'inputSize' must be less than LZ4_64KLIMIT, or the function will fail.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress64k_heap_limitedOutput
+#define COMPRESS_64K
+#define LIMITED_OUTPUT
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"
+
+
+int LZ4_compress(const char* source, char* dest, int inputSize)
+{
+#if HEAPMODE
+ void* ctx = LZ4_create();
+ int result;
+ if (ctx == NULL) return 0; // Failed allocation => compression not done
+ if (inputSize < LZ4_64KLIMIT)
+ result = LZ4_compress64k_heap(ctx, source, dest, inputSize);
+ else result = LZ4_compress_heap(ctx, source, dest, inputSize);
+ LZ4_free(ctx);
+ return result;
+#else
+ if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack(source, dest, inputSize);
+ return LZ4_compress_stack(source, dest, inputSize);
+#endif
+}
+
+
+int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+#if HEAPMODE
+ void* ctx = LZ4_create();
+ int result;
+ if (ctx == NULL) return 0; // Failed allocation => compression not done
+ if (inputSize < LZ4_64KLIMIT)
+ result = LZ4_compress64k_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize);
+ else result = LZ4_compress_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize);
+ LZ4_free(ctx);
+ return result;
+#else
+ if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack_limitedOutput(source, dest, inputSize, maxOutputSize);
+ return LZ4_compress_stack_limitedOutput(source, dest, inputSize, maxOutputSize);
+#endif
+}
+
+
+//****************************
+// Decompression functions
+//****************************
+
+typedef enum { noPrefix = 0, withPrefix = 1 } prefix64k_directive;
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } end_directive;
+typedef enum { full = 0, partial = 1 } exit_directive;
+
+
+// This generic decompression function covers all use cases.
+// It shall be instantiated several times, using different sets of directives.
+// Note that it is essential that this generic function really be inlined,
+// in order to remove useless branches during compilation optimisation.
+static inline int LZ4_decompress_generic(
+ const char* source,
+ char* dest,
+                 int inputSize,          // used only when endOnInput==endOnInputSize
+ int outputSize, // OutputSize must be != 0; if endOnInput==endOnInputSize, this value is the max size of Output Buffer.
+
+ int endOnInput, // endOnOutputSize, endOnInputSize
+ int prefix64k, // noPrefix, withPrefix
+ int partialDecoding, // full, partial
+ int targetOutputSize // only used if partialDecoding==partial
+ )
+{
+ // Local Variables
+ const BYTE* restrict ip = (const BYTE*) source;
+ const BYTE* ref;
+ const BYTE* const iend = ip + inputSize;
+
+ BYTE* op = (BYTE*) dest;
+ BYTE* const oend = op + outputSize;
+ BYTE* cpy;
+ BYTE* oexit = op + targetOutputSize;
+
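+    // Lookup tables for the overlapping-match copy below : when the match
+    // offset (op-ref) is smaller than STEPSIZE, these give the pointer
+    // adjustments that let the copy proceed in word-sized steps anyway.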
+ size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
+#if LZ4_ARCH64
+ size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
+#endif
+
+
+ // Special case
+ if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT; // targetOutputSize too large, better decode everything
+ if unlikely(outputSize==0) goto _output_error; // Empty output buffer
+
+
+ // Main Loop
+ while (1)
+ {
+ unsigned token;
+ size_t length;
+
+ // get runlength
+ token = *ip++;
+ if ((length=(token>>ML_BITS)) == RUN_MASK)
+ {
+ unsigned s=255;
+ while (((endOnInput)?ip<iend:1) && (s==255))
+ {
+ s = *ip++;
+ length += s;
+ }
+ }
+
+ // copy literals
+ cpy = op+length;
+ if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
+ || ((!endOnInput) && (cpy>oend-COPYLENGTH)))
+ {
+ if (partialDecoding)
+ {
+ if (cpy > oend) goto _output_error; // Error : write attempt beyond end of output buffer
+ if ((endOnInput) && (ip+length > iend)) goto _output_error; // Error : read attempt beyond end of input buffer
+ }
+ else
+ {
+ if ((!endOnInput) && (cpy != oend)) goto _output_error; // Error : block decoding must stop exactly there, due to parsing restrictions
+ if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; // Error : not enough place for another match (min 4) + 5 literals
+ }
+ memcpy(op, ip, length);
+ ip += length;
+ op += length;
+ break; // Necessarily EOF, due to parsing restrictions
+ }
+ LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
+
+ // get offset
+ LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
+ if ((prefix64k==noPrefix) && unlikely(ref < (BYTE* const)dest)) goto _output_error; // Error : offset outside destination buffer
+
+ // get matchlength
+ if ((length=(token&ML_MASK)) == ML_MASK)
+ {
+ while (endOnInput ? ip<iend-(LASTLITERALS+1) : 1) // A minimum nb of input bytes must remain for LASTLITERALS + token
+ {
+ unsigned s = *ip++;
+ length += s;
+ if (s==255) continue;
+ break;
+ }
+ }
+
+ // copy repeated sequence
+ if unlikely((op-ref)<STEPSIZE)
+ {
+#if LZ4_ARCH64
+ size_t dec64 = dec64table[op-ref];
+#else
+ const size_t dec64 = 0;
+#endif
+ op[0] = ref[0];
+ op[1] = ref[1];
+ op[2] = ref[2];
+ op[3] = ref[3];
+ op += 4, ref += 4; ref -= dec32table[op-ref];
+ A32(op) = A32(ref);
+ op += STEPSIZE-4; ref -= dec64;
+ } else { LZ4_COPYSTEP(ref,op); }
+ cpy = op + length - (STEPSIZE-4);
+
+ if unlikely(cpy>oend-(COPYLENGTH)-(STEPSIZE-4))
+ {
+ if (cpy > oend-LASTLITERALS) goto _output_error; // Error : last 5 bytes must be literals
+ LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
+ while(op<cpy) *op++=*ref++;
+ op=cpy;
+ continue;
+ }
+ LZ4_WILDCOPY(ref, op, cpy);
+ op=cpy; // correction
+ }
+
+ // end of decoding
+ if (endOnInput)
+ return (int) (((char*)op)-dest); // Nb of output bytes decoded
+ else
+ return (int) (((char*)ip)-source); // Nb of input bytes read
+
+ // Overflow error detected
+_output_error:
+ return (int) (-(((char*)ip)-source))-1;
+}
+
+
+int LZ4_decompress_safe(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, full, 0);
+}
+
+int LZ4_decompress_fast(const char* source, char* dest, int outputSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, noPrefix, full, 0);
+}
+
+int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, withPrefix, full, 0);
+}
+
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int outputSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, withPrefix, full, 0);
+}
+
+int LZ4_decompress_safe_partial(const char* source, char* dest, int inputSize, int targetOutputSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, partial, targetOutputSize);
+}
--- /dev/null
+/*
+ LZ4 Encoder - Part of LZ4 compression algorithm
+ Copyright (C) 2011-2013, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
+ - LZ4 source repository : http://code.google.com/p/lz4/
+*/
+
+/* lz4_encoder.h must be included into lz4.c
+   The objective of this file is to create a single LZ4 compression function source
+   which will be instantiated multiple times with minor variations
+   depending on a set of #define.
+*/
+
+
+
+//****************************
+// Check required defines
+//****************************
+
+#ifndef FUNCTION_NAME
+# error "FUNCTION_NAME is not defined"
+#endif
+
+
+//****************************
+// Local definitions
+//****************************
+
+#ifdef COMPRESS_64K
+# define HASHLOG (MEMORY_USAGE-1)
+# define CURRENT_H_TYPE U16
+# define CURRENTBASE(base) const BYTE* const base = ip
+#else
+# define HASHLOG (MEMORY_USAGE-2)
+# define CURRENT_H_TYPE HTYPE
+# define CURRENTBASE(base) INITBASE(base)
+#endif
+
+#define HASHTABLE_NBCELLS (1U<<HASHLOG)
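+// Multiplicative (Fibonacci) hashing : 2654435761 is the 32-bit golden-ratio
+// prime, and the shift keeps the top HASHLOG bits of the product as the index.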
+#define LZ4_HASH(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASHLOG))
+#define LZ4_HASHVALUE(p) LZ4_HASH(A32(p))
+
+
+
+//****************************
+// Function code
+//****************************
+
+int FUNCTION_NAME(
+#ifdef USE_HEAPMEMORY
+ void* ctx,
+#endif
+ const char* source,
+ char* dest,
+ int inputSize
+#ifdef LIMITED_OUTPUT
+ ,int maxOutputSize
+#endif
+ )
+{
+#ifdef USE_HEAPMEMORY
+ CURRENT_H_TYPE* HashTable = (CURRENT_H_TYPE*)ctx;
+#else
+ CURRENT_H_TYPE HashTable[HASHTABLE_NBCELLS] = {0};
+#endif
+
+ const BYTE* ip = (BYTE*) source;
+ CURRENTBASE(base);
+ const BYTE* anchor = ip;
+ const BYTE* const iend = ip + inputSize;
+ const BYTE* const mflimit = iend - MFLIMIT;
+#define matchlimit (iend - LASTLITERALS)
+
+ BYTE* op = (BYTE*) dest;
+#ifdef LIMITED_OUTPUT
+ BYTE* const oend = op + maxOutputSize;
+#endif
+
+ int length;
+ const int skipStrength = SKIPSTRENGTH;
+ U32 forwardH;
+
+
+ // Init
+ if (inputSize<MINLENGTH) goto _last_literals;
+#ifdef COMPRESS_64K
+ if (inputSize>LZ4_64KLIMIT) return 0; // Size too large (not within 64K limit)
+#endif
+#ifdef USE_HEAPMEMORY
+ memset((void*)HashTable, 0, HASHTABLESIZE);
+#endif
+
+ // First Byte
+ HashTable[LZ4_HASHVALUE(ip)] = (CURRENT_H_TYPE)(ip - base);
+ ip++; forwardH = LZ4_HASHVALUE(ip);
+
+ // Main Loop
+ for ( ; ; )
+ {
+ int findMatchAttempts = (1U << skipStrength) + 3;
+ const BYTE* forwardIp = ip;
+ const BYTE* ref;
+ BYTE* token;
+
+ // Find a match
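+        // Search acceleration : after (1<<skipStrength) unsuccessful attempts,
+        // the forward step grows, so incompressible regions are skipped over faster.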
+ do {
+ U32 h = forwardH;
+ int step = findMatchAttempts++ >> skipStrength;
+ ip = forwardIp;
+ forwardIp = ip + step;
+
+ if unlikely(forwardIp > mflimit) { goto _last_literals; }
+
+ forwardH = LZ4_HASHVALUE(forwardIp);
+ ref = base + HashTable[h];
+ HashTable[h] = (CURRENT_H_TYPE)(ip - base);
+
+ } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
+
+ // Catch up
+ while ((ip>anchor) && (ref>(BYTE*)source) && unlikely(ip[-1]==ref[-1])) { ip--; ref--; }
+
+ // Encode Literal length
+ length = (int)(ip - anchor);
+ token = op++;
+#ifdef LIMITED_OUTPUT
+ if unlikely(op + length + (2 + 1 + LASTLITERALS) + (length>>8) > oend) return 0; // Check output limit
+#endif
+ if (length>=(int)RUN_MASK)
+ {
+ int len = length-RUN_MASK;
+ *token=(RUN_MASK<<ML_BITS);
+ for(; len >= 255 ; len-=255) *op++ = 255;
+ *op++ = (BYTE)len;
+ }
+ else *token = (BYTE)(length<<ML_BITS);
+
+ // Copy Literals
+ LZ4_BLINDCOPY(anchor, op, length);
+
+_next_match:
+ // Encode Offset
+ LZ4_WRITE_LITTLEENDIAN_16(op,(U16)(ip-ref));
+
+ // Start Counting
+ ip+=MINMATCH; ref+=MINMATCH; // MinMatch already verified
+ anchor = ip;
+ while likely(ip<matchlimit-(STEPSIZE-1))
+ {
+ UARCH diff = AARCH(ref) ^ AARCH(ip);
+ if (!diff) { ip+=STEPSIZE; ref+=STEPSIZE; continue; }
+ ip += LZ4_NbCommonBytes(diff);
+ goto _endCount;
+ }
+ if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }
+ if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
+ if ((ip<matchlimit) && (*ref == *ip)) ip++;
+_endCount:
+
+ // Encode MatchLength
+ length = (int)(ip - anchor);
+#ifdef LIMITED_OUTPUT
+ if unlikely(op + (1 + LASTLITERALS) + (length>>8) > oend) return 0; // Check output limit
+#endif
+ if (length>=(int)ML_MASK)
+ {
+ *token += ML_MASK;
+ length -= ML_MASK;
+ for (; length > 509 ; length-=510) { *op++ = 255; *op++ = 255; }
+ if (length >= 255) { length-=255; *op++ = 255; }
+ *op++ = (BYTE)length;
+ }
+ else *token += (BYTE)length;
+
+ // Test end of chunk
+ if (ip > mflimit) { anchor = ip; break; }
+
+ // Fill table
+ HashTable[LZ4_HASHVALUE(ip-2)] = (CURRENT_H_TYPE)(ip - 2 - base);
+
+ // Test next position
+ ref = base + HashTable[LZ4_HASHVALUE(ip)];
+ HashTable[LZ4_HASHVALUE(ip)] = (CURRENT_H_TYPE)(ip - base);
+ if ((ref >= ip - MAX_DISTANCE) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; }
+
+ // Prepare next loop
+ anchor = ip++;
+ forwardH = LZ4_HASHVALUE(ip);
+ }
+
+_last_literals:
+ // Encode Last Literals
+ {
+ int lastRun = (int)(iend - anchor);
+#ifdef LIMITED_OUTPUT
+ if (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) return 0; // Check output limit
+#endif
+ if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun >= 255 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
+ else *op++ = (BYTE)(lastRun<<ML_BITS);
+ memcpy(op, anchor, iend - anchor);
+ op += iend-anchor;
+ }
+
+ // End
+ return (int) (((char*)op)-dest);
+}
+
+
+
+//****************************
+// Clean defines
+//****************************
+
+// Required defines
+#undef FUNCTION_NAME
+
+// Locally Generated
+#undef HASHLOG
+#undef HASHTABLE_NBCELLS
+#undef LZ4_HASH
+#undef LZ4_HASHVALUE
+#undef CURRENT_H_TYPE
+#undef CURRENTBASE
+
+// Optional defines
+#ifdef LIMITED_OUTPUT
+#undef LIMITED_OUTPUT
+#endif
+
+#ifdef USE_HEAPMEMORY
+#undef USE_HEAPMEMORY
+#endif
--- /dev/null
+/*
+xxHash - Fast Hash algorithm
+Copyright (C) 2012-2013, Yann Collet.
+BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+You can contact the author at :
+- xxHash source repository : http://code.google.com/p/xxhash/
+*/
+
+
+
+//**************************************
+// Tuning parameters
+//**************************************
+// Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
+// For other CPUs, the compiler will be more cautious, and insert extra code to ensure aligned access is respected.
+// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance.
+// You can also enable this parameter if you know your input data will always be aligned (boundaries of 4, for U32).
+#if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+# define XXH_USE_UNALIGNED_ACCESS 1
+#endif
+
+// XXH_ACCEPT_NULL_INPUT_POINTER :
+// If the input pointer is a null pointer, the default behavior of xxHash is to crash, since it is a bad input.
+// If this option is enabled, xxHash output for a null input pointer will be the same as for a zero-length input.
+// This option has a very small performance cost (only measurable on small inputs).
+// By default, this option is disabled. To enable it, uncomment the define below :
+//#define XXH_ACCEPT_NULL_INPUT_POINTER 1
+
+// XXH_FORCE_NATIVE_FORMAT :
+// By default, the xxHash library provides endian-independent hash values, based on little-endian convention.
+// Results are therefore identical for little-endian and big-endian CPUs.
+// This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
+// Should endian-independence be of no importance for your application, you may uncomment the #define below.
+// It will improve speed for big-endian CPUs.
+// This option has no impact on little-endian CPUs.
+//#define XXH_FORCE_NATIVE_FORMAT 1
+
+
+//**************************************
+// Compiler Options
+//**************************************
+#if defined(_MSC_VER) && !defined(__cplusplus) // Visual Studio
+# define inline __inline // Visual C is not C99, but supports some kind of inline
+#endif
+
+
+//**************************************
+// Includes & Memory related functions
+//**************************************
+#include "xxhash.h"
+// Modify the local functions below should you wish to use some other memory related routines
+// for malloc(), free()
+#include <stdlib.h>
+static inline void* XXH_malloc(size_t s) { return malloc(s); }
+static inline void XXH_free (void* p) { free(p); }
+// for memcpy()
+#include <string.h>
+static inline void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
+
+
+//**************************************
+// CPU Feature Detection
+//**************************************
+// Little Endian or Big Endian ?
+// You can overwrite the #define below if you know your architecture endianness
+#if defined(XXH_FORCE_NATIVE_FORMAT) && (XXH_FORCE_NATIVE_FORMAT==1)
+// Force native format. The result will be endian-dependent.
+# define XXH_BIG_ENDIAN 0
+#elif defined (__GLIBC__)
+# include <endian.h>
+# if (__BYTE_ORDER == __BIG_ENDIAN)
+# define XXH_BIG_ENDIAN 1
+# endif
+#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
+# define XXH_BIG_ENDIAN 1
+#elif defined(__sparc) || defined(__sparc__) \
+ || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
+ || defined(__hpux) || defined(__hppa) \
+ || defined(_MIPSEB) || defined(__s390__)
+# define XXH_BIG_ENDIAN 1
+#endif
+
+#if !defined(XXH_BIG_ENDIAN)
+// Little Endian assumed. PDP Endian and other very rare endian formats are unsupported.
+# define XXH_BIG_ENDIAN 0
+#endif
+
+
+//**************************************
+// Basic Types
+//**************************************
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+#else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+#endif
+
+#if defined(__GNUC__) && !defined(XXH_USE_UNALIGNED_ACCESS)
+# define _PACKED __attribute__ ((packed))
+#else
+# define _PACKED
+#endif
+
+#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
+# pragma pack(push, 1)
+#endif
+
+typedef struct _U32_S { U32 v; } _PACKED U32_S;
+
+#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
+# pragma pack(pop)
+#endif
+
+#define A32(x) (((U32_S *)(x))->v)
+
+
+//***************************************
+// Compiler-specific Functions and Macros
+//***************************************
+#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+// Note : although _rotl exists for minGW (GCC under windows), performance seems poor
+#if defined(_MSC_VER)
+# define XXH_rotl32(x,r) _rotl(x,r)
+#else
+# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+#endif
+
+#if defined(_MSC_VER) // Visual Studio
+# define XXH_swap32 _byteswap_ulong
+#elif GCC_VERSION >= 403
+# define XXH_swap32 __builtin_bswap32
+#else
+static inline U32 XXH_swap32 (U32 x) {
+ return ((x << 24) & 0xff000000 ) |
+ ((x << 8) & 0x00ff0000 ) |
+ ((x >> 8) & 0x0000ff00 ) |
+ ((x >> 24) & 0x000000ff );}
+#endif
+
+
+//**************************************
+// Constants
+//**************************************
+#define PRIME32_1 2654435761U
+#define PRIME32_2 2246822519U
+#define PRIME32_3 3266489917U
+#define PRIME32_4 668265263U
+#define PRIME32_5 374761393U
+
+
+//**************************************
+// Macros
+//**************************************
+#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(!!(c)) }; } // use only *after* variable declarations
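+// (when 'c' is false, 1/(!!(c)) divides by zero, turning a failed check into a compile-time error)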
+#define XXH_LE32(p) (XXH_BIG_ENDIAN ? XXH_swap32(A32(p)) : A32(p))
+#define XXH_alignedLE32(p) (XXH_BIG_ENDIAN ? XXH_swap32(*(U32*)(p)) : *(U32*)(p))
+
+
+
+//****************************
+// Simple Hash Functions
+//****************************
+
+#if !defined(XXH_USE_UNALIGNED_ACCESS)
+// Specific version for 32-bit-aligned input. Useless on CPUs supporting unaligned access.
+static U32 XXH32_alignedInput(const void* input, int len, U32 seed)
+{
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;
+ U32 h32;
+
+ if (len>=16)
+ {
+ const BYTE* const limit = bEnd - 16;
+ U32 v1 = seed + PRIME32_1 + PRIME32_2;
+ U32 v2 = seed + PRIME32_2;
+ U32 v3 = seed + 0;
+ U32 v4 = seed - PRIME32_1;
+ do
+ {
+ v1 += XXH_alignedLE32(p) * PRIME32_2; v1 = XXH_rotl32(v1, 13); v1 *= PRIME32_1; p+=4;
+ v2 += XXH_alignedLE32(p) * PRIME32_2; v2 = XXH_rotl32(v2, 13); v2 *= PRIME32_1; p+=4;
+ v3 += XXH_alignedLE32(p) * PRIME32_2; v3 = XXH_rotl32(v3, 13); v3 *= PRIME32_1; p+=4;
+ v4 += XXH_alignedLE32(p) * PRIME32_2; v4 = XXH_rotl32(v4, 13); v4 *= PRIME32_1; p+=4;
+ } while (p<=limit);
+ h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+ }
+ else { h32 = seed + PRIME32_5; }
+ h32 += (U32) len;
+ while (p<=bEnd-4)
+ {
+ h32 += XXH_alignedLE32(p) * PRIME32_3;
+ h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
+ p+=4;
+ }
+ while (p<bEnd)
+ {
+ h32 += (*p) * PRIME32_5;
+ h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
+ p++;
+ }
+ h32 ^= h32 >> 15;
+ h32 *= PRIME32_2;
+ h32 ^= h32 >> 13;
+ h32 *= PRIME32_3;
+ h32 ^= h32 >> 16;
+ return h32;
+}
+#endif
+
+U32 XXH32(const void* input, int len, U32 seed)
+{
+#if 0
+ // Simple version, good for code maintenance, but unfortunately slow for small inputs
+ void* state = XXH32_init(seed);
+ XXH32_update(state, input, len);
+ return XXH32_digest(state);
+#else
+
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;
+ U32 h32;
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+ if (p==NULL) { len=0; p=(const BYTE*)16; }
+#endif
+
+#if !defined(XXH_USE_UNALIGNED_ACCESS)
+ if ((((U32)p) & 3) == 0) return XXH32_alignedInput(input, len, seed); // Input is aligned, let's leverage the speed advantage
+#endif
+
+ if (len>=16)
+ {
+ const BYTE* const limit = bEnd - 16;
+ U32 v1 = seed + PRIME32_1 + PRIME32_2;
+ U32 v2 = seed + PRIME32_2;
+ U32 v3 = seed + 0;
+ U32 v4 = seed - PRIME32_1;
+
+ do
+ {
+ v1 += XXH_LE32(p) * PRIME32_2; v1 = XXH_rotl32(v1, 13); v1 *= PRIME32_1; p+=4;
+ v2 += XXH_LE32(p) * PRIME32_2; v2 = XXH_rotl32(v2, 13); v2 *= PRIME32_1; p+=4;
+ v3 += XXH_LE32(p) * PRIME32_2; v3 = XXH_rotl32(v3, 13); v3 *= PRIME32_1; p+=4;
+ v4 += XXH_LE32(p) * PRIME32_2; v4 = XXH_rotl32(v4, 13); v4 *= PRIME32_1; p+=4;
+ } while (p<=limit);
+
+ h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+ }
+ else
+ {
+ h32 = seed + PRIME32_5;
+ }
+
+ h32 += (U32) len;
+
+ while (p<=bEnd-4)
+ {
+ h32 += XXH_LE32(p) * PRIME32_3;
+ h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
+ p+=4;
+ }
+
+ while (p<bEnd)
+ {
+ h32 += (*p) * PRIME32_5;
+ h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
+ p++;
+ }
+
+ h32 ^= h32 >> 15;
+ h32 *= PRIME32_2;
+ h32 ^= h32 >> 13;
+ h32 *= PRIME32_3;
+ h32 ^= h32 >> 16;
+
+ return h32;
+
+#endif
+}
+
+
+//****************************
+// Advanced Hash Functions
+//****************************
+
+struct XXH_state32_t
+{
+ U64 total_len;
+ U32 seed;
+ U32 v1;
+ U32 v2;
+ U32 v3;
+ U32 v4;
+ int memsize;
+ char memory[16];
+};
+
+
+int XXH32_sizeofState()
+{
+ XXH_STATIC_ASSERT(XXH32_SIZEOFSTATE >= sizeof(struct XXH_state32_t)); // A compilation error here means XXH32_SIZEOFSTATE is not large enough
+ return sizeof(struct XXH_state32_t);
+}
+
+
+XXH_errorcode XXH32_resetState(void* state_in, U32 seed)
+{
+ struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
+ state->seed = seed;
+ state->v1 = seed + PRIME32_1 + PRIME32_2;
+ state->v2 = seed + PRIME32_2;
+ state->v3 = seed + 0;
+ state->v4 = seed - PRIME32_1;
+ state->total_len = 0;
+ state->memsize = 0;
+ return XXH_OK;
+}
+
+
+void* XXH32_init (U32 seed)
+{
+ void* state = XXH_malloc (sizeof(struct XXH_state32_t));
+ XXH32_resetState(state, seed);
+ return state;
+}
+
+
+XXH_errorcode XXH32_update (void* state_in, const void* input, int len)
+{
+ struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+ if (input==NULL) return XXH_ERROR;
+#endif
+
+ state->total_len += len;
+
+ if (state->memsize + len < 16) // fill in tmp buffer
+ {
+ XXH_memcpy(state->memory + state->memsize, input, len);
+ state->memsize += len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) // some data left from previous update
+ {
+ XXH_memcpy(state->memory + state->memsize, input, 16-state->memsize);
+ {
+ const U32* p32 = (const U32*)state->memory;
+ state->v1 += XXH_LE32(p32) * PRIME32_2; state->v1 = XXH_rotl32(state->v1, 13); state->v1 *= PRIME32_1; p32++;
+ state->v2 += XXH_LE32(p32) * PRIME32_2; state->v2 = XXH_rotl32(state->v2, 13); state->v2 *= PRIME32_1; p32++;
+ state->v3 += XXH_LE32(p32) * PRIME32_2; state->v3 = XXH_rotl32(state->v3, 13); state->v3 *= PRIME32_1; p32++;
+ state->v4 += XXH_LE32(p32) * PRIME32_2; state->v4 = XXH_rotl32(state->v4, 13); state->v4 *= PRIME32_1; p32++;
+ }
+ p += 16-state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p <= bEnd-16)
+ {
+ const BYTE* const limit = bEnd - 16;
+ U32 v1 = state->v1;
+ U32 v2 = state->v2;
+ U32 v3 = state->v3;
+ U32 v4 = state->v4;
+
+ do
+ {
+ v1 += XXH_LE32(p) * PRIME32_2; v1 = XXH_rotl32(v1, 13); v1 *= PRIME32_1; p+=4;
+ v2 += XXH_LE32(p) * PRIME32_2; v2 = XXH_rotl32(v2, 13); v2 *= PRIME32_1; p+=4;
+ v3 += XXH_LE32(p) * PRIME32_2; v3 = XXH_rotl32(v3, 13); v3 *= PRIME32_1; p+=4;
+ v4 += XXH_LE32(p) * PRIME32_2; v4 = XXH_rotl32(v4, 13); v4 *= PRIME32_1; p+=4;
+ } while (p<=limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < bEnd)
+ {
+ XXH_memcpy(state->memory, p, bEnd-p);
+ state->memsize = (int)(bEnd-p);
+ }
+
+ return XXH_OK;
+}
+
+
+U32 XXH32_intermediateDigest (void* state_in)
+{
+ struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
+ BYTE * p = (BYTE*)state->memory;
+ BYTE* bEnd = (BYTE*)state->memory + state->memsize;
+ U32 h32;
+
+ if (state->total_len >= 16)
+ {
+ h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
+ }
+ else
+ {
+ h32 = state->seed + PRIME32_5;
+ }
+
+ h32 += (U32) state->total_len;
+
+ while (p<=bEnd-4)
+ {
+ h32 += XXH_LE32(p) * PRIME32_3;
+ h32 = XXH_rotl32(h32, 17) * PRIME32_4;
+ p+=4;
+ }
+
+ while (p<bEnd)
+ {
+ h32 += (*p) * PRIME32_5;
+ h32 = XXH_rotl32(h32, 11) * PRIME32_1;
+ p++;
+ }
+
+ h32 ^= h32 >> 15;
+ h32 *= PRIME32_2;
+ h32 ^= h32 >> 13;
+ h32 *= PRIME32_3;
+ h32 ^= h32 >> 16;
+
+ return h32;
+}
+
+
+U32 XXH32_digest (void* state_in)
+{
+ U32 h32 = XXH32_intermediateDigest(state_in);
+
+ XXH_free(state_in);
+
+ return h32;
+}
--- /dev/null
+/*
+ xxHash - Fast Hash algorithm
+ Header File
+ Copyright (C) 2012-2013, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - xxHash source repository : http://code.google.com/p/xxhash/
+*/
+
+/* Notice extracted from xxHash homepage :
+
+xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
+It also successfully passes all tests from the SMHasher suite.
+
+Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
+
+Name Speed Q.Score Author
+xxHash 5.4 GB/s 10
+CrapWow 3.2 GB/s 2 Andrew
+MurmurHash 3a   2.7 GB/s     10       Austin Appleby
+SpookyHash 2.0 GB/s 10 Bob Jenkins
+SBox 1.4 GB/s 9 Bret Mulvey
+Lookup3 1.2 GB/s 9 Bob Jenkins
+SuperFastHash 1.2 GB/s 1 Paul Hsieh
+CityHash64 1.05 GB/s 10 Pike & Alakuijala
+FNV 0.55 GB/s 5 Fowler, Noll, Vo
+CRC32 0.43 GB/s 9
+MD5-32 0.33 GB/s 10 Ronald L. Rivest
+SHA1-32 0.28 GB/s 10
+
+Q.Score is a measure of quality of the hash function.
+It depends on successfully passing SMHasher test set.
+10 is a perfect score.
+*/
+
+#pragma once
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+//****************************
+// Type
+//****************************
+typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
+
+
+
+//****************************
+// Simple Hash Functions
+//****************************
+
+unsigned int XXH32 (const void* input, int len, unsigned int seed);
+
+/*
+XXH32() :
+    Calculates the 32-bit hash of the sequence of "len" bytes stored at memory address "input".
+ The memory between input & input+len must be valid (allocated and read-accessible).
+ "seed" can be used to alter the result predictably.
+ This function successfully passes all SMHasher tests.
+ Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
+ Note that "len" is type "int", which means it is limited to 2^31-1.
+ If your data is larger, use the advanced functions below.
+*/
+
+
+
+//****************************
+// Advanced Hash Functions
+//****************************
+
+void* XXH32_init (unsigned int seed);
+XXH_errorcode XXH32_update (void* state, const void* input, int len);
+unsigned int XXH32_digest (void* state);
+
+/*
+These functions calculate the xxhash of an input provided in several small packets,
+as opposed to an input provided as a single block.
+
+It must be started with :
+void* XXH32_init(unsigned int seed)
+The function returns a pointer which holds the state of the calculation.
+
+This pointer must be provided as "void* state" parameter for XXH32_update().
+XXH32_update() can be called as many times as necessary.
+The user must provide a valid (allocated) input.
+The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
+Note that "len" is type "int", which means it is limited to 2^31-1.
+If your data is larger, it is recommended to chunk your data into blocks
+of size for example 2^30 (1GB) to avoid any "int" overflow issue.
+
+Finally, you can end the calculation anytime, by using XXH32_digest().
+This function returns the final 32-bit hash.
+You must provide the same "void* state" parameter created by XXH32_init().
+Memory will be freed by XXH32_digest().
+*/
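+
+/*
+Minimal streaming sketch (illustrative only ; 'buf1'/'buf2' and their lengths
+are hypothetical) :
+
+    void* state = XXH32_init(0);             // seed = 0
+    XXH32_update(state, buf1, len1);
+    XXH32_update(state, buf2, len2);
+    unsigned int h = XXH32_digest(state);    // returns the hash and frees 'state'
+*/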
+
+
+int XXH32_sizeofState();
+XXH_errorcode XXH32_resetState(void* state, unsigned int seed);
+
+#define XXH32_SIZEOFSTATE 48
+typedef struct { long long ll[(XXH32_SIZEOFSTATE+(sizeof(long long)-1))/sizeof(long long)]; } XXH32_stateSpace_t;
+/*
+These functions allow user application to make its own allocation for state.
+
+XXH32_sizeofState() is used to know how much space must be allocated for the xxHash 32-bits state.
+Note that the state must be aligned to access 'long long' fields. Memory must be allocated and referenced by a pointer.
+This pointer must then be provided as 'state' into XXH32_resetState(), which initializes the state.
+
+For static allocation purposes (such as allocation on stack, or freestanding systems without malloc()),
+use the structure XXH32_stateSpace_t, which will ensure that memory space is large enough and correctly aligned to access 'long long' fields.
+*/
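+
+/*
+Static allocation sketch (illustrative only ; 'data'/'len' are hypothetical) :
+
+    XXH32_stateSpace_t state;
+    XXH32_resetState(&state, 0);
+    XXH32_update(&state, data, len);
+    unsigned int h = XXH32_intermediateDigest(&state);   // the state is not freed
+*/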
+
+
+unsigned int XXH32_intermediateDigest (void* state);
+/*
+This function does the same as XXH32_digest(), generating a 32-bit hash,
+but preserves the memory context.
+This way, it becomes possible to generate intermediate hashes, and then continue feeding data with XXH32_update().
+To free the memory context, use XXH32_digest(), or free().
+*/
+
+
+
+//****************************
+// Deprecated function names
+//****************************
+// The following translations are provided to ease code transition.
+// You are encouraged to no longer use these function names.
+#define XXH32_feed XXH32_update
+#define XXH32_result XXH32_digest
+#define XXH32_getIntermediateResult XXH32_intermediateDigest
+
+
+
+#if defined (__cplusplus)
+}
+#endif
--- /dev/null
+/*
+ LZ4 - Fast LZ compression algorithm
+ Header File
+ Copyright (C) 2011-2013, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
+ - LZ4 source repository : http://code.google.com/p/lz4/
+*/
+#pragma once
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+//**************************************
+// Compiler Options
+//**************************************
+#if defined(_MSC_VER) && !defined(__cplusplus) // Visual Studio
+# define inline __forceinline // Visual C is not C99, but supports some kind of inline. Note : we *do* want to force inline
+#endif
+
+
+//****************************
+// Simple Functions
+//****************************
+
+int LZ4_compress (const char* source, char* dest, int inputSize);
+int LZ4_decompress_safe (const char* source, char* dest, int inputSize, int maxOutputSize);
+
+/*
+LZ4_compress() :
+ Compresses 'inputSize' bytes from 'source' into 'dest'.
+ Destination buffer must be already allocated,
+    and must be sized to handle worst-case situations (input data not compressible).
+    Worst-case size evaluation is provided by the function LZ4_compressBound().
+ inputSize : Max supported value is ~1.9GB
+ return : the number of bytes written in buffer dest
+ or 0 if the compression fails
+
+LZ4_decompress_safe() :
+ maxOutputSize : is the size of the destination buffer (which must be already allocated)
+ return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize)
+ If the source stream is malformed or too large, the function will stop decoding and return a negative result.
+    This function is protected against any kind of buffer overflow attempt (it never writes outside of the output buffer, and never reads outside of the input buffer). It is therefore protected against malicious data packets.
+*/
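+
+/*
+Minimal usage sketch (illustrative only ; 'src' and 'srcSize' are hypothetical,
+and error handling is omitted) :
+
+    char* compressed = malloc(LZ4_compressBound(srcSize));
+    int   csize      = LZ4_compress(src, compressed, srcSize);
+
+    char* decoded = malloc(srcSize);
+    int   dsize   = LZ4_decompress_safe(compressed, decoded, csize, srcSize);
+    // dsize == srcSize on success ; negative if the input was malformed
+*/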
+
+
+//****************************
+// Advanced Functions
+//****************************
+
+static inline int LZ4_compressBound(int isize) { return ((isize) + ((isize)/255) + 16); }
+#define LZ4_COMPRESSBOUND( isize) ((isize) + ((isize)/255) + 16)
+
+/*
+LZ4_compressBound() :
+    Provides the maximum size that LZ4 may output in a "worst case" scenario (input data not compressible).
+    Primarily useful for memory allocation of the output buffer.
+    The inline function is recommended for the general case ;
+    the macro is also provided for when the result needs to be evaluated at compile time (such as table size allocation).
+
+ isize : is the input size. Max supported value is ~1.9GB
+ return : maximum output size in a "worst case" scenario
+ note : this function is limited by "int" range (2^31-1)
+*/
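+
+// Worked example : LZ4_compressBound(65536) = 65536 + 65536/255 + 16
+//                                           = 65536 + 257 + 16 = 65809 bytes.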
+
+
+int LZ4_compress_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
+
+/*
+LZ4_compress_limitedOutput() :
+ Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+ If it cannot achieve it, compression will stop, and result of the function will be zero.
+ This function never writes outside of provided output buffer.
+
+ inputSize : Max supported value is ~1.9GB
+ maxOutputSize : is the size of the destination buffer (which must be already allocated)
+ return : the number of bytes written in buffer 'dest'
+ or 0 if the compression fails
+*/
+
+
+int LZ4_decompress_fast (const char* source, char* dest, int outputSize);
+
+/*
+LZ4_decompress_fast() :
+ outputSize : is the original (uncompressed) size
+ return : the number of bytes read from the source buffer (in other words, the compressed size)
+ If the source stream is malformed, the function will stop decoding and return a negative result.
+ note : This function is a bit faster than LZ4_decompress_safe()
+    This function never writes outside of output buffers, and never reads before the input buffer, but may read beyond the input buffer (since it doesn't know its size) in case of a malicious data packet.
+    Use this function preferably in a trusted environment (data to decode comes from a trusted source).
+ Destination buffer must be already allocated. Its size must be a minimum of 'outputSize' bytes.
+*/
+
+int LZ4_decompress_safe_partial (const char* source, char* dest, int inputSize, int targetOutputSize, int maxOutputSize);
+
+/*
+LZ4_decompress_safe_partial() :
+    This function decompresses a compressed block of size 'inputSize' at position 'source'
+    into the output buffer 'dest' of size 'maxOutputSize'.
+    The function stops decompressing as soon as 'targetOutputSize' has been reached,
+    reducing decompression time.
+ return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize)
+    Note : this number might be < 'targetOutputSize' if the compressed block does not contain enough bytes to decode.
+ Always control how many bytes were decoded.
+ If the source stream is malformed, the function will stop decoding and return a negative result.
+    This function never writes outside of the output buffer, and never reads outside of the input buffer. It is therefore protected against malicious data packets.
+*/
+
+
+int LZ4_decompress_safe_withPrefix64k (const char* source, char* dest, int inputSize, int maxOutputSize);
+int LZ4_decompress_fast_withPrefix64k (const char* source, char* dest, int outputSize);
+
+/*
+*_withPrefix64k() :
+ These decoding functions work the same as their "normal name" versions,
+ but will potentially use up to 64KB of data in front of 'char* dest'.
+    These functions are used for decoding inter-dependent blocks.
+*/
+
+
+//****************************
+// Obsolete Functions
+//****************************
+
+static inline int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
+static inline int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }
+
+/*
+These functions are deprecated and should no longer be used.
+They are provided here for compatibility with existing user programs.
+*/
+
+
+
+#if defined (__cplusplus)
+}
+#endif