bundle zstd 1.3.2 source code
only .c and .h files + license
parent aec36f64a2
commit dc883f62ae

62 changed files with 50784 additions and 0 deletions
docs/3rd_party/zstd/LICENSE (vendored, new file, 30 lines)
@@ -0,0 +1,30 @@
BSD License

For Zstandard software

Copyright (c) 2016-present, Facebook, Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

 * Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

 * Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

 * Neither the name Facebook nor the names of its contributors may be used to
   endorse or promote products derived from this software without specific
   prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
src/borg/algorithms/zstd/lib/common/bitstream.h (new file, 471 lines)
@@ -0,0 +1,471 @@
/* ******************************************************************
   bitstream
   Part of FSE library
   header file (to include)
   Copyright (C) 2013-2017, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*
 * This API consists of small unitary functions, which must be inlined for best performance.
 * Since link-time-optimization is not available for all compilers,
 * these functions are defined into a .h to be included.
 */

/*-****************************************
*  Dependencies
******************************************/
#include "mem.h"            /* unaligned access routines */
#include "error_private.h"  /* error codes and messages */


/*-*************************************
*  Debug
***************************************/
#if defined(BIT_DEBUG) && (BIT_DEBUG>=1)
#  include <assert.h>
#else
#  ifndef assert
#    define assert(condition) ((void)0)
#  endif
#endif


/*=========================================
*  Target specific
=========================================*/
#if defined(__BMI__) && defined(__GNUC__)
#  include <immintrin.h>   /* support for bextr (experimental) */
#endif

#define STREAM_ACCUMULATOR_MIN_32  25
#define STREAM_ACCUMULATOR_MIN_64  57
#define STREAM_ACCUMULATOR_MIN    ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))


/*-******************************************
*  bitStream encoding API (write forward)
********************************************/
/* bitStream can mix input from multiple sources.
 * A critical property of these streams is that they encode and decode in **reverse** direction.
 * So the first bit sequence you add will be the last to be read, like a LIFO stack.
 */
typedef struct
{
    size_t bitContainer;
    unsigned bitPos;
    char*  startPtr;
    char*  ptr;
    char*  endPtr;
} BIT_CStream_t;

MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
MEM_STATIC void   BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
MEM_STATIC void   BIT_flushBits(BIT_CStream_t* bitC);
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);

/* Start with initCStream, providing the size of the buffer to write into.
 * bitStream will never write outside of this buffer.
 * `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
 *
 * bits are first added to a local register.
 * Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
 * Writing data into memory is an explicit operation, performed by the flushBits function.
 * Hence keep track of how many bits are potentially stored in the local register to avoid register overflow.
 * After a flushBits, a maximum of 7 bits might still be stored in the local register.
 *
 * Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
 *
 * Last operation is to close the bitStream.
 * The function returns the final size of CStream in bytes.
 * If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
 */
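/* Illustrative sketch of the write path described above; not part of upstream
 * zstd. The function name and the two field values are hypothetical; real
 * callers would check each step as documented. */
#if 0   /* example only, never compiled */
static size_t example_writeTwoFields(void* dst, size_t dstCapacity)
{
    BIT_CStream_t bitC;
    size_t const initErr = BIT_initCStream(&bitC, dst, dstCapacity);
    if (ERR_isError(initErr)) return initErr;
    BIT_addBits(&bitC, 5, 3);   /* first field added : will be read back *last* (LIFO) */
    BIT_addBits(&bitC, 1, 2);   /* second field added : will be read back first */
    BIT_flushBits(&bitC);       /* commit register to memory; <= 7 bits remain buffered */
    return BIT_closeCStream(&bitC);   /* final byte size, or 0 if dst was too small */
}
#endif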

/*-********************************************
*  bitStream decoding API (read backward)
**********************************************/
typedef struct
{
    size_t   bitContainer;
    unsigned bitsConsumed;
    const char* ptr;
    const char* start;
    const char* limitPtr;
} BIT_DStream_t;

typedef enum { BIT_DStream_unfinished = 0,
               BIT_DStream_endOfBuffer = 1,
               BIT_DStream_completed = 2,
               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */

MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);


/* Start by invoking BIT_initDStream().
 * A chunk of the bitStream is then stored into a local register.
 * Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
 * You can then retrieve bitFields stored into the local register, **in reverse order**.
 * The local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
 * A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
 * Otherwise, it can be less than that, so proceed accordingly.
 * Checking if DStream has reached its end can be performed with BIT_endOfDStream().
 */
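/* Illustrative sketch of the read loop implied by the comment above; not part
 * of upstream zstd. `src` and `srcSize` are hypothetical, and the field widths
 * must mirror what the encoder wrote, consumed in reverse order. */
#if 0   /* example only, never compiled */
static void example_readTwoFields(const void* src, size_t srcSize)
{
    BIT_DStream_t bitD;
    if (ERR_isError(BIT_initDStream(&bitD, src, srcSize))) return;
    do {
        size_t const f2 = BIT_readBits(&bitD, 2);   /* last field written comes back first */
        size_t const f1 = BIT_readBits(&bitD, 3);
        (void)f2; (void)f1;
    } while (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished);
    assert(BIT_endOfDStream(&bitD));   /* 1 only when every bit has been consumed */
}
#endif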

/*-****************************************
*  unsafe API
******************************************/
MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */

MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
/* unsafe version; does not check buffer overflow */

MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */



/*-**************************************************************
*  Internal functions
****************************************************************/
MEM_STATIC unsigned BIT_highbit32 (register U32 val)
{
    assert(val != 0);
    {
#   if defined(_MSC_VER)   /* Visual */
        unsigned long r=0;
        _BitScanReverse ( &r, val );
        return (unsigned) r;
#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
        return 31 - __builtin_clz (val);
#   else   /* Software version */
        static const unsigned DeBruijnClz[32] = { 0,  9,  1, 10, 13, 21,  2, 29,
                                                 11, 14, 16, 18, 22, 25,  3, 30,
                                                  8, 12, 20, 28, 15, 17, 24,  7,
                                                 19, 27, 23,  6, 26,  5,  4, 31 };
        U32 v = val;
        v |= v >> 1;
        v |= v >> 2;
        v |= v >> 4;
        v |= v >> 8;
        v |= v >> 16;
        return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
#   endif
    }
}
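/* Editorial note, not in the upstream source: the software fallback above is
 * the classic De Bruijn trick. The shift cascade smears the highest set bit
 * downward, turning val into 2^(highbit+1)-1; multiplying by 0x07C4ACDD then
 * leaves a distinct 5-bit pattern in the top bits for each of the 32 possible
 * results, ">> 27" extracts it, and DeBruijnClz[] maps it back to the index. */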

/*=====    Local Constants   =====*/
static const unsigned BIT_mask[] = {
    0,          1,         3,         7,         0xF,       0x1F,
    0x3F,       0x7F,      0xFF,      0x1FF,     0x3FF,     0x7FF,
    0xFFF,      0x1FFF,    0x3FFF,    0x7FFF,    0xFFFF,    0x1FFFF,
    0x3FFFF,    0x7FFFF,   0xFFFFF,   0x1FFFFF,  0x3FFFFF,  0x7FFFFF,
    0xFFFFFF,   0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
    0x3FFFFFFF, 0x7FFFFFFF};   /* up to 31 bits */
#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))

/*-**************************************************************
*  bitStream encoding
****************************************************************/
/*! BIT_initCStream() :
 *  `dstCapacity` must be > sizeof(size_t)
 *  @return : 0 if success,
 *            otherwise an error code (can be tested using ERR_isError()) */
MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
                                  void* startPtr, size_t dstCapacity)
{
    bitC->bitContainer = 0;
    bitC->bitPos = 0;
    bitC->startPtr = (char*)startPtr;
    bitC->ptr = bitC->startPtr;
    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
    if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
    return 0;
}

/*! BIT_addBits() :
 *  can add up to 31 bits into `bitC`.
 *  Note : does not check for register overflow ! */
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
                            size_t value, unsigned nbBits)
{
    MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32);
    assert(nbBits < BIT_MASK_SIZE);
    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
    bitC->bitPos += nbBits;
}

/*! BIT_addBitsFast() :
 *  works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
                                size_t value, unsigned nbBits)
{
    assert((value>>nbBits) == 0);
    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    bitC->bitContainer |= value << bitC->bitPos;
    bitC->bitPos += nbBits;
}

/*! BIT_flushBitsFast() :
 *  assumption : bitContainer has not overflowed
 *  unsafe version; does not check buffer overflow */
MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
{
    size_t const nbBytes = bitC->bitPos >> 3;
    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
    bitC->ptr += nbBytes;
    assert(bitC->ptr <= bitC->endPtr);
    bitC->bitPos &= 7;
    bitC->bitContainer >>= nbBytes*8;
}

/*! BIT_flushBits() :
 *  assumption : bitContainer has not overflowed
 *  safe version; checks for buffer overflow, and prevents it.
 *  note : does not signal buffer overflow.
 *  overflow will be revealed later on using BIT_closeCStream() */
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
{
    size_t const nbBytes = bitC->bitPos >> 3;
    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
    bitC->ptr += nbBytes;
    if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
    bitC->bitPos &= 7;
    bitC->bitContainer >>= nbBytes*8;
}

/*! BIT_closeCStream() :
 *  @return : size of CStream, in bytes,
 *            or 0 if it could not fit into dstBuffer */
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
{
    BIT_addBitsFast(bitC, 1, 1);   /* endMark */
    BIT_flushBits(bitC);
    if (bitC->ptr >= bitC->endPtr) return 0;   /* overflow detected */
    return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
}


/*-********************************************************
*  bitStream decoding
**********************************************************/
/*! BIT_initDStream() :
 *  Initialize a BIT_DStream_t.
 *  `bitD` : a pointer to an already allocated BIT_DStream_t structure.
 *  `srcSize` must be the *exact* size of the bitStream, in bytes.
 *  @return : size of stream (== srcSize), or an errorCode if a problem is detected
 */
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }

    bitD->start = (const char*)srcBuffer;
    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);

    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
        bitD->bitContainer = MEM_readLEST(bitD->ptr);
        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
          bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */
          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
    } else {
        bitD->ptr   = bitD->start;
        bitD->bitContainer = *(const BYTE*)(bitD->start);
        switch(srcSize)
        {
        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
                /* fall-through */

        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
                /* fall-through */

        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
                /* fall-through */

        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
                /* fall-through */

        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
                /* fall-through */

        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8;
                /* fall-through */

        default: break;
        }
        {   BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
            bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
            if (lastByte == 0) return ERROR(corruption_detected);  /* endMark not present */
        }
        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
    }

    return srcSize;
}

MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
{
    return bitContainer >> start;
}

MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
{
#if defined(__BMI__) && defined(__GNUC__) && __GNUC__*1000+__GNUC_MINOR__ >= 4008  /* experimental */
#  if defined(__x86_64__)
    if (sizeof(bitContainer)==8)
        return _bextr_u64(bitContainer, start, nbBits);
    else
#  endif
        return _bextr_u32(bitContainer, start, nbBits);
#else
    assert(nbBits < BIT_MASK_SIZE);
    return (bitContainer >> start) & BIT_mask[nbBits];
#endif
}

MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
{
    assert(nbBits < BIT_MASK_SIZE);
    return bitContainer & BIT_mask[nbBits];
}

/*! BIT_lookBits() :
 *  Provides next n bits from local register.
 *  local register is not modified.
 *  On 32-bits, maxNbBits==24.
 *  On 64-bits, maxNbBits==56.
 *  @return : value extracted */
MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
{
#if defined(__BMI__) && defined(__GNUC__)   /* experimental; fails if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8 */
    return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
#else
    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
#endif
}
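/* Editorial note, not in the upstream source: the fallback path above uses two
 * right shifts instead of a single ">> (regWidth - nbBits)" so that nbBits == 0
 * stays well-defined. ">> 1" consumes one position up front, and the remaining
 * shift count ((regMask-nbBits) & regMask) can never reach the register width,
 * which would be undefined behavior in C. */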

/*! BIT_lookBitsFast() :
 *  unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
{
    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
    assert(nbBits >= 1);
    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
}

MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
{
    bitD->bitsConsumed += nbBits;
}

/*! BIT_readBits() :
 *  Read (consume) next n bits from local register and update.
 *  Pay attention to not read more than nbBits contained into local register.
 *  @return : extracted value. */
MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
{
    size_t const value = BIT_lookBits(bitD, nbBits);
    BIT_skipBits(bitD, nbBits);
    return value;
}

/*! BIT_readBitsFast() :
 *  unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
{
    size_t const value = BIT_lookBitsFast(bitD, nbBits);
    assert(nbBits >= 1);
    BIT_skipBits(bitD, nbBits);
    return value;
}

/*! BIT_reloadDStream() :
 *  Refill `bitD` from the buffer previously set in BIT_initDStream().
 *  This function is safe, it guarantees it will not read beyond src buffer.
 *  @return : status of `BIT_DStream_t` internal register.
 *            when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */
        return BIT_DStream_overflow;

    if (bitD->ptr >= bitD->limitPtr) {
        bitD->ptr -= bitD->bitsConsumed >> 3;
        bitD->bitsConsumed &= 7;
        bitD->bitContainer = MEM_readLEST(bitD->ptr);
        return BIT_DStream_unfinished;
    }
    if (bitD->ptr == bitD->start) {
        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
        return BIT_DStream_completed;
    }
    /* start < ptr < limitPtr */
    {   U32 nbBytes = bitD->bitsConsumed >> 3;
        BIT_DStream_status result = BIT_DStream_unfinished;
        if (bitD->ptr - nbBytes < bitD->start) {
            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
            result = BIT_DStream_endOfBuffer;
        }
        bitD->ptr -= nbBytes;
        bitD->bitsConsumed -= nbBytes*8;
        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
        return result;
    }
}

/*! BIT_endOfDStream() :
 *  @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
 */
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
{
    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}

#if defined (__cplusplus)
}
#endif

#endif /* BITSTREAM_H_MODULE */
src/borg/algorithms/zstd/lib/common/compiler.h (new file, 86 lines)
@@ -0,0 +1,86 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMPILER_H
#define ZSTD_COMPILER_H

/*-*******************************************************
*  Compiler specifics
*********************************************************/
/* force inlining */
#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  define INLINE_KEYWORD inline
#else
#  define INLINE_KEYWORD
#endif

#if defined(__GNUC__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif

/**
 * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
 * parameters. They must be inlined for the compiler to eliminate the constant
 * branches.
 */
#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
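/* Illustrative sketch, not part of the upstream file: a C "template" in this
 * sense is a function whose behavior is selected by a constant argument. Once
 * force-inlined with a literal, the compiler can delete the dead branch. The
 * function names and the parameter below are hypothetical. */
#if 0   /* example only, never compiled */
FORCE_INLINE_TEMPLATE unsigned readValue_template(const void* p, int is64bits)
{
    if (is64bits)                         /* constant after inlining */
        return (unsigned)MEM_read64(p);   /* this branch survives only in the 64-bit variant */
    return MEM_read32(p);
}
/* each wrapper specializes into a branch-free body */
static unsigned readValue32(const void* p) { return readValue_template(p, 0); }
static unsigned readValue64(const void* p) { return readValue_template(p, 1); }
#endif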
/**
 * HINT_INLINE is used to help the compiler generate better code. It is *not*
 * used for "templates", so it can be tweaked based on the compiler's
 * performance.
 *
 * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
 * always_inline attribute.
 *
 * clang up to 5.0.0 (trunk) benefits tremendously from the always_inline
 * attribute.
 */
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
#  define HINT_INLINE static INLINE_KEYWORD
#else
#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
#endif

/* force no inlining */
#ifdef _MSC_VER
#  define FORCE_NOINLINE static __declspec(noinline)
#else
#  ifdef __GNUC__
#    define FORCE_NOINLINE static __attribute__((__noinline__))
#  else
#    define FORCE_NOINLINE static
#  endif
#endif

/* prefetch */
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))  /* _mm_prefetch() is not defined outside of x86/x64 */
#  include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
#  define PREFETCH(ptr)   _mm_prefetch((const char*)ptr, _MM_HINT_T0)
#elif defined(__GNUC__)
#  define PREFETCH(ptr)   __builtin_prefetch(ptr, 0, 0)
#else
#  define PREFETCH(ptr)   /* disabled */
#endif

/* disable warnings */
#ifdef _MSC_VER    /* Visual Studio */
#  include <intrin.h>                    /* For Visual 2005 */
#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */
#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
#endif

#endif /* ZSTD_COMPILER_H */
src/borg/algorithms/zstd/lib/common/entropy_common.c (new file, 221 lines)
@@ -0,0 +1,221 @@
/*
   Common functions of New Generation Entropy library
   Copyright (C) 2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
*************************************************************************** */

/* *************************************
*  Dependencies
***************************************/
#include "mem.h"
#include "error_private.h"       /* ERR_*, ERROR */
#define FSE_STATIC_LINKING_ONLY  /* FSE_MIN_TABLELOG */
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY  /* HUF_TABLELOG_ABSOLUTEMAX */
#include "huf.h"


/*===   Version   ===*/
unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }


/*===   Error Management   ===*/
unsigned FSE_isError(size_t code) { return ERR_isError(code); }
const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }

unsigned HUF_isError(size_t code) { return ERR_isError(code); }
const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }


/*-**************************************************************
*  FSE NCount encoding-decoding
****************************************************************/
size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
                 const void* headerBuffer, size_t hbSize)
{
    const BYTE* const istart = (const BYTE*) headerBuffer;
    const BYTE* const iend = istart + hbSize;
    const BYTE* ip = istart;
    int nbBits;
    int remaining;
    int threshold;
    U32 bitStream;
    int bitCount;
    unsigned charnum = 0;
    int previous0 = 0;

    if (hbSize < 4) return ERROR(srcSize_wrong);
    bitStream = MEM_readLE32(ip);
    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = nbBits;
    remaining = (1<<nbBits)+1;
    threshold = 1<<nbBits;
    nbBits++;

    while ((remaining>1) & (charnum<=*maxSVPtr)) {
        if (previous0) {
            unsigned n0 = charnum;
            while ((bitStream & 0xFFFF) == 0xFFFF) {
                n0 += 24;
                if (ip < iend-5) {
                    ip += 2;
                    bitStream = MEM_readLE32(ip) >> bitCount;
                } else {
                    bitStream >>= 16;
                    bitCount   += 16;
            }   }
            while ((bitStream & 3) == 3) {
                n0 += 3;
                bitStream >>= 2;
                bitCount += 2;
            }
            n0 += bitStream & 3;
            bitCount += 2;
            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
            while (charnum < n0) normalizedCounter[charnum++] = 0;
            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                ip += bitCount>>3;
                bitCount &= 7;
                bitStream = MEM_readLE32(ip) >> bitCount;
            } else {
                bitStream >>= 2;
        }   }
        {   int const max = (2*threshold-1) - remaining;
            int count;

            if ((bitStream & (threshold-1)) < (U32)max) {
                count = bitStream & (threshold-1);
                bitCount += nbBits-1;
            } else {
                count = bitStream & (2*threshold-1);
                if (count >= threshold) count -= max;
                bitCount += nbBits;
            }

            count--;   /* extra accuracy */
            remaining -= count < 0 ? -count : count;  /* -1 means +1 */
            normalizedCounter[charnum++] = (short)count;
            previous0 = !count;
            while (remaining < threshold) {
                nbBits--;
                threshold >>= 1;
            }

            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> (bitCount & 31);
    }   }   /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
    if (remaining != 1) return ERROR(corruption_detected);
    if (bitCount > 32) return ERROR(corruption_detected);
    *maxSVPtr = charnum-1;

    ip += (bitCount+7)>>3;
    return ip-istart;
}
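/* Editorial reading guide, not in the upstream source, for the header format
 * decoded above: the first 4 bits store tableLog-FSE_MIN_TABLELOG. Counts then
 * follow in symbol order, each taking nbBits or nbBits-1 bits depending on its
 * value (the `max` test), with `threshold` shrinking as the remaining total
 * decreases. A count of zero sets previous0, after which 2-bit repeat flags
 * (and 16-bit runs of them) encode long stretches of zero-count symbols
 * cheaply. */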

/*! HUF_readStats() :
    Read compact Huffman tree, saved by HUF_writeCTable().
    `huffWeight` is destination buffer.
    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
    @return : size read from `src` , or an error Code .
    Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
*/
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize)
{
    U32 weightTotal;
    const BYTE* ip = (const BYTE*) src;
    size_t iSize;
    size_t oSize;

    if (!srcSize) return ERROR(srcSize_wrong);
    iSize = ip[0];
    /* memset(huffWeight, 0, hwSize); */   /* is not necessary, even though some analyzers complain ... */

    if (iSize >= 128) {  /* special header */
        oSize = iSize - 127;
        iSize = ((oSize+1)/2);
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        if (oSize >= hwSize) return ERROR(corruption_detected);
        ip += 1;
        {   U32 n;
            for (n=0; n<oSize; n+=2) {
                huffWeight[n]   = ip[n/2] >> 4;
                huffWeight[n+1] = ip[n/2] & 15;
    }   }   }
    else  {   /* header compressed with FSE (normal case) */
        FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)];  /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6);   /* max (hwSize-1) values decoded, as last one is implied */
        if (FSE_isError(oSize)) return oSize;
    }

    /* collect weight stats */
    memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
    weightTotal = 0;
    {   U32 n; for (n=0; n<oSize; n++) {
            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
            rankStats[huffWeight[n]]++;
            weightTotal += (1 << huffWeight[n]) >> 1;
    }   }
    if (weightTotal == 0) return ERROR(corruption_detected);

    /* get last non-null symbol weight (implied, total must be 2^n) */
    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;
        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
        *tableLogPtr = tableLog;
        /* determine last weight */
        {   U32 const total = 1 << tableLog;
            U32 const rest = total - weightTotal;
            U32 const verif = 1 << BIT_highbit32(rest);
            U32 const lastWeight = BIT_highbit32(rest) + 1;
            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
            huffWeight[oSize] = (BYTE)lastWeight;
            rankStats[lastWeight]++;
    }   }

    /* check tree construction validity */
    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);  /* by construction : at least 2 elts of rank 1, must be even */

    /* results */
    *nbSymbolsPtr = (U32)(oSize+1);
    return iSize+1;
}
src/borg/algorithms/zstd/lib/common/error_private.c (new file, 47 lines)
@@ -0,0 +1,47 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* The purpose of this file is to have a single list of error strings embedded in binary */

#include "error_private.h"

const char* ERR_getErrorString(ERR_enum code)
{
    static const char* const notErrorCode = "Unspecified error code";
    switch( code )
    {
    case PREFIX(no_error): return "No error detected";
    case PREFIX(GENERIC):  return "Error (generic)";
    case PREFIX(prefix_unknown): return "Unknown frame descriptor";
    case PREFIX(version_unsupported): return "Version not supported";
    case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
    case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
    case PREFIX(corruption_detected): return "Corrupted block detected";
    case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
    case PREFIX(parameter_unsupported): return "Unsupported parameter";
    case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
    case PREFIX(init_missing): return "Context should be init first";
    case PREFIX(memory_allocation): return "Allocation error : not enough memory";
    case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
    case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
    case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
    case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
    case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
    case PREFIX(dictionary_wrong): return "Dictionary mismatch";
    case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
    case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
    case PREFIX(srcSize_wrong): return "Src size is incorrect";
    /* following error codes are not stable and may be removed or changed in a future version */
    case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
    case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
    case PREFIX(maxCode):
    default: return notErrorCode;
    }
}
src/borg/algorithms/zstd/lib/common/error_private.h (new file, 76 lines)
@@ -0,0 +1,76 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* Note : this module is expected to remain private, do not expose it */

#ifndef ERROR_H_MODULE
#define ERROR_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif


/* ****************************************
*  Dependencies
******************************************/
#include <stddef.h>        /* size_t */
#include "zstd_errors.h"   /* enum list */


/* ****************************************
*  Compiler-specific
******************************************/
#if defined(__GNUC__)
#  define ERR_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define ERR_STATIC static inline
#elif defined(_MSC_VER)
#  define ERR_STATIC static __inline
#else
#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif


/*-****************************************
*  Customization (error_public.h)
******************************************/
typedef ZSTD_ErrorCode ERR_enum;
#define PREFIX(name) ZSTD_error_##name


/*-****************************************
*  Error codes handling
******************************************/
#undef ERROR   /* reported already defined on VS 2015 (Rich Geldreich) */
#define ERROR(name) ZSTD_ERROR(name)
#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))

ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }

ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
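/* Editorial note, not in the upstream source: this convention folds error
 * reporting into a size_t return value. ERROR(name) negates the positive enum
 * value, so error codes occupy the very top of the size_t range, far above any
 * plausible buffer size. A hypothetical round-trip:
 *
 *     size_t const r = ERROR(srcSize_wrong);  // wraps to a value near SIZE_MAX
 *     ERR_isError(r);                         // 1, since r > ERROR(maxCode)
 *     ERR_getErrorCode(r);                    // recovers ZSTD_error_srcSize_wrong
 *     ERR_isError((size_t)42);                // 0 : small values are valid results
 */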

/*-****************************************
*  Error Strings
******************************************/

const char* ERR_getErrorString(ERR_enum code);   /* error_private.c */

ERR_STATIC const char* ERR_getErrorName(size_t code)
{
    return ERR_getErrorString(ERR_getErrorCode(code));
}

#if defined (__cplusplus)
}
#endif

#endif /* ERROR_H_MODULE */
src/borg/algorithms/zstd/lib/common/fse.h (new file, 704 lines)
@@ -0,0 +1,704 @@
/* ******************************************************************
   FSE : Finite State Entropy codec
   Public Prototypes declaration
   Copyright (C) 2013-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef FSE_H
#define FSE_H


/*-*****************************************
*  Dependencies
******************************************/
#include <stddef.h>    /* size_t, ptrdiff_t */


/*-*****************************************
*  FSE_PUBLIC_API : control library symbols visibility
******************************************/
#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
#  define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
#  define FSE_PUBLIC_API __declspec(dllexport)
#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows generating better code, saving a function pointer load from the IAT and an indirect jump. */
#else
#  define FSE_PUBLIC_API
#endif

/*------   Version   ------*/
#define FSE_VERSION_MAJOR    0
#define FSE_VERSION_MINOR    9
#define FSE_VERSION_RELEASE  0

#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
#define FSE_QUOTE(str) #str
#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)

#define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */

/*-****************************************
*  FSE simple functions
******************************************/
/*! FSE_compress() :
    Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
    'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
    @return : size of compressed data (<= dstCapacity).
    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
                     if FSE_isError(return), compression failed (more details using FSE_getErrorName())
*/
FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize);

/*! FSE_decompress():
    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
    into already allocated destination buffer 'dst', of size 'dstCapacity'.
    @return : size of regenerated data (<= maxDstSize),
              or an error code, which can be tested using FSE_isError() .

    ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
    Why ? : making this distinction requires a header.
    Header management is intentionally delegated to the user layer, which can better manage special cases.
*/
FSE_PUBLIC_API size_t FSE_decompress(void* dst,  size_t dstCapacity,
                               const void* cSrc, size_t cSrcSize);


/*-*****************************************
*  Tool functions
******************************************/
FSE_PUBLIC_API size_t FSE_compressBound(size_t size);       /* maximum compressed size */

/* Error Management */
FSE_PUBLIC_API unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */
FSE_PUBLIC_API const char* FSE_getErrorName(size_t code);   /* provides error code string (useful for debugging) */


/*-*****************************************
*  FSE advanced functions
******************************************/
/*! FSE_compress2() :
    Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
    Both parameters can be defined as '0' to mean : use default value
    @return : size of compressed data
    Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!!
                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
                     if FSE_isError(return), it's an error code.
*/
FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);


/*-*****************************************
*  FSE detailed API
******************************************/
/*!
FSE_compress() does the following:
1. count symbol occurrences from source[] into table count[]
2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
3. save normalized counters to memory buffer using writeNCount()
4. build encoding table 'CTable' from normalized counters
5. encode the data stream using encoding table 'CTable'

FSE_decompress() does the following:
1. read normalized counters with readNCount()
2. build decoding table 'DTable' from normalized counters
3. decode the data stream using decoding table 'DTable'

The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and provide normalized distribution using an external method.
*/

/* *** COMPRESSION *** */

/*! FSE_count():
    Provides the precise count of each byte within a table 'count'.
    'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
    *maxSymbolValuePtr will be updated if detected smaller than initial value.
    @return : the count of the most frequent symbol (which is not identified).
              if return == srcSize, there is only one symbol.
              Can also return an error code, which can be tested with FSE_isError(). */
FSE_PUBLIC_API size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);

/*! FSE_optimalTableLog():
    dynamically downsize 'tableLog' when conditions are met.
    It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
    @return : recommended tableLog (necessarily <= 'maxTableLog') */
FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);

/*! FSE_normalizeCount():
    normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
    'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
    @return : tableLog,
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue);

/*! FSE_NCountWriteBound():
    Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
    Typically useful for allocation purpose. */
FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_writeNCount():
    Compactly save 'normalizedCounter' into 'buffer'.
    @return : size of the compressed table,
              or an errorCode, which can be tested using FSE_isError(). */
FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);


/*! Constructor and Destructor of FSE_CTable.
    Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */
FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
FSE_PUBLIC_API void        FSE_freeCTable (FSE_CTable* ct);

/*! FSE_buildCTable():
    Builds `ct`, which must be already allocated, using FSE_createCTable().
    @return : 0, or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_compress_usingCTable():
    Compress `src` using `ct` into `dst` which must be already allocated.
    @return : size of compressed data (<= `dstCapacity`),
              or 0 if compressed data could not fit into `dst`,
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);

/*!
Tutorial :
----------
The first step is to count all symbols. FSE_count() does this job very fast.
Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
FSE_count() will return the number of occurrences of the most frequent symbol.
This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).

The next step is to normalize the frequencies.
FSE_normalizeCount() will ensure that the sum of frequencies is == 2 ^'tableLog'.
It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
You can use 'tableLog'==0 to mean "use default tableLog value".
If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").

The result of FSE_normalizeCount() will be saved into a table,
called 'normalizedCounter', which is a table of signed short.
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
The return value is tableLog if everything proceeded as expected.
It is 0 if there is a single symbol within distribution.
If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).

'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
'buffer' must be already allocated.
For guaranteed success, buffer size must be at least FSE_headerBound().
The result of the function is the number of bytes written into 'buffer'.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).

'normalizedCounter' can then be used to create the compression table 'CTable'.
The space required by 'CTable' must be already allocated, using FSE_createCTable().
You can then use FSE_buildCTable() to fill 'CTable'.
If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).

'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
If it returns '0', compressed data could not fit into 'dst'.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
*/
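/* Condensed sketch of the tutorial above; not part of the upstream header.
 * Error handling is abbreviated and `dst`, `dstCapacity`, `src`, `srcSize`
 * are hypothetical inputs; each step should be checked with FSE_isError(). */
#if 0   /* example only, never compiled */
static size_t example_fseCompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    unsigned count[256];
    short    norm[256];
    unsigned maxSymbolValue = 255;
    size_t const maxCount = FSE_count(count, &maxSymbolValue, src, srcSize);
    if (FSE_isError(maxCount)) return maxCount;
    if (maxCount == srcSize) return 1;   /* single symbol : caller should use RLE instead */
    {   unsigned const tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
        size_t const logOrErr = FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue);
        size_t const hSize = FSE_writeNCount(dst, dstCapacity, norm, maxSymbolValue, tableLog);
        FSE_CTable* const ct = FSE_createCTable(maxSymbolValue, tableLog);
        size_t const buildErr = FSE_buildCTable(ct, norm, maxSymbolValue, tableLog);
        size_t const cSize = FSE_compress_usingCTable((char*)dst + hSize, dstCapacity - hSize,
                                                      src, srcSize, ct);
        FSE_freeCTable(ct);
        (void)logOrErr; (void)buildErr;
        if (cSize == 0) return 0;        /* compressed data did not fit into dst */
        return hSize + cSize;            /* header + compressed payload */
    }
}
#endif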


/* *** DECOMPRESSION *** */

/*! FSE_readNCount():
    Read compactly saved 'normalizedCounter' from 'rBuffer'.
    @return : size read from 'rBuffer',
              or an errorCode, which can be tested using FSE_isError().
              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);

/*! Constructor and Destructor of FSE_DTable.
    Note that its size depends on 'tableLog' */
typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
FSE_PUBLIC_API void        FSE_freeDTable(FSE_DTable* dt);

/*! FSE_buildDTable():
    Builds 'dt', which must be already allocated, using FSE_createDTable().
    @return : 0, or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_decompress_usingDTable():
    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
    into `dst` which must be already allocated.
    @return : size of regenerated data (necessarily <= `dstCapacity`),
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);

/*!
Tutorial :
----------
(Note : these functions only decompress FSE-compressed blocks.
 If block is uncompressed, use memcpy() instead.
 If block is a single repeated byte, use memset() instead.)

The first step is to obtain the normalized frequencies of symbols.
This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
or size the table to handle worst case situations (typically 256).
FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
If there is an error, the function will return an error code, which can be tested using FSE_isError().

The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
This is performed by the function FSE_buildDTable().
The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
If there is an error, the function will return an error code, which can be tested using FSE_isError().

`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
`cSrcSize` must be strictly correct, otherwise decompression will fail.
FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
*/
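
/* Illustration (not part of the original header) : the matching decompression
 * pipeline, mirroring the tutorial above. The DTable is allocated dynamically
 * via FSE_createDTable(); 'cSrc' holds the NCount header followed by the
 * FSE-compressed payload. */
static size_t FSE_decompress_sketch(void* dst, size_t dstCapacity,
                                    const void* cSrc, size_t cSrcSize)
{
    short    norm[256];          /* worst case : 256 symbols */
    unsigned maxSymbolValue = 255;
    unsigned tableLog;
    size_t   result;

    size_t const hSize = FSE_readNCount(norm, &maxSymbolValue, &tableLog, cSrc, cSrcSize);   /* read header */
    if (FSE_isError(hSize)) return hSize;

    {   FSE_DTable* const dt = FSE_createDTable(tableLog);
        if (dt == NULL) return (size_t)-1;   /* stands in for an allocation-failure error code */
        result = FSE_buildDTable(dt, norm, maxSymbolValue, tableLog);
        if (!FSE_isError(result))
            result = FSE_decompress_usingDTable(dst, dstCapacity,
                            (const unsigned char*)cSrc + hSize, cSrcSize - hSize, dt);
        FSE_freeDTable(dt);
    }
    return result;   /* regenerated size, or an error code */
}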


#endif  /* FSE_H */


#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
#define FSE_H_FSE_STATIC_LINKING_ONLY

/* *** Dependency *** */
#include "bitstream.h"


/* *****************************************
*  Static allocation
*******************************************/
/* FSE buffer bounds */
#define FSE_NCOUNTBOUND 512
#define FSE_BLOCKBOUND(size) (size + (size>>7))
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */

/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable, using below macros */
#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))

/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
#define FSE_DTABLE_SIZE(maxTableLog)                   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
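
/* Illustration (not part of the original header) : the two allocation styles
 * for a hypothetical tableLog of 12 and maxSymbolValue of 255; the heap
 * variant assumes <stdlib.h> for malloc(). */
FSE_CTable ctStatic[FSE_CTABLE_SIZE_U32(12, 255)];   /* static / stack, U32 alignment guaranteed */
FSE_DTable dtStatic[FSE_DTABLE_SIZE_U32(12)];
FSE_CTable* const ctHeap = (FSE_CTable*)malloc(FSE_CTABLE_SIZE(12, 255));   /* heap : same byte count */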


/* *****************************************
*  FSE advanced API
*******************************************/
/* FSE_count_wksp() :
 * Same as FSE_count(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of at least `1024` unsigned.
 */
size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                      const void* source, size_t sourceSize, unsigned* workSpace);

/** FSE_countFast() :
 *  same as FSE_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr
 */
size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);

/* FSE_countFast_wksp() :
 * Same as FSE_countFast(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of at least `1024` unsigned.
 */
size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace);

/*! FSE_count_simple() :
 *  Same as FSE_countFast(), but does not use any additional memory (not even on stack).
 *  This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
 */
size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);


unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
/**< same as FSE_optimalTableLog(), which uses `minus==2` */

/* FSE_compress_wksp() :
 * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
 * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
 */
#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */

size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
/**< build a fake FSE_CTable, designed to always compress the same symbolValue */

/* FSE_buildCTable_wksp() :
 * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
 * `wkspSize` must be >= `(1<<tableLog)`.
 */
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */

size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
/**< build a fake FSE_DTable, designed to always generate the same symbolValue */

size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);
/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */

typedef enum {
   FSE_repeat_none,  /**< Cannot use the previous table */
   FSE_repeat_check, /**< Can use the previous table but it must be checked */
   FSE_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
 } FSE_repeat;


/* *****************************************
*  FSE symbol compression API
*******************************************/
/*!
   This API consists of small unitary functions, which highly benefit from being inlined.
   Hence their bodies are included in the next section.
*/
typedef struct {
    ptrdiff_t   value;
    const void* stateTable;
    const void* symbolTT;
    unsigned    stateLog;
} FSE_CState_t;

static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);

static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);

static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);

/**<
These functions are inner components of FSE_compress_usingCTable().
They allow the creation of custom streams, mixing multiple tables and bit sources.

A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
So the first symbol you will encode is the last you will decode, like a LIFO stack.

You will need a few variables to track your CStream. They are :

FSE_CTable    ct;         // Provided by FSE_buildCTable()
BIT_CStream_t bitStream;  // bitStream tracking structure
FSE_CState_t  state;      // State tracking structure (can have several)

The first thing to do is to init bitStream and state.
    size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
    FSE_initCState(&state, ct);

Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError().
You can then encode your input data, byte after byte.
FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
Remember decoding will be done in reverse direction.
    FSE_encodeSymbol(&bitStream, &state, symbol);

At any time, you can also add any bit sequence.
Note : maximum allowed nbBits is 25, for compatibility with 32-bit decoders.
    BIT_addBits(&bitStream, bitField, nbBits);

The above methods don't commit data to memory, they just store it into a local register, for speed.
The local register size is 64 bits on 64-bit systems, 32 bits on 32-bit systems (size_t).
Writing data to memory is a manual operation, performed by the flushBits function.
    BIT_flushBits(&bitStream);

Your last FSE encoding operation shall be to flush your last state value(s).
    FSE_flushCState(&bitStream, &state);

Finally, you must close the bitStream.
The function returns the size of CStream in bytes.
If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible).
If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
    size_t size = BIT_closeCStream(&bitStream);
*/
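
/* Illustration (not part of the original header) : the tutorial above written
 * out as a single-state encoding loop. 'ct' is assumed built by
 * FSE_buildCTable(); the real FSE_compress_usingCTable() interleaves two
 * states and starts with FSE_initCState2() for better efficiency. */
static size_t FSE_encodeLoop_sketch(void* dstBuffer, size_t maxDstSize,
                                    const unsigned char* src, size_t srcSize,
                                    const FSE_CTable* ct)
{
    BIT_CStream_t bitStream;
    FSE_CState_t  state;
    size_t n;

    {   size_t const initError = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
        if (FSE_isError(initError)) return initError;
    }
    FSE_initCState(&state, ct);

    for (n = srcSize; n > 0; n--) {        /* encode backwards : decoding is LIFO */
        FSE_encodeSymbol(&bitStream, &state, src[n-1]);
        BIT_flushBits(&bitStream);         /* commit the local register to memory */
    }
    FSE_flushCState(&bitStream, &state);   /* flush the final state value */

    return BIT_closeCStream(&bitStream);   /* size in bytes ; 0 == not compressible */
}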


/* *****************************************
*  FSE symbol decompression API
*******************************************/
typedef struct {
    size_t      state;
    const void* table;   /* precise table may vary, depending on U16 */
} FSE_DState_t;

static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);

static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);

static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);

/**<
Let's now decompose FSE_decompress_usingDTable() into its unitary components.
You will decode FSE-encoded symbols from the bitStream,
and also any other bitFields you put in, **in reverse order**.

You will need a few variables to track your bitStream. They are :

BIT_DStream_t DStream;    // Stream context
FSE_DState_t  DState;     // State context. Multiple ones are possible
FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()

The first thing to do is to init the bitStream.
    errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);

You should then retrieve your initial state(s)
(in reverse flushing order if you have several ones) :
    FSE_initDState(&DState, &DStream, DTablePtr);

You can then decode your data, symbol after symbol.
For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
    unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);

You can retrieve any bitfield you eventually stored into the bitStream (in reverse order).
Note : maximum allowed nbBits is 25, for 32-bit compatibility.
    size_t bitField = BIT_readBits(&DStream, nbBits);

All above operations only read from the local register (whose size depends on size_t).
Refueling the register from memory is manually performed by the reload method.
    endSignal = BIT_reloadDStream(&DStream);

BIT_reloadDStream() result tells if there is still some more data to read from DStream.
BIT_DStream_unfinished : there is still some data left into the DStream.
BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.

When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
to properly detect the exact end of stream.
After each decoded symbol, check if DStream is fully consumed using this simple test :
    BIT_reloadDStream(&DStream) >= BIT_DStream_completed

When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
Checking if DStream has reached its end is performed by :
    BIT_endOfDStream(&DStream);
Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
    FSE_endOfDState(&DState);
*/
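
/* Illustration (not part of the original header) : the tutorial above as a
 * deliberately simplified single-state decoding loop ; the real decoder in
 * fse_decompress.c interleaves two states and handles the stream tail more
 * carefully. 'dt' is assumed built by FSE_buildDTable(). */
static size_t FSE_decodeLoop_sketch(void* dst, size_t dstCapacity,
                                    const void* srcBuffer, size_t srcSize,
                                    const FSE_DTable* dt)
{
    BIT_DStream_t DStream;
    FSE_DState_t  DState;
    unsigned char* op = (unsigned char*)dst;
    unsigned char* const oend = op + dstCapacity;

    {   size_t const initError = BIT_initDStream(&DStream, srcBuffer, srcSize);
        if (FSE_isError(initError)) return initError;
    }
    FSE_initDState(&DState, &DStream, dt);

    /* decode forward while the stream still reports data ;
     * symbols come out in the reverse of encoding order */
    while ( (BIT_reloadDStream(&DStream) < BIT_DStream_completed) && (op < oend) )
        *op++ = FSE_decodeSymbol(&DState, &DStream);

    /* verify complete consumption of both the bitStream and the state */
    if (!BIT_endOfDStream(&DStream) || !FSE_endOfDState(&DState))
        return (size_t)-1;   /* stands in for a corruption error code */
    return (size_t)(op - (unsigned char*)dst);
}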


/* *****************************************
*  FSE unsafe API
*******************************************/
static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */


/* *****************************************
*  Implementation of inlined functions
*******************************************/
typedef struct {
    int deltaFindState;
    U32 deltaNbBits;
} FSE_symbolCompressionTransform; /* total 8 bytes */

MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
{
    const void* ptr = ct;
    const U16* u16ptr = (const U16*) ptr;
    const U32 tableLog = MEM_read16(ptr);
    statePtr->value = (ptrdiff_t)1<<tableLog;
    statePtr->stateTable = u16ptr+2;
    statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1));
    statePtr->stateLog = tableLog;
}


/*! FSE_initCState2() :
*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
*   uses the smallest state value possible, saving the cost of this symbol */
MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
{
    FSE_initCState(statePtr, ct);
    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
        const U16* stateTable = (const U16*)(statePtr->stateTable);
        U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
    }
}

MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol)
{
    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
    const U16* const stateTable = (const U16*)(statePtr->stateTable);
    U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
    BIT_addBits(bitC, statePtr->value, nbBitsOut);
    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
}

MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
{
    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
    BIT_flushBits(bitC);
}


/* ======    Decompression    ====== */

typedef struct {
    U16 tableLog;
    U16 fastMode;
} FSE_DTableHeader;   /* sizeof U32 */

typedef struct
{
    unsigned short newState;
    unsigned char  symbol;
    unsigned char  nbBits;
} FSE_decode_t;   /* size == U32 */

MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
{
    const void* ptr = dt;
    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
    BIT_reloadDStream(bitD);
    DStatePtr->table = dt + 1;
}

MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    return DInfo.symbol;
}

MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    size_t const lowBits = BIT_readBits(bitD, nbBits);
    DStatePtr->state = DInfo.newState + lowBits;
}

MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    BYTE const symbol = DInfo.symbol;
    size_t const lowBits = BIT_readBits(bitD, nbBits);

    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

/*! FSE_decodeSymbolFast() :
    unsafe, only works if no symbol has a probability > 50% */
MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    BYTE const symbol = DInfo.symbol;
    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);

    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
{
    return DStatePtr->state == 0;
}



#ifndef FSE_COMMONDEFS_ONLY

/* **************************************************************
*  Tuning parameters
****************************************************************/
/*!MEMORY_USAGE :
*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
*  Increasing memory usage improves compression ratio.
*  Reduced memory usage can improve speed, due to cache effect.
*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#ifndef FSE_MAX_MEMORY_USAGE
#  define FSE_MAX_MEMORY_USAGE 14
#endif
#ifndef FSE_DEFAULT_MEMORY_USAGE
#  define FSE_DEFAULT_MEMORY_USAGE 13
#endif

/*!FSE_MAX_SYMBOL_VALUE :
*  Maximum symbol value authorized.
*  Required for proper stack allocation */
#ifndef FSE_MAX_SYMBOL_VALUE
#  define FSE_MAX_SYMBOL_VALUE 255
#endif

/* **************************************************************
*  template functions type & suffix
****************************************************************/
#define FSE_FUNCTION_TYPE BYTE
#define FSE_FUNCTION_EXTENSION
#define FSE_DECODE_TYPE FSE_decode_t


#endif   /* !FSE_COMMONDEFS_ONLY */


/* ***************************************************************
*  Constants
*****************************************************************/
#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
#define FSE_MIN_TABLELOG 5

#define FSE_TABLELOG_ABSOLUTE_MAX 15
#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
#  error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
#endif

#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)


#endif  /* FSE_STATIC_LINKING_ONLY */


#if defined (__cplusplus)
}
#endif
309
src/borg/algorithms/zstd/lib/common/fse_decompress.c
Normal file

@@ -0,0 +1,309 @@
/* ******************************************************************
   FSE : Finite State Entropy decoder
   Copyright (C) 2013-2015, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */


/* **************************************************************
*  Includes
****************************************************************/
#include <stdlib.h>     /* malloc, free, qsort */
#include <string.h>     /* memcpy, memset */
#include "bitstream.h"
#include "compiler.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "error_private.h"


/* **************************************************************
*  Error Management
****************************************************************/
#define FSE_isError ERR_isError
#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */

/* check and forward error code */
#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }


/* **************************************************************
*  Templates
****************************************************************/
/*
  designed to be included
  for type-specific functions (template emulation in C)
  Objective is to write these functions only once, for improved maintenance
*/

/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
#  error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
#  error "FSE_FUNCTION_TYPE must be defined"
#endif

/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)


/* Function templates */
FSE_DTable* FSE_createDTable (unsigned tableLog)
{
    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
    return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
}

void FSE_freeDTable (FSE_DTable* dt)
{
    free(dt);
}

size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */
    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];

    U32 const maxSV1 = maxSymbolValue + 1;
    U32 const tableSize = 1 << tableLog;
    U32 highThreshold = tableSize-1;

    /* Sanity Checks */
    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);

    /* Init, lay down lowprob symbols */
    {   FSE_DTableHeader DTableH;
        DTableH.tableLog = (U16)tableLog;
        DTableH.fastMode = 1;
        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
            U32 s;
            for (s=0; s<maxSV1; s++) {
                if (normalizedCounter[s]==-1) {
                    tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
                    symbolNext[s] = 1;
                } else {
                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
                    symbolNext[s] = normalizedCounter[s];
        }   }   }
        memcpy(dt, &DTableH, sizeof(DTableH));
    }

    /* Spread symbols */
    {   U32 const tableMask = tableSize-1;
        U32 const step = FSE_TABLESTEP(tableSize);
        U32 s, position = 0;
        for (s=0; s<maxSV1; s++) {
            int i;
            for (i=0; i<normalizedCounter[s]; i++) {
                tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
                position = (position + step) & tableMask;
                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
        }   }
        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
    }

    /* Build Decoding table */
    {   U32 u;
        for (u=0; u<tableSize; u++) {
            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
            U16 nextState = symbolNext[symbol]++;
            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
    }   }

    return 0;
}


#ifndef FSE_COMMONDEFS_ONLY

/*-*******************************************************
*  Decompression (Byte symbols)
*********************************************************/
size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
{
    void* ptr = dt;
    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
    void* dPtr = dt + 1;
    FSE_decode_t* const cell = (FSE_decode_t*)dPtr;

    DTableH->tableLog = 0;
    DTableH->fastMode = 0;

    cell->newState = 0;
    cell->symbol = symbolValue;
    cell->nbBits = 0;

    return 0;
}


size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
{
    void* ptr = dt;
    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
    void* dPtr = dt + 1;
    FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
    const unsigned tableSize = 1 << nbBits;
    const unsigned tableMask = tableSize - 1;
    const unsigned maxSV1 = tableMask+1;
    unsigned s;

    /* Sanity checks */
    if (nbBits < 1) return ERROR(GENERIC);         /* min size */

    /* Build Decoding Table */
    DTableH->tableLog = (U16)nbBits;
    DTableH->fastMode = 1;
    for (s=0; s<maxSV1; s++) {
        dinfo[s].newState = 0;
        dinfo[s].symbol = (BYTE)s;
        dinfo[s].nbBits = (BYTE)nbBits;
    }

    return 0;
}

FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
          void* dst, size_t maxDstSize,
    const void* cSrc, size_t cSrcSize,
    const FSE_DTable* dt, const unsigned fast)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const omax = op + maxDstSize;
    BYTE* const olimit = omax-3;

    BIT_DStream_t bitD;
    FSE_DState_t state1;
    FSE_DState_t state2;

    /* Init */
    CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));

    FSE_initDState(&state1, &bitD, dt);
    FSE_initDState(&state2, &bitD, dt);

#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)

    /* 4 symbols per loop */
    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
        op[0] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BIT_reloadDStream(&bitD);

        op[1] = FSE_GETSYMBOL(&state2);

        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }

        op[2] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BIT_reloadDStream(&bitD);

        op[3] = FSE_GETSYMBOL(&state2);
    }

    /* tail */
    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
    while (1) {
        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
        *op++ = FSE_GETSYMBOL(&state1);
        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
            *op++ = FSE_GETSYMBOL(&state2);
            break;
        }

        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
        *op++ = FSE_GETSYMBOL(&state2);
        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
            *op++ = FSE_GETSYMBOL(&state1);
            break;
    }   }

    return op-ostart;
}


size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
                            const void* cSrc, size_t cSrcSize,
                            const FSE_DTable* dt)
{
    const void* ptr = dt;
    const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
    const U32 fastMode = DTableH->fastMode;

    /* select fast mode (static) */
    if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}


size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog)
{
    const BYTE* const istart = (const BYTE*)cSrc;
    const BYTE* ip = istart;
    short counting[FSE_MAX_SYMBOL_VALUE+1];
    unsigned tableLog;
    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;

    /* normal FSE decoding mode */
    size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
    if (FSE_isError(NCountLength)) return NCountLength;
    //if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */
    if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
    ip += NCountLength;
    cSrcSize -= NCountLength;

    CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) );

    return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace);   /* always return, even if it is an error code */
}


typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];

size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)
{
    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
    return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG);
}



#endif   /* FSE_COMMONDEFS_ONLY */
302
src/borg/algorithms/zstd/lib/common/huf.h
Normal file

@@ -0,0 +1,302 @@
/* ******************************************************************
   Huffman coder, part of New Generation Entropy library
   header file
   Copyright (C) 2013-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef HUF_H_298734234
#define HUF_H_298734234

/* *** Dependencies *** */
#include <stddef.h>    /* size_t */


/* *** library symbols visibility *** */
/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
 *        HUF symbols remain "private" (internal symbols for library only).
 *        Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
#  define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
#  define HUF_PUBLIC_API __declspec(dllexport)
#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
#  define HUF_PUBLIC_API __declspec(dllimport)  /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
#else
#  define HUF_PUBLIC_API
#endif

/* *** simple functions *** */
/**
HUF_compress() :
    Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
    'dst' buffer must be already allocated.
    Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
    `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
    @return : size of compressed data (<= `dstCapacity`).
    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
                     if return == 1, srcData is a single repeated byte symbol (RLE compression).
                     if HUF_isError(return), compression failed (more details using HUF_getErrorName())
*/
HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize);

/**
HUF_decompress() :
    Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
    into already allocated buffer 'dst', of minimum size 'originalSize'.
    `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
    Note : in contrast with FSE, HUF_decompress can regenerate
           RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
           because it knows the size to regenerate.
    @return : size of regenerated data (== originalSize),
              or an error code, which can be tested using HUF_isError()
*/
HUF_PUBLIC_API size_t HUF_decompress(void* dst,  size_t originalSize,
                               const void* cSrc, size_t cSrcSize);


/* ***   Tool functions *** */
#define HUF_BLOCKSIZE_MAX (128 * 1024)       /**< maximum input size for a single block compressed with HUF_compress */
HUF_PUBLIC_API size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */

/* Error Management */
HUF_PUBLIC_API unsigned    HUF_isError(size_t code);       /**< tells if a return value is an error code */
HUF_PUBLIC_API const char* HUF_getErrorName(size_t code);  /**< provides error code string (useful for debugging) */
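
/* Illustration (not part of the original header) : a round-trip through the
 * simple API above. The input string and buffer sizes are placeholders ;
 * assumes <stdlib.h> for malloc()/free(). */
static int HUF_roundTrip_sketch(void)
{
    const char src[] = "aaaaabbbbbcccccdddddaaaaabbbbbcccccddddd";
    size_t const srcSize = sizeof(src);
    size_t const dstCapacity = HUF_compressBound(srcSize);
    void* const dst = malloc(dstCapacity);
    char  regen[sizeof(src)];
    int   ok = 0;

    size_t const cSize = HUF_compress(dst, dstCapacity, src, srcSize);
    if (!HUF_isError(cSize) && (cSize > 1)) {        /* 0 : not compressible ; 1 : RLE */
        size_t const rSize = HUF_decompress(regen, srcSize, dst, cSize);
        ok = !HUF_isError(rSize) && (rSize == srcSize);
    }
    free(dst);
    return ok;
}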

/* *** Advanced function *** */

/** HUF_compress2() :
 *  Same as HUF_compress(), but offers direct control over `maxSymbolValue` and `tableLog`.
 *  `tableLog` must be `<= HUF_TABLELOG_MAX` . */
HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);

/** HUF_compress4X_wksp() :
 *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
 *  `workSpace` must have minimum alignment of 4, and be at least as large as the following macro */
#define HUF_WORKSPACE_SIZE (6 << 10)
#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

/**
 *  The minimum workspace size for the `workSpace` used in
 *  HUF_readDTableX2_wksp() and HUF_readDTableX4_wksp().
 *
 *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
 *  HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
 *  Buffer overflow errors may potentially occur if code modifications result in
 *  a required workspace size greater than that specified in the following
 *  macro.
 */
#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))

#endif   /* HUF_H_298734234 */

/* ******************************************************************
 *  WARNING !!
 *  The following section contains advanced and experimental definitions
 *  which shall never be used in the context of dll
 *  because they are not guaranteed to remain stable in the future.
 *  Only consider them in association with static linking.
 *******************************************************************/
#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
#define HUF_H_HUF_STATIC_LINKING_ONLY

/* *** Dependencies *** */
#include "mem.h"   /* U32 */


/* *** Constants *** */
#define HUF_TABLELOG_MAX      12       /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
#define HUF_TABLELOG_DEFAULT  11       /* tableLog by default, when not specified */
#define HUF_SYMBOLVALUE_MAX  255

#define HUF_TABLELOG_ABSOLUTEMAX  15   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
#  error "HUF_TABLELOG_MAX is too large !"
#endif


/* ****************************************
*  Static allocation
******************************************/
/* HUF buffer bounds */
#define HUF_CTABLEBOUND 129
#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible is pre-filtered with fast heuristic */
#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */

/* static allocation of HUF's Compression Table */
#define HUF_CTABLE_SIZE_U32(maxSymbolValue)   ((maxSymbolValue)+1)   /* Use tables of U32, for proper alignment */
#define HUF_CTABLE_SIZE(maxSymbolValue)       (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
    U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \
    void* name##hv = &(name##hb); \
    HUF_CElt* name = (HUF_CElt*)(name##hv)   /* no final ; */

/* static allocation of HUF's DTable */
typedef U32 HUF_DTable;
#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
        HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
        HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
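
/* Illustration (not part of the original header) : declaring tables with the
 * macros above ; names are placeholders. */
HUF_CREATE_STATIC_CTABLE(myCTable, HUF_SYMBOLVALUE_MAX);    /* CTable, U32-aligned */
HUF_CREATE_STATIC_DTABLEX2(myDTableX2, HUF_TABLELOG_MAX);   /* single-symbol DTable */
HUF_CREATE_STATIC_DTABLEX4(myDTableX4, HUF_TABLELOG_MAX);   /* double-symbols DTable */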


/* ****************************************
*  Advanced decompression functions
******************************************/
size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */

size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */


/* ****************************************
*  HUF detailed API
******************************************/
/*!
HUF_compress() does the following:
1. count symbol occurrence from source[] into table count[] using FSE_count()
2. (optional) refine tableLog using HUF_optimalTableLog()
3. build Huffman table from count using HUF_buildCTable()
4. save Huffman table to memory buffer using HUF_writeCTable()
5. encode the data stream using HUF_compress4X_usingCTable()

The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and regenerate 'CTable' using external methods.
A sketch of this decomposed pipeline follows this section.
*/
/* FSE_count() : find it within "fse.h" */
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
typedef struct HUF_CElt_s HUF_CElt;   /* incomplete type */
size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);
size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);

typedef enum {
   HUF_repeat_none,  /**< Cannot use the previous table */
   HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
   HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
 } HUF_repeat;
/** HUF_compress4X_repeat() :
 *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
 *  If preferRepeat then the old table will always be used if valid. */
size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
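
/* Illustration (not part of the original header) : the five steps above,
 * written out. Assumes "fse.h" is also included for FSE_count(), and that
 * 'src'/'srcSize'/'dst'/'dstCapacity' come from the caller. */
static size_t HUF_compress_decomposed_sketch(void* dst, size_t dstCapacity,
                                             const void* src, size_t srcSize)
{
    unsigned count[HUF_SYMBOLVALUE_MAX+1];
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    unsigned huffLog = HUF_TABLELOG_DEFAULT;
    HUF_CREATE_STATIC_CTABLE(ct, HUF_SYMBOLVALUE_MAX);
    BYTE* const ostart = (BYTE*)dst;
    size_t hSize;

    {   size_t const cntErr = FSE_count(count, &maxSymbolValue, src, srcSize);       /* 1. histogram */
        if (HUF_isError(cntErr)) return cntErr;
    }
    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);                  /* 2. refine tableLog */
    {   size_t const maxBits = HUF_buildCTable(ct, count, maxSymbolValue, huffLog);   /* 3. build Huffman table */
        if (HUF_isError(maxBits)) return maxBits;
        huffLog = (unsigned)maxBits;
    }
    hSize = HUF_writeCTable(ostart, dstCapacity, ct, maxSymbolValue, huffLog);        /* 4. save table */
    if (HUF_isError(hSize)) return hSize;
    {   size_t const cSize = HUF_compress4X_usingCTable(ostart+hSize, dstCapacity-hSize, src, srcSize, ct);   /* 5. encode */
        if (HUF_isError(cSize) || (cSize==0)) return cSize;
        return hSize + cSize;
    }
}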

/** HUF_buildCTable_wksp() :
 *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
 *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
 */
size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize);

/*! HUF_readStats() :
    Read compact Huffman tree, saved by HUF_writeCTable().
    `huffWeight` is destination buffer.
    @return : size read from `src` , or an error Code .
    Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize);

/** HUF_readCTable() :
 *  Loading a CTable saved with HUF_writeCTable() */
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);

/*
HUF_decompress() does the following:
1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
2. build the Huffman table from the saved header, using HUF_readDTableXn()
3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable()
(a decomposed-decoder sketch follows the declarations below)
*/

/** HUF_selectDecoder() :
 *  Tells which decoder is likely to decode faster,
 *  based on a set of pre-determined metrics.
 *  @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
 *  Assumption : 0 < cSrcSize < dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);

size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize);
size_t HUF_readDTableX4_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);

size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
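
/* Illustration (not part of the original header) : manual decoder selection
 * using the declarations above ; buffers come from the caller. */
static size_t HUF_decompress_decomposed_sketch(void* dst, size_t dstSize,
                                               const void* cSrc, size_t cSrcSize)
{
    if (HUF_selectDecoder(dstSize, cSrcSize) == 0) {
        HUF_CREATE_STATIC_DTABLEX2(dtX2, HUF_TABLELOG_MAX);
        size_t const hSize = HUF_readDTableX2(dtX2, cSrc, cSrcSize);   /* build table from saved header */
        if (HUF_isError(hSize)) return hSize;
        return HUF_decompress4X2_usingDTable(dst, dstSize,
                        (const BYTE*)cSrc + hSize, cSrcSize - hSize, dtX2);
    } else {
        HUF_CREATE_STATIC_DTABLEX4(dtX4, HUF_TABLELOG_MAX);
        size_t const hSize = HUF_readDTableX4(dtX4, cSrc, cSrcSize);
        if (HUF_isError(hSize)) return hSize;
        return HUF_decompress4X4_usingDTable(dst, dstSize,
                        (const BYTE*)cSrc + hSize, cSrcSize - hSize, dtX4);
    }
}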

/* single stream variants */

size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
/** HUF_compress1X_repeat() :
 *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
 *  If preferRepeat then the old table will always be used if valid. */
size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */

size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */

size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */

size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of single or double symbol decoder, based on DTable */
size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);

#endif /* HUF_STATIC_LINKING_ONLY */

#if defined (__cplusplus)
}
#endif
360
src/borg/algorithms/zstd/lib/common/mem.h
Normal file

@@ -0,0 +1,360 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*-****************************************
*  Dependencies
******************************************/
#include <stddef.h>     /* size_t, ptrdiff_t */
#include <string.h>     /* memcpy */


/*-****************************************
*  Compiler specifics
******************************************/
#if defined(_MSC_VER)   /* Visual Studio */
#   include <stdlib.h>  /* _byteswap_ulong */
#   include <intrin.h>  /* _byteswap_* */
#endif
#if defined(__GNUC__)
#  define MEM_STATIC static __inline __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define MEM_STATIC static inline
#elif defined(_MSC_VER)
#  define MEM_STATIC static __inline
#else
#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif

/* code only tested on 32- and 64-bit systems */
#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }


/*-**************************************************************
*  Basic Types
*****************************************************************/
#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
  typedef   uint8_t BYTE;
  typedef  uint16_t U16;
  typedef   int16_t S16;
  typedef  uint32_t U32;
  typedef   int32_t S32;
  typedef  uint64_t U64;
  typedef   int64_t S64;
  typedef  intptr_t iPtrDiff;
  typedef uintptr_t uPtrDiff;
#else
  typedef unsigned char      BYTE;
  typedef unsigned short     U16;
  typedef   signed short     S16;
  typedef unsigned int       U32;
  typedef   signed int       S32;
  typedef unsigned long long U64;
  typedef   signed long long S64;
  typedef ptrdiff_t iPtrDiff;
  typedef size_t    uPtrDiff;
#endif


/*-**************************************************************
*  Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The below switch allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on compiler extensions (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets depending on alignment.
 *            In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6)
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define MEM_FORCE_MEMORY_ACCESS 2
#  elif defined(__INTEL_COMPILER) || defined(__GNUC__)
#    define MEM_FORCE_MEMORY_ACCESS 1
#  endif
#endif

MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }

MEM_STATIC unsigned MEM_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
}
#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
|
||||
|
||||
/* violates C standard, by lying on structure alignment.
|
||||
Only use if no other choice to achieve best performance on target platform */
|
||||
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
|
||||
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
|
||||
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
|
||||
MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }
|
||||
|
||||
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
|
||||
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
|
||||
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
|
||||
|
||||
#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
|
||||
|
||||
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
|
||||
/* currently only defined for gcc and icc */
|
||||
#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
|
||||
__pragma( pack(push, 1) )
|
||||
typedef union { U16 u16; U32 u32; U64 u64; size_t st; } unalign;
|
||||
__pragma( pack(pop) )
|
||||
#else
|
||||
typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;
|
||||
#endif
|
||||
|
||||
MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
|
||||
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
|
||||
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
|
||||
MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalign*)ptr)->st; }
|
||||
|
||||
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
|
||||
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
|
||||
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; }
|
||||
|
||||
#else
|
||||
|
||||
/* default method, safe and standard.
|
||||
can sometimes prove slower */
|
||||
|
||||
MEM_STATIC U16 MEM_read16(const void* memPtr)
|
||||
{
|
||||
U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
|
||||
}
|
||||
|
||||
MEM_STATIC U32 MEM_read32(const void* memPtr)
|
||||
{
|
||||
U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
|
||||
}
|
||||
|
||||
MEM_STATIC U64 MEM_read64(const void* memPtr)
|
||||
{
|
||||
U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
|
||||
}
|
||||
|
||||
MEM_STATIC size_t MEM_readST(const void* memPtr)
|
||||
{
|
||||
size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
|
||||
}
|
||||
|
||||
MEM_STATIC void MEM_write16(void* memPtr, U16 value)
|
||||
{
|
||||
memcpy(memPtr, &value, sizeof(value));
|
||||
}
|
||||
|
||||
MEM_STATIC void MEM_write32(void* memPtr, U32 value)
|
||||
{
|
||||
memcpy(memPtr, &value, sizeof(value));
|
||||
}
|
||||
|
||||
MEM_STATIC void MEM_write64(void* memPtr, U64 value)
|
||||
{
|
||||
memcpy(memPtr, &value, sizeof(value));
|
||||
}
|
||||
|
||||
#endif /* MEM_FORCE_MEMORY_ACCESS */
|
||||
|
||||
MEM_STATIC U32 MEM_swap32(U32 in)
|
||||
{
|
||||
#if defined(_MSC_VER) /* Visual Studio */
|
||||
return _byteswap_ulong(in);
|
||||
#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
|
||||
return __builtin_bswap32(in);
|
||||
#else
|
||||
return ((in << 24) & 0xff000000 ) |
|
||||
((in << 8) & 0x00ff0000 ) |
|
||||
((in >> 8) & 0x0000ff00 ) |
|
||||
((in >> 24) & 0x000000ff );
|
||||
#endif
|
||||
}
|
||||
|
||||
MEM_STATIC U64 MEM_swap64(U64 in)
|
||||
{
|
||||
#if defined(_MSC_VER) /* Visual Studio */
|
||||
return _byteswap_uint64(in);
|
||||
#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
|
||||
return __builtin_bswap64(in);
|
||||
#else
|
||||
return ((in << 56) & 0xff00000000000000ULL) |
|
||||
((in << 40) & 0x00ff000000000000ULL) |
|
||||
((in << 24) & 0x0000ff0000000000ULL) |
|
||||
((in << 8) & 0x000000ff00000000ULL) |
|
||||
((in >> 8) & 0x00000000ff000000ULL) |
|
||||
((in >> 24) & 0x0000000000ff0000ULL) |
|
||||
((in >> 40) & 0x000000000000ff00ULL) |
|
||||
((in >> 56) & 0x00000000000000ffULL);
|
||||
#endif
|
||||
}
|
||||
|
||||
MEM_STATIC size_t MEM_swapST(size_t in)
|
||||
{
|
||||
if (MEM_32bits())
|
||||
return (size_t)MEM_swap32((U32)in);
|
||||
else
|
||||
return (size_t)MEM_swap64((U64)in);
|
||||
}
|
||||
|
||||
/*=== Little endian r/w ===*/
|
||||
|
||||
MEM_STATIC U16 MEM_readLE16(const void* memPtr)
|
||||
{
|
||||
if (MEM_isLittleEndian())
|
||||
return MEM_read16(memPtr);
|
||||
else {
|
||||
const BYTE* p = (const BYTE*)memPtr;
|
||||
return (U16)(p[0] + (p[1]<<8));
|
||||
}
|
||||
}
|
||||
|
||||
MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
|
||||
{
|
||||
if (MEM_isLittleEndian()) {
|
||||
MEM_write16(memPtr, val);
|
||||
} else {
|
||||
BYTE* p = (BYTE*)memPtr;
|
||||
p[0] = (BYTE)val;
|
||||
p[1] = (BYTE)(val>>8);
|
||||
}
|
||||
}
|
||||
|
||||
MEM_STATIC U32 MEM_readLE24(const void* memPtr)
|
||||
{
|
||||
return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
|
||||
}
|
||||
|
||||
MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
|
||||
{
|
||||
MEM_writeLE16(memPtr, (U16)val);
|
||||
((BYTE*)memPtr)[2] = (BYTE)(val>>16);
|
||||
}
|
||||
|
||||
MEM_STATIC U32 MEM_readLE32(const void* memPtr)
|
||||
{
|
||||
if (MEM_isLittleEndian())
|
||||
return MEM_read32(memPtr);
|
||||
else
|
||||
return MEM_swap32(MEM_read32(memPtr));
|
||||
}
|
||||
|
||||
MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
|
||||
{
|
||||
if (MEM_isLittleEndian())
|
||||
MEM_write32(memPtr, val32);
|
||||
else
|
||||
MEM_write32(memPtr, MEM_swap32(val32));
|
||||
}
|
||||
|
||||
MEM_STATIC U64 MEM_readLE64(const void* memPtr)
|
||||
{
|
||||
if (MEM_isLittleEndian())
|
||||
return MEM_read64(memPtr);
|
||||
else
|
||||
return MEM_swap64(MEM_read64(memPtr));
|
||||
}
|
||||
|
||||
MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
|
||||
{
|
||||
if (MEM_isLittleEndian())
|
||||
MEM_write64(memPtr, val64);
|
||||
else
|
||||
MEM_write64(memPtr, MEM_swap64(val64));
|
||||
}
|
||||
|
||||
MEM_STATIC size_t MEM_readLEST(const void* memPtr)
|
||||
{
|
||||
if (MEM_32bits())
|
||||
return (size_t)MEM_readLE32(memPtr);
|
||||
else
|
||||
return (size_t)MEM_readLE64(memPtr);
|
||||
}
|
||||
|
||||
MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
|
||||
{
|
||||
if (MEM_32bits())
|
||||
MEM_writeLE32(memPtr, (U32)val);
|
||||
else
|
||||
MEM_writeLE64(memPtr, (U64)val);
|
||||
}
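
/* Illustrative sketch (not part of the bundled file): these helpers give a
 * fixed little-endian on-disk layout regardless of host endianness, so a
 * value written with MEM_writeLE32() reads back identically on any platform.
 * Buffer and function names below are hypothetical.
 */
#if 0   /* example only */
static int example_le_roundtrip(void)
{
    BYTE buf[4];
    MEM_writeLE32(buf, 0x11223344U);         /* buf = 44 33 22 11 on every host */
    return MEM_readLE32(buf) == 0x11223344U; /* always 1 */
}
#endif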

/*=== Big endian r/w ===*/

MEM_STATIC U32 MEM_readBE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap32(MEM_read32(memPtr));
    else
        return MEM_read32(memPtr);
}

MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, MEM_swap32(val32));
    else
        MEM_write32(memPtr, val32);
}

MEM_STATIC U64 MEM_readBE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap64(MEM_read64(memPtr));
    else
        return MEM_read64(memPtr);
}

MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, MEM_swap64(val64));
    else
        MEM_write64(memPtr, val64);
}

MEM_STATIC size_t MEM_readBEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readBE32(memPtr);
    else
        return (size_t)MEM_readBE64(memPtr);
}

MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeBE32(memPtr, (U32)val);
    else
        MEM_writeBE64(memPtr, (U64)val);
}


#if defined (__cplusplus)
}
#endif

#endif /* MEM_H_MODULE */
255
src/borg/algorithms/zstd/lib/common/pool.c
Normal file
@@ -0,0 +1,255 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/* ======   Dependencies   ======= */
#include <stddef.h>    /* size_t */
#include <stdlib.h>    /* malloc, calloc, free */
#include "pool.h"

/* ======   Compiler specifics   ====== */
#if defined(_MSC_VER)
#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
#endif


#ifdef ZSTD_MULTITHREAD

#include "threading.h"   /* pthread adaptation */

/* A job is a function and an opaque argument */
typedef struct POOL_job_s {
    POOL_function function;
    void *opaque;
} POOL_job;

struct POOL_ctx_s {
    ZSTD_customMem customMem;
    /* Keep track of the threads */
    ZSTD_pthread_t *threads;
    size_t numThreads;

    /* The queue is a circular buffer */
    POOL_job *queue;
    size_t queueHead;
    size_t queueTail;
    size_t queueSize;

    /* The number of threads working on jobs */
    size_t numThreadsBusy;
    /* Indicates if the queue is empty */
    int queueEmpty;

    /* The mutex protects the queue */
    ZSTD_pthread_mutex_t queueMutex;
    /* Condition variable for pushers to wait on when the queue is full */
    ZSTD_pthread_cond_t queuePushCond;
    /* Condition variable for poppers to wait on when the queue is empty */
    ZSTD_pthread_cond_t queuePopCond;
    /* Indicates if the queue is shutting down */
    int shutdown;
};

/* POOL_thread() :
   Worker thread for the thread pool.
   Waits for jobs and executes them.
   @return : NULL on failure, else non-NULL.
*/
static void* POOL_thread(void* opaque) {
    POOL_ctx* const ctx = (POOL_ctx*)opaque;
    if (!ctx) { return NULL; }
    for (;;) {
        /* Lock the mutex and wait for a non-empty queue or until shutdown */
        ZSTD_pthread_mutex_lock(&ctx->queueMutex);

        while (ctx->queueEmpty && !ctx->shutdown) {
            ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
        }
        /* empty => shutting down: so stop */
        if (ctx->queueEmpty) {
            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
            return opaque;
        }
        /* Pop a job off the queue */
        {   POOL_job const job = ctx->queue[ctx->queueHead];
            ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
            ctx->numThreadsBusy++;
            ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
            /* Unlock the mutex, signal a pusher, and run the job */
            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
            ZSTD_pthread_cond_signal(&ctx->queuePushCond);

            job.function(job.opaque);

            /* If the intended queue size was 0, signal after finishing job */
            if (ctx->queueSize == 1) {
                ZSTD_pthread_mutex_lock(&ctx->queueMutex);
                ctx->numThreadsBusy--;
                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
                ZSTD_pthread_cond_signal(&ctx->queuePushCond);
        }   }
    }  /* for (;;) */
    /* Unreachable */
}

POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}

POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
    POOL_ctx* ctx;
    /* Check the parameters */
    if (!numThreads) { return NULL; }
    /* Allocate the context and zero initialize */
    ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem);
    if (!ctx) { return NULL; }
    /* Initialize the job queue.
     * It needs one extra space since one space is wasted to differentiate empty
     * and full queues.
     */
    ctx->queueSize = queueSize + 1;
    ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem);   /* allocate via customMem, matching the ZSTD_free() in POOL_free() */
    ctx->queueHead = 0;
    ctx->queueTail = 0;
    ctx->numThreadsBusy = 0;
    ctx->queueEmpty = 1;
    (void)ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
    (void)ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
    (void)ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
    ctx->shutdown = 0;
    /* Allocate space for the thread handles */
    ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
    ctx->numThreads = 0;
    ctx->customMem = customMem;
    /* Check for errors */
    if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
    /* Initialize the threads */
    {   size_t i;
        for (i = 0; i < numThreads; ++i) {
            if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
                ctx->numThreads = i;
                POOL_free(ctx);
                return NULL;
        }   }
        ctx->numThreads = numThreads;
    }
    return ctx;
}

/*! POOL_join() :
    Shut down the queue, wake any sleeping threads, and join all of the threads.
*/
static void POOL_join(POOL_ctx* ctx) {
    /* Shut down the queue */
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    ctx->shutdown = 1;
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    /* Wake up sleeping threads */
    ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
    /* Join all of the threads */
    {   size_t i;
        for (i = 0; i < ctx->numThreads; ++i) {
            ZSTD_pthread_join(ctx->threads[i], NULL);
    }   }
}

void POOL_free(POOL_ctx *ctx) {
    if (!ctx) { return; }
    POOL_join(ctx);
    ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
    ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
    ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
    ZSTD_free(ctx->queue, ctx->customMem);
    ZSTD_free(ctx->threads, ctx->customMem);
    ZSTD_free(ctx, ctx->customMem);
}

size_t POOL_sizeof(POOL_ctx *ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    return sizeof(*ctx)
        + ctx->queueSize * sizeof(POOL_job)
        + ctx->numThreads * sizeof(ZSTD_pthread_t);
}

/**
 * Returns 1 if the queue is full and 0 otherwise.
 *
 * If the queueSize is 1 (the pool was created with an intended queueSize of 0),
 * then the queue is empty if there is a thread free and no job is waiting.
 */
static int isQueueFull(POOL_ctx const* ctx) {
    if (ctx->queueSize > 1) {
        return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
    } else {
        return ctx->numThreadsBusy == ctx->numThreads ||
               !ctx->queueEmpty;
    }
}

void POOL_add(void* ctxVoid, POOL_function function, void *opaque) {
    POOL_ctx* const ctx = (POOL_ctx*)ctxVoid;
    if (!ctx) { return; }

    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    {   POOL_job const job = {function, opaque};

        /* Wait until there is space in the queue for the new job */
        while (isQueueFull(ctx) && !ctx->shutdown) {
            ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
        }
        /* The queue is still going => there is space */
        if (!ctx->shutdown) {
            ctx->queueEmpty = 0;
            ctx->queue[ctx->queueTail] = job;
            ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
        }
    }
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    ZSTD_pthread_cond_signal(&ctx->queuePopCond);
}

#else  /* ZSTD_MULTITHREAD not defined */
/* No multi-threading support */

/* We don't need any data, but if it is empty malloc() might return NULL. */
struct POOL_ctx_s {
    int dummy;
};
static POOL_ctx g_ctx;

POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}

POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
    (void)numThreads;
    (void)queueSize;
    (void)customMem;
    return &g_ctx;
}

void POOL_free(POOL_ctx* ctx) {
    assert(!ctx || ctx == &g_ctx);
    (void)ctx;
}

void POOL_add(void* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
}

size_t POOL_sizeof(POOL_ctx* ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    assert(ctx == &g_ctx);
    return sizeof(*ctx);
}

#endif  /* ZSTD_MULTITHREAD */
65
src/borg/algorithms/zstd/lib/common/pool.h
Normal file
@@ -0,0 +1,65 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef POOL_H
#define POOL_H

#if defined (__cplusplus)
extern "C" {
#endif


#include <stddef.h>   /* size_t */
#include "zstd_internal.h"   /* ZSTD_customMem */

typedef struct POOL_ctx_s POOL_ctx;

/*! POOL_create() :
 *  Create a thread pool with at most `numThreads` threads.
 * `numThreads` must be at least 1.
 *  The maximum number of queued jobs before blocking is `queueSize`.
 * @return : POOL_ctx pointer on success, else NULL.
*/
POOL_ctx *POOL_create(size_t numThreads, size_t queueSize);

POOL_ctx *POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem);

/*! POOL_free() :
    Free a thread pool returned by POOL_create().
*/
void POOL_free(POOL_ctx *ctx);

/*! POOL_sizeof() :
    Return the memory usage of a pool returned by POOL_create().
*/
size_t POOL_sizeof(POOL_ctx *ctx);

/*! POOL_function :
    The function type that can be added to a thread pool.
*/
typedef void (*POOL_function)(void *);
/*! POOL_add_function :
    The function type for a generic thread pool add function.
*/
typedef void (*POOL_add_function)(void *, POOL_function, void *);

/*! POOL_add() :
    Add the job `function(opaque)` to the thread pool.
    Possibly blocks until there is room in the queue.
    Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed.
*/
void POOL_add(void *ctx, POOL_function function, void *opaque);


#if defined (__cplusplus)
}
#endif

#endif
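
/* Illustrative usage sketch (not part of the bundled file): create a pool,
 * queue a few jobs, then tear it down. POOL_free() joins all workers, so
 * queued jobs have completed by the time it returns. Without
 * ZSTD_MULTITHREAD, POOL_add() simply runs the job synchronously.
 * example_job and its arguments are hypothetical names.
 */
#if 0   /* example only */
#include <stdio.h>
static void example_job(void *opaque)
{
    printf("job %d\n", *(int*)opaque);
}
static void example_pool(void)
{
    int args[3] = { 0, 1, 2 };
    POOL_ctx *pool = POOL_create(2 /*threads*/, 4 /*max queued jobs*/);
    if (pool != NULL) {
        int i;
        for (i = 0; i < 3; i++)
            POOL_add(pool, example_job, &args[i]);  /* may block if queue is full */
        POOL_free(pool);  /* joins all threads */
    }
}
#endif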
75
src/borg/algorithms/zstd/lib/common/threading.c
Normal file
@@ -0,0 +1,75 @@
/**
 * Copyright (c) 2016 Tino Reichardt
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 *
 * You can contact the author at:
 * - zstdmt source repository: https://github.com/mcmilk/zstdmt
 */

/**
 * This file holds wrappers for systems which do not support pthreads
 */

/* create a fake symbol to avoid an empty translation unit warning */
int g_ZSTD_threading_useles_symbol;

#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)

/**
 * Windows minimalist Pthread Wrapper, based on :
 * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
 */


/* ===  Dependencies  === */
#include <process.h>
#include <errno.h>
#include "threading.h"


/* ===  Implementation  === */

static unsigned __stdcall worker(void *arg)
{
    ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg;
    thread->arg = thread->start_routine(thread->arg);
    return 0;
}

int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
            void* (*start_routine) (void*), void* arg)
{
    (void)unused;
    thread->arg = arg;
    thread->start_routine = start_routine;
    thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL);

    if (!thread->handle)
        return errno;
    else
        return 0;
}

int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)
{
    DWORD result;

    if (!thread.handle) return 0;

    result = WaitForSingleObject(thread.handle, INFINITE);
    switch (result) {
    case WAIT_OBJECT_0:
        if (value_ptr) *value_ptr = thread.arg;
        return 0;
    case WAIT_ABANDONED:
        return EINVAL;
    default:
        return GetLastError();
    }
}

#endif   /* ZSTD_MULTITHREAD */
123
src/borg/algorithms/zstd/lib/common/threading.h
Normal file
@@ -0,0 +1,123 @@
/**
 * Copyright (c) 2016 Tino Reichardt
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 *
 * You can contact the author at:
 * - zstdmt source repository: https://github.com/mcmilk/zstdmt
 */

#ifndef THREADING_H_938743
#define THREADING_H_938743

#if defined (__cplusplus)
extern "C" {
#endif

#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)

/**
 * Windows minimalist Pthread Wrapper, based on :
 * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
 */
#ifdef WINVER
#  undef WINVER
#endif
#define WINVER       0x0600

#ifdef _WIN32_WINNT
#  undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0600

#ifndef WIN32_LEAN_AND_MEAN
#  define WIN32_LEAN_AND_MEAN
#endif

#undef ERROR   /* reported already defined on VS 2015 (Rich Geldreich) */
#include <windows.h>
#undef ERROR
#define ERROR(name) ZSTD_ERROR(name)


/* mutex */
#define ZSTD_pthread_mutex_t           CRITICAL_SECTION
#define ZSTD_pthread_mutex_init(a, b)  (InitializeCriticalSection((a)), 0)
#define ZSTD_pthread_mutex_destroy(a)  DeleteCriticalSection((a))
#define ZSTD_pthread_mutex_lock(a)     EnterCriticalSection((a))
#define ZSTD_pthread_mutex_unlock(a)   LeaveCriticalSection((a))

/* condition variable */
#define ZSTD_pthread_cond_t             CONDITION_VARIABLE
#define ZSTD_pthread_cond_init(a, b)    (InitializeConditionVariable((a)), 0)
#define ZSTD_pthread_cond_destroy(a)    /* No delete */
#define ZSTD_pthread_cond_wait(a, b)    SleepConditionVariableCS((a), (b), INFINITE)
#define ZSTD_pthread_cond_signal(a)     WakeConditionVariable((a))
#define ZSTD_pthread_cond_broadcast(a)  WakeAllConditionVariable((a))

/* ZSTD_pthread_create() and ZSTD_pthread_join() */
typedef struct {
    HANDLE handle;
    void* (*start_routine)(void*);
    void* arg;
} ZSTD_pthread_t;

int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
                   void* (*start_routine) (void*), void* arg);

int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);

/**
 * add more wrappers here as required
 */


#elif defined(ZSTD_MULTITHREAD)   /* POSIX assumed; a better detection method is needed */
/* ===   POSIX Systems   === */
#  include <pthread.h>

#define ZSTD_pthread_mutex_t            pthread_mutex_t
#define ZSTD_pthread_mutex_init(a, b)   pthread_mutex_init((a), (b))
#define ZSTD_pthread_mutex_destroy(a)   pthread_mutex_destroy((a))
#define ZSTD_pthread_mutex_lock(a)      pthread_mutex_lock((a))
#define ZSTD_pthread_mutex_unlock(a)    pthread_mutex_unlock((a))

#define ZSTD_pthread_cond_t             pthread_cond_t
#define ZSTD_pthread_cond_init(a, b)    pthread_cond_init((a), (b))
#define ZSTD_pthread_cond_destroy(a)    pthread_cond_destroy((a))
#define ZSTD_pthread_cond_wait(a, b)    pthread_cond_wait((a), (b))
#define ZSTD_pthread_cond_signal(a)     pthread_cond_signal((a))
#define ZSTD_pthread_cond_broadcast(a)  pthread_cond_broadcast((a))

#define ZSTD_pthread_t                  pthread_t
#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
#define ZSTD_pthread_join(a, b)         pthread_join((a),(b))

#else  /* ZSTD_MULTITHREAD not defined */
/* No multithreading support */

typedef int ZSTD_pthread_mutex_t;
#define ZSTD_pthread_mutex_init(a, b)   ((void)a, 0)
#define ZSTD_pthread_mutex_destroy(a)
#define ZSTD_pthread_mutex_lock(a)
#define ZSTD_pthread_mutex_unlock(a)

typedef int ZSTD_pthread_cond_t;
#define ZSTD_pthread_cond_init(a, b)    ((void)a, 0)
#define ZSTD_pthread_cond_destroy(a)
#define ZSTD_pthread_cond_wait(a, b)
#define ZSTD_pthread_cond_signal(a)
#define ZSTD_pthread_cond_broadcast(a)

/* do not use ZSTD_pthread_t */

#endif /* ZSTD_MULTITHREAD */

#if defined (__cplusplus)
}
#endif

#endif /* THREADING_H_938743 */
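
/* Illustrative sketch (not part of the bundled file): this shim lets the pool
 * code be written once against the pthread API on both Windows and POSIX.
 * Assumes a ZSTD_MULTITHREAD build; the names below are hypothetical.
 */
#if 0   /* example only */
static void* example_routine(void* arg) { return arg; }
static int example_thread(void)
{
    ZSTD_pthread_t t;
    void* ret = NULL;
    if (ZSTD_pthread_create(&t, NULL, example_routine, (void*)0x1)) return -1;
    ZSTD_pthread_join(t, &ret);          /* ret == (void*)0x1 */
    return ret == (void*)0x1 ? 0 : -1;
}
#endif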
875
src/borg/algorithms/zstd/lib/common/xxhash.c
Normal file
@@ -0,0 +1,875 @@
/*
*  xxHash - Fast Hash algorithm
*  Copyright (C) 2012-2016, Yann Collet
*
*  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
*
*  Redistribution and use in source and binary forms, with or without
*  modification, are permitted provided that the following conditions are
*  met:
*
*  * Redistributions of source code must retain the above copyright
*  notice, this list of conditions and the following disclaimer.
*  * Redistributions in binary form must reproduce the above
*  copyright notice, this list of conditions and the following disclaimer
*  in the documentation and/or other materials provided with the
*  distribution.
*
*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
*  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
*  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
*  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
*  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
*  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
*  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
*  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*  You can contact the author at :
*  - xxHash homepage: http://www.xxhash.com
*  - xxHash source repository : https://github.com/Cyan4973/xxHash
*/


/* *************************************
*  Tuning parameters
***************************************/
/*!XXH_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't depend on the compiler but violates the C standard.
 *            It can generate buggy code on targets which do not support unaligned memory accesses.
 *            But in some circumstances, it's the only known way to get the most performance (e.g. GCC + ARMv6)
 * See http://stackoverflow.com/a/32095106/646947 for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define XXH_FORCE_MEMORY_ACCESS 2
#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif

/*!XXH_ACCEPT_NULL_INPUT_POINTER :
 * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
 * When this option is enabled, xxHash output for null input pointers will be the same as for a zero-length input.
 * By default, this option is disabled. To enable it, uncomment the define below :
 */
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */

/*!XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPUs.
 * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
 * Should endian-independence be of no importance for your application, you may set the #define below to 1,
 * to improve speed for big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
#  define XXH_FORCE_NATIVE_FORMAT 0
#endif

/*!XXH_FORCE_ALIGN_CHECK :
 * This is a minor performance trick, only useful with lots of very small keys.
 * It means : check for aligned/unaligned input.
 * The check costs one initial branch per hash; set to 0 when the input data
 * is guaranteed to be aligned.
 */
#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif


/* *************************************
*  Includes & Memory related functions
***************************************/
/* Modify the local functions below should you wish to use some other memory routines */
/* for malloc(), free() */
#include <stdlib.h>
static void* XXH_malloc(size_t s) { return malloc(s); }
static void  XXH_free  (void* p)  { free(p); }
/* for memcpy() */
#include <string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }

#ifndef XXH_STATIC_LINKING_ONLY
#  define XXH_STATIC_LINKING_ONLY
#endif
#include "xxhash.h"


/* *************************************
*  Compiler Specific Options
***************************************/
#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  define INLINE_KEYWORD inline
#else
#  define INLINE_KEYWORD
#endif

#if defined(__GNUC__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif

#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR


#ifdef _MSC_VER
# pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
#endif


/* *************************************
*  Basic Types
***************************************/
#ifndef MEM_MODULE
# define MEM_MODULE
# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint8_t  BYTE;
    typedef uint16_t U16;
    typedef uint32_t U32;
    typedef  int32_t S32;
    typedef uint64_t U64;
#  else
    typedef unsigned char      BYTE;
    typedef unsigned short     U16;
    typedef unsigned int       U32;
    typedef   signed int       S32;
    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
#  endif
#endif


#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* __pack instructions are safer, but compiler-specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;

static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }

#else

/* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */

static U32 XXH_read32(const void* memPtr)
{
    U32 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

static U64 XXH_read64(const void* memPtr)
{
    U64 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */


/* ****************************************
*  Compiler-specific Functions and Macros
******************************************/
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
#if defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#endif

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#  define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#  define XXH_swap64 __builtin_bswap64
#else
static U32 XXH_swap32 (U32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
static U64 XXH_swap64 (U64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x <<  8) & 0x000000ff00000000ULL) |
            ((x >>  8) & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif


/* *************************************
*  Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;

/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
    static const int g_one = 1;
#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))
#endif


/* ***************************
*  Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
    else
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}

FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}

static U32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}

FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
    else
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}

FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}

static U64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}


/* *************************************
*  Macros
***************************************/
#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }    /* use only *after* variable declarations */


/* *************************************
*  Constants
***************************************/
static const U32 PRIME32_1 = 2654435761U;
static const U32 PRIME32_2 = 2246822519U;
static const U32 PRIME32_3 = 3266489917U;
static const U32 PRIME32_4 =  668265263U;
static const U32 PRIME32_5 =  374761393U;

static const U64 PRIME64_1 = 11400714785074694791ULL;
static const U64 PRIME64_2 = 14029467366897019727ULL;
static const U64 PRIME64_3 =  1609587929392839161ULL;
static const U64 PRIME64_4 =  9650029242287828579ULL;
static const U64 PRIME64_5 =  2870177450012600261ULL;

XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


/* **************************
*  Utils
****************************/
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}

XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}


/* ***************************
*  Simple Hash Functions
*****************************/

static U32 XXH32_round(U32 seed, U32 input)
{
    seed += input * PRIME32_2;
    seed  = XXH_rotl32(seed, 13);
    seed *= PRIME32_1;
    return seed;
}

FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
        } while (p<=limit);

        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32  = seed + PRIME32_5;
    }

    h32 += (U32) len;

    while (p+4<=bEnd) {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_CREATESTATE_STATIC(state);
    XXH32_reset(state, seed);
    XXH32_update(state, input, len);
    return XXH32_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
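
/* Illustrative sketch (not part of the bundled file): one-shot hashing of a
 * buffer with XXH32(). The input string and seed are arbitrary example
 * values; the same input and seed yield the same hash on every platform.
 */
#if 0   /* example only */
static unsigned example_xxh32(void)
{
    const char data[] = "hello";
    return XXH32(data, sizeof(data)-1, 0 /*seed*/);
}
#endif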

static U64 XXH64_round(U64 acc, U64 input)
{
    acc += input * PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= PRIME64_1;
    return acc;
}

static U64 XXH64_mergeRound(U64 acc, U64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * PRIME64_1 + PRIME64_4;
    return acc;
}

FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
        } while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64  = seed + PRIME64_5;
    }

    h64 += (U64) len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_CREATESTATE_STATIC(state);
    XXH64_reset(state, seed);
    XXH64_update(state, input, len);
    return XXH64_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}


/* **************************************************
*  Advanced Hash Functions
****************************************************/

XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}


/*** Hash feed ***/

XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME32_1 + PRIME32_2;
    state.v2 = seed + PRIME32_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME32_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}


XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME64_1 + PRIME64_2;
    state.v2 = seed + PRIME64_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME64_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}


FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len_32 += (unsigned)len;
    state->large_len |= (len>=16) | (state->total_len_32>=16);

    if (state->memsize + len < 16)  {   /* fill in tmp buffer */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
        state->memsize += (unsigned)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* some data left from previous update */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
        {   const U32* p32 = state->mem32;
            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
        }
        p += 16-state->memsize;
        state->memsize = 0;
    }

    if (p <= bEnd-16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = state->v1;
        U32 v2 = state->v2;
        U32 v3 = state->v3;
        U32 v4 = state->v4;

        do {
            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}


FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem32;
    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
    U32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    } else {
        h32 = state->v3 /* == seed */ + PRIME32_5;
    }

    h32 += state->total_len_32;

    while (p+4<=bEnd) {
        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH32_digest_endian(state_in, XXH_bigEndian);
}


/* **** XXH64 **** */

FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32) {  /* fill in tmp buffer */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* tmp buffer is full */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
        p += 32-state->memsize;
        state->memsize = 0;
    }

    if (p+32 <= bEnd) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        do {
            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}


FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem64;
    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
    U64 h64;

    if (state->total_len >= 32) {
        U64 const v1 = state->v1;
        U64 const v2 = state->v2;
        U64 const v3 = state->v3;
        U64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        h64  = state->v3 + PRIME64_5;
    }

    h64 += (U64) state->total_len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH64_digest_endian(state_in, XXH_bigEndian);
}
|
||||
|
||||
|
||||
/* **************************
|
||||
* Canonical representation
|
||||
****************************/
|
||||
|
||||
/*! Default XXH result types are basic unsigned 32 and 64 bits.
|
||||
* The canonical representation follows human-readable write convention, aka big-endian (large digits first).
|
||||
* These functions allow transformation of hash result into and from its canonical format.
|
||||
* This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
|
||||
*/
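/* Illustrative round-trip sketch (not part of the upstream sources;
 * `data`/`size` stand for any valid readable buffer) :
 *
 *     XXH64_canonical_t buf;
 *     XXH64_hash_t const h = XXH64(data, size, 0);
 *     XXH64_canonicalFromHash(&buf, h);        // buf.digest[] is big-endian
 *     // buf may now be written to disk or sent over a network
 *     assert(XXH64_hashFromCanonical(&buf) == h);   // identical on any platform
 */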
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}

XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}
305
src/borg/algorithms/zstd/lib/common/xxhash.h
Normal file

@@ -0,0 +1,305 @@
/*
   xxHash - Extremely Fast Hash algorithm
   Header File
   Copyright (C) 2012-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - xxHash source repository : https://github.com/Cyan4973/xxHash
*/

/* Notice extracted from xxHash homepage :

xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.

Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)

Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10       Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10       Ronald L. Rivest
SHA1-32         0.28 GB/s    10

Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.

A 64-bits version, named XXH64, is available since r35.
It offers much better speed, but for 64-bits applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64       13.8 GB/s            1.9 GB/s
XXH32        6.8 GB/s            6.0 GB/s
*/

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1


/* ****************************
*  Definitions
******************************/
#include <stddef.h>   /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;


/* ****************************
*  API modifier
******************************/
/** XXH_PRIVATE_API
*   This is useful if you want to include xxhash functions in `static` mode
*   in order to inline them, and remove their symbol from the public list.
*   Methodology :
*     #define XXH_PRIVATE_API
*     #include "xxhash.h"
*   `xxhash.c` is automatically included.
*   It's not useful to compile and link it as a separate module anymore.
*/
#ifdef XXH_PRIVATE_API
#  ifndef XXH_STATIC_LINKING_ONLY
#    define XXH_STATIC_LINKING_ONLY
#  endif
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
#    define XXH_PUBLIC_API static   /* this version may generate warnings for unused static functions; disable the relevant warning */
#  endif
#else
#  define XXH_PUBLIC_API   /* do nothing */
#endif   /* XXH_PRIVATE_API */

/*!XXH_NAMESPACE, aka Namespace Emulation :

If you want to include _and expose_ xxHash functions from within your own library,
but also want to avoid symbol collisions with another library which also includes xxHash,
you can use XXH_NAMESPACE to automatically prefix every public symbol from the xxhash library
with the value of XXH_NAMESPACE (so avoid leaving it NULL, and avoid numeric values).

Note that no change is required within the calling program as long as it includes `xxhash.h` :
regular symbol names will be automatically translated by this header.
*/
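/* Example (illustrative) : building with `-DXXH_NAMESPACE=MYLIB_` makes this
 * header emit symbols MYLIB_XXH32, MYLIB_XXH64, etc., while calling code keeps
 * writing plain XXH32() / XXH64(); the macros below perform the renaming. */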
#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
#endif


/* *************************************
*  Version
***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    6
#define XXH_VERSION_RELEASE  2
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
XXH_PUBLIC_API unsigned XXH_versionNumber (void);


/* ****************************
*  Simple Hash Functions
******************************/
typedef unsigned int       XXH32_hash_t;
typedef unsigned long long XXH64_hash_t;

XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);

/*!
XXH32() :
    Calculate the 32-bit hash of the sequence of "length" bytes stored at memory address "input".
    The memory between input & input+length must be valid (allocated and read-accessible).
    "seed" can be used to alter the result predictably.
    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
XXH64() :
    Calculate the 64-bit hash of the sequence of "length" bytes stored at memory address "input".
    "seed" can be used to alter the result predictably.
    This function runs 2x faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
*/
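/* Minimal single-call usage sketch (illustrative; assumes xxhash.c is compiled
 * and linked) :
 *
 *     #include "xxhash.h"
 *     const char msg[] = "hello";
 *     unsigned int       h32 = XXH32(msg, sizeof(msg)-1, 0);   // seed = 0
 *     unsigned long long h64 = XXH64(msg, sizeof(msg)-1, 0);
 */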


/* ****************************
*  Streaming Hash Functions
******************************/
typedef struct XXH32_state_s XXH32_state_t;   /* incomplete type */
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */

/*! State allocation, compatible with dynamic libraries */

XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);

XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);


/* hash streaming */

XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, unsigned int seed);
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);

XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, unsigned long long seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);

/*
These functions generate the xxHash of an input provided in multiple segments.
Note that, for small input, they are slower than single-call functions, due to state management.
For small input, prefer `XXH32()` and `XXH64()` .

An XXH state must first be allocated, using XXH*_createState() .

Start a new hash by initializing the state with a seed, using XXH*_reset().

Then, feed the hash state by calling XXH*_update() as many times as necessary.
Obviously, input must be allocated and read-accessible.
Each function returns an error code, with 0 meaning OK, and any other value meaning there is an error.

Finally, a hash value can be produced at any time, by using XXH*_digest().
This function returns the nn-bit hash as an int or long long.

It's still possible to continue inserting input into the hash state after a digest,
and generate new hashes later on, by calling XXH*_digest() again.

When done, free XXH state space if it was allocated dynamically.
*/
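/* Streaming usage sketch (illustrative; error handling abbreviated, and
 * `chunk1`/`chunk2` are hypothetical buffers) :
 *
 *     XXH64_state_t* const st = XXH64_createState();
 *     XXH64_reset(st, 0);                     // start a new hash, seed = 0
 *     XXH64_update(st, chunk1, chunk1Size);   // feed input in any number
 *     XXH64_update(st, chunk2, chunk2Size);   // of pieces, of any sizes
 *     {   unsigned long long const h = XXH64_digest(st);   // hash of all input so far
 *         (void)h;
 *     }
 *     XXH64_freeState(st);
 */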


/* **************************
*  Utils
****************************/
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* ! C99 */
#  define restrict   /* disable restrict */
#endif

XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state);


/* **************************
*  Canonical representation
****************************/
/* Default result type for XXH functions are primitive unsigned 32 and 64 bits.
*  The canonical representation uses human-readable write convention, aka big-endian (large digits first).
*  These functions allow transformation of hash result into and from its canonical format.
*  This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
*/
typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
typedef struct { unsigned char digest[8]; } XXH64_canonical_t;

XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);

XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);


#endif /* XXHASH_H_5627135585666179 */



/* ================================================================================================
   This section contains definitions which are not guaranteed to remain stable.
   They may change in future versions, becoming incompatible with a different version of the library.
   They shall only be used with static linking.
   Never use these definitions in association with dynamic linking !
=================================================================================================== */
#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345)
#define XXH_STATIC_H_3543687687345

/* These definitions are only meant to allow allocation of XXH state
   statically, on stack, or in a struct for example.
   Do not use members directly. */

struct XXH32_state_s {
    unsigned total_len_32;
    unsigned large_len;
    unsigned v1;
    unsigned v2;
    unsigned v3;
    unsigned v4;
    unsigned mem32[4];   /* buffer defined as U32 for alignment */
    unsigned memsize;
    unsigned reserved;   /* never read nor write, will be removed in a future version */
};   /* typedef'd to XXH32_state_t */

struct XXH64_state_s {
    unsigned long long total_len;
    unsigned long long v1;
    unsigned long long v2;
    unsigned long long v3;
    unsigned long long v4;
    unsigned long long mem64[4];   /* buffer defined as U64 for alignment */
    unsigned memsize;
    unsigned reserved[2];          /* never read nor write, will be removed in a future version */
};   /* typedef'd to XXH64_state_t */
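/* Static allocation sketch enabled by the definitions above (illustrative) :
 *
 *     #define XXH_STATIC_LINKING_ONLY
 *     #include "xxhash.h"
 *     XXH64_state_t st;                  // on stack : no createState()/freeState()
 *     XXH64_reset(&st, 0);
 *     XXH64_update(&st, buf, bufSize);   // buf/bufSize : any readable buffer
 *     // ... then XXH64_digest(&st) as usual; never access st's members directly
 */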


#  ifdef XXH_PRIVATE_API
#    include "xxhash.c"   /* include xxhash functions as `static`, for inlining */
#  endif

#endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */


#if defined (__cplusplus)
}
#endif
80
src/borg/algorithms/zstd/lib/common/zstd_common.c
Normal file

@@ -0,0 +1,80 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */



/*-*************************************
*  Dependencies
***************************************/
#include <stdlib.h>      /* malloc, calloc, free */
#include <string.h>      /* memset */
#include "error_private.h"
#include "zstd_internal.h"


/*-****************************************
*  Version
******************************************/
unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }

const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }


/*-****************************************
*  ZSTD Error Management
******************************************/
/*! ZSTD_isError() :
 *  tells if a return value is an error code */
unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }

/*! ZSTD_getErrorName() :
 *  provides error code string from function result (useful for debugging) */
const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }

/*! ZSTD_getError() :
 *  convert a `size_t` function result into a proper ZSTD_errorCode enum */
ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }

/*! ZSTD_getErrorString() :
 *  provides error code string from enum */
const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }


/*=**************************************************************
*  Custom allocator
****************************************************************/
void* ZSTD_malloc(size_t size, ZSTD_customMem customMem)
{
    if (customMem.customAlloc)
        return customMem.customAlloc(customMem.opaque, size);
    return malloc(size);
}

void* ZSTD_calloc(size_t size, ZSTD_customMem customMem)
{
    if (customMem.customAlloc) {
        /* calloc implemented as malloc+memset;
         * not as efficient as calloc, but next best guess for custom malloc */
        void* const ptr = customMem.customAlloc(customMem.opaque, size);
        memset(ptr, 0, size);
        return ptr;
    }
    return calloc(1, size);
}

void ZSTD_free(void* ptr, ZSTD_customMem customMem)
{
    if (ptr!=NULL) {
        if (customMem.customFree)
            customMem.customFree(customMem.opaque, ptr);
        else
            free(ptr);
    }
}
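/* Usage sketch (illustrative) : routing zstd allocations through a custom
 * allocator. ZSTD_customMem (declared in zstd.h, static-linking section)
 * carries the two callbacks plus an opaque cookie passed back on every call :
 *
 *     static void* myAlloc(void* opaque, size_t size) { (void)opaque; return malloc(size); }
 *     static void  myFree (void* opaque, void* ptr)   { (void)opaque; free(ptr); }
 *
 *     ZSTD_customMem const cmem = { myAlloc, myFree, NULL };
 *     void* const p = ZSTD_malloc(100, cmem);   // calls myAlloc(NULL, 100)
 *     ZSTD_free(p, cmem);                       // calls myFree(NULL, p)
 */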
83
src/borg/algorithms/zstd/lib/common/zstd_errors.h
Normal file

@@ -0,0 +1,83 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_ERRORS_H_398273423
#define ZSTD_ERRORS_H_398273423

#if defined (__cplusplus)
extern "C" {
#endif

/*===== dependency =====*/
#include <stddef.h>   /* size_t */


/* =====   ZSTDERRORLIB_API : control library symbols visibility   ===== */
#ifndef ZSTDERRORLIB_VISIBILITY
#  if defined(__GNUC__) && (__GNUC__ >= 4)
#    define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
#  else
#    define ZSTDERRORLIB_VISIBILITY
#  endif
#endif
#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
#  define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY
#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY   /* It isn't required, but allows generation of better code : it saves a function pointer load from the IAT and an indirect jump. */
#else
#  define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
#endif

/*-****************************************
*  error codes list
*  note : this API is still considered unstable
*         and shall not be used with a dynamic library.
*         only static linking is allowed
******************************************/
typedef enum {
  ZSTD_error_no_error                      = 0,
  ZSTD_error_GENERIC                       = 1,
  ZSTD_error_prefix_unknown                = 10,
  ZSTD_error_version_unsupported           = 12,
  ZSTD_error_frameParameter_unsupported    = 14,
  ZSTD_error_frameParameter_windowTooLarge = 16,
  ZSTD_error_corruption_detected           = 20,
  ZSTD_error_checksum_wrong                = 22,
  ZSTD_error_dictionary_corrupted          = 30,
  ZSTD_error_dictionary_wrong              = 32,
  ZSTD_error_dictionaryCreation_failed     = 34,
  ZSTD_error_parameter_unsupported         = 40,
  ZSTD_error_parameter_outOfBound          = 42,
  ZSTD_error_tableLog_tooLarge             = 44,
  ZSTD_error_maxSymbolValue_tooLarge       = 46,
  ZSTD_error_maxSymbolValue_tooSmall       = 48,
  ZSTD_error_stage_wrong                   = 60,
  ZSTD_error_init_missing                  = 62,
  ZSTD_error_memory_allocation             = 64,
  ZSTD_error_dstSize_tooSmall              = 70,
  ZSTD_error_srcSize_wrong                 = 72,
  /* following error codes are not stable and may be removed or changed in a future version */
  ZSTD_error_frameIndex_tooLarge           = 100,
  ZSTD_error_seekableIO                    = 102,
  ZSTD_error_maxCode                       = 120   /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
} ZSTD_ErrorCode;

/*! ZSTD_getErrorCode() :
    convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
    which can be used to compare with the enum list published above */
ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);   /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
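/* Typical check sequence (illustrative sketch; dst/src buffers assumed) :
 *
 *     size_t const r = ZSTD_compress(dst, dstCapacity, src, srcSize, 1);
 *     if (ZSTD_isError(r)) {
 *         if (ZSTD_getErrorCode(r) == ZSTD_error_dstSize_tooSmall) { ... }   // enum compare
 *         printf("error : %s \n", ZSTD_getErrorName(r));                     // human-readable
 *     }
 */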


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_ERRORS_H_398273423 */
409
src/borg/algorithms/zstd/lib/common/zstd_internal.h
Normal file

@@ -0,0 +1,409 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CCOMMON_H_MODULE
#define ZSTD_CCOMMON_H_MODULE


/*-*************************************
*  Dependencies
***************************************/
#include "compiler.h"
#include "mem.h"
#include "error_private.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#ifndef XXH_STATIC_LINKING_ONLY
#  define XXH_STATIC_LINKING_ONLY   /* XXH64_state_t */
#endif
#include "xxhash.h"   /* XXH_reset, update, digest */


#if defined (__cplusplus)
extern "C" {
#endif


/*-*************************************
*  Debug
***************************************/
#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
#  include <assert.h>
#else
#  ifndef assert
#    define assert(condition) ((void)0)
#  endif
#endif

#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; }

#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
#  include <stdio.h>
/* recommended values for ZSTD_DEBUG display levels :
 * 1 : no display, enables assert() only
 * 2 : reserved for currently active debugging path
 * 3 : events once per object lifetime (CCtx, CDict)
 * 4 : events once per frame
 * 5 : events once per block
 * 6 : events once per sequence (*very* verbose) */
#  define DEBUGLOG(l, ...) {                          \
                if (l<=ZSTD_DEBUG) {                  \
                    fprintf(stderr, __FILE__ ": ");   \
                    fprintf(stderr, __VA_ARGS__);     \
                    fprintf(stderr, " \n");           \
            }   }
#else
#  define DEBUGLOG(l, ...)   {}   /* disabled */
#endif
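/* Example (illustrative) : building with `-DZSTD_DEBUG=5` enables assert() and
 * prints events down to block granularity. A call such as
 *     DEBUGLOG(4, "compressing frame %u", frameNb);   // frameNb : hypothetical variable
 * then writes `<file>.c: compressing frame 3` to stderr; with ZSTD_DEBUG
 * undefined or below the level, the statement compiles away to nothing. */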


/*-*************************************
*  shared macros
***************************************/
#undef MIN
#undef MAX
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; }  /* check and Forward error code */
#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); }  /* check and send Error code */
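/* Example (illustrative) : CHECK_F forwards any error code to the caller,
 * keeping the happy path linear. `stepOne`/`stepTwo` are hypothetical helpers
 * returning a size_t error code :
 *
 *     static size_t doBoth(ZSTD_CCtx* cctx)
 *     {
 *         CHECK_F( stepOne(cctx) );   // returns immediately if stepOne() failed
 *         CHECK_F( stepTwo(cctx) );
 *         return 0;
 *     }
 */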


/*-*************************************
*  Common constants
***************************************/
#define ZSTD_OPT_NUM    (1<<12)

#define ZSTD_REP_NUM      3                 /* number of repcodes */
#define ZSTD_REP_CHECK    (ZSTD_REP_NUM)    /* number of repcodes to check by the optimal parser */
#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)
#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM)
static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)
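/* These macros turn a number into a byte count by postfix multiplication :
 * for example `512 KB` expands to `512 *(1 <<10)` == 524288, and `2 GB`
 * expands to `2 *(1U<<30)`. */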

#define BIT7 128
#define BIT6  64
#define BIT5  32
#define BIT4  16
#define BIT1   2
#define BIT0   1

#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
#define ZSTD_WINDOWLOG_DEFAULTMAX 27   /* Default maximum allowed window log */
static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };

#define ZSTD_FRAMEIDSIZE 4
static const size_t ZSTD_frameIdSize = ZSTD_FRAMEIDSIZE;   /* magic number size */

#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;

#define MIN_SEQUENCES_SIZE 1   /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */

#define HufLog 12
typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;

#define LONGNBSEQ 0x7F00

#define MINMATCH 3

#define Litbits  8
#define MaxLit ((1<<Litbits) - 1)
#define MaxML  52
#define MaxLL  35
#define DefaultMaxOff 28
#define MaxOff 31
#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
#define MLFSELog    9
#define LLFSELog    9
#define OffFSELog   8

static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,
                                     13,14,15,16 };
static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
                                             2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
                                            -1,-1,-1,-1 };
#define LL_DEFAULTNORMLOG 6   /* for static allocation */
static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;

static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11,
                                     12,13,14,15,16 };
static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
                                            -1,-1,-1,-1,-1 };
#define ML_DEFAULTNORMLOG 6   /* for static allocation */
static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;

static const S16 OF_defaultNorm[DefaultMaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
                                                     1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
#define OF_DEFAULTNORMLOG 5   /* for static allocation */
static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;


/*-*******************************************
*  Shared functions to include for inlining
*********************************************/
static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }

/*! ZSTD_wildcopy() :
 *  custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
#define WILDCOPY_OVERLENGTH 8
MEM_STATIC void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + length;
    do
        COPY8(op, ip)
    while (op < oend);
}
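/* Caller-side implication (illustrative sketch) : since the copy proceeds in
 * 8-byte steps, a destination must reserve slack beyond the nominal length :
 *
 *     BYTE* const dst = (BYTE*)malloc(len + WILDCOPY_OVERLENGTH);   // room for over-copy
 *     if (dst != NULL) ZSTD_wildcopy(dst, src, len);   // may write up to dst+len+7
 */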

MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd)   /* should be faster for decoding, but strangely, not verified on all platforms */
{
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = (BYTE*)dstEnd;
    do
        COPY8(op, ip)
    while (op < oend);
}


/*-*******************************************
*  Private interfaces
*********************************************/
typedef struct ZSTD_stats_s ZSTD_stats_t;

typedef struct seqDef_s {
    U32 offset;
    U16 litLength;
    U16 matchLength;
} seqDef;


typedef struct {
    seqDef* sequencesStart;
    seqDef* sequences;
    BYTE* litStart;
    BYTE* lit;
    BYTE* llCode;
    BYTE* mlCode;
    BYTE* ofCode;
    U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
    U32   longLengthPos;
    U32   rep[ZSTD_REP_NUM];
    U32   repToConfirm[ZSTD_REP_NUM];
} seqStore_t;

typedef struct {
    U32 off;
    U32 len;
} ZSTD_match_t;

typedef struct {
    U32 price;
    U32 off;
    U32 mlen;
    U32 litlen;
    U32 rep[ZSTD_REP_NUM];
} ZSTD_optimal_t;

typedef struct {
    U32* litFreq;
    U32* litLengthFreq;
    U32* matchLengthFreq;
    U32* offCodeFreq;
    ZSTD_match_t* matchTable;
    ZSTD_optimal_t* priceTable;

    U32  matchLengthSum;
    U32  matchSum;
    U32  litLengthSum;
    U32  litSum;
    U32  offCodeSum;
    U32  log2matchLengthSum;
    U32  log2matchSum;
    U32  log2litLengthSum;
    U32  log2litSum;
    U32  log2offCodeSum;
    U32  factor;
    U32  staticPrices;
    U32  cachedPrice;
    U32  cachedLitLength;
    const BYTE* cachedLiterals;
} optState_t;

typedef struct {
    U32 offset;
    U32 checksum;
} ldmEntry_t;

typedef struct {
    ldmEntry_t* hashTable;
    BYTE* bucketOffsets;   /* Next position in bucket to insert entry */
    U64 hashPower;         /* Used to compute the rolling hash.
                            * Depends on ldmParams.minMatchLength */
} ldmState_t;

typedef struct {
    U32 enableLdm;        /* 1 if enable long distance matching */
    U32 hashLog;          /* Log size of hashTable */
    U32 bucketSizeLog;    /* Log bucket size for collision resolution, at most 8 */
    U32 minMatchLength;   /* Minimum match length */
    U32 hashEveryLog;     /* Log number of entries to skip */
} ldmParams_t;

typedef struct {
    U32 hufCTable[HUF_CTABLE_SIZE_U32(255)];
    FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
    FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
    U32 workspace[HUF_WORKSPACE_SIZE_U32];
    HUF_repeat hufCTable_repeatMode;
    FSE_repeat offcode_repeatMode;
    FSE_repeat matchlength_repeatMode;
    FSE_repeat litlength_repeatMode;
} ZSTD_entropyCTables_t;

struct ZSTD_CCtx_params_s {
    ZSTD_format_e format;
    ZSTD_compressionParameters cParams;
    ZSTD_frameParameters fParams;

    int compressionLevel;
    U32 forceWindow;   /* force back-references to respect limit of
                        * 1<<wLog, even for dictionary */

    /* Multithreading: used to pass parameters to mtctx */
    U32 nbThreads;
    unsigned jobSize;
    unsigned overlapSizeLog;

    /* Long distance matching parameters */
    ldmParams_t ldmParams;

    /* For use with createCCtxParams() and freeCCtxParams() only */
    ZSTD_customMem customMem;

};   /* typedef'd to ZSTD_CCtx_params within "zstd.h" */

const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);

/* custom memory allocation functions */
void* ZSTD_malloc(size_t size, ZSTD_customMem customMem);
void* ZSTD_calloc(size_t size, ZSTD_customMem customMem);
void  ZSTD_free(void* ptr, ZSTD_customMem customMem);


/*====== common function ======*/

MEM_STATIC U32 ZSTD_highbit32(U32 val)
{
    assert(val != 0);
    {
#   if defined(_MSC_VER)   /* Visual */
        unsigned long r=0;
        _BitScanReverse(&r, val);
        return (unsigned)r;
#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
        return 31 - __builtin_clz(val);
#   else   /* Software version */
        static const int DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
        U32 v = val;
        int r;
        v |= v >> 1;
        v |= v >> 2;
        v |= v >> 4;
        v |= v >> 8;
        v |= v >> 16;
        r = DeBruijnClz[(U32)(v * 0x07C4ACDDU) >> 27];
        return r;
#   endif
    }
}
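/* Worked examples : ZSTD_highbit32() returns the position of the highest set
 * bit, i.e. floor(log2(val)) : highbit32(1) == 0, highbit32(32) == 5,
 * highbit32(1000) == 9. `val` must be non-zero (enforced by the assert above). */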


/* hidden functions */

/* ZSTD_invalidateRepCodes() :
 * ensures next compression will not use repcodes from previous block.
 * Note : only works with regular variant;
 *        do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);


/*! ZSTD_initCStream_internal() :
 *  Private use only. Init streaming operation.
 *  expects params to be valid.
 *  must receive dict, or cdict, or none, but not both.
 *  @return : 0, or an error code */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                     const void* dict, size_t dictSize,
                     const ZSTD_CDict* cdict,
                     ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);

/*! ZSTD_compressStream_generic() :
 *  Private use only. To be called from zstdmt_compress.c in single-thread mode. */
size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
                                   ZSTD_outBuffer* output,
                                   ZSTD_inBuffer* input,
                                   ZSTD_EndDirective const flushMode);

/*! ZSTD_getCParamsFromCDict() :
 *  as the name implies */
ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);

/* ZSTD_compressBegin_advanced_internal() :
 * Private use only. To be called from zstdmt_compress.c. */
size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictMode_e dictMode,
                                    ZSTD_CCtx_params params,
                                    unsigned long long pledgedSrcSize);

/* ZSTD_compress_advanced_internal() :
 * Private use only. To be called from zstdmt_compress.c. */
size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       const void* dict, size_t dictSize,
                                       ZSTD_CCtx_params params);

typedef struct {
    blockType_e blockType;
    U32 lastBlock;
    U32 origSize;
} blockProperties_t;

/*! ZSTD_getcBlockSize() :
 *  Provides the size of compressed block from block header `src` */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
                          blockProperties_t* bpPtr);

#if defined (__cplusplus)
}
#endif

#endif   /* ZSTD_CCOMMON_H_MODULE */
841
src/borg/algorithms/zstd/lib/compress/fse_compress.c
Normal file

@@ -0,0 +1,841 @@
/* ******************************************************************
   FSE : Finite State Entropy encoder
   Copyright (C) 2013-2015, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */

/* **************************************************************
*  Includes
****************************************************************/
#include <stdlib.h>     /* malloc, free, qsort */
#include <string.h>     /* memcpy, memset */
#include <stdio.h>      /* printf (debug) */
#include "bitstream.h"
#include "compiler.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "error_private.h"


/* **************************************************************
*  Error Management
****************************************************************/
#define FSE_isError ERR_isError
#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */


/* **************************************************************
*  Templates
****************************************************************/
/*
  designed to be included
  for type-specific functions (template emulation in C)
  Objective is to write these functions only once, for improved maintenance
*/

/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
#  error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
#  error "FSE_FUNCTION_TYPE must be defined"
#endif

/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)


/* Function templates */

/* FSE_buildCTable_wksp() :
 * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
 * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
 * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
 */
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
    U32 const tableSize = 1 << tableLog;
    U32 const tableMask = tableSize - 1;
    void* const ptr = ct;
    U16* const tableU16 = ( (U16*) ptr) + 2;
    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
    U32 const step = FSE_TABLESTEP(tableSize);
    U32 cumul[FSE_MAX_SYMBOL_VALUE+2];

    FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace;
    U32 highThreshold = tableSize-1;

    /* CTable header */
    if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);
    tableU16[-2] = (U16) tableLog;
    tableU16[-1] = (U16) maxSymbolValue;

    /* For explanations on how to distribute symbol values over the table :
     * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */

    /* symbol start positions */
    {   U32 u;
        cumul[0] = 0;
        for (u=1; u<=maxSymbolValue+1; u++) {
            if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
                cumul[u] = cumul[u-1] + 1;
                tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
            } else {
                cumul[u] = cumul[u-1] + normalizedCounter[u-1];
        }   }
        cumul[maxSymbolValue+1] = tableSize+1;
    }

    /* Spread symbols */
    {   U32 position = 0;
        U32 symbol;
        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
            int nbOccurences;
            for (nbOccurences=0; nbOccurences<normalizedCounter[symbol]; nbOccurences++) {
                tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
                position = (position + step) & tableMask;
                while (position > highThreshold) position = (position + step) & tableMask;   /* Low proba area */
        }   }

        if (position!=0) return ERROR(GENERIC);   /* Must have gone through all positions */
    }

    /* Build table */
    {   U32 u; for (u=0; u<tableSize; u++) {
        FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
        tableU16[cumul[s]++] = (U16) (tableSize+u);   /* TableU16 : sorted by symbol order; gives next state value */
    }   }

    /* Build Symbol Transformation Table */
    {   unsigned total = 0;
        unsigned s;
        for (s=0; s<=maxSymbolValue; s++) {
            switch (normalizedCounter[s])
            {
            case  0: break;

            case -1:
            case  1:
                symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
                symbolTT[s].deltaFindState = total - 1;
                total ++;
                break;
            default :
                {   U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
                    U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
                    symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
                    symbolTT[s].deltaFindState = total - normalizedCounter[s];
                    total +=  normalizedCounter[s];
    }   }   }   }

    return 0;
}


size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
    FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE];   /* memset() is not necessary, even if static analyzer complain about it */
    return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol));
}



#ifndef FSE_COMMONDEFS_ONLY

/*-**************************************************************
*  FSE NCount encoding-decoding
****************************************************************/
size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
{
    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
    return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;   /* maxSymbolValue==0 ? use default */
}

static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize,
                                       const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
                                       unsigned writeIsSafe)
{
    BYTE* const ostart = (BYTE*) header;
    BYTE* out = ostart;
    BYTE* const oend = ostart + headerBufferSize;
    int nbBits;
    const int tableSize = 1 << tableLog;
    int remaining;
    int threshold;
    U32 bitStream;
    int bitCount;
    unsigned charnum = 0;
    int previous0 = 0;

    bitStream = 0;
    bitCount  = 0;
    /* Table Size */
    bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
    bitCount  += 4;

    /* Init */
    remaining = tableSize+1;   /* +1 for extra accuracy */
    threshold = tableSize;
    nbBits = tableLog+1;

    while (remaining>1) {  /* stops at 1 */
        if (previous0) {
            unsigned start = charnum;
            while (!normalizedCounter[charnum]) charnum++;
            while (charnum >= start+24) {
                start+=24;
                bitStream += 0xFFFFU << bitCount;
                if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
                out[0] = (BYTE) bitStream;
                out[1] = (BYTE)(bitStream>>8);
                out+=2;
                bitStream>>=16;
            }
            while (charnum >= start+3) {
                start+=3;
                bitStream += 3 << bitCount;
                bitCount += 2;
            }
            bitStream += (charnum-start) << bitCount;
            bitCount += 2;
            if (bitCount>16) {
                if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
                out[0] = (BYTE)bitStream;
                out[1] = (BYTE)(bitStream>>8);
                out += 2;
                bitStream >>= 16;
                bitCount -= 16;
        }   }
        {   int count = normalizedCounter[charnum++];
            int const max = (2*threshold-1)-remaining;
            remaining -= count < 0 ? -count : count;
            count++;   /* +1 for extra accuracy */
            if (count>=threshold) count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
            bitStream += count << bitCount;
            bitCount  += nbBits;
            bitCount  -= (count<max);
            previous0  = (count==1);
            if (remaining<1) return ERROR(GENERIC);
            while (remaining<threshold) nbBits--, threshold>>=1;
        }
        if (bitCount>16) {
            if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
            out[0] = (BYTE)bitStream;
            out[1] = (BYTE)(bitStream>>8);
            out += 2;
            bitStream >>= 16;
            bitCount -= 16;
    }   }

    /* flush remaining bitStream */
    if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
    out[0] = (BYTE)bitStream;
    out[1] = (BYTE)(bitStream>>8);
    out+= (bitCount+7) /8;

    if (charnum > maxSymbolValue + 1) return ERROR(GENERIC);

    return (out-ostart);
}


size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */
    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */

    if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
        return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);

    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
}



/*-**************************************************************
*  Counting histogram
****************************************************************/
/*! FSE_count_simple
    This function counts byte values within `src`, and stores the histogram into table `count`.
    It doesn't use any additional memory.
    But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
    For this reason, prefer using a table `count` with 256 elements.
    @return : count of the most numerous element
*/
size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                        const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* const end = ip + srcSize;
    unsigned maxSymbolValue = *maxSymbolValuePtr;
    unsigned max=0;

    memset(count, 0, (maxSymbolValue+1)*sizeof(*count));
    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }

    while (ip<end) count[*ip++]++;

    while (!count[maxSymbolValue]) maxSymbolValue--;
    *maxSymbolValuePtr = maxSymbolValue;

    {   U32 s; for (s=0; s<=maxSymbolValue; s++) if (count[s] > max) max = count[s]; }

    return (size_t)max;
}
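/* Usage sketch (illustrative) : histogramming a byte buffer. With
 * maxSymbolValue initialized to 255, every possible byte value fits safely :
 *
 *     unsigned count[256];
 *     unsigned maxSym = 255;
 *     size_t const maxCount = FSE_count_simple(count, &maxSym, src, srcSize);
 *     // count[s] : occurrences of byte value s ; maxSym : largest value present
 *     // maxCount : occurrence count of the most frequent symbol
 */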
|
||||
|
||||
|
||||
/* FSE_count_parallel_wksp() :
|
||||
* Same as FSE_count_parallel(), but using an externally provided scratch buffer.
|
||||
* `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`` */
|
||||
static size_t FSE_count_parallel_wksp(
|
||||
unsigned* count, unsigned* maxSymbolValuePtr,
|
||||
const void* source, size_t sourceSize,
|
||||
unsigned checkMax, unsigned* const workSpace)
|
||||
{
|
||||
const BYTE* ip = (const BYTE*)source;
|
||||
const BYTE* const iend = ip+sourceSize;
|
||||
unsigned maxSymbolValue = *maxSymbolValuePtr;
|
||||
unsigned max=0;
|
||||
U32* const Counting1 = workSpace;
|
||||
U32* const Counting2 = Counting1 + 256;
|
||||
U32* const Counting3 = Counting2 + 256;
|
||||
U32* const Counting4 = Counting3 + 256;
|
||||
|
||||
memset(Counting1, 0, 4*256*sizeof(unsigned));
|
||||
|
||||
/* safety checks */
|
||||
if (!sourceSize) {
|
||||
memset(count, 0, maxSymbolValue + 1);
|
||||
*maxSymbolValuePtr = 0;
|
||||
return 0;
|
||||
}
|
||||
if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */
|
||||
|
||||
/* by stripes of 16 bytes */
|
||||
{ U32 cached = MEM_read32(ip); ip += 4;
|
||||
while (ip < iend-15) {
|
||||
U32 c = cached; cached = MEM_read32(ip); ip += 4;
|
||||
Counting1[(BYTE) c ]++;
|
||||
Counting2[(BYTE)(c>>8) ]++;
|
||||
Counting3[(BYTE)(c>>16)]++;
|
||||
Counting4[ c>>24 ]++;
|
||||
c = cached; cached = MEM_read32(ip); ip += 4;
|
||||
Counting1[(BYTE) c ]++;
|
||||
Counting2[(BYTE)(c>>8) ]++;
|
||||
Counting3[(BYTE)(c>>16)]++;
|
||||
Counting4[ c>>24 ]++;
|
||||
c = cached; cached = MEM_read32(ip); ip += 4;
|
||||
Counting1[(BYTE) c ]++;
|
||||
Counting2[(BYTE)(c>>8) ]++;
|
||||
Counting3[(BYTE)(c>>16)]++;
|
||||
Counting4[ c>>24 ]++;
|
||||
c = cached; cached = MEM_read32(ip); ip += 4;
|
||||
Counting1[(BYTE) c ]++;
|
||||
Counting2[(BYTE)(c>>8) ]++;
|
||||
Counting3[(BYTE)(c>>16)]++;
|
||||
Counting4[ c>>24 ]++;
|
||||
}
|
||||
ip-=4;
|
||||
}
|
||||
|
||||
/* finish last symbols */
|
||||
while (ip<iend) Counting1[*ip++]++;
|
||||
|
||||
if (checkMax) { /* verify stats will fit into destination table */
|
||||
U32 s; for (s=255; s>maxSymbolValue; s--) {
|
||||
Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
|
||||
if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
|
||||
} }
|
||||
|
||||
{ U32 s; for (s=0; s<=maxSymbolValue; s++) {
|
||||
count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
|
||||
if (count[s] > max) max = count[s];
|
||||
} }
|
||||
|
||||
while (!count[maxSymbolValue]) maxSymbolValue--;
|
||||
*maxSymbolValuePtr = maxSymbolValue;
|
||||
return (size_t)max;
|
||||
}
|
||||
|
||||
/* FSE_countFast_wksp() :
|
||||
* Same as FSE_countFast(), but using an externally provided scratch buffer.
|
||||
* `workSpace` size must be table of >= `1024` unsigned */
|
||||
size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
|
||||
const void* source, size_t sourceSize, unsigned* workSpace)
|
||||
{
|
||||
if (sourceSize < 1500) return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
|
||||
return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
|
||||
}
|
||||
|
||||
/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
|
||||
size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
|
||||
const void* source, size_t sourceSize)
|
||||
{
|
||||
unsigned tmpCounters[1024];
|
||||
return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters);
|
||||
}
|
||||
|
||||
/* FSE_count_wksp() :
|
||||
* Same as FSE_count(), but using an externally provided scratch buffer.
|
||||
* `workSpace` size must be table of >= `1024` unsigned */
|
||||
size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
|
||||
const void* source, size_t sourceSize, unsigned* workSpace)
|
||||
{
|
||||
if (*maxSymbolValuePtr < 255)
|
||||
return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
|
||||
*maxSymbolValuePtr = 255;
|
||||
return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
|
||||
}
|
||||
|
||||
size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr,
|
||||
const void* src, size_t srcSize)
|
||||
{
|
||||
unsigned tmpCounters[1024];
|
||||
return FSE_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters);
|
||||
}
|


/*-**************************************************************
*  FSE Compression Code
****************************************************************/
/*! FSE_sizeof_CTable() :
    FSE_CTable is a variable size structure which contains :
    `U16 tableLog;`
    `U16 maxSymbolValue;`
    `U16 nextStateNumber[1 << tableLog];`                         // This size is variable
    `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];`  // This size is variable
    Allocation is manual (C standard does not support variable-size structures).
*/
size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog)
{
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
    return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
}

FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
{
    size_t size;
    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
    size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
    return (FSE_CTable*)malloc(size);
}

void FSE_freeCTable (FSE_CTable* ct) { free(ct); }

/* provides the minimum logSize to safely represent a distribution */
static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
{
    U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
    U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
    U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
    assert(srcSize > 1); /* Not supported, RLE should be used instead */
    return minBits;
}

unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
{
    U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
    U32 tableLog = maxTableLog;
    U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
    assert(srcSize > 1); /* Not supported, RLE should be used instead */
    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* Accuracy can be reduced */
    if (minBits > tableLog) tableLog = minBits;   /* Need a minimum to safely represent all symbol values */
    if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
    if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
    return tableLog;
}

unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
{
    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
}
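
/* Worked example (illustrative, not part of upstream zstd) :
 * srcSize = 2048, maxSymbolValue = 255, maxTableLog = 0 (i.e. use the default, 11).
 * maxBitsSrc = BIT_highbit32(2047) - 2 = 8, so tableLog is first reduced to 8 ;
 * minBits = min(BIT_highbit32(2047)+1, BIT_highbit32(255)+2) = min(11, 9) = 9,
 * so tableLog is raised back to 9, which already lies within
 * [FSE_MIN_TABLELOG, FSE_MAX_TABLELOG] and is returned as-is. */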


/* Secondary normalization method.
   To be used when primary method fails. */

static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue)
{
    short const NOT_YET_ASSIGNED = -2;
    U32 s;
    U32 distributed = 0;
    U32 ToDistribute;

    /* Init */
    U32 const lowThreshold = (U32)(total >> tableLog);
    U32 lowOne = (U32)((total * 3) >> (tableLog + 1));

    for (s=0; s<=maxSymbolValue; s++) {
        if (count[s] == 0) {
            norm[s]=0;
            continue;
        }
        if (count[s] <= lowThreshold) {
            norm[s] = -1;
            distributed++;
            total -= count[s];
            continue;
        }
        if (count[s] <= lowOne) {
            norm[s] = 1;
            distributed++;
            total -= count[s];
            continue;
        }

        norm[s]=NOT_YET_ASSIGNED;
    }
    ToDistribute = (1 << tableLog) - distributed;

    if ((total / ToDistribute) > lowOne) {
        /* risk of rounding to zero */
        lowOne = (U32)((total * 3) / (ToDistribute * 2));
        for (s=0; s<=maxSymbolValue; s++) {
            if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
                norm[s] = 1;
                distributed++;
                total -= count[s];
                continue;
        }   }
        ToDistribute = (1 << tableLog) - distributed;
    }

    if (distributed == maxSymbolValue+1) {
        /* all values are pretty poor;
           probably incompressible data (should have already been detected);
           find max, then give all remaining points to max */
        U32 maxV = 0, maxC = 0;
        for (s=0; s<=maxSymbolValue; s++)
            if (count[s] > maxC) maxV=s, maxC=count[s];
        norm[maxV] += (short)ToDistribute;
        return 0;
    }

    if (total == 0) {
        /* all of the symbols were low enough for the lowOne or lowThreshold */
        for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
            if (norm[s] > 0) ToDistribute--, norm[s]++;
        return 0;
    }

    {   U64 const vStepLog = 62 - tableLog;
        U64 const mid = (1ULL << (vStepLog-1)) - 1;
        U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid) / total;   /* scale on remaining */
        U64 tmpTotal = mid;
        for (s=0; s<=maxSymbolValue; s++) {
            if (norm[s]==NOT_YET_ASSIGNED) {
                U64 const end = tmpTotal + (count[s] * rStep);
                U32 const sStart = (U32)(tmpTotal >> vStepLog);
                U32 const sEnd = (U32)(end >> vStepLog);
                U32 const weight = sEnd - sStart;
                if (weight < 1)
                    return ERROR(GENERIC);
                norm[s] = (short)weight;
                tmpTotal = end;
    }   }   }

    return 0;
}


size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
                           const unsigned* count, size_t total,
                           unsigned maxSymbolValue)
{
    /* Sanity checks */
    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported size */
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported size */
    if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC);   /* Too small tableLog, compression potentially impossible */

    {   static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
        U64 const scale = 62 - tableLog;
        U64 const step = ((U64)1<<62) / total;   /* <== here, one division ! */
        U64 const vStep = 1ULL<<(scale-20);
        int stillToDistribute = 1<<tableLog;
        unsigned s;
        unsigned largest=0;
        short largestP=0;
        U32 lowThreshold = (U32)(total >> tableLog);

        for (s=0; s<=maxSymbolValue; s++) {
            if (count[s] == total) return 0;   /* rle special case */
            if (count[s] == 0) { normalizedCounter[s]=0; continue; }
            if (count[s] <= lowThreshold) {
                normalizedCounter[s] = -1;
                stillToDistribute--;
            } else {
                short proba = (short)((count[s]*step) >> scale);
                if (proba<8) {
                    U64 restToBeat = vStep * rtbTable[proba];
                    proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
                }
                if (proba > largestP) largestP=proba, largest=s;
                normalizedCounter[s] = proba;
                stillToDistribute -= proba;
        }   }
        if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
            /* corner case, need another normalization method */
            size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
            if (FSE_isError(errorCode)) return errorCode;
        }
        else normalizedCounter[largest] += (short)stillToDistribute;
    }

#if 0
    {   /* Print Table (debug) */
        U32 s;
        U32 nTotal = 0;
        for (s=0; s<=maxSymbolValue; s++)
            printf("%3i: %4i \n", s, normalizedCounter[s]);
        for (s=0; s<=maxSymbolValue; s++)
            nTotal += abs(normalizedCounter[s]);
        if (nTotal != (1U<<tableLog))
            printf("Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
        getchar();
    }
#endif

    return tableLog;
}
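
/* Worked example (illustrative, not part of upstream zstd) :
 * with total = 100 and tableLog = 6, step = 2^62/100, so a symbol counted
 * 50 times gets proba = (50*step) >> (62-6) = 50 * 64 / 100 = 32 ;
 * the 64 normalized points are spread this way over all symbols, and any
 * rounding deficit or excess is settled on the most probable symbol
 * (or handed to FSE_normalizeM2() in the corner case above). */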


/* fake FSE_CTable, for raw (uncompressed) input */
size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
{
    const unsigned tableSize = 1 << nbBits;
    const unsigned tableMask = tableSize - 1;
    const unsigned maxSymbolValue = tableMask;
    void* const ptr = ct;
    U16* const tableU16 = ( (U16*) ptr) + 2;
    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1);   /* assumption : tableLog >= 1 */
    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
    unsigned s;

    /* Sanity checks */
    if (nbBits < 1) return ERROR(GENERIC);   /* min size */

    /* header */
    tableU16[-2] = (U16) nbBits;
    tableU16[-1] = (U16) maxSymbolValue;

    /* Build table */
    for (s=0; s<tableSize; s++)
        tableU16[s] = (U16)(tableSize + s);

    /* Build Symbol Transformation Table */
    {   const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
        for (s=0; s<=maxSymbolValue; s++) {
            symbolTT[s].deltaNbBits = deltaNbBits;
            symbolTT[s].deltaFindState = s-1;
    }   }

    return 0;
}

/* fake FSE_CTable, for rle input (always same symbol) */
size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
{
    void* ptr = ct;
    U16* tableU16 = ( (U16*) ptr) + 2;
    void* FSCTptr = (U32*)ptr + 2;
    FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;

    /* header */
    tableU16[-2] = (U16) 0;
    tableU16[-1] = (U16) symbolValue;

    /* Build table */
    tableU16[0] = 0;
    tableU16[1] = 0;   /* just in case */

    /* Build Symbol Transformation Table */
    symbolTT[symbolValue].deltaNbBits = 0;
    symbolTT[symbolValue].deltaFindState = 0;

    return 0;
}


static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
                           const void* src, size_t srcSize,
                           const FSE_CTable* ct, const unsigned fast)
{
    const BYTE* const istart = (const BYTE*) src;
    const BYTE* const iend = istart + srcSize;
    const BYTE* ip=iend;

    BIT_CStream_t bitC;
    FSE_CState_t CState1, CState2;

    /* init */
    if (srcSize <= 2) return 0;
    { size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
      if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }

#define FSE_FLUSHBITS(s)  (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))

    if (srcSize & 1) {
        FSE_initCState2(&CState1, ct, *--ip);
        FSE_initCState2(&CState2, ct, *--ip);
        FSE_encodeSymbol(&bitC, &CState1, *--ip);
        FSE_FLUSHBITS(&bitC);
    } else {
        FSE_initCState2(&CState2, ct, *--ip);
        FSE_initCState2(&CState1, ct, *--ip);
    }

    /* join to mod 4 */
    srcSize -= 2;
    if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) {   /* test bit 2 */
        FSE_encodeSymbol(&bitC, &CState2, *--ip);
        FSE_encodeSymbol(&bitC, &CState1, *--ip);
        FSE_FLUSHBITS(&bitC);
    }

    /* 2 or 4 encoding per loop */
    while ( ip>istart ) {

        FSE_encodeSymbol(&bitC, &CState2, *--ip);

        if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 )   /* this test must be static */
            FSE_FLUSHBITS(&bitC);

        FSE_encodeSymbol(&bitC, &CState1, *--ip);

        if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) {   /* this test must be static */
            FSE_encodeSymbol(&bitC, &CState2, *--ip);
            FSE_encodeSymbol(&bitC, &CState1, *--ip);
        }

        FSE_FLUSHBITS(&bitC);
    }

    FSE_flushCState(&bitC, &CState2);
    FSE_flushCState(&bitC, &CState1);
    return BIT_closeCStream(&bitC);
}

size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
                           const void* src, size_t srcSize,
                           const FSE_CTable* ct)
{
    unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));

    if (fast)
        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
    else
        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
}


size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }

#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }

/* FSE_compress_wksp() :
 * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
 * `workSpace` size must be `(1<<tableLog)`.
 */
size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + dstSize;

    U32   count[FSE_MAX_SYMBOL_VALUE+1];
    S16   norm[FSE_MAX_SYMBOL_VALUE+1];
    FSE_CTable* CTable = (FSE_CTable*)workSpace;
    size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
    void* scratchBuffer = (void*)(CTable + CTableSize);
    size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));

    /* init conditions */
    if (wkspSize < FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
    if (srcSize <= 1) return 0;   /* Not compressible */
    if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
    if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;

    /* Scan input and build symbol stats */
    {   CHECK_V_F(maxCount, FSE_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) );
        if (maxCount == srcSize) return 1;   /* only a single symbol in src : rle */
        if (maxCount == 1) return 0;         /* each symbol present maximum once => not compressible */
        if (maxCount < (srcSize >> 7)) return 0;   /* Heuristic : not compressible enough */
    }

    tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
    CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) );

    /* Write table description header */
    {   CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
        op += nc_err;
    }

    /* Compress */
    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) );
    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) );
        if (cSize == 0) return 0;   /* not enough space for compressed data */
        op += cSize;
    }

    /* check compressibility */
    if ( (size_t)(op-ostart) >= srcSize-1 ) return 0;

    return op-ostart;
}

typedef struct {
    FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
    BYTE scratchBuffer[1 << FSE_MAX_TABLELOG];
} fseWkspMax_t;

size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
{
    fseWkspMax_t scratchBuffer;
    FSE_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE));   /* compilation failures here means scratchBuffer is not large enough */
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
    return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
}

size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG);
}


#endif   /* FSE_COMMONDEFS_ONLY */
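
#if 0
/* Illustrative usage sketch (not part of upstream zstd) : one-shot FSE
 * compression into a bound-sized buffer. Return conventions of FSE_compress() :
 * 0 means not compressible (store the input raw), 1 means a single repeated
 * symbol (rle), any other non-error value is the compressed size. */
static void example_fse_compress(const void* src, size_t srcSize)
{
    size_t const bound = FSE_compressBound(srcSize);
    void* const dst = malloc(bound);
    if (dst) {
        size_t const cSize = FSE_compress(dst, bound, src, srcSize);
        if (!FSE_isError(cSize) && cSize > 1) {
            /* dst now holds cSize bytes : normalized count header + bitstream */
        }
        free(dst);
    }
}
#endif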
690
src/borg/algorithms/zstd/lib/compress/huf_compress.c
Normal file

@@ -0,0 +1,690 @@
/* ******************************************************************
   Huffman encoder, part of New Generation Entropy library
   Copyright (C) 2013-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */

/* **************************************************************
*  Compiler specifics
****************************************************************/
#ifdef _MSC_VER    /* Visual Studio */
#  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
#endif


/* **************************************************************
*  Includes
****************************************************************/
#include <string.h>     /* memcpy, memset */
#include <stdio.h>      /* printf (debug) */
#include "bitstream.h"
#define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
#include "fse.h"        /* header compression */
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "error_private.h"


/* **************************************************************
*  Error Management
****************************************************************/
#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }


/* **************************************************************
*  Utils
****************************************************************/
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
{
    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
}


/* *******************************************************
*  HUF : Huffman block compression
*********************************************************/
/* HUF_compressWeights() :
 * Same as FSE_compress(), but dedicated to huff0's weights compression.
 * The use case needs much less stack memory.
 * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
 */
#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + dstSize;

    U32 maxSymbolValue = HUF_TABLELOG_MAX;
    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;

    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
    BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];

    U32 count[HUF_TABLELOG_MAX+1];
    S16 norm[HUF_TABLELOG_MAX+1];

    /* init conditions */
    if (wtSize <= 1) return 0;   /* Not compressible */

    /* Scan input and build symbol stats */
    {   CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) );
        if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
        if (maxCount == 1) return 0;        /* each symbol present maximum once => not compressible */
    }

    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
    CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );

    /* Write table description header */
    {   CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
        op += hSize;
    }

    /* Compress */
    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) );
        if (cSize == 0) return 0;   /* not enough space for compressed data */
        op += cSize;
    }

    return op-ostart;
}


struct HUF_CElt_s {
    U16  val;
    BYTE nbBits;
};   /* typedef'd to HUF_CElt within "huf.h" */

/*! HUF_writeCTable() :
    `CTable` : Huffman tree to save, using huf representation.
    @return : size of saved CTable */
size_t HUF_writeCTable (void* dst, size_t maxDstSize,
                        const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)
{
    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */
    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
    BYTE* op = (BYTE*)dst;
    U32 n;

    /* check conditions */
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);

    /* convert to weight */
    bitsToWeight[0] = 0;
    for (n=1; n<huffLog+1; n++)
        bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
    for (n=0; n<maxSymbolValue; n++)
        huffWeight[n] = bitsToWeight[CTable[n].nbBits];

    /* attempt weights compression by FSE */
    {   CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
        if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */
            op[0] = (BYTE)hSize;
            return hSize+1;
    }   }

    /* write raw values as 4-bits (max : 15) */
    if (maxSymbolValue > (256-128)) return ERROR(GENERIC);   /* should not happen : likely means source cannot be compressed */
    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall);   /* not enough space within dst buffer */
    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
    huffWeight[maxSymbolValue] = 0;   /* to be sure it doesn't cause msan issue in final combination */
    for (n=0; n<maxSymbolValue; n+=2)
        op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]);
    return ((maxSymbolValue+1)/2) + 1;
}


size_t HUF_readCTable (HUF_CElt* CTable, U32* maxSymbolValuePtr, const void* src, size_t srcSize)
{
    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some static analyzer may complain */
    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];  /* large enough for values from 0 to 16 */
    U32 tableLog = 0;
    U32 nbSymbols = 0;

    /* get symbol weights */
    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));

    /* check result */
    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
    if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);

    /* Prepare base value per rank */
    {   U32 n, nextRankStart = 0;
        for (n=1; n<=tableLog; n++) {
            U32 current = nextRankStart;
            nextRankStart += (rankVal[n] << (n-1));
            rankVal[n] = current;
    }   }

    /* fill nbBits */
    {   U32 n; for (n=0; n<nbSymbols; n++) {
            const U32 w = huffWeight[n];
            CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
    }   }

    /* fill val */
    {   U16 nbPerRank[HUF_TABLELOG_MAX+2]  = {0};   /* support w=0=>n=tableLog+1 */
        U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
        { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
        /* determine starting value per rank */
        valPerRank[tableLog+1] = 0;   /* for w==0 */
        {   U16 min = 0;
            U32 n; for (n=tableLog; n>0; n--) {   /* start at n=tablelog <-> w=1 */
                valPerRank[n] = min;   /* get starting value within each rank */
                min += nbPerRank[n];
                min >>= 1;
        }   }
        /* assign value within rank, symbol order */
        { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
    }

    *maxSymbolValuePtr = nbSymbols - 1;
    return readSize;
}
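
#if 0
/* Illustrative sketch (not part of upstream zstd) : serializing a Huffman
 * table with HUF_writeCTable() and reloading it with HUF_readCTable().
 * `tree` is assumed to be a CTable previously built by HUF_buildCTable(). */
static size_t example_ctable_roundtrip(const HUF_CElt* tree, U32 maxSymbolValue, U32 huffLog)
{
    BYTE header[HUF_SYMBOLVALUE_MAX];             /* raw 4-bit encoding needs at most (maxSymbolValue+1)/2 + 1 bytes */
    HUF_CElt reloaded[HUF_SYMBOLVALUE_MAX + 1];
    U32 maxSV = HUF_SYMBOLVALUE_MAX;
    CHECK_V_F(hSize, HUF_writeCTable(header, sizeof(header), tree, maxSymbolValue, huffLog));
    CHECK_V_F(rSize, HUF_readCTable(reloaded, &maxSV, header, hSize));
    return rSize;   /* == hSize on success ; reloaded[] now mirrors tree[] */
}
#endif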


typedef struct nodeElt_s {
    U32 count;
    U16 parent;
    BYTE byte;
    BYTE nbBits;
} nodeElt;

static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
{
    const U32 largestBits = huffNode[lastNonNull].nbBits;
    if (largestBits <= maxNbBits) return largestBits;   /* early exit : no elt > maxNbBits */

    /* there are several too large elements (at least >= 2) */
    {   int totalCost = 0;
        const U32 baseCost = 1 << (largestBits - maxNbBits);
        U32 n = lastNonNull;

        while (huffNode[n].nbBits > maxNbBits) {
            totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
            huffNode[n].nbBits = (BYTE)maxNbBits;
            n --;
        }   /* n stops at huffNode[n].nbBits <= maxNbBits */
        while (huffNode[n].nbBits == maxNbBits) n--;   /* n ends at index of smallest symbol using < maxNbBits */

        /* renorm totalCost */
        totalCost >>= (largestBits - maxNbBits);   /* note : totalCost is necessarily a multiple of baseCost */

        /* repay normalized cost */
        {   U32 const noSymbol = 0xF0F0F0F0;
            U32 rankLast[HUF_TABLELOG_MAX+2];
            int pos;

            /* Get pos of last (smallest) symbol per rank */
            memset(rankLast, 0xF0, sizeof(rankLast));
            {   U32 currentNbBits = maxNbBits;
                for (pos=n ; pos >= 0; pos--) {
                    if (huffNode[pos].nbBits >= currentNbBits) continue;
                    currentNbBits = huffNode[pos].nbBits;   /* < maxNbBits */
                    rankLast[maxNbBits-currentNbBits] = pos;
            }   }

            while (totalCost > 0) {
                U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
                for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
                    U32 highPos = rankLast[nBitsToDecrease];
                    U32 lowPos = rankLast[nBitsToDecrease-1];
                    if (highPos == noSymbol) continue;
                    if (lowPos == noSymbol) break;
                    {   U32 const highTotal = huffNode[highPos].count;
                        U32 const lowTotal = 2 * huffNode[lowPos].count;
                        if (highTotal <= lowTotal) break;
                }   }
                /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
                /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
                while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
                    nBitsToDecrease ++;
                totalCost -= 1 << (nBitsToDecrease-1);
                if (rankLast[nBitsToDecrease-1] == noSymbol)
                    rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];   /* this rank is no longer empty */
                huffNode[rankLast[nBitsToDecrease]].nbBits ++;
                if (rankLast[nBitsToDecrease] == 0)   /* special case, reached largest symbol */
                    rankLast[nBitsToDecrease] = noSymbol;
                else {
                    rankLast[nBitsToDecrease]--;
                    if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
                        rankLast[nBitsToDecrease] = noSymbol;   /* this rank is now empty */
            }   }   /* while (totalCost > 0) */

            while (totalCost < 0) {   /* Sometimes, cost correction overshoots */
                if (rankLast[1] == noSymbol) {   /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
                    while (huffNode[n].nbBits == maxNbBits) n--;
                    huffNode[n+1].nbBits--;
                    rankLast[1] = n+1;
                    totalCost++;
                    continue;
                }
                huffNode[ rankLast[1] + 1 ].nbBits--;
                rankLast[1]++;
                totalCost ++;
    }   }   }   /* there are several too large elements (at least >= 2) */

    return maxNbBits;
}
|
||||
|
||||
typedef struct {
|
||||
U32 base;
|
||||
U32 current;
|
||||
} rankPos;
|
||||
|
||||
static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
|
||||
{
|
||||
rankPos rank[32];
|
||||
U32 n;
|
||||
|
||||
memset(rank, 0, sizeof(rank));
|
||||
for (n=0; n<=maxSymbolValue; n++) {
|
||||
U32 r = BIT_highbit32(count[n] + 1);
|
||||
rank[r].base ++;
|
||||
}
|
||||
for (n=30; n>0; n--) rank[n-1].base += rank[n].base;
|
||||
for (n=0; n<32; n++) rank[n].current = rank[n].base;
|
||||
for (n=0; n<=maxSymbolValue; n++) {
|
||||
U32 const c = count[n];
|
||||
U32 const r = BIT_highbit32(c+1) + 1;
|
||||
U32 pos = rank[r].current++;
|
||||
while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) huffNode[pos]=huffNode[pos-1], pos--;
|
||||
huffNode[pos].count = c;
|
||||
huffNode[pos].byte = (BYTE)n;
|
||||
}
|
||||
}
|


/** HUF_buildCTable_wksp() :
 *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
 *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
 */
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
typedef nodeElt huffNodeTable[2*HUF_SYMBOLVALUE_MAX+1 +1];
size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
{
    nodeElt* const huffNode0 = (nodeElt*)workSpace;
    nodeElt* const huffNode = huffNode0+1;
    U32 n, nonNullRank;
    int lowS, lowN;
    U16 nodeNb = STARTNODE;
    U32 nodeRoot;

    /* safety checks */
    if (wkspSize < sizeof(huffNodeTable)) return ERROR(GENERIC);   /* workSpace is not large enough */
    if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(GENERIC);
    memset(huffNode0, 0, sizeof(huffNodeTable));

    /* sort, decreasing order */
    HUF_sort(huffNode, count, maxSymbolValue);

    /* init for parents */
    nonNullRank = maxSymbolValue;
    while(huffNode[nonNullRank].count == 0) nonNullRank--;
    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
    huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
    huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb;
    nodeNb++; lowS-=2;
    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
    huffNode0[0].count = (U32)(1U<<31);   /* fake entry, strong barrier */

    /* create parents */
    while (nodeNb <= nodeRoot) {
        U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
        U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
        huffNode[n1].parent = huffNode[n2].parent = nodeNb;
        nodeNb++;
    }

    /* distribute weights (unlimited tree height) */
    huffNode[nodeRoot].nbBits = 0;
    for (n=nodeRoot-1; n>=STARTNODE; n--)
        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
    for (n=0; n<=nonNullRank; n++)
        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;

    /* enforce maxTableLog */
    maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);

    /* fill result into tree (val, nbBits) */
    {   U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
        U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
        if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC);   /* check fit into table */
        for (n=0; n<=nonNullRank; n++)
            nbPerRank[huffNode[n].nbBits]++;
        /* determine starting value per rank */
        {   U16 min = 0;
            for (n=maxNbBits; n>0; n--) {
                valPerRank[n] = min;   /* get starting value within each rank */
                min += nbPerRank[n];
                min >>= 1;
        }   }
        for (n=0; n<=maxSymbolValue; n++)
            tree[huffNode[n].byte].nbBits = huffNode[n].nbBits;   /* push nbBits per symbol, symbol order */
        for (n=0; n<=maxSymbolValue; n++)
            tree[n].val = valPerRank[tree[n].nbBits]++;   /* assign value within rank, symbol order */
    }

    return maxNbBits;
}

/** HUF_buildCTable() :
 *  Note : count is used before tree is written, so they can safely overlap
 */
size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits)
{
    huffNodeTable nodeTable;
    return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
}
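
#if 0
/* Illustrative sketch (not part of upstream zstd) : building a depth-limited
 * Huffman table from byte counts. The return value is the largest code length
 * actually used, which may be smaller than the requested limit. */
static size_t example_build_ctable(const U32 count[256])
{
    HUF_CElt tree[HUF_SYMBOLVALUE_MAX + 1];
    CHECK_V_F(maxNbBits, HUF_buildCTable(tree, count, 255, HUF_TABLELOG_DEFAULT));
    return maxNbBits;   /* tree[] now holds (val, nbBits) per symbol */
}
#endif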

static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
{
    size_t nbBits = 0;
    int s;
    for (s = 0; s <= (int)maxSymbolValue; ++s) {
        nbBits += CTable[s].nbBits * count[s];
    }
    return nbBits >> 3;
}

static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
    int bad = 0;
    int s;
    for (s = 0; s <= (int)maxSymbolValue; ++s) {
        bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
    }
    return !bad;
}

static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
{
    BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
}

size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }

#define HUF_FLUSHBITS(s)  BIT_flushBits(s)

#define HUF_FLUSHBITS_1(stream) \
    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)

#define HUF_FLUSHBITS_2(stream) \
    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)

size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
{
    const BYTE* ip = (const BYTE*) src;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart;
    size_t n;
    BIT_CStream_t bitC;

    /* init */
    if (dstSize < 8) return 0;   /* not enough space to compress */
    { size_t const initErr = BIT_initCStream(&bitC, op, oend-op);
      if (HUF_isError(initErr)) return 0; }

    n = srcSize & ~3;   /* join to mod 4 */
    switch (srcSize & 3)
    {
        case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
                 HUF_FLUSHBITS_2(&bitC);
                 /* fall-through */
        case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
                 HUF_FLUSHBITS_1(&bitC);
                 /* fall-through */
        case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
                 HUF_FLUSHBITS(&bitC);
                 /* fall-through */
        case 0 : /* fall-through */
        default: break;
    }

    for (; n>0; n-=4) {   /* note : n&3==0 at this stage */
        HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
        HUF_FLUSHBITS_1(&bitC);
        HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
        HUF_FLUSHBITS_2(&bitC);
        HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
        HUF_FLUSHBITS_1(&bitC);
        HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
        HUF_FLUSHBITS(&bitC);
    }

    return BIT_closeCStream(&bitC);
}


size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
{
    size_t const segmentSize = (srcSize+3)/4;   /* first 3 segments */
    const BYTE* ip = (const BYTE*) src;
    const BYTE* const iend = ip + srcSize;
    BYTE* const ostart = (BYTE*) dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart;

    if (dstSize < 6 + 1 + 1 + 1 + 8) return 0;   /* minimum space to compress successfully */
    if (srcSize < 12) return 0;   /* no saving possible : too small input */
    op += 6;   /* jumpTable */

    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
        if (cSize==0) return 0;
        MEM_writeLE16(ostart, (U16)cSize);
        op += cSize;
    }

    ip += segmentSize;
    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
        if (cSize==0) return 0;
        MEM_writeLE16(ostart+2, (U16)cSize);
        op += cSize;
    }

    ip += segmentSize;
    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
        if (cSize==0) return 0;
        MEM_writeLE16(ostart+4, (U16)cSize);
        op += cSize;
    }

    ip += segmentSize;
    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) );
        if (cSize==0) return 0;
        op += cSize;
    }

    return op-ostart;
}


static size_t HUF_compressCTable_internal(
                BYTE* const ostart, BYTE* op, BYTE* const oend,
                const void* src, size_t srcSize,
                unsigned singleStream, const HUF_CElt* CTable)
{
    size_t const cSize = singleStream ?
                         HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) :
                         HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable);
    if (HUF_isError(cSize)) { return cSize; }
    if (cSize==0) { return 0; }   /* uncompressible */
    op += cSize;
    /* check compressibility */
    if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
    return op-ostart;
}


/* `workSpace` must be a table of at least 1024 unsigned */
static size_t HUF_compress_internal (
                void* dst, size_t dstSize,
                const void* src, size_t srcSize,
                unsigned maxSymbolValue, unsigned huffLog,
                unsigned singleStream,
                void* workSpace, size_t wkspSize,
                HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart;

    U32* count;
    size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1);
    HUF_CElt* CTable;
    size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1);

    /* checks & inits */
    if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize) return ERROR(GENERIC);
    if (!srcSize) return 0;   /* Uncompressed (note : 1 means rle, so first byte must be correct) */
    if (!dstSize) return 0;   /* cannot fit within dst budget */
    if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);   /* current block size limit */
    if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
    if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;

    count = (U32*)workSpace;
    workSpace = (BYTE*)workSpace + countSize;
    wkspSize -= countSize;
    CTable = (HUF_CElt*)workSpace;
    workSpace = (BYTE*)workSpace + CTableSize;
    wkspSize -= CTableSize;

    /* Heuristic : If we don't need to check the validity of the old table use the old table for small inputs */
    if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
        return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
    }

    /* Scan input and build symbol stats */
    {   CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) );
        if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; }   /* single symbol, rle */
        if (largest <= (srcSize >> 7)+1) return 0;   /* Fast heuristic : not compressible enough */
    }

    /* Check validity of previous table */
    if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) {
        *repeat = HUF_repeat_none;
    }
    /* Heuristic : use existing table for small inputs */
    if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
        return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
    }

    /* Build Huffman Tree */
    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
    {   CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) );
        huffLog = (U32)maxBits;
        /* Zero the unused symbols so we can check it for validity */
        memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt));
    }

    /* Write table description header */
    {   CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) );
        /* Check if using the previous table will be beneficial */
        if (repeat && *repeat != HUF_repeat_none) {
            size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue);
            size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue);
            if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
                return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
            }
        }
        /* Use the new table */
        if (hSize + 12ul >= srcSize) { return 0; }
        op += hSize;
        if (repeat) { *repeat = HUF_repeat_none; }
        if (oldHufTable) { memcpy(oldHufTable, CTable, CTableSize); }   /* Save the new table */
    }
    return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable);
}


size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize)
{
    return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0);
}

size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize,
                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat)
{
    return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat, preferRepeat);
}

size_t HUF_compress1X (void* dst, size_t dstSize,
                 const void* src, size_t srcSize,
                 unsigned maxSymbolValue, unsigned huffLog)
{
    unsigned workSpace[1024];
    return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
}

size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize)
{
    return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0);
}

size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize,
                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat)
{
    return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat, preferRepeat);
}

size_t HUF_compress2 (void* dst, size_t dstSize,
                const void* src, size_t srcSize,
                unsigned maxSymbolValue, unsigned huffLog)
{
    unsigned workSpace[1024];
    return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
}

size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    return HUF_compress2(dst, maxDstSize, src, (U32)srcSize, 255, HUF_TABLELOG_DEFAULT);
}
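
#if 0
/* Illustrative usage sketch (not part of upstream zstd) : one-shot Huffman
 * compression of a block into a caller-provided buffer, sized with
 * HUF_compressBound(). Return conventions : 0 = not compressible (store raw),
 * 1 = rle (single repeated symbol), otherwise the compressed size. */
static size_t example_huf_compress(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
{
    size_t const cSize = HUF_compress(dst, dstCapacity, src, srcSize);
    if (HUF_isError(cSize)) return cSize;
    /* on success, dst holds a table description header followed by the
     * compressed payload (4 interleaved streams for HUF_compress) */
    return cSize;
}
#endif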
3023
src/borg/algorithms/zstd/lib/compress/zstd_compress.c
Normal file
File diff suppressed because it is too large

307
src/borg/algorithms/zstd/lib/compress/zstd_compress.h
Normal file

@@ -0,0 +1,307 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


#ifndef ZSTD_COMPRESS_H
#define ZSTD_COMPRESS_H

/*-*************************************
*  Dependencies
***************************************/
#include "zstd_internal.h"
#ifdef ZSTD_MULTITHREAD
#  include "zstdmt_compress.h"
#endif

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Constants
***************************************/
static const U32 g_searchStrength = 8;
#define HASH_READ_SIZE 8


/*-*************************************
*  Context memory management
***************************************/
typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;

typedef struct ZSTD_prefixDict_s {
    const void* dict;
    size_t dictSize;
    ZSTD_dictMode_e dictMode;
} ZSTD_prefixDict;

struct ZSTD_CCtx_s {
    const BYTE* nextSrc;    /* next block here to continue on current prefix */
    const BYTE* base;       /* All regular indexes relative to this position */
    const BYTE* dictBase;   /* extDict indexes relative to this position */
    U32   dictLimit;        /* below that point, need extDict */
    U32   lowLimit;         /* below that point, no more data */
    U32   nextToUpdate;     /* index from which to continue dictionary update */
    U32   nextToUpdate3;    /* index from which to continue dictionary update */
    U32   hashLog3;         /* dispatch table : larger == faster, more memory */
    U32   loadedDictEnd;    /* index of end of dictionary */
    ZSTD_compressionStage_e stage;
    U32   dictID;
    ZSTD_CCtx_params requestedParams;
    ZSTD_CCtx_params appliedParams;
    void* workSpace;
    size_t workSpaceSize;
    size_t blockSize;
    U64 pledgedSrcSizePlusOne;   /* this way, 0 (default) == unknown */
    U64 consumedSrcSize;
    XXH64_state_t xxhState;
    ZSTD_customMem customMem;
    size_t staticSize;

    seqStore_t seqStore;   /* sequences storage ptrs */
    optState_t optState;
    ldmState_t ldmState;   /* long distance matching state */
    U32* hashTable;
    U32* hashTable3;
    U32* chainTable;
    ZSTD_entropyCTables_t* entropy;

    /* streaming */
    char*  inBuff;
    size_t inBuffSize;
    size_t inToCompress;
    size_t inBuffPos;
    size_t inBuffTarget;
    char*  outBuff;
    size_t outBuffSize;
    size_t outBuffContentSize;
    size_t outBuffFlushedSize;
    ZSTD_cStreamStage streamStage;
    U32    frameEnded;

    /* Dictionary */
    ZSTD_CDict* cdictLocal;
    const ZSTD_CDict* cdict;
    ZSTD_prefixDict prefixDict;   /* single-usage dictionary */

    /* Multi-threading */
#ifdef ZSTD_MULTITHREAD
    ZSTDMT_CCtx* mtctx;
#endif
};


static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
                                   8,  9, 10, 11, 12, 13, 14, 15,
                                  16, 16, 17, 17, 18, 18, 19, 19,
                                  20, 20, 20, 20, 21, 21, 21, 21,
                                  22, 22, 22, 22, 22, 22, 22, 22,
                                  23, 23, 23, 23, 23, 23, 23, 23,
                                  24, 24, 24, 24, 24, 24, 24, 24,
                                  24, 24, 24, 24, 24, 24, 24, 24 };

static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
                                  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                                  32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
                                  38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
                                  40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
                                  41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
                                  42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
                                  42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };

/*! ZSTD_storeSeq() :
    Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
    `offsetCode` : distance to match, or 0 == repCode.
    `matchCode` : matchLength - MINMATCH
*/
MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode)
{
#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 6)
    static const BYTE* g_start = NULL;
    U32 const pos = (U32)((const BYTE*)literals - g_start);
    if (g_start==NULL) g_start = (const BYTE*)literals;
    if ((pos > 0) && (pos < 1000000000))
        DEBUGLOG(6, "Cpos %6u :%5u literals & match %3u bytes at distance %6u",
               pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode);
#endif
    /* copy Literals */
    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + 128 KB);
    ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
    seqStorePtr->lit += litLength;

    /* literal Length */
    if (litLength>0xFFFF) {
        seqStorePtr->longLengthID = 1;
        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    }
    seqStorePtr->sequences[0].litLength = (U16)litLength;

    /* match offset */
    seqStorePtr->sequences[0].offset = offsetCode + 1;

    /* match Length */
    if (matchCode>0xFFFF) {
        seqStorePtr->longLengthID = 2;
        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    }
    seqStorePtr->sequences[0].matchLength = (U16)matchCode;

    seqStorePtr->sequences++;
}


/*-*************************************
*  Match length counter
***************************************/
static unsigned ZSTD_NbCommonBytes (register size_t val)
{
    if (MEM_isLittleEndian()) {
        if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            _BitScanForward64( &r, (U64)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 4)
            return (__builtin_ctzll((U64)val) >> 3);
#       else
            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
                                                     0, 3, 1, 3, 1, 4, 2, 7,
                                                     0, 2, 3, 6, 1, 5, 3, 5,
                                                     1, 3, 4, 4, 2, 5, 6, 7,
                                                     7, 0, 1, 2, 3, 3, 4, 6,
                                                     2, 6, 5, 5, 3, 4, 5, 6,
                                                     7, 1, 2, 4, 6, 4, 4, 5,
                                                     7, 2, 6, 5, 7, 6, 7, 7 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#       endif
        } else { /* 32 bits */
#       if defined(_MSC_VER)
            unsigned long r=0;
            _BitScanForward( &r, (U32)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_ctz((U32)val) >> 3);
#       else
            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
                                                     3, 2, 2, 1, 3, 2, 0, 1,
                                                     3, 3, 1, 2, 2, 2, 2, 0,
                                                     3, 1, 2, 0, 1, 0, 1, 1 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#       endif
        }
    } else {   /* Big Endian CPU */
        if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            _BitScanReverse64( &r, val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 4)
            return (__builtin_clzll(val) >> 3);
#       else
            unsigned r;
            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
#       endif
        } else { /* 32 bits */
#       if defined(_MSC_VER)
            unsigned long r = 0;
            _BitScanReverse( &r, (unsigned long)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_clz((U32)val) >> 3);
#       else
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
#       endif
    }   }
}


MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
{
    const BYTE* const pStart = pIn;
    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);

    while (pIn < pInLoopLimit) {
        size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
        if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
        pIn += ZSTD_NbCommonBytes(diff);
        return (size_t)(pIn - pStart);
    }
    if (MEM_64bits()) if ((pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (size_t)(pIn - pStart);
}

/** ZSTD_count_2segments() :
*   can count match length with `ip` & `match` in 2 different segments.
*   convention : on reaching mEnd, match count continue starting from iStart
*/
MEM_STATIC size_t ZSTD_count_2segments(const BYTE* ip, const BYTE* match, const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
{
    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
    size_t const matchLength = ZSTD_count(ip, match, vEnd);
    if (match + matchLength != mEnd) return matchLength;
    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
}
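
#if 0
/* Illustrative sketch (not part of upstream zstd) : ZSTD_count() returns the
 * length of the common prefix of two byte ranges, scanning one word at a
 * time. Here it would return 5 : "hello" matches, then 'x' differs. */
static size_t example_count(void)
{
    const BYTE a[] = "hello world";
    const BYTE b[] = "hellox-----";
    return ZSTD_count(a, b, a + sizeof(a) - 1);
}
#endif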
||||
|
||||
|
||||
/*-*************************************
|
||||
* Hashes
|
||||
***************************************/
|
||||
static const U32 prime3bytes = 506832829U;
|
||||
static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; }
|
||||
MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
|
||||
|
||||
static const U32 prime4bytes = 2654435761U;
|
||||
static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
|
||||
static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
|
||||
|
||||
static const U64 prime5bytes = 889523592379ULL;
|
||||
static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; }
|
||||
static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
|
||||
|
||||
static const U64 prime6bytes = 227718039650203ULL;
|
||||
static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
|
||||
static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
|
||||
|
||||
static const U64 prime7bytes = 58295818150454627ULL;
|
||||
static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; }
|
||||
static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
|
||||
|
||||
static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
|
||||
static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
|
||||
static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
|
||||
|
||||
MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
|
||||
{
|
||||
switch(mls)
|
||||
{
|
||||
default:
|
||||
case 4: return ZSTD_hash4Ptr(p, hBits);
|
||||
case 5: return ZSTD_hash5Ptr(p, hBits);
|
||||
case 6: return ZSTD_hash6Ptr(p, hBits);
|
||||
case 7: return ZSTD_hash7Ptr(p, hBits);
|
||||
case 8: return ZSTD_hash8Ptr(p, hBits);
|
||||
}
|
||||
}
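/* Illustration (not part of zstd, added for exposition): each hash multiplies
 * the first `mls` bytes by a large odd constant and keeps the top hBits bits,
 * a classic multiplicative (Fibonacci-style) hash. For mls == 5 the value is
 * first shifted so only 40 bits (5 bytes) participate:
 *
 *     U64 const v      = MEM_readLE64(p);             // read 8 bytes
 *     size_t const idx = (size_t)(((v << (64-40)) * prime5bytes) >> (64-hBits));
 *     // idx indexes a table of size (1 << hBits); the odd multiplier mixes
 *     // low input bits into the top hBits, which the final shift extracts
 */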

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_COMPRESS_H */
308
src/borg/algorithms/zstd/lib/compress/zstd_double_fast.c
Normal file

@@ -0,0 +1,308 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_double_fast.h"


void ZSTD_fillDoubleHashTable(ZSTD_CCtx* cctx, const void* end, const U32 mls)
{
    U32* const hashLarge = cctx->hashTable;
    U32  const hBitsL = cctx->appliedParams.cParams.hashLog;
    U32* const hashSmall = cctx->chainTable;
    U32  const hBitsS = cctx->appliedParams.cParams.chainLog;
    const BYTE* const base = cctx->base;
    const BYTE* ip = base + cctx->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const size_t fastHashFillStep = 3;

    while(ip <= iend) {
        hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
        hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
        ip += fastHashFillStep;
    }
}


FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
                                             const void* src, size_t srcSize,
                                             const U32 mls)
{
    U32* const hashLong = cctx->hashTable;
    const U32 hBitsL = cctx->appliedParams.cParams.hashLog;
    U32* const hashSmall = cctx->chainTable;
    const U32 hBitsS = cctx->appliedParams.cParams.chainLog;
    seqStore_t* seqStorePtr = &(cctx->seqStore);
    const BYTE* const base = cctx->base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 lowestIndex = cctx->dictLimit;
    const BYTE* const lowest = base + lowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
    U32 offsetSaved = 0;

    /* init */
    ip += (ip==lowest);
    {   U32 const maxRep = (U32)(ip-lowest);
        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
    }

    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
        U32 const current = (U32)(ip-base);
        U32 const matchIndexL = hashLong[h2];
        U32 const matchIndexS = hashSmall[h];
        const BYTE* matchLong = base + matchIndexL;
        const BYTE* match = base + matchIndexS;
        hashLong[h2] = hashSmall[h] = current;   /* update hash tables */

        assert(offset_1 <= current);   /* supposed guaranteed by construction */
        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
            /* favor repcode */
            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
            ip++;
            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
        } else {
            U32 offset;
            if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) {
                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
                offset = (U32)(ip-matchLong);
                while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
            } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) {
                size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
                U32 const matchIndexL3 = hashLong[hl3];
                const BYTE* matchL3 = base + matchIndexL3;
                hashLong[hl3] = current + 1;
                if ( (matchIndexL3 > lowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1)) ) {
                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
                    ip++;
                    offset = (U32)(ip-matchL3);
                    while (((ip>anchor) & (matchL3>lowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
                } else {
                    mLength = ZSTD_count(ip+4, match+4, iend) + 4;
                    offset = (U32)(ip-match);
                    while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
                }
            } else {
                ip += ((ip-anchor) >> g_searchStrength) + 1;
                continue;
            }

            offset_2 = offset_1;
            offset_1 = offset;

            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
        }

        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Fill Table */
            hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] =
                hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;  /* here because current+2 could be > iend-8 */
            hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] =
                hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);

            /* check immediate repcode */
            while ( (ip <= ilimit)
                 && ( (offset_2>0)
                    & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; }  /* swap offset_2 <=> offset_1 */
                hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
                hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
                ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
                ip += rLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
    }   }   }

    /* save reps for next block */
    seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
    seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;

    /* Return the last literals size */
    return iend - anchor;
}
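/* Illustration (not part of zstd, added for exposition): the "double fast"
 * strategy keeps two hash tables over the same data. A probe at position ip
 * consults both and prefers the long match:
 *
 *     U32 const idxL = hashLong[ZSTD_hashPtr(ip, hBitsL, 8)];    // 8-byte hash
 *     U32 const idxS = hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)]; // mls-byte hash
 *     // verify idxL with MEM_read64, else fall back to idxS with MEM_read32;
 *     // the long table finds fewer but longer matches, while the short table
 *     // keeps the fast strategy's ability to catch 4-byte repetitions
 */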


size_t ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
{
    const U32 mls = ctx->appliedParams.cParams.searchLength;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7);
    }
}
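/* Illustration (not part of zstd, added for exposition): the switch above is
 * C's idiom for template instantiation. Because the generic function is
 * force-inlined with a compile-time-constant mls, each case compiles to a
 * specialized copy in which ZSTD_hashPtr's own switch folds away,
 * conceptually equivalent to a hypothetical:
 *
 *     static size_t doubleFast_mls4(ZSTD_CCtx* c, const void* s, size_t n)
 *     { return ZSTD_compressBlock_doubleFast_generic(c, s, n, 4); }
 */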


static size_t ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
                                 const void* src, size_t srcSize,
                                 const U32 mls)
{
    U32* const hashLong = ctx->hashTable;
    U32  const hBitsL = ctx->appliedParams.cParams.hashLog;
    U32* const hashSmall = ctx->chainTable;
    U32  const hBitsS = ctx->appliedParams.cParams.chainLog;
    seqStore_t* seqStorePtr = &(ctx->seqStore);
    const BYTE* const base = ctx->base;
    const BYTE* const dictBase = ctx->dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32   lowestIndex = ctx->lowLimit;
    const BYTE* const dictStart = dictBase + lowestIndex;
    const U32   dictLimit = ctx->dictLimit;
    const BYTE* const lowPrefixPtr = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];

    /* Search Loop */
    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
        const U32 matchIndex = hashSmall[hSmall];
        const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
        const BYTE* match = matchBase + matchIndex;

        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
        const U32 matchLongIndex = hashLong[hLong];
        const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
        const BYTE* matchLong = matchLongBase + matchLongIndex;

        const U32 current = (U32)(ip-base);
        const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
        const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
        const BYTE* repMatch = repBase + repIndex;
        size_t mLength;
        hashSmall[hSmall] = hashLong[hLong] = current;   /* update hash table */

        if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
            ip++;
            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
        } else {
            if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
                const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
                const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
                U32 offset;
                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8;
                offset = current - matchLongIndex;
                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);

            } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) {
                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
                U32 const matchIndex3 = hashLong[h3];
                const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base;
                const BYTE* match3 = match3Base + matchIndex3;
                U32 offset;
                hashLong[h3] = current + 1;
                if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
                    const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
                    const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8;
                    ip++;
                    offset = current+1 - matchIndex3;
                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
                } else {
                    const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
                    const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
                    offset = current - matchIndex;
                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                }
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);

            } else {
                ip += ((ip-anchor) >> g_searchStrength) + 1;
                continue;
        }   }

        /* found a match : store it */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Fill Table */
            hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;
            hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2;
            hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
            hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
                if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional overflow */
                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
    }   }   }

    /* save reps for next block */
    seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;

    /* Return the last literals size */
    return iend - anchor;
}


size_t ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx,
                         const void* src, size_t srcSize)
{
    U32 const mls = ctx->appliedParams.cParams.searchLength;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7);
    }
}
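/* Illustration (not part of zstd, added for exposition): in extDict mode one
 * index space covers two buffers. An index i maps to a byte as in this
 * hypothetical helper:
 *
 *     static const BYTE* at(U32 i, U32 dictLimit,
 *                           const BYTE* dictBase, const BYTE* base)
 *     {
 *         return (i < dictLimit) ? dictBase + i   // old segment (dictionary)
 *                                : base + i;      // current segment
 *     }
 *
 * which is exactly the `matchIndex < dictLimit ? dictBase : base` selection
 * repeated throughout the function above.
 */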
28
src/borg/algorithms/zstd/lib/compress/zstd_double_fast.h
Normal file

@@ -0,0 +1,28 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_DOUBLE_FAST_H
#define ZSTD_DOUBLE_FAST_H

#include "zstd_compress.h"

#if defined (__cplusplus)
extern "C" {
#endif

void ZSTD_fillDoubleHashTable(ZSTD_CCtx* cctx, const void* end, const U32 mls);
size_t ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_DOUBLE_FAST_H */
242
src/borg/algorithms/zstd/lib/compress/zstd_fast.c
Normal file

@@ -0,0 +1,242 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_fast.h"


void ZSTD_fillHashTable (ZSTD_CCtx* zc, const void* end, const U32 mls)
{
    U32* const hashTable = zc->hashTable;
    U32  const hBits = zc->appliedParams.cParams.hashLog;
    const BYTE* const base = zc->base;
    const BYTE* ip = base + zc->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const size_t fastHashFillStep = 3;

    while(ip <= iend) {
        hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
        ip += fastHashFillStep;
    }
}


FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
                                       const void* src, size_t srcSize,
                                       const U32 mls)
{
    U32* const hashTable = cctx->hashTable;
    U32  const hBits = cctx->appliedParams.cParams.hashLog;
    seqStore_t* seqStorePtr = &(cctx->seqStore);
    const BYTE* const base = cctx->base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32   lowestIndex = cctx->dictLimit;
    const BYTE* const lowest = base + lowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
    U32 offsetSaved = 0;

    /* init */
    ip += (ip==lowest);
    {   U32 const maxRep = (U32)(ip-lowest);
        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
    }

    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        size_t const h = ZSTD_hashPtr(ip, hBits, mls);
        U32 const current = (U32)(ip-base);
        U32 const matchIndex = hashTable[h];
        const BYTE* match = base + matchIndex;
        hashTable[h] = current;   /* update hash table */

        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
            ip++;
            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
        } else {
            U32 offset;
            if ( (matchIndex <= lowestIndex) || (MEM_read32(match) != MEM_read32(ip)) ) {
                ip += ((ip-anchor) >> g_searchStrength) + 1;
                continue;
            }
            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
            offset = (U32)(ip-match);
            while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
            offset_2 = offset_1;
            offset_1 = offset;

            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
        }

        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Fill Table */
            hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2;  /* here because current+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
            /* check immediate repcode */
            while ( (ip <= ilimit)
                 && ( (offset_2>0)
                    & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; }  /* swap offset_2 <=> offset_1 */
                hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base);
                ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
                ip += rLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
    }   }   }

    /* save reps for next block */
    seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
    seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;

    /* Return the last literals size */
    return iend - anchor;
}
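/* Illustration (not part of zstd, added for exposition): offset_1/offset_2
 * are the "repcodes", the two most recent match offsets. Storing a sequence
 * with offset 0 means "reuse offset_1", which is very cheap to encode, so
 * the parser probes the last offset before any table lookup:
 *
 *     if ((offset_1 > 0) & (MEM_read32(ip) == MEM_read32(ip - offset_1))) {
 *         // at least 4 bytes repeat at distance offset_1: emit a repcode
 *     }
 */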


size_t ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
                               const void* src, size_t srcSize)
{
    const U32 mls = ctx->appliedParams.cParams.searchLength;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7);
    }
}


static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
                                 const void* src, size_t srcSize,
                                 const U32 mls)
{
    U32* hashTable = ctx->hashTable;
    const U32 hBits = ctx->appliedParams.cParams.hashLog;
    seqStore_t* seqStorePtr = &(ctx->seqStore);
    const BYTE* const base = ctx->base;
    const BYTE* const dictBase = ctx->dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32   lowestIndex = ctx->lowLimit;
    const BYTE* const dictStart = dictBase + lowestIndex;
    const U32   dictLimit = ctx->dictLimit;
    const BYTE* const lowPrefixPtr = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];

    /* Search Loop */
    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
        const size_t h = ZSTD_hashPtr(ip, hBits, mls);
        const U32 matchIndex = hashTable[h];
        const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
        const BYTE* match = matchBase + matchIndex;
        const U32 current = (U32)(ip-base);
        const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
        const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
        const BYTE* repMatch = repBase + repIndex;
        size_t mLength;
        hashTable[h] = current;   /* update hash table */

        if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
            ip++;
            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
        } else {
            if ( (matchIndex < lowestIndex) ||
                 (MEM_read32(match) != MEM_read32(ip)) ) {
                ip += ((ip-anchor) >> g_searchStrength) + 1;
                continue;
            }
            {   const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
                const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
                U32 offset;
                mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                offset = current - matchIndex;
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
        }   }

        /* found a match : store it */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Fill Table */
            hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2;
            hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
                if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional overflow */
                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
                    hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
    }   }   }

    /* save reps for next block */
    seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;

    /* Return the last literals size */
    return iend - anchor;
}
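/* Illustration (not part of zstd, added for exposition): the guard
 * (U32)((dictLimit-1) - repIndex) >= 3 relies on unsigned wrap-around. If
 * repIndex sits within 3 bytes below dictLimit, the rep match would straddle
 * the segment gap, so it is rejected; prefix indices wrap to a huge value and
 * pass (the repIndex > lowestIndex condition still applies). With
 * dictLimit = 100:
 *
 *     repIndex  98 -> (99-98)  ==  1            -> reject (too close to gap)
 *     repIndex  90 -> (99-90)  ==  9            -> accept (dictionary side)
 *     repIndex 104 -> (99-104) wraps to ~2^32   -> accept (prefix side)
 */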


size_t ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
                         const void* src, size_t srcSize)
{
    U32 const mls = ctx->appliedParams.cParams.searchLength;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7);
    }
}
30
src/borg/algorithms/zstd/lib/compress/zstd_fast.h
Normal file

@@ -0,0 +1,30 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_FAST_H
#define ZSTD_FAST_H

#include "zstd_compress.h"

#if defined (__cplusplus)
extern "C" {
#endif

void ZSTD_fillHashTable(ZSTD_CCtx* zc, const void* end, const U32 mls);
size_t ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
                               const void* src, size_t srcSize);
size_t ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
                                       const void* src, size_t srcSize);

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_FAST_H */
749
src/borg/algorithms/zstd/lib/compress/zstd_lazy.c
Normal file

@@ -0,0 +1,749 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_lazy.h"


/*-*************************************
*  Binary Tree search
***************************************/
/** ZSTD_insertBt1() : add one or multiple positions to tree.
 *  ip : assumed <= iend-8 .
 * @return : nb of positions added */
static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, const BYTE* const iend, U32 nbCompares,
                          U32 extDict)
{
    U32*   const hashTable = zc->hashTable;
    U32    const hashLog = zc->appliedParams.cParams.hashLog;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32*   const bt = zc->chainTable;
    U32    const btLog  = zc->appliedParams.cParams.chainLog - 1;
    U32    const btMask = (1 << btLog) - 1;
    U32 matchIndex = hashTable[h];
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = zc->base;
    const BYTE* const dictBase = zc->dictBase;
    const U32 dictLimit = zc->dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    const U32 current = (U32)(ip-base);
    const U32 btLow = btMask >= current ? 0 : current - btMask;
    U32* smallerPtr = bt + 2*(current&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 dummy32;   /* to be nullified at the end */
    U32 const windowLow = zc->lowLimit;
    U32 matchEndIdx = current+8;
    size_t bestLength = 8;
#ifdef ZSTD_C_PREDICT
    U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
    U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
    predictedSmall += (predictedSmall>0);
    predictedLarge += (predictedLarge>0);
#endif /* ZSTD_C_PREDICT */

    assert(ip <= iend-8);   /* required for h calculation */
    hashTable[h] = current;   /* Update Hash Table */

    while (nbCompares-- && (matchIndex > windowLow)) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */

#ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog small <= 11 */
        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
        if (matchIndex == predictedSmall) {
            /* no need to check length, result known */
            *smallerPtr = matchIndex;
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
            continue;
        }
        if (matchIndex == predictedLarge) {
            *largerPtr = matchIndex;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
            continue;
        }
#endif
        if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
            match = base + matchIndex;
            if (match[matchLength] == ip[matchLength])
                matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1;
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
        }

        if (matchLength > bestLength) {
            bestLength = matchLength;
            if (matchLength > matchEndIdx - matchIndex)
                matchEndIdx = matchIndex + (U32)matchLength;
        }

        if (ip+matchLength == iend)   /* equal : no way to know if inf or sup */
            break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */

        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
            /* match+1 is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;
    if (bestLength > 384) return MIN(192, (U32)(bestLength - 384));   /* speed optimization */
    if (matchEndIdx > current + 8) return matchEndIdx - (current + 8);
    return 1;
}
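/* Illustration (not part of zstd, added for exposition): the "tree" lives in
 * a flat rolling buffer. Each position idx owns a pair of slots holding the
 * roots of its smaller/larger subtrees:
 *
 *     U32* const node    = bt + 2*(idx & btMask);
 *     U32  const smaller = node[0];  // subtree of positions comparing below
 *     U32  const larger  = node[1];  // subtree of positions comparing above
 *
 * Because btMask wraps, old positions are silently recycled, which is why
 * every walk stops once matchIndex <= btLow.
 */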


static size_t ZSTD_insertBtAndFindBestMatch (
                        ZSTD_CCtx* zc,
                        const BYTE* const ip, const BYTE* const iend,
                        size_t* offsetPtr,
                        U32 nbCompares, const U32 mls,
                        U32 extDict)
{
    U32*   const hashTable = zc->hashTable;
    U32    const hashLog = zc->appliedParams.cParams.hashLog;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32*   const bt = zc->chainTable;
    U32    const btLog  = zc->appliedParams.cParams.chainLog - 1;
    U32    const btMask = (1 << btLog) - 1;
    U32 matchIndex  = hashTable[h];
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = zc->base;
    const BYTE* const dictBase = zc->dictBase;
    const U32 dictLimit = zc->dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const U32 current = (U32)(ip-base);
    const U32 btLow = btMask >= current ? 0 : current - btMask;
    const U32 windowLow = zc->lowLimit;
    U32* smallerPtr = bt + 2*(current&btMask);
    U32* largerPtr  = bt + 2*(current&btMask) + 1;
    U32 matchEndIdx = current+8;
    U32 dummy32;   /* to be nullified at the end */
    size_t bestLength = 0;

    assert(ip <= iend-8);   /* required for h calculation */
    hashTable[h] = current;   /* Update Hash Table */

    while (nbCompares-- && (matchIndex > windowLow)) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        const BYTE* match;

        if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
            match = base + matchIndex;
            if (match[matchLength] == ip[matchLength])
                matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1;
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
        }

        if (matchLength > bestLength) {
            if (matchLength > matchEndIdx - matchIndex)
                matchEndIdx = matchIndex + (U32)matchLength;
            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
                bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
            if (ip+matchLength == iend)   /* equal : no way to know if inf or sup */
                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
        }

        if (match[matchLength] < ip[matchLength]) {
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;

    zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1;
    return bestLength;
}
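/* Illustration (not part of zstd, added for exposition): a longer match is
 * only adopted if its extra length pays for its (possibly) larger offset.
 * Offsets cost roughly log2(offset) bits, so the acceptance test above
 * effectively compares
 *
 *     4 * (matchLength - bestLength)              // gain, scaled
 *   >
 *     log2(newOffset+1) - log2(currentOffset+1)   // extra offset cost
 *
 * i.e. each additional matched byte is treated as worth about 4 bits of
 * offset cost.
 */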


void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
{
    const BYTE* const base = zc->base;
    const U32 target = (U32)(ip - base);
    U32 idx = zc->nextToUpdate;

    while(idx < target)
        idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 0);
}

/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
static size_t ZSTD_BtFindBestMatch (
                        ZSTD_CCtx* zc,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 maxNbAttempts, const U32 mls)
{
    if (ip < zc->base + zc->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
    return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
}


static size_t ZSTD_BtFindBestMatch_selectMLS (
                        ZSTD_CCtx* zc,   /* Index table will be updated */
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 maxNbAttempts, const U32 matchLengthSearch)
{
    switch(matchLengthSearch)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
    case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
    case 7 :
    case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
    }
}


void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
{
    const BYTE* const base = zc->base;
    const U32 target = (U32)(ip - base);
    U32 idx = zc->nextToUpdate;

    while (idx < target) idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 1);
}


/** Tree updater, providing best match */
static size_t ZSTD_BtFindBestMatch_extDict (
                        ZSTD_CCtx* zc,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 maxNbAttempts, const U32 mls)
{
    if (ip < zc->base + zc->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
    return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
}


static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
                        ZSTD_CCtx* zc,   /* Index table will be updated */
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 maxNbAttempts, const U32 matchLengthSearch)
{
    switch(matchLengthSearch)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
    case 5 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
    case 7 :
    case 6 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
    }
}



/* *********************************
*  Hash Chain
***********************************/
#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & mask]

/* Update chains up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
{
    U32* const hashTable  = zc->hashTable;
    const U32 hashLog = zc->appliedParams.cParams.hashLog;
    U32* const chainTable = zc->chainTable;
    const U32 chainMask = (1 << zc->appliedParams.cParams.chainLog) - 1;
    const BYTE* const base = zc->base;
    const U32 target = (U32)(ip - base);
    U32 idx = zc->nextToUpdate;

    while(idx < target) {   /* catch up */
        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
        hashTable[h] = idx;
        idx++;
    }

    zc->nextToUpdate = target;
    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}
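/* Illustration (not part of zstd, added for exposition): the chain table is a
 * singly linked list threaded through position indices. Walking all prior
 * candidates for the bytes at ip looks like:
 *
 *     U32 idx = hashTable[ZSTD_hashPtr(ip, hashLog, mls)];  // newest first
 *     while (idx > lowLimit) {
 *         // base + idx is a candidate match position
 *         idx = NEXT_IN_CHAIN(idx, chainMask);              // next older one
 *     }
 *
 * (the real search below additionally bounds the walk with minChain and
 * maxNbAttempts).
 */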


/* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE_TEMPLATE
size_t ZSTD_HcFindBestMatch_generic (
                        ZSTD_CCtx* zc,   /* Index table will be updated */
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 maxNbAttempts, const U32 mls, const U32 extDict)
{
    U32* const chainTable = zc->chainTable;
    const U32 chainSize = (1 << zc->appliedParams.cParams.chainLog);
    const U32 chainMask = chainSize-1;
    const BYTE* const base = zc->base;
    const BYTE* const dictBase = zc->dictBase;
    const U32 dictLimit = zc->dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const U32 lowLimit = zc->lowLimit;
    const U32 current = (U32)(ip-base);
    const U32 minChain = current > chainSize ? current - chainSize : 0;
    int nbAttempts=maxNbAttempts;
    size_t ml=4-1;

    /* HC4 match finder */
    U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls);

    for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
        const BYTE* match;
        size_t currentMl=0;
        if ((!extDict) || matchIndex >= dictLimit) {
            match = base + matchIndex;
            if (match[ml] == ip[ml])   /* potentially better */
                currentMl = ZSTD_count(ip, match, iLimit);
        } else {
            match = dictBase + matchIndex;
            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
        }

        /* save best solution */
        if (currentMl > ml) {
            ml = currentMl;
            *offsetPtr = current - matchIndex + ZSTD_REP_MOVE;
            if (ip+currentMl == iLimit) break;   /* best possible, avoids read overflow on next attempt */
        }

        if (matchIndex <= minChain) break;
        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
    }

    return ml;
}


FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
                        ZSTD_CCtx* zc,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 maxNbAttempts, const U32 matchLengthSearch)
{
    switch(matchLengthSearch)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
    case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
    case 7 :
    case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
    }
}


FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
                        ZSTD_CCtx* zc,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 maxNbAttempts, const U32 matchLengthSearch)
{
    switch(matchLengthSearch)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
    case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
    case 7 :
    case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
    }
}


/* *******************************
*  Common parser - lazy strategy
*********************************/
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
                                       const void* src, size_t srcSize,
                                       const U32 searchMethod, const U32 depth)
{
    seqStore_t* seqStorePtr = &(ctx->seqStore);
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ctx->base + ctx->dictLimit;

    U32 const maxSearches = 1 << ctx->appliedParams.cParams.searchLog;
    U32 const mls = ctx->appliedParams.cParams.searchLength;

    typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
                                  size_t* offsetPtr,
                                  U32 maxNbAttempts, U32 matchLengthSearch);
    searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
    U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1], savedOffset=0;

    /* init */
    ip += (ip==base);
    ctx->nextToUpdate3 = ctx->nextToUpdate;
    {   U32 const maxRep = (U32)(ip-base);
        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
    }

    /* Match Loop */
    while (ip < ilimit) {
        size_t matchLength=0;
        size_t offset=0;
        const BYTE* start=ip+1;

        /* check repCode */
        if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) {
            /* repcode : we take it */
            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
            if (depth==0) goto _storeSequence;
        }

        /* first search (depth 0) */
        {   size_t offsetFound = 99999999;
            size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
            if (ml2 > matchLength)
                matchLength = ml2, start = ip, offset=offsetFound;
        }

        if (matchLength < 4) {
            ip += ((ip-anchor) >> g_searchStrength) + 1;   /* jump faster over incompressible sections */
            continue;
        }

        /* let's try to find a better solution */
        if (depth>=1)
        while (ip<ilimit) {
            ip ++;
            if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                int const gain2 = (int)(mlRep * 3);
                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                if ((mlRep >= 4) && (gain2 > gain1))
                    matchLength = mlRep, offset = 0, start = ip;
            }
            {   size_t offset2=99999999;
                size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                if ((ml2 >= 4) && (gain2 > gain1)) {
                    matchLength = ml2, offset = offset2, start = ip;
                    continue;   /* search a better one */
            }   }

            /* let's find an even better one */
            if ((depth==2) && (ip<ilimit)) {
                ip ++;
                if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                    size_t const ml2 = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                    int const gain2 = (int)(ml2 * 4);
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
                    if ((ml2 >= 4) && (gain2 > gain1))
                        matchLength = ml2, offset = 0, start = ip;
                }
                {   size_t offset2=99999999;
                    size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                    if ((ml2 >= 4) && (gain2 > gain1)) {
                        matchLength = ml2, offset = offset2, start = ip;
                        continue;
            }   }   }
            break;  /* nothing found : store previous solution */
        }

        /* NOTE:
         * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
         * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
         * overflows the pointer, which is undefined behavior.
         */
        /* catch up */
        if (offset) {
            while ( (start > anchor)
                 && (start > base+offset-ZSTD_REP_MOVE)
                 && (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1]) )  /* only search for offset within prefix */
                { start--; matchLength++; }
            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
        }
        /* store sequence */
_storeSequence:
        {   size_t const litLength = start - anchor;
            ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
            anchor = ip = start + matchLength;
        }

        /* check immediate repcode */
        while ( (ip <= ilimit)
             && ((offset_2>0)
               & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
            /* store sequence */
            matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
            offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
            ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
            ip += matchLength;
            anchor = ip;
            continue;   /* faster when present ... (?) */
    }   }

    /* Save reps for next block */
    seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
    seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;

    /* Return the last literals size */
    return iend - anchor;
}
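/* Illustration (not part of zstd, added for exposition): "lazy" parsing holds
 * on to a found match and asks whether ip+1 offers a better deal. Candidates
 * are scored by length minus an approximate offset cost:
 *
 *     gain(newMatch)  = ml2 * 4         - ZSTD_highbit32((U32)offset2 + 1);
 *     gain(heldMatch) = matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4;
 *     // the +4 acts as hysteresis: the held match wins ties, so the parser
 *     // only switches when the new candidate is clearly better
 */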


size_t ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2);
}

size_t ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2);
}

size_t ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1);
}

size_t ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0);
}
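/* Illustration (not part of zstd, added for exposition): the four strategies
 * above differ only in their (searchMethod, depth) template arguments:
 *
 *     greedy  : hash-chain search, depth 0  (take the first acceptable match)
 *     lazy    : hash-chain search, depth 1  (re-evaluate once at ip+1)
 *     lazy2   : hash-chain search, depth 2  (re-evaluate twice)
 *     btlazy2 : binary-tree search, depth 2 (strongest and slowest finder)
 */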
|
||||
|
||||
|
||||
FORCE_INLINE_TEMPLATE
|
||||
size_t ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize,
|
||||
const U32 searchMethod, const U32 depth)
|
||||
{
|
||||
seqStore_t* seqStorePtr = &(ctx->seqStore);
|
||||
const BYTE* const istart = (const BYTE*)src;
|
||||
const BYTE* ip = istart;
|
||||
const BYTE* anchor = istart;
|
||||
const BYTE* const iend = istart + srcSize;
|
||||
const BYTE* const ilimit = iend - 8;
|
||||
const BYTE* const base = ctx->base;
|
||||
const U32 dictLimit = ctx->dictLimit;
|
||||
const U32 lowestIndex = ctx->lowLimit;
|
||||
const BYTE* const prefixStart = base + dictLimit;
|
||||
const BYTE* const dictBase = ctx->dictBase;
|
||||
const BYTE* const dictEnd = dictBase + dictLimit;
|
||||
const BYTE* const dictStart = dictBase + ctx->lowLimit;
|
||||
|
||||
const U32 maxSearches = 1 << ctx->appliedParams.cParams.searchLog;
|
||||
const U32 mls = ctx->appliedParams.cParams.searchLength;
|
||||
|
||||
typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
|
||||
size_t* offsetPtr,
|
||||
U32 maxNbAttempts, U32 matchLengthSearch);
|
||||
searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
|
||||
|
||||
U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1];
|
||||
|
||||
/* init */
|
||||
ctx->nextToUpdate3 = ctx->nextToUpdate;
|
||||
ip += (ip == prefixStart);
|
||||
|
||||
/* Match Loop */
|
||||
while (ip < ilimit) {
|
||||
size_t matchLength=0;
|
||||
size_t offset=0;
|
||||
const BYTE* start=ip+1;
|
||||
U32 current = (U32)(ip-base);
|
||||
|
||||
/* check repCode */
|
||||
{ const U32 repIndex = (U32)(current+1 - offset_1);
|
||||
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
|
||||
const BYTE* const repMatch = repBase + repIndex;
|
||||
if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
|
||||
if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
|
||||
/* repcode detected we should take it */
|
||||
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
|
||||
matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
|
||||
if (depth==0) goto _storeSequence;
|
||||
} }
|
||||
|
||||
/* first search (depth 0) */
|
||||
{ size_t offsetFound = 99999999;
|
||||
            size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
            if (ml2 > matchLength)
                matchLength = ml2, start = ip, offset=offsetFound;
        }

        if (matchLength < 4) {
            ip += ((ip-anchor) >> g_searchStrength) + 1;   /* jump faster over incompressible sections */
            continue;
        }

        /* let's try to find a better solution */
        if (depth>=1)
        while (ip<ilimit) {
            ip ++;
            current++;
            /* check repCode */
            if (offset) {
                const U32 repIndex = (U32)(current - offset_1);
                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
                const BYTE* const repMatch = repBase + repIndex;
                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
                if (MEM_read32(ip) == MEM_read32(repMatch)) {
                    /* repcode detected */
                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                    int const gain2 = (int)(repLength * 3);
                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                    if ((repLength >= 4) && (gain2 > gain1))
                        matchLength = repLength, offset = 0, start = ip;
            }   }

            /* search match, depth 1 */
            {   size_t offset2=99999999;
                size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                if ((ml2 >= 4) && (gain2 > gain1)) {
                    matchLength = ml2, offset = offset2, start = ip;
                    continue;   /* search a better one */
            }   }

            /* let's find an even better one */
            if ((depth==2) && (ip<ilimit)) {
                ip ++;
                current++;
                /* check repCode */
                if (offset) {
                    const U32 repIndex = (U32)(current - offset_1);
                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
                    const BYTE* const repMatch = repBase + repIndex;
                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
                        /* repcode detected */
                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                        int const gain2 = (int)(repLength * 4);
                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
                        if ((repLength >= 4) && (gain2 > gain1))
                            matchLength = repLength, offset = 0, start = ip;
                }   }

                /* search match, depth 2 */
                {   size_t offset2=99999999;
                    size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                    if ((ml2 >= 4) && (gain2 > gain1)) {
                        matchLength = ml2, offset = offset2, start = ip;
                        continue;
            }   }   }
            break;   /* nothing found : store previous solution */
        }

        /* catch up */
        if (offset) {
            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
            const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
        }

        /* store sequence */
_storeSequence:
        {   size_t const litLength = start - anchor;
            ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
            anchor = ip = start + matchLength;
        }

        /* check immediate repcode */
        while (ip <= ilimit) {
            const U32 repIndex = (U32)((ip-base) - offset_2);
            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
            const BYTE* const repMatch = repBase + repIndex;
            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
            if (MEM_read32(ip) == MEM_read32(repMatch)) {
                /* repcode detected we should take it */
                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
                ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
                ip += matchLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
            }
            break;
    }   }

    /* Save reps for next block */
    seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;

    /* Return the last literals size */
    return iend - anchor;
}
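
/* Editorial note (not part of zstd): a minimal sketch of the "gain" arithmetic
 * used by the lazy loop above, assuming the same depth-1 weights; the function
 * name is hypothetical. A match (ml2, offset2) found one position later only
 * replaces the incumbent (matchLength, offset) when its rough saving -- 4
 * points per matched byte minus the log2 cost of its offset -- beats the
 * incumbent's score plus a small bonus that favors the earlier match. */
static int ZSTD_lazyGainImproves_sketch(size_t ml2, size_t offset2,
                                        size_t matchLength, size_t offset)
{
    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));            /* candidate */
    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); /* incumbent + bonus */
    return (ml2 >= 4) && (gain2 > gain1);
}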

size_t ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0);
}

size_t ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
}

size_t ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
}

size_t ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
}
38
src/borg/algorithms/zstd/lib/compress/zstd_lazy.h
Normal file
@@ -0,0 +1,38 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_LAZY_H
#define ZSTD_LAZY_H

#include "zstd_compress.h"

#if defined (__cplusplus)
extern "C" {
#endif

U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls);
void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls);
void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls);

size_t ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize);

size_t ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
size_t ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_LAZY_H */
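
/* Editorial sketch (not part of zstd): how a caller might dispatch to the
 * per-strategy entry points declared above, in the spirit of
 * ZSTD_selectBlockCompressor() defined in zstd_compress.c; the function name
 * and table layout are hypothetical, hence kept out of compilation. */
#if 0
typedef size_t (*ZSTD_lazyCompressor_sketch)(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
static ZSTD_lazyCompressor_sketch ZSTD_selectLazyCompressor_sketch(ZSTD_strategy strat, int extDict)
{
    switch (strat) {
    case ZSTD_greedy : return extDict ? ZSTD_compressBlock_greedy_extDict  : ZSTD_compressBlock_greedy;
    case ZSTD_lazy   : return extDict ? ZSTD_compressBlock_lazy_extDict    : ZSTD_compressBlock_lazy;
    case ZSTD_lazy2  : return extDict ? ZSTD_compressBlock_lazy2_extDict   : ZSTD_compressBlock_lazy2;
    default          : return extDict ? ZSTD_compressBlock_btlazy2_extDict : ZSTD_compressBlock_btlazy2;
    }
}
#endif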
707
src/borg/algorithms/zstd/lib/compress/zstd_ldm.c
Normal file
@@ -0,0 +1,707 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 */

#include "zstd_ldm.h"

#include "zstd_fast.h"          /* ZSTD_fillHashTable() */
#include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */

#define LDM_BUCKET_SIZE_LOG 3
#define LDM_MIN_MATCH_LENGTH 64
#define LDM_HASH_RLOG 7
#define LDM_HASH_CHAR_OFFSET 10

size_t ZSTD_ldm_initializeParameters(ldmParams_t* params, U32 enableLdm)
{
    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
    params->enableLdm = enableLdm>0;
    params->hashLog = 0;
    params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
    params->minMatchLength = LDM_MIN_MATCH_LENGTH;
    params->hashEveryLog = ZSTD_LDM_HASHEVERYLOG_NOTSET;
    return 0;
}

void ZSTD_ldm_adjustParameters(ldmParams_t* params, U32 windowLog)
{
    if (params->hashLog == 0) {
        params->hashLog = MAX(ZSTD_HASHLOG_MIN, windowLog - LDM_HASH_RLOG);
        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
    }
    if (params->hashEveryLog == ZSTD_LDM_HASHEVERYLOG_NOTSET) {
        params->hashEveryLog =
            windowLog < params->hashLog ? 0 : windowLog - params->hashLog;
    }
    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}

size_t ZSTD_ldm_getTableSize(U32 hashLog, U32 bucketSizeLog) {
    size_t const ldmHSize = ((size_t)1) << hashLog;
    size_t const ldmBucketSizeLog = MIN(bucketSizeLog, hashLog);
    size_t const ldmBucketSize =
        ((size_t)1) << (hashLog - ldmBucketSizeLog);
    return ldmBucketSize + (ldmHSize * (sizeof(ldmEntry_t)));
}
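
/* Editorial note (not part of zstd): a worked instance of the size formula
 * above, assuming sizeof(ldmEntry_t) == 8 (one U32 offset + one U32 checksum,
 * as defined elsewhere). With hashLog = 20 and bucketSizeLog = 3 :
 *   bucket offsets : 1 << (20 - 3)  = 131072 bytes (128 KiB)
 *   hash table     : (1 << 20) * 8  = 8 MiB
 *   total          ~ 8.125 MiB                                          */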

/** ZSTD_ldm_getSmallHash() :
 * numBits should be <= 32
 * If numBits==0, returns 0.
 * @return : the most significant numBits of value. */
static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)
{
    assert(numBits <= 32);
    return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));
}

/** ZSTD_ldm_getChecksum() :
 * numBitsToDiscard should be <= 32
 * @return : the next most significant 32 bits after numBitsToDiscard */
static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)
{
    assert(numBitsToDiscard <= 32);
    return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;
}

/** ZSTD_ldm_getTag() :
 * Given the hash, returns the most significant numTagBits bits
 * after (32 + hbits) bits.
 *
 * If there are not enough bits remaining, return the last
 * numTagBits bits. */
static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)
{
    assert(numTagBits < 32 && hbits <= 32);
    if (32 - hbits < numTagBits) {
        return hash & (((U32)1 << numTagBits) - 1);
    } else {
        return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);
    }
}
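
/* Editorial sketch (not part of zstd): how the three helpers above slice a
 * single 64-bit rolling hash, reading from the most significant bit down.
 * For example, with hBits = 20 and numTagBits = hashEveryLog = 7 :
 *   bits 63..44  ->  ZSTD_ldm_getSmallHash()  (hash table index)
 *   bits 43..12  ->  ZSTD_ldm_getChecksum()   (entry checksum)
 *   bits 11..5   ->  ZSTD_ldm_getTag()        (insertion tag)
 * The demo function name is hypothetical. */
static void ZSTD_ldm_splitHash_demo(U64 rollingHash, U32 hBits, U32 numTagBits,
                                    U32* smallHash, U32* checksum, U32* tag)
{
    *smallHash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
    *checksum  = ZSTD_ldm_getChecksum(rollingHash, hBits);
    *tag       = ZSTD_ldm_getTag(rollingHash, hBits, numTagBits);
}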

/** ZSTD_ldm_getBucket() :
 * Returns a pointer to the start of the bucket associated with hash. */
static ldmEntry_t* ZSTD_ldm_getBucket(
        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
{
    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
}

/** ZSTD_ldm_insertEntry() :
 * Insert the entry with corresponding hash into the hash table */
static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
                                 size_t const hash, const ldmEntry_t entry,
                                 ldmParams_t const ldmParams)
{
    BYTE* const bucketOffsets = ldmState->bucketOffsets;
    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry;
    bucketOffsets[hash]++;
    bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1;
}

/** ZSTD_ldm_makeEntryAndInsertByTag() :
 *
 * Gets the small hash, checksum, and tag from the rollingHash.
 *
 * If the tag matches (1 << ldmParams.hashEveryLog)-1, then
 * creates an ldmEntry from the offset, and inserts it into the hash table.
 *
 * hBits is the length of the small hash, which is the most significant hBits
 * of rollingHash. The checksum is the next 32 most significant bits, followed
 * by ldmParams.hashEveryLog bits that make up the tag. */
static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
                                             U64 const rollingHash,
                                             U32 const hBits,
                                             U32 const offset,
                                             ldmParams_t const ldmParams)
{
    U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog);
    U32 const tagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
    if (tag == tagMask) {
        U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
        U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
        ldmEntry_t entry;
        entry.offset = offset;
        entry.checksum = checksum;
        ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);
    }
}
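
/* Editorial sketch (not part of zstd): bucketOffsets[hash] wraps modulo the
 * bucket size, so each bucket behaves as a small ring buffer and a full
 * bucket silently overwrites its oldest entry. The demo below performs
 * bucketSize+1 inserts; the last one lands on slot 0 again. The function
 * name is hypothetical. */
static void ZSTD_ldm_bucketWrap_demo(ldmState_t* ldmState, size_t hash,
                                     ldmParams_t const ldmParams)
{
    U32 const bucketSize = (U32)1 << ldmParams.bucketSizeLog;
    ldmEntry_t entry;
    U32 u;
    entry.checksum = 0;
    for (u = 0; u <= bucketSize; u++) {
        entry.offset = u + 1;
        ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);   /* slot u & (bucketSize-1) */
    }
}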

/** ZSTD_ldm_getRollingHash() :
 * Get a 64-bit hash using the first len bytes from buf.
 *
 * Given bytes s = s_1, s_2, ... s_k, the hash is defined to be
 * H(s) = s_1*(a^(k-1)) + s_2*(a^(k-2)) + ... + s_k*(a^0)
 *
 * where the constant a is defined to be prime8bytes.
 *
 * The implementation adds an offset to each byte, so
 * H(s) = (s_1 + HASH_CHAR_OFFSET)*(a^(k-1)) + ... */
static U64 ZSTD_ldm_getRollingHash(const BYTE* buf, U32 len)
{
    U64 ret = 0;
    U32 i;
    for (i = 0; i < len; i++) {
        ret *= prime8bytes;
        ret += buf[i] + LDM_HASH_CHAR_OFFSET;
    }
    return ret;
}

/** ZSTD_ldm_ipow() :
 * Return base^exp. */
static U64 ZSTD_ldm_ipow(U64 base, U64 exp)
{
    U64 ret = 1;
    while (exp) {
        if (exp & 1) { ret *= base; }
        exp >>= 1;
        base *= base;
    }
    return ret;
}

U64 ZSTD_ldm_getHashPower(U32 minMatchLength) {
    assert(minMatchLength >= ZSTD_LDM_MINMATCH_MIN);
    return ZSTD_ldm_ipow(prime8bytes, minMatchLength - 1);
}

/** ZSTD_ldm_updateHash() :
 * Updates hash by removing toRemove and adding toAdd. */
static U64 ZSTD_ldm_updateHash(U64 hash, BYTE toRemove, BYTE toAdd, U64 hashPower)
{
    hash -= ((toRemove + LDM_HASH_CHAR_OFFSET) * hashPower);
    hash *= prime8bytes;
    hash += toAdd + LDM_HASH_CHAR_OFFSET;
    return hash;
}
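
/* Editorial sketch (not part of zstd): the O(1) update above is equivalent to
 * rehashing the slid window from scratch. For any window length k :
 *   updateHash(H(buf[0..k-1]), buf[0], buf[k], a^(k-1)) == H(buf[1..k])
 * which is why hashPower is precomputed as prime8bytes^(minMatchLength-1).
 * The demo function name is hypothetical; buf must hold at least k+1 bytes. */
static U64 ZSTD_ldm_slideOne_demo(const BYTE* buf, U32 k)
{
    U64 const hashPower = ZSTD_ldm_ipow(prime8bytes, k - 1);
    U64 h = ZSTD_ldm_getRollingHash(buf, k);                 /* hash of buf[0..k-1] */
    h = ZSTD_ldm_updateHash(h, buf[0], buf[k], hashPower);   /* now hash of buf[1..k] */
    return h;
}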

/** ZSTD_ldm_countBackwardsMatch() :
 * Returns the number of bytes that match backwards before pIn and pMatch.
 *
 * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
static size_t ZSTD_ldm_countBackwardsMatch(
            const BYTE* pIn, const BYTE* pAnchor,
            const BYTE* pMatch, const BYTE* pBase)
{
    size_t matchLength = 0;
    while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) {
        pIn--;
        pMatch--;
        matchLength++;
    }
    return matchLength;
}

/** ZSTD_ldm_fillFastTables() :
 *
 * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
 * This is similar to ZSTD_loadDictionaryContent.
 *
 * The tables for the other strategies are filled within their
 * block compressors. */
static size_t ZSTD_ldm_fillFastTables(ZSTD_CCtx* zc, const void* end)
{
    const BYTE* const iend = (const BYTE*)end;
    const U32 mls = zc->appliedParams.cParams.searchLength;

    switch(zc->appliedParams.cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(zc, iend, mls);
        zc->nextToUpdate = (U32)(iend - zc->base);
        break;

    case ZSTD_dfast:
        ZSTD_fillDoubleHashTable(zc, iend, mls);
        zc->nextToUpdate = (U32)(iend - zc->base);
        break;

    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
    case ZSTD_btlazy2:
    case ZSTD_btopt:
    case ZSTD_btultra:
        break;
    default:
        assert(0);   /* not possible : not a valid strategy id */
    }

    return 0;
}

/** ZSTD_ldm_fillLdmHashTable() :
 *
 * Fills hashTable from (lastHashed + 1) to iend (non-inclusive).
 * lastHash is the rolling hash that corresponds to lastHashed.
 *
 * Returns the rolling hash corresponding to position iend-1. */
static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
                                     U64 lastHash, const BYTE* lastHashed,
                                     const BYTE* iend, const BYTE* base,
                                     U32 hBits, ldmParams_t const ldmParams)
{
    U64 rollingHash = lastHash;
    const BYTE* cur = lastHashed + 1;

    while (cur < iend) {
        rollingHash = ZSTD_ldm_updateHash(rollingHash, cur[-1],
                                          cur[ldmParams.minMatchLength-1],
                                          state->hashPower);
        ZSTD_ldm_makeEntryAndInsertByTag(state,
                                         rollingHash, hBits,
                                         (U32)(cur - base), ldmParams);
        ++cur;
    }
    return rollingHash;
}


/** ZSTD_ldm_limitTableUpdate() :
 *
 * Sets cctx->nextToUpdate to a position corresponding closer to anchor
 * if it is far away
 * (after a long match, only update tables a limited amount). */
static void ZSTD_ldm_limitTableUpdate(ZSTD_CCtx* cctx, const BYTE* anchor)
{
    U32 const current = (U32)(anchor - cctx->base);
    if (current > cctx->nextToUpdate + 1024) {
        cctx->nextToUpdate =
            current - MIN(512, current - cctx->nextToUpdate - 1024);
    }
}

typedef size_t (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize);
/* defined in zstd_compress.c */
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict);

FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_ldm_generic(ZSTD_CCtx* cctx,
                                      const void* src, size_t srcSize)
{
    ldmState_t* const ldmState = &(cctx->ldmState);
    const ldmParams_t ldmParams = cctx->appliedParams.ldmParams;
    const U64 hashPower = ldmState->hashPower;
    const U32 hBits = ldmParams.hashLog - ldmParams.bucketSizeLog;
    const U32 ldmBucketSize = ((U32)1 << ldmParams.bucketSizeLog);
    const U32 ldmTagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
    seqStore_t* const seqStorePtr = &(cctx->seqStore);
    const BYTE* const base = cctx->base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 lowestIndex = cctx->dictLimit;
    const BYTE* const lowest = base + lowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - MAX(ldmParams.minMatchLength, HASH_READ_SIZE);

    const ZSTD_blockCompressor blockCompressor =
        ZSTD_selectBlockCompressor(cctx->appliedParams.cParams.strategy, 0);
    U32* const repToConfirm = seqStorePtr->repToConfirm;
    U32 savedRep[ZSTD_REP_NUM];
    U64 rollingHash = 0;
    const BYTE* lastHashed = NULL;
    size_t i, lastLiterals;

    /* Save seqStorePtr->rep and copy repToConfirm */
    for (i = 0; i < ZSTD_REP_NUM; i++)
        savedRep[i] = repToConfirm[i] = seqStorePtr->rep[i];

    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        U32 const current = (U32)(ip - base);
        size_t forwardMatchLength = 0, backwardMatchLength = 0;
        ldmEntry_t* bestEntry = NULL;
        if (ip != istart) {
            rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0],
                                              lastHashed[ldmParams.minMatchLength],
                                              hashPower);
        } else {
            rollingHash = ZSTD_ldm_getRollingHash(ip, ldmParams.minMatchLength);
        }
        lastHashed = ip;

        /* Do not insert and do not look for a match */
        if (ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog) !=
                ldmTagMask) {
            ip++;
            continue;
        }

        /* Get the best entry and compute the match lengths */
        {
            ldmEntry_t* const bucket =
                ZSTD_ldm_getBucket(ldmState,
                                   ZSTD_ldm_getSmallHash(rollingHash, hBits),
                                   ldmParams);
            ldmEntry_t* cur;
            size_t bestMatchLength = 0;
            U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);

            for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
                const BYTE* const pMatch = cur->offset + base;
                size_t curForwardMatchLength, curBackwardMatchLength,
                       curTotalMatchLength;
                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
                    continue;
                }

                curForwardMatchLength = ZSTD_count(ip, pMatch, iend);
                if (curForwardMatchLength < ldmParams.minMatchLength) {
                    continue;
                }
                curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(
                                             ip, anchor, pMatch, lowest);
                curTotalMatchLength = curForwardMatchLength +
                                      curBackwardMatchLength;

                if (curTotalMatchLength > bestMatchLength) {
                    bestMatchLength = curTotalMatchLength;
                    forwardMatchLength = curForwardMatchLength;
                    backwardMatchLength = curBackwardMatchLength;
                    bestEntry = cur;
                }
            }
        }

        /* No match found -- continue searching */
        if (bestEntry == NULL) {
            ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,
                                             hBits, current,
                                             ldmParams);
            ip++;
            continue;
        }

        /* Match found */
        mLength = forwardMatchLength + backwardMatchLength;
        ip -= backwardMatchLength;

        /* Call the block compressor on the remaining literals */
        {
            U32 const matchIndex = bestEntry->offset;
            const BYTE* const match = base + matchIndex - backwardMatchLength;
            U32 const offset = (U32)(ip - match);

            /* Overwrite rep codes */
            for (i = 0; i < ZSTD_REP_NUM; i++)
                seqStorePtr->rep[i] = repToConfirm[i];

            /* Fill tables for block compressor */
            ZSTD_ldm_limitTableUpdate(cctx, anchor);
            ZSTD_ldm_fillFastTables(cctx, anchor);

            /* Call block compressor and get remaining literals */
            lastLiterals = blockCompressor(cctx, anchor, ip - anchor);
            cctx->nextToUpdate = (U32)(ip - base);

            /* Update repToConfirm with the new offset */
            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                repToConfirm[i] = repToConfirm[i-1];
            repToConfirm[0] = offset;

            /* Store the sequence with the leftover literals */
            ZSTD_storeSeq(seqStorePtr, lastLiterals, ip - lastLiterals,
                          offset + ZSTD_REP_MOVE, mLength - MINMATCH);
        }

        /* Insert the current entry into the hash table */
        ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
                                         (U32)(lastHashed - base),
                                         ldmParams);

        assert(ip + backwardMatchLength == lastHashed);

        /* Fill the hash table from lastHashed+1 to ip+mLength */
        /* Heuristic: don't need to fill the entire table at end of block */
        if (ip + mLength < ilimit) {
            rollingHash = ZSTD_ldm_fillLdmHashTable(
                              ldmState, rollingHash, lastHashed,
                              ip + mLength, base, hBits, ldmParams);
            lastHashed = ip + mLength - 1;
        }
        ip += mLength;
        anchor = ip;
        /* Check immediate repcode */
        while ( (ip < ilimit)
             && ( (repToConfirm[1] > 0) && (repToConfirm[1] <= (U32)(ip-lowest))
             && (MEM_read32(ip) == MEM_read32(ip - repToConfirm[1])) )) {

            size_t const rLength = ZSTD_count(ip+4, ip+4-repToConfirm[1],
                                              iend) + 4;
            /* Swap repToConfirm[1] <=> repToConfirm[0] */
            {
                U32 const tmpOff = repToConfirm[1];
                repToConfirm[1] = repToConfirm[0];
                repToConfirm[0] = tmpOff;
            }

            ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);

            /* Fill the hash table from lastHashed+1 to ip+rLength */
            if (ip + rLength < ilimit) {
                rollingHash = ZSTD_ldm_fillLdmHashTable(
                                  ldmState, rollingHash, lastHashed,
                                  ip + rLength, base, hBits, ldmParams);
                lastHashed = ip + rLength - 1;
            }
            ip += rLength;
            anchor = ip;
        }
    }

    /* Overwrite rep */
    for (i = 0; i < ZSTD_REP_NUM; i++)
        seqStorePtr->rep[i] = repToConfirm[i];

    ZSTD_ldm_limitTableUpdate(cctx, anchor);
    ZSTD_ldm_fillFastTables(cctx, anchor);

    lastLiterals = blockCompressor(cctx, anchor, iend - anchor);
    cctx->nextToUpdate = (U32)(iend - base);

    /* Restore seqStorePtr->rep */
    for (i = 0; i < ZSTD_REP_NUM; i++)
        seqStorePtr->rep[i] = savedRep[i];

    /* Return the last literals size */
    return lastLiterals;
}

size_t ZSTD_compressBlock_ldm(ZSTD_CCtx* ctx,
                              const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_ldm_generic(ctx, src, srcSize);
}

static size_t ZSTD_compressBlock_ldm_extDict_generic(
                                 ZSTD_CCtx* ctx,
                                 const void* src, size_t srcSize)
{
    ldmState_t* const ldmState = &(ctx->ldmState);
    const ldmParams_t ldmParams = ctx->appliedParams.ldmParams;
    const U64 hashPower = ldmState->hashPower;
    const U32 hBits = ldmParams.hashLog - ldmParams.bucketSizeLog;
    const U32 ldmBucketSize = ((U32)1 << ldmParams.bucketSizeLog);
    const U32 ldmTagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
    seqStore_t* const seqStorePtr = &(ctx->seqStore);
    const BYTE* const base = ctx->base;
    const BYTE* const dictBase = ctx->dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 lowestIndex = ctx->lowLimit;
    const BYTE* const dictStart = dictBase + lowestIndex;
    const U32 dictLimit = ctx->dictLimit;
    const BYTE* const lowPrefixPtr = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - MAX(ldmParams.minMatchLength, HASH_READ_SIZE);

    const ZSTD_blockCompressor blockCompressor =
        ZSTD_selectBlockCompressor(ctx->appliedParams.cParams.strategy, 1);
    U32* const repToConfirm = seqStorePtr->repToConfirm;
    U32 savedRep[ZSTD_REP_NUM];
    U64 rollingHash = 0;
    const BYTE* lastHashed = NULL;
    size_t i, lastLiterals;

    /* Save seqStorePtr->rep and copy repToConfirm */
    for (i = 0; i < ZSTD_REP_NUM; i++) {
        savedRep[i] = repToConfirm[i] = seqStorePtr->rep[i];
    }

    /* Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because (ip+1) */
        size_t mLength;
        const U32 current = (U32)(ip-base);
        size_t forwardMatchLength = 0, backwardMatchLength = 0;
        ldmEntry_t* bestEntry = NULL;
        if (ip != istart) {
            rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0],
                                              lastHashed[ldmParams.minMatchLength],
                                              hashPower);
        } else {
            rollingHash = ZSTD_ldm_getRollingHash(ip, ldmParams.minMatchLength);
        }
        lastHashed = ip;

        if (ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog) !=
                ldmTagMask) {
            /* Don't insert and don't look for a match */
            ip++;
            continue;
        }

        /* Get the best entry and compute the match lengths */
        {
            ldmEntry_t* const bucket =
                ZSTD_ldm_getBucket(ldmState,
                                   ZSTD_ldm_getSmallHash(rollingHash, hBits),
                                   ldmParams);
            ldmEntry_t* cur;
            size_t bestMatchLength = 0;
            U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);

            for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
                const BYTE* const curMatchBase =
                    cur->offset < dictLimit ? dictBase : base;
                const BYTE* const pMatch = curMatchBase + cur->offset;
                const BYTE* const matchEnd =
                    cur->offset < dictLimit ? dictEnd : iend;
                const BYTE* const lowMatchPtr =
                    cur->offset < dictLimit ? dictStart : lowPrefixPtr;
                size_t curForwardMatchLength, curBackwardMatchLength,
                       curTotalMatchLength;

                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
                    continue;
                }

                curForwardMatchLength = ZSTD_count_2segments(
                                            ip, pMatch, iend,
                                            matchEnd, lowPrefixPtr);
                if (curForwardMatchLength < ldmParams.minMatchLength) {
                    continue;
                }
                curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(
                                             ip, anchor, pMatch, lowMatchPtr);
                curTotalMatchLength = curForwardMatchLength +
                                      curBackwardMatchLength;

                if (curTotalMatchLength > bestMatchLength) {
                    bestMatchLength = curTotalMatchLength;
                    forwardMatchLength = curForwardMatchLength;
                    backwardMatchLength = curBackwardMatchLength;
                    bestEntry = cur;
                }
            }
        }

        /* No match found -- continue searching */
        if (bestEntry == NULL) {
            ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
                                             (U32)(lastHashed - base),
                                             ldmParams);
            ip++;
            continue;
        }

        /* Match found */
        mLength = forwardMatchLength + backwardMatchLength;
        ip -= backwardMatchLength;

        /* Call the block compressor on the remaining literals */
        {
            /* ip = current - backwardMatchLength
             * The match is at (bestEntry->offset - backwardMatchLength) */
            U32 const matchIndex = bestEntry->offset;
            U32 const offset = current - matchIndex;

            /* Overwrite rep codes */
            for (i = 0; i < ZSTD_REP_NUM; i++)
                seqStorePtr->rep[i] = repToConfirm[i];

            /* Fill the hash table for the block compressor */
            ZSTD_ldm_limitTableUpdate(ctx, anchor);
            ZSTD_ldm_fillFastTables(ctx, anchor);

            /* Call block compressor and get remaining literals */
            lastLiterals = blockCompressor(ctx, anchor, ip - anchor);
            ctx->nextToUpdate = (U32)(ip - base);

            /* Update repToConfirm with the new offset */
            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                repToConfirm[i] = repToConfirm[i-1];
            repToConfirm[0] = offset;

            /* Store the sequence with the leftover literals */
            ZSTD_storeSeq(seqStorePtr, lastLiterals, ip - lastLiterals,
                          offset + ZSTD_REP_MOVE, mLength - MINMATCH);
        }

        /* Insert the current entry into the hash table */
        ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
                                         (U32)(lastHashed - base),
                                         ldmParams);

        /* Fill the hash table from lastHashed+1 to ip+mLength */
        assert(ip + backwardMatchLength == lastHashed);
        if (ip + mLength < ilimit) {
            rollingHash = ZSTD_ldm_fillLdmHashTable(
                              ldmState, rollingHash, lastHashed,
                              ip + mLength, base, hBits,
                              ldmParams);
            lastHashed = ip + mLength - 1;
        }
        ip += mLength;
        anchor = ip;

        /* check immediate repcode */
        while (ip < ilimit) {
            U32 const current2 = (U32)(ip-base);
            U32 const repIndex2 = current2 - repToConfirm[1];
            const BYTE* repMatch2 = repIndex2 < dictLimit ?
                                    dictBase + repIndex2 : base + repIndex2;
            if ( (((U32)((dictLimit-1) - repIndex2) >= 3) &
                  (repIndex2 > lowestIndex))  /* intentional overflow */
               && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                const BYTE* const repEnd2 = repIndex2 < dictLimit ?
                                            dictEnd : iend;
                size_t const repLength2 =
                    ZSTD_count_2segments(ip+4, repMatch2+4, iend,
                                         repEnd2, lowPrefixPtr) + 4;

                U32 tmpOffset = repToConfirm[1];
                repToConfirm[1] = repToConfirm[0];
                repToConfirm[0] = tmpOffset;

                ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);

                /* Fill the hash table from lastHashed+1 to ip+repLength2 */
                if (ip + repLength2 < ilimit) {
                    rollingHash = ZSTD_ldm_fillLdmHashTable(
                                      ldmState, rollingHash, lastHashed,
                                      ip + repLength2, base, hBits,
                                      ldmParams);
                    lastHashed = ip + repLength2 - 1;
                }
                ip += repLength2;
                anchor = ip;
                continue;
            }
            break;
        }
    }

    /* Overwrite rep */
    for (i = 0; i < ZSTD_REP_NUM; i++)
        seqStorePtr->rep[i] = repToConfirm[i];

    ZSTD_ldm_limitTableUpdate(ctx, anchor);
    ZSTD_ldm_fillFastTables(ctx, anchor);

    /* Call the block compressor one last time on the last literals */
    lastLiterals = blockCompressor(ctx, anchor, iend - anchor);
    ctx->nextToUpdate = (U32)(iend - base);

    /* Restore seqStorePtr->rep */
    for (i = 0; i < ZSTD_REP_NUM; i++)
        seqStorePtr->rep[i] = savedRep[i];

    /* Return the last literals size */
    return lastLiterals;
}

size_t ZSTD_compressBlock_ldm_extDict(ZSTD_CCtx* ctx,
                                      const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_ldm_extDict_generic(ctx, src, srcSize);
}
67
src/borg/algorithms/zstd/lib/compress/zstd_ldm.h
Normal file
@@ -0,0 +1,67 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 */

#ifndef ZSTD_LDM_H
#define ZSTD_LDM_H

#include "zstd_compress.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
* Long distance matching
***************************************/

#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_DEFAULTMAX
#define ZSTD_LDM_HASHEVERYLOG_NOTSET 9999

/** ZSTD_compressBlock_ldm_generic() :
 *
 * This is a block compressor intended for long distance matching.
 *
 * The function searches for matches of length at least
 * ldmParams.minMatchLength using a hash table in cctx->ldmState.
 * Matches can be at a distance of up to cParams.windowLog.
 *
 * Upon finding a match, the unmatched literals are compressed using a
 * ZSTD_blockCompressor (depending on the strategy in the compression
 * parameters), which stores the matched sequences. The "long distance"
 * match is then stored with the remaining literals from the
 * ZSTD_blockCompressor. */
size_t ZSTD_compressBlock_ldm(ZSTD_CCtx* cctx, const void* src, size_t srcSize);
size_t ZSTD_compressBlock_ldm_extDict(ZSTD_CCtx* ctx,
                                      const void* src, size_t srcSize);

/** ZSTD_ldm_initializeParameters() :
 * Initialize the long distance matching parameters to their default values. */
size_t ZSTD_ldm_initializeParameters(ldmParams_t* params, U32 enableLdm);

/** ZSTD_ldm_getTableSize() :
 * Estimate the space needed for long distance matching tables. */
size_t ZSTD_ldm_getTableSize(U32 hashLog, U32 bucketSizeLog);

/** ZSTD_ldm_getHashPower() :
 * Return prime8bytes^(minMatchLength-1) */
U64 ZSTD_ldm_getHashPower(U32 minMatchLength);

/** ZSTD_ldm_adjustParameters() :
 * If the params->hashEveryLog is not set, set it to its default value based on
 * windowLog and params->hashLog.
 *
 * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
 * params->hashLog if it is not). */
void ZSTD_ldm_adjustParameters(ldmParams_t* params, U32 windowLog);

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_LDM_H */
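
/* Editorial sketch (not part of zstd): the intended call order for the
 * parameter helpers declared above; cctx plumbing and allocation are elided
 * and the local names are hypothetical, hence kept out of compilation. */
#if 0
    ldmParams_t params;
    ZSTD_ldm_initializeParameters(&params, 1);       /* enable LDM with defaults */
    ZSTD_ldm_adjustParameters(&params, windowLog);   /* derive hashLog / hashEveryLog */
    {   size_t const tableSize = ZSTD_ldm_getTableSize(params.hashLog, params.bucketSizeLog);
        U64 const hashPower = ZSTD_ldm_getHashPower(params.minMatchLength);
        /* allocate tableSize bytes for ldmState; keep hashPower for rolling updates */
    }
#endif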
957
src/borg/algorithms/zstd/lib/compress/zstd_opt.c
Normal file
@@ -0,0 +1,957 @@
/*
 * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_opt.h"
#include "zstd_lazy.h"


#define ZSTD_LITFREQ_ADD 2
#define ZSTD_FREQ_DIV 4
#define ZSTD_MAX_PRICE (1<<30)

/*-*************************************
* Price functions for optimal parser
***************************************/
static void ZSTD_setLog2Prices(optState_t* optPtr)
{
    optPtr->log2matchLengthSum = ZSTD_highbit32(optPtr->matchLengthSum+1);
    optPtr->log2litLengthSum = ZSTD_highbit32(optPtr->litLengthSum+1);
    optPtr->log2litSum = ZSTD_highbit32(optPtr->litSum+1);
    optPtr->log2offCodeSum = ZSTD_highbit32(optPtr->offCodeSum+1);
    optPtr->factor = 1 + ((optPtr->litSum>>5) / optPtr->litLengthSum) + ((optPtr->litSum<<1) / (optPtr->litSum + optPtr->matchSum));
}


static void ZSTD_rescaleFreqs(optState_t* optPtr, const BYTE* src, size_t srcSize)
{
    unsigned u;

    optPtr->cachedLiterals = NULL;
    optPtr->cachedPrice = optPtr->cachedLitLength = 0;
    optPtr->staticPrices = 0;

    if (optPtr->litLengthSum == 0) {
        if (srcSize <= 1024) optPtr->staticPrices = 1;

        assert(optPtr->litFreq!=NULL);
        for (u=0; u<=MaxLit; u++)
            optPtr->litFreq[u] = 0;
        for (u=0; u<srcSize; u++)
            optPtr->litFreq[src[u]]++;

        optPtr->litSum = 0;
        optPtr->litLengthSum = MaxLL+1;
        optPtr->matchLengthSum = MaxML+1;
        optPtr->offCodeSum = (MaxOff+1);
        optPtr->matchSum = (ZSTD_LITFREQ_ADD<<Litbits);

        for (u=0; u<=MaxLit; u++) {
            optPtr->litFreq[u] = 1 + (optPtr->litFreq[u]>>ZSTD_FREQ_DIV);
            optPtr->litSum += optPtr->litFreq[u];
        }
        for (u=0; u<=MaxLL; u++)
            optPtr->litLengthFreq[u] = 1;
        for (u=0; u<=MaxML; u++)
            optPtr->matchLengthFreq[u] = 1;
        for (u=0; u<=MaxOff; u++)
            optPtr->offCodeFreq[u] = 1;
    } else {
        optPtr->matchLengthSum = 0;
        optPtr->litLengthSum = 0;
        optPtr->offCodeSum = 0;
        optPtr->matchSum = 0;
        optPtr->litSum = 0;

        for (u=0; u<=MaxLit; u++) {
            optPtr->litFreq[u] = 1 + (optPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1));
            optPtr->litSum += optPtr->litFreq[u];
        }
        for (u=0; u<=MaxLL; u++) {
            optPtr->litLengthFreq[u] = 1 + (optPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
            optPtr->litLengthSum += optPtr->litLengthFreq[u];
        }
        for (u=0; u<=MaxML; u++) {
            optPtr->matchLengthFreq[u] = 1 + (optPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
            optPtr->matchLengthSum += optPtr->matchLengthFreq[u];
            optPtr->matchSum += optPtr->matchLengthFreq[u] * (u + 3);
        }
        optPtr->matchSum *= ZSTD_LITFREQ_ADD;
        for (u=0; u<=MaxOff; u++) {
            optPtr->offCodeFreq[u] = 1 + (optPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
            optPtr->offCodeSum += optPtr->offCodeFreq[u];
        }
    }

    ZSTD_setLog2Prices(optPtr);
}


static U32 ZSTD_getLiteralPrice(optState_t* optPtr, U32 litLength, const BYTE* literals)
{
    U32 price, u;

    if (optPtr->staticPrices)
        return ZSTD_highbit32((U32)litLength+1) + (litLength*6);

    if (litLength == 0)
        return optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[0]+1);

    /* literals */
    if (optPtr->cachedLiterals == literals) {
        U32 const additional = litLength - optPtr->cachedLitLength;
        const BYTE* literals2 = optPtr->cachedLiterals + optPtr->cachedLitLength;
        price = optPtr->cachedPrice + additional * optPtr->log2litSum;
        for (u=0; u < additional; u++)
            price -= ZSTD_highbit32(optPtr->litFreq[literals2[u]]+1);
        optPtr->cachedPrice = price;
        optPtr->cachedLitLength = litLength;
    } else {
        price = litLength * optPtr->log2litSum;
        for (u=0; u < litLength; u++)
            price -= ZSTD_highbit32(optPtr->litFreq[literals[u]]+1);

        if (litLength >= 12) {
            optPtr->cachedLiterals = literals;
            optPtr->cachedPrice = price;
            optPtr->cachedLitLength = litLength;
        }
    }

    /* literal Length */
    {   const BYTE LL_deltaCode = 19;
        const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
        price += LL_bits[llCode] + optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
    }

    return price;
}


FORCE_INLINE_TEMPLATE U32 ZSTD_getPrice(optState_t* optPtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra)
{
    /* offset */
    U32 price;
    BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);

    if (optPtr->staticPrices)
        return ZSTD_getLiteralPrice(optPtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode;

    price = offCode + optPtr->log2offCodeSum - ZSTD_highbit32(optPtr->offCodeFreq[offCode]+1);
    if (!ultra && offCode >= 20) price += (offCode-19)*2;

    /* match Length */
    {   const BYTE ML_deltaCode = 36;
        const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
        price += ML_bits[mlCode] + optPtr->log2matchLengthSum - ZSTD_highbit32(optPtr->matchLengthFreq[mlCode]+1);
    }

    return price + ZSTD_getLiteralPrice(optPtr, litLength, literals) + optPtr->factor;
}
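
/* Editorial note (not part of zstd): the prices above approximate entropy
 * cost in bits -- a symbol of frequency f out of a total t costs about
 * log2(t) - log2(f) = log2(t/f) bits, computed here with ZSTD_highbit32 and
 * the cached log2 sums. A minimal sketch for a single literal byte; the
 * function name is hypothetical. */
static U32 ZSTD_literalBitCost_sketch(const optState_t* optPtr, BYTE lit)
{
    /* approximate bits to code `lit` under the current adaptive statistics */
    return optPtr->log2litSum - ZSTD_highbit32(optPtr->litFreq[lit]+1);
}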


static void ZSTD_updatePrice(optState_t* optPtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength)
{
    U32 u;

    /* literals */
    optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
    for (u=0; u < litLength; u++)
        optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;

    /* literal Length */
    {   const BYTE LL_deltaCode = 19;
        const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
        optPtr->litLengthFreq[llCode]++;
        optPtr->litLengthSum++;
    }

    /* match offset */
    {   BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
        optPtr->offCodeSum++;
        optPtr->offCodeFreq[offCode]++;
    }

    /* match Length */
    {   const BYTE ML_deltaCode = 36;
        const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
        optPtr->matchLengthFreq[mlCode]++;
        optPtr->matchLengthSum++;
    }

    ZSTD_setLog2Prices(optPtr);
}


#define SET_PRICE(pos, mlen_, offset_, litlen_, price_)                                  \
    {                                                                                    \
        while (last_pos < pos)  { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }  \
        opt[pos].mlen = mlen_;                                                           \
        opt[pos].off = offset_;                                                          \
        opt[pos].litlen = litlen_;                                                       \
        opt[pos].price = price_;                                                         \
    }


/* function safe only for comparisons */
static U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
{
    switch (length)
    {
    default :
    case 4 : return MEM_read32(memPtr);
    case 3 : if (MEM_isLittleEndian())
                return MEM_read32(memPtr)<<8;
             else
                return MEM_read32(memPtr)>>8;
    }
}
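
/* Editorial note (not part of zstd): ZSTD_readMINMATCH(p, 3) shifts a 4-byte
 * read so that only the first 3 bytes remain significant, on either
 * endianness. Two pointers therefore compare equal exactly when their 3-byte
 * prefixes match, at the cost of a single 32-bit load each. A minimal sketch;
 * the function name is hypothetical. */
static int ZSTD_prefix3Equal_sketch(const void* a, const void* b)
{
    return ZSTD_readMINMATCH(a, 3) == ZSTD_readMINMATCH(b, 3);
}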


/* Update hashTable3 up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
static
U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_CCtx* zc, const BYTE* ip)
{
    U32* const hashTable3 = zc->hashTable3;
    U32 const hashLog3 = zc->hashLog3;
    const BYTE* const base = zc->base;
    U32 idx = zc->nextToUpdate3;
    const U32 target = zc->nextToUpdate3 = (U32)(ip - base);
    const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3);

    while(idx < target) {
        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
        idx++;
    }

    return hashTable3[hash3];
}


/*-*************************************
* Binary Tree search
***************************************/
static U32 ZSTD_insertBtAndGetAllMatches (
                        ZSTD_CCtx* zc,
                        const BYTE* const ip, const BYTE* const iLimit,
                        U32 nbCompares, const U32 mls,
                        U32 extDict, ZSTD_match_t* matches, const U32 minMatchLen)
{
    const BYTE* const base = zc->base;
    const U32 current = (U32)(ip-base);
    const U32 hashLog = zc->appliedParams.cParams.hashLog;
    const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
    U32* const hashTable = zc->hashTable;
    U32 matchIndex = hashTable[h];
    U32* const bt = zc->chainTable;
    const U32 btLog = zc->appliedParams.cParams.chainLog - 1;
    const U32 btMask= (1U << btLog) - 1;
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const dictBase = zc->dictBase;
    const U32 dictLimit = zc->dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const U32 btLow = btMask >= current ? 0 : current - btMask;
    const U32 windowLow = zc->lowLimit;
    U32* smallerPtr = bt + 2*(current&btMask);
    U32* largerPtr  = bt + 2*(current&btMask) + 1;
    U32 matchEndIdx = current+8;
    U32 dummy32;   /* to be nullified at the end */
    U32 mnum = 0;

    const U32 minMatch = (mls == 3) ? 3 : 4;
    size_t bestLength = minMatchLen-1;

    if (minMatch == 3) { /* HC3 match finder */
        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 (zc, ip);
        if (matchIndex3>windowLow && (current - matchIndex3 < (1<<18))) {
            const BYTE* match;
            size_t currentMl=0;
            if ((!extDict) || matchIndex3 >= dictLimit) {
                match = base + matchIndex3;
                if (match[bestLength] == ip[bestLength]) currentMl = ZSTD_count(ip, match, iLimit);
            } else {
                match = dictBase + matchIndex3;
                if (ZSTD_readMINMATCH(match, MINMATCH) == ZSTD_readMINMATCH(ip, MINMATCH))   /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
                    currentMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
            }

            /* save best solution */
            if (currentMl > bestLength) {
                bestLength = currentMl;
                matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex3;
                matches[mnum].len = (U32)currentMl;
                mnum++;
                if (currentMl > ZSTD_OPT_NUM) goto update;
                if (ip+currentMl == iLimit) goto update;   /* best possible, and avoid read overflow */
            }
        }
    }

    hashTable[h] = current;   /* Update Hash Table */

    while (nbCompares-- && (matchIndex > windowLow)) {
        U32* nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        const BYTE* match;

        if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
            match = base + matchIndex;
            if (match[matchLength] == ip[matchLength]) {
                matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iLimit) +1;
            }
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
        }

        if (matchLength > bestLength) {
            if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength;
            bestLength = matchLength;
            matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex;
            matches[mnum].len = (U32)matchLength;
            mnum++;
            if (matchLength > ZSTD_OPT_NUM) break;
            if (ip+matchLength == iLimit)   /* equal : no way to know if inf or sup */
                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
        }

        if (match[matchLength] < ip[matchLength]) {
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;

update:
    zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1;
    return mnum;
}


/** Tree updater, providing best match */
static U32 ZSTD_BtGetAllMatches (
                        ZSTD_CCtx* zc,
                        const BYTE* const ip, const BYTE* const iLimit,
                        const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen)
{
    if (ip < zc->base + zc->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
    return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen);
}


static U32 ZSTD_BtGetAllMatches_selectMLS (
                        ZSTD_CCtx* zc,   /* Index table will be updated */
                        const BYTE* ip, const BYTE* const iHighLimit,
                        const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen)
{
    switch(matchLengthSearch)
    {
    case 3 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
    default :
    case 4 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
    case 5 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
    case 7 :
    case 6 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
    }
}

/** Tree updater, providing best match */
static U32 ZSTD_BtGetAllMatches_extDict (
                        ZSTD_CCtx* zc,
                        const BYTE* const ip, const BYTE* const iLimit,
                        const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen)
{
    if (ip < zc->base + zc->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
    return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen);
}


static U32 ZSTD_BtGetAllMatches_selectMLS_extDict (
                        ZSTD_CCtx* zc,   /* Index table will be updated */
                        const BYTE* ip, const BYTE* const iHighLimit,
                        const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen)
{
    switch(matchLengthSearch)
    {
    case 3 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
    default :
    case 4 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
    case 5 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
    case 7 :
    case 6 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
    }
}


/*-*******************************
* Optimal parser
*********************************/
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
                                      const void* src, size_t srcSize, const int ultra)
{
    seqStore_t* seqStorePtr = &(ctx->seqStore);
    optState_t* optStatePtr = &(ctx->optState);
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ctx->base;
    const BYTE* const prefixStart = base + ctx->dictLimit;

    const U32 maxSearches = 1U << ctx->appliedParams.cParams.searchLog;
    const U32 sufficient_len = ctx->appliedParams.cParams.targetLength;
    const U32 mls = ctx->appliedParams.cParams.searchLength;
    const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4;

    ZSTD_optimal_t* opt = optStatePtr->priceTable;
    ZSTD_match_t* matches = optStatePtr->matchTable;
    const BYTE* inr;
    U32 offset, rep[ZSTD_REP_NUM];

    /* init */
    ctx->nextToUpdate3 = ctx->nextToUpdate;
    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
    ip += (ip==prefixStart);
    { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=seqStorePtr->rep[i]; }

    /* Match Loop */
    while (ip < ilimit) {
        U32 cur, match_num, last_pos, litlen, price;
        U32 u, mlen, best_mlen, best_off, litLength;
        memset(opt, 0, sizeof(ZSTD_optimal_t));
        last_pos = 0;
        litlen = (U32)(ip - anchor);

        /* check repCode */
        {   U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
            for (i=(ip == anchor); i<last_i; i++) {
                const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
                if ( (repCur > 0) && (repCur < (S32)(ip-prefixStart))
                    && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) {
                    mlen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repCur, iend) + minMatch;
                    if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
                        best_mlen = mlen; best_off = i; cur = 0; last_pos = 1;
                        goto _storeSequence;
                    }
                    best_off = i - (ip == anchor);
                    do {
                        price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
                        if (mlen > last_pos || price < opt[mlen].price)
                            SET_PRICE(mlen, mlen, i, litlen, price);   /* note : macro modifies last_pos */
                        mlen--;
                    } while (mlen >= minMatch);
        }   }   }

        match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch);

        if (!last_pos && !match_num) { ip++; continue; }

        if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) {
            best_mlen = matches[match_num-1].len;
            best_off = matches[match_num-1].off;
            cur = 0;
            last_pos = 1;
            goto _storeSequence;
        }

        /* set prices using matches at position = 0 */
        best_mlen = (last_pos) ? last_pos : minMatch;
        for (u = 0; u < match_num; u++) {
            mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
            best_mlen = matches[u].len;
            while (mlen <= best_mlen) {
                price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
                if (mlen > last_pos || price < opt[mlen].price)
                    SET_PRICE(mlen, mlen, matches[u].off, litlen, price);   /* note : macro modifies last_pos */
                mlen++;
        }   }

        if (last_pos < minMatch) { ip++; continue; }

        /* initialize opt[0] */
        { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
        opt[0].mlen = 1;
        opt[0].litlen = litlen;

        /* check further positions */
        for (cur = 1; cur <= last_pos; cur++) {
            inr = ip + cur;

            if (opt[cur-1].mlen == 1) {
                litlen = opt[cur-1].litlen + 1;
                if (cur > litlen) {
                    price = opt[cur - litlen].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-litlen);
                } else
                    price = ZSTD_getLiteralPrice(optStatePtr, litlen, anchor);
            } else {
                litlen = 1;
                price = opt[cur - 1].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-1);
            }

            if (cur > last_pos || price <= opt[cur].price)
                SET_PRICE(cur, 1, 0, litlen, price);

            if (cur == last_pos) break;

            if (inr > ilimit)   /* last match must start at a minimum distance of 8 from oend */
                continue;

            mlen = opt[cur].mlen;
            if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
                opt[cur].rep[2] = opt[cur-mlen].rep[1];
                opt[cur].rep[1] = opt[cur-mlen].rep[0];
                opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
            } else {
                opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2];
                opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1];
                /* If opt[cur].off == ZSTD_REP_MOVE_OPT, then mlen != 1.
                 * offset ZSTD_REP_MOVE_OPT is used for the special case
                 * litLength == 0, where offset 0 means something special.
                 * mlen == 1 means the previous byte was stored as a literal,
                 * so they are mutually exclusive.
                 */
                assert(!(opt[cur].off == ZSTD_REP_MOVE_OPT && mlen == 1));
                opt[cur].rep[0] = (opt[cur].off == ZSTD_REP_MOVE_OPT) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]);
            }

            best_mlen = minMatch;
            {   U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
                for (i=(opt[cur].mlen != 1); i<last_i; i++) {   /* check rep */
                    const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
                    if ( (repCur > 0) && (repCur < (S32)(inr-prefixStart))
                        && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) {
                        mlen = (U32)ZSTD_count(inr+minMatch, inr+minMatch - repCur, iend) + minMatch;

                        if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
                            best_mlen = mlen; best_off = i; last_pos = cur + 1;
                            goto _storeSequence;
                        }

                        best_off = i - (opt[cur].mlen != 1);
                        if (mlen > best_mlen) best_mlen = mlen;

                        do {
                            if (opt[cur].mlen == 1) {
                                litlen = opt[cur].litlen;
                                if (cur > litlen) {
                                    price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
                                } else
                                    price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
                            } else {
                                litlen = 0;
                                price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
                            }

                            if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
                                SET_PRICE(cur + mlen, mlen, i, litlen, price);
                            mlen--;
                        } while (mlen >= minMatch);
            }   }   }

            match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen);

            if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) {
                best_mlen = matches[match_num-1].len;
                best_off = matches[match_num-1].off;
                last_pos = cur + 1;
                goto _storeSequence;
            }

            /* set prices using matches at position = cur */
            for (u = 0; u < match_num; u++) {
                mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
                best_mlen = matches[u].len;

                while (mlen <= best_mlen) {
                    if (opt[cur].mlen == 1) {
                        litlen = opt[cur].litlen;
                        if (cur > litlen)
                            price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
                        else
                            price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
                    } else {
                        litlen = 0;
                        price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
                    }

                    if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
                        SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);

                    mlen++;
        }   }   }

        best_mlen = opt[last_pos].mlen;
        best_off = opt[last_pos].off;
        cur = last_pos - best_mlen;

        /* store sequence */
_storeSequence:   /* cur, last_pos, best_mlen, best_off have to be set */
        opt[0].mlen = 1;

        while (1) {
            mlen = opt[cur].mlen;
            offset = opt[cur].off;
            opt[cur].mlen = best_mlen;
            opt[cur].off = best_off;
            best_mlen = mlen;
            best_off = offset;
            if (mlen > cur) break;
            cur -= mlen;
        }

        for (u = 0; u <= last_pos;) {
            u += opt[u].mlen;
        }

        for (cur=0; cur < last_pos; ) {
            mlen = opt[cur].mlen;
            if (mlen == 1) { ip++; cur++; continue; }
            offset = opt[cur].off;
            cur += mlen;
            litLength = (U32)(ip - anchor);

            if (offset > ZSTD_REP_MOVE_OPT) {
                rep[2] = rep[1];
                rep[1] = rep[0];
                rep[0] = offset - ZSTD_REP_MOVE_OPT;
                offset--;
            } else {
                if (offset != 0) {
                    best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
                    if (offset != 1) rep[2] = rep[1];
                    rep[1] = rep[0];
                    rep[0] = best_off;
                }
                if (litLength==0) offset--;
            }

            ZSTD_updatePrice(optStatePtr, litLength, anchor, offset, mlen-MINMATCH);
            ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
            anchor = ip = ip + mlen;
    }   }   /* for (cur=0; cur < last_pos; ) */

    /* Save reps for next block */
    { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->repToConfirm[i] = rep[i]; }

    /* Return the last literals size */
    return iend - anchor;
}


size_t ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
}

size_t ZSTD_compressBlock_btultra(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
}


FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
                                              const void* src, size_t srcSize, const int ultra)
{
    seqStore_t* seqStorePtr = &(ctx->seqStore);
    optState_t* optStatePtr = &(ctx->optState);
    const BYTE* const istart = (const BYTE*)src;
|
||||
const BYTE* ip = istart;
|
||||
const BYTE* anchor = istart;
|
||||
const BYTE* const iend = istart + srcSize;
|
||||
const BYTE* const ilimit = iend - 8;
|
||||
const BYTE* const base = ctx->base;
|
||||
const U32 lowestIndex = ctx->lowLimit;
|
||||
const U32 dictLimit = ctx->dictLimit;
|
||||
const BYTE* const prefixStart = base + dictLimit;
|
||||
const BYTE* const dictBase = ctx->dictBase;
|
||||
const BYTE* const dictEnd = dictBase + dictLimit;
|
||||
|
||||
const U32 maxSearches = 1U << ctx->appliedParams.cParams.searchLog;
|
||||
const U32 sufficient_len = ctx->appliedParams.cParams.targetLength;
|
||||
const U32 mls = ctx->appliedParams.cParams.searchLength;
|
||||
const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4;
|
||||
|
||||
ZSTD_optimal_t* opt = optStatePtr->priceTable;
|
||||
ZSTD_match_t* matches = optStatePtr->matchTable;
|
||||
const BYTE* inr;
|
||||
|
||||
/* init */
|
||||
U32 offset, rep[ZSTD_REP_NUM];
|
||||
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=seqStorePtr->rep[i]; }
|
||||
|
||||
ctx->nextToUpdate3 = ctx->nextToUpdate;
|
||||
ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
|
||||
ip += (ip==prefixStart);
|
||||
|
||||
/* Match Loop */
|
||||
while (ip < ilimit) {
|
||||
U32 cur, match_num, last_pos, litlen, price;
|
||||
U32 u, mlen, best_mlen, best_off, litLength;
|
||||
U32 current = (U32)(ip-base);
|
||||
memset(opt, 0, sizeof(ZSTD_optimal_t));
|
||||
last_pos = 0;
|
||||
opt[0].litlen = (U32)(ip - anchor);
|
||||
|
||||
/* check repCode */
|
||||
{ U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
|
||||
for (i = (ip==anchor); i<last_i; i++) {
|
||||
const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
|
||||
const U32 repIndex = (U32)(current - repCur);
|
||||
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
|
||||
const BYTE* const repMatch = repBase + repIndex;
|
||||
if ( (repCur > 0 && repCur <= (S32)current)
|
||||
&& (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */
|
||||
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
|
||||
/* repcode detected we should take it */
|
||||
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
|
||||
mlen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch;
|
||||
|
||||
if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
|
||||
best_mlen = mlen; best_off = i; cur = 0; last_pos = 1;
|
||||
goto _storeSequence;
|
||||
}
|
||||
|
||||
best_off = i - (ip==anchor);
|
||||
litlen = opt[0].litlen;
|
||||
do {
|
||||
price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
|
||||
if (mlen > last_pos || price < opt[mlen].price)
|
||||
SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
|
||||
mlen--;
|
||||
} while (mlen >= minMatch);
|
||||
} } }
|
||||
|
||||
match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */
|
||||
|
||||
if (!last_pos && !match_num) { ip++; continue; }
|
||||
|
||||
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
|
||||
opt[0].mlen = 1;
|
||||
|
||||
if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) {
|
||||
best_mlen = matches[match_num-1].len;
|
||||
best_off = matches[match_num-1].off;
|
||||
cur = 0;
|
||||
last_pos = 1;
|
||||
goto _storeSequence;
|
||||
}
|
||||
|
||||
best_mlen = (last_pos) ? last_pos : minMatch;
|
||||
|
||||
/* set prices using matches at position = 0 */
|
||||
for (u = 0; u < match_num; u++) {
|
||||
mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
|
||||
best_mlen = matches[u].len;
|
||||
litlen = opt[0].litlen;
|
||||
while (mlen <= best_mlen) {
|
||||
price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
if (mlen > last_pos || price < opt[mlen].price)
|
||||
SET_PRICE(mlen, mlen, matches[u].off, litlen, price);
|
||||
mlen++;
|
||||
} }
|
||||
|
||||
if (last_pos < minMatch) {
|
||||
ip++; continue;
|
||||
}
|
||||
|
||||
/* check further positions */
|
||||
for (cur = 1; cur <= last_pos; cur++) {
|
||||
inr = ip + cur;
|
||||
|
||||
if (opt[cur-1].mlen == 1) {
|
||||
litlen = opt[cur-1].litlen + 1;
|
||||
if (cur > litlen) {
|
||||
price = opt[cur - litlen].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-litlen);
|
||||
} else
|
||||
price = ZSTD_getLiteralPrice(optStatePtr, litlen, anchor);
|
||||
} else {
|
||||
litlen = 1;
|
||||
price = opt[cur - 1].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-1);
|
||||
}
|
||||
|
||||
if (cur > last_pos || price <= opt[cur].price)
|
||||
SET_PRICE(cur, 1, 0, litlen, price);
|
||||
|
||||
if (cur == last_pos) break;
|
||||
|
||||
if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
|
||||
continue;
|
||||
|
||||
mlen = opt[cur].mlen;
|
||||
if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
|
||||
opt[cur].rep[2] = opt[cur-mlen].rep[1];
|
||||
opt[cur].rep[1] = opt[cur-mlen].rep[0];
|
||||
opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
|
||||
} else {
|
||||
opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2];
|
||||
opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1];
|
||||
assert(!(opt[cur].off == ZSTD_REP_MOVE_OPT && mlen == 1));
|
||||
opt[cur].rep[0] = (opt[cur].off == ZSTD_REP_MOVE_OPT) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]);
|
||||
}
|
||||
|
||||
best_mlen = minMatch;
|
||||
{ U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
|
||||
for (i = (mlen != 1); i<last_i; i++) {
|
||||
const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
|
||||
const U32 repIndex = (U32)(current+cur - repCur);
|
||||
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
|
||||
const BYTE* const repMatch = repBase + repIndex;
|
||||
if ( (repCur > 0 && repCur <= (S32)(current+cur))
|
||||
&& (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */
|
||||
&& (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
|
||||
/* repcode detected */
|
||||
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
|
||||
mlen = (U32)ZSTD_count_2segments(inr+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch;
|
||||
|
||||
if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
|
||||
best_mlen = mlen; best_off = i; last_pos = cur + 1;
|
||||
goto _storeSequence;
|
||||
}
|
||||
|
||||
best_off = i - (opt[cur].mlen != 1);
|
||||
if (mlen > best_mlen) best_mlen = mlen;
|
||||
|
||||
do {
|
||||
if (opt[cur].mlen == 1) {
|
||||
litlen = opt[cur].litlen;
|
||||
if (cur > litlen) {
|
||||
price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
|
||||
} else
|
||||
price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
|
||||
} else {
|
||||
litlen = 0;
|
||||
price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
|
||||
}
|
||||
|
||||
if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
|
||||
SET_PRICE(cur + mlen, mlen, i, litlen, price);
|
||||
mlen--;
|
||||
} while (mlen >= minMatch);
|
||||
} } }
|
||||
|
||||
match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch);
|
||||
|
||||
if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) {
|
||||
best_mlen = matches[match_num-1].len;
|
||||
best_off = matches[match_num-1].off;
|
||||
last_pos = cur + 1;
|
||||
goto _storeSequence;
|
||||
}
|
||||
|
||||
/* set prices using matches at position = cur */
|
||||
for (u = 0; u < match_num; u++) {
|
||||
mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
|
||||
best_mlen = matches[u].len;
|
||||
|
||||
while (mlen <= best_mlen) {
|
||||
if (opt[cur].mlen == 1) {
|
||||
litlen = opt[cur].litlen;
|
||||
if (cur > litlen)
|
||||
price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
else
|
||||
price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
} else {
|
||||
litlen = 0;
|
||||
price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
}
|
||||
|
||||
if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
|
||||
SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
|
||||
|
||||
mlen++;
|
||||
} } } /* for (cur = 1; cur <= last_pos; cur++) */
|
||||
|
||||
best_mlen = opt[last_pos].mlen;
|
||||
best_off = opt[last_pos].off;
|
||||
cur = last_pos - best_mlen;
|
||||
|
||||
/* store sequence */
|
||||
_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
|
||||
opt[0].mlen = 1;
|
||||
|
||||
while (1) {
|
||||
mlen = opt[cur].mlen;
|
||||
offset = opt[cur].off;
|
||||
opt[cur].mlen = best_mlen;
|
||||
opt[cur].off = best_off;
|
||||
best_mlen = mlen;
|
||||
best_off = offset;
|
||||
if (mlen > cur) break;
|
||||
cur -= mlen;
|
||||
}
|
||||
|
||||
for (u = 0; u <= last_pos; ) {
|
||||
u += opt[u].mlen;
|
||||
}
|
||||
|
||||
for (cur=0; cur < last_pos; ) {
|
||||
mlen = opt[cur].mlen;
|
||||
if (mlen == 1) { ip++; cur++; continue; }
|
||||
offset = opt[cur].off;
|
||||
cur += mlen;
|
||||
litLength = (U32)(ip - anchor);
|
||||
|
||||
if (offset > ZSTD_REP_MOVE_OPT) {
|
||||
rep[2] = rep[1];
|
||||
rep[1] = rep[0];
|
||||
rep[0] = offset - ZSTD_REP_MOVE_OPT;
|
||||
offset--;
|
||||
} else {
|
||||
if (offset != 0) {
|
||||
best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
|
||||
if (offset != 1) rep[2] = rep[1];
|
||||
rep[1] = rep[0];
|
||||
rep[0] = best_off;
|
||||
}
|
||||
|
||||
if (litLength==0) offset--;
|
||||
}
|
||||
|
||||
ZSTD_updatePrice(optStatePtr, litLength, anchor, offset, mlen-MINMATCH);
|
||||
ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
|
||||
anchor = ip = ip + mlen;
|
||||
} } /* for (cur=0; cur < last_pos; ) */
|
||||
|
||||
/* Save reps for next block */
|
||||
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->repToConfirm[i] = rep[i]; }
|
||||
|
||||
/* Return the last literals size */
|
||||
return iend - anchor;
|
||||
}
|
||||
|
||||
|
||||
size_t ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
|
||||
{
|
||||
return ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
|
||||
}
|
||||
|
||||
size_t ZSTD_compressBlock_btultra_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
|
||||
{
|
||||
return ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
|
||||
}
|
||||
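Applications never call these block compressors directly; they reach the btopt/btultra paths through zstd's public one-shot API by requesting a high compression level. A minimal sketch, assuming the level-to-strategy mapping of zstd 1.3.2 (it is internal, depends on source size, and may change between versions; level 19 is assumed here to land on a bt* optimal-parsing strategy):

    #include "zstd.h"

    /* returns the compressed size, or 0 on error */
    static size_t compress_at_level19(void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
    {
        size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 19);
        return ZSTD_isError(cSize) ? 0 : cSize;
    }

The caller is expected to size dst with ZSTD_compressBound(srcSize) so compression cannot fail for lack of room.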
30
src/borg/algorithms/zstd/lib/compress/zstd_opt.h
Normal file
@@ -0,0 +1,30 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_OPT_H
#define ZSTD_OPT_H

#include "zstd_compress.h"

#if defined (__cplusplus)
extern "C" {
#endif

size_t ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra(ZSTD_CCtx* ctx, const void* src, size_t srcSize);

size_t ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_OPT_H */
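The four declarations above are internal entry points; the real routing lives in zstd_compress.c and covers every strategy. A hedged sketch of how a caller might dispatch among just these four (selectOptCompressor is a hypothetical helper, not part of the bundled sources):

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_strategy */
    #include "zstd.h"
    #include "zstd_opt.h"

    typedef size_t (*blockCompressor)(ZSTD_CCtx* ctx, const void* src, size_t srcSize);

    /* hypothetical helper : picks an entry point by strategy and dict mode */
    static blockCompressor selectOptCompressor(ZSTD_strategy strat, int extDict)
    {
        if (strat == ZSTD_btultra)
            return extDict ? ZSTD_compressBlock_btultra_extDict
                           : ZSTD_compressBlock_btultra;
        /* otherwise assume ZSTD_btopt */
        return extDict ? ZSTD_compressBlock_btopt_extDict
                       : ZSTD_compressBlock_btopt;
    }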
1099
src/borg/algorithms/zstd/lib/compress/zstdmt_compress.c
Normal file
File diff suppressed because it is too large
132
src/borg/algorithms/zstd/lib/compress/zstdmt_compress.h
Normal file
@@ -0,0 +1,132 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTDMT_COMPRESS_H
#define ZSTDMT_COMPRESS_H

#if defined (__cplusplus)
extern "C" {
#endif


/* Note : This is an internal API.
 *        Some methods are still exposed (ZSTDLIB_API),
 *        because it used to be the only way to invoke MT compression.
 *        Now, it's recommended to use ZSTD_compress_generic() instead.
 *        These methods will stop being exposed in a future version */

/* ===   Dependencies   === */
#include <stddef.h>                /* size_t */
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters */
#include "zstd.h"                  /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */


/* ===   Memory management   === */
typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbThreads);
ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads,
                                                    ZSTD_customMem cMem);
ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);

ZSTDLIB_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);

/* ===   Simple buffer-to-buffer one-pass function   === */

ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       int compressionLevel);



/* ===   Streaming functions   === */

ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);  /**< pledgedSrcSize is optional and can be zero == unknown */

ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);

ZSTDLIB_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);   /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);     /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */


/* ===   Advanced functions and parameters   === */

#ifndef ZSTDMT_SECTION_SIZE_MIN
#  define ZSTDMT_SECTION_SIZE_MIN (1U << 20)   /* 1 MB - Minimum size of each compression job */
#endif

ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
                                            void* dst, size_t dstCapacity,
                                            const void* src, size_t srcSize,
                                            const ZSTD_CDict* cdict,
                                            ZSTD_parameters const params,
                                            unsigned overlapLog);

ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
                                               const void* dict, size_t dictSize,   /* dict can be released after init, a local copy is preserved within zcs */
                                               ZSTD_parameters params,
                                               unsigned long long pledgedSrcSize);  /* pledgedSrcSize is optional and can be zero == unknown */

ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
                                                 const ZSTD_CDict* cdict,
                                                 ZSTD_frameParameters fparams,
                                                 unsigned long long pledgedSrcSize);  /* note : zero means empty */

/* ZSTDMT_parameter :
 * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
typedef enum {
    ZSTDMT_p_sectionSize,        /* size of input "section". Each section is compressed in parallel. 0 means default, which is dynamically determined within compression functions */
    ZSTDMT_p_overlapSectionLog   /* Log of overlapped section; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window */
} ZSTDMT_parameter;

/* ZSTDMT_setMTCtxParameter() :
 * allow setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter.
 * The function must be called typically after ZSTD_createCCtx().
 * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
 * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value);


/*! ZSTDMT_compressStream_generic() :
 *  Combines ZSTDMT_compressStream() with ZSTDMT_flushStream() or ZSTDMT_endStream()
 *  depending on flush directive.
 * @return : minimum amount of data still to be flushed
 *           0 if fully flushed
 *           or an error code */
ZSTDLIB_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
                                                 ZSTD_outBuffer* output,
                                                 ZSTD_inBuffer* input,
                                                 ZSTD_EndDirective endOp);


/* ===   Private definitions; never ever use directly   === */

size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, unsigned value);

size_t ZSTDMT_initializeCCtxParameters(ZSTD_CCtx_params* params, unsigned nbThreads);

/*! ZSTDMT_initCStream_internal() :
 *  Private use only. Init streaming operation.
 *  expects params to be valid.
 *  must receive dict, or cdict, or none, but not both.
 * @return : 0, or an error code */
size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
                                   const void* dict, size_t dictSize, ZSTD_dictMode_e dictMode,
                                   const ZSTD_CDict* cdict,
                                   ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);


#if defined (__cplusplus)
}
#endif

#endif /* ZSTDMT_COMPRESS_H */
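A minimal sketch of driving the streaming API declared above, assuming dstCapacity is at least ZSTD_compressBound(srcSize) so the output buffer never stalls a flush (error handling shortened; all functions used are declared in this header or in zstd.h):

    #include <stddef.h>
    #define ZSTD_STATIC_LINKING_ONLY
    #include "zstd.h"
    #include "zstdmt_compress.h"

    static size_t mt_compress_once(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
    {
        ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);   /* 4 worker threads */
        ZSTD_inBuffer  in  = { src, srcSize, 0 };
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        size_t code = (size_t)-1;                          /* generic failure */
        if (mtctx == NULL) return code;
        code = ZSTDMT_initCStream(mtctx, 5);               /* compression level 5 */
        while (!ZSTD_isError(code) && in.pos < in.size)
            code = ZSTDMT_compressStream(mtctx, &out, &in);
        if (!ZSTD_isError(code)) {
            do { code = ZSTDMT_endStream(mtctx, &out);     /* flush + epilogue */
            } while (code > 0 && !ZSTD_isError(code));
        }
        ZSTDMT_freeCCtx(mtctx);
        return ZSTD_isError(code) ? code : out.pos;        /* compressed size */
    }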
996
src/borg/algorithms/zstd/lib/decompress/huf_decompress.c
Normal file
@@ -0,0 +1,996 @@
/* ******************************************************************
   Huffman decoder, part of New Generation Entropy library
   Copyright (C) 2013-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */

/* **************************************************************
*  Dependencies
****************************************************************/
#include <string.h>     /* memcpy, memset */
#include "bitstream.h"  /* BIT_* */
#include "compiler.h"
#include "fse.h"        /* header compression */
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "error_private.h"


/* **************************************************************
*  Error Management
****************************************************************/
#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */


/* **************************************************************
*  Byte alignment for workSpace management
****************************************************************/
#define HUF_ALIGN(x, a)         HUF_ALIGN_MASK((x), (a) - 1)
#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
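/* Worked example : HUF_ALIGN(13, sizeof(U32)) == (13 + 3) & ~3 == 16,
 * i.e. a byte count is rounded up to the next multiple of 4, so that
 * U32 views carved out of the workSpace below stay properly aligned. */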

/*-***************************/
/*  generic DTableDesc       */
/*-***************************/

typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;

static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
{
    DTableDesc dtd;
    memcpy(&dtd, table, sizeof(dtd));
    return dtd;
}


/*-***************************/
/*  single-symbol decoding   */
/*-***************************/

typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */

size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
{
    U32 tableLog = 0;
    U32 nbSymbols = 0;
    size_t iSize;
    void* const dtPtr = DTable + 1;
    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;

    U32* rankVal;
    BYTE* huffWeight;
    size_t spaceUsed32 = 0;

    rankVal = (U32 *)workSpace + spaceUsed32;
    spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
    huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32);
    spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;

    if ((spaceUsed32 << 2) > wkspSize)
        return ERROR(tableLog_tooLarge);
    workSpace = (U32 *)workSpace + spaceUsed32;
    wkspSize -= (spaceUsed32 << 2);

    HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
    /* memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzers complain ... */

    iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
    if (HUF_isError(iSize)) return iSize;

    /* Table header */
    {   DTableDesc dtd = HUF_getDTableDesc(DTable);
        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, Huffman tree cannot fit in */
        dtd.tableType = 0;
        dtd.tableLog = (BYTE)tableLog;
        memcpy(DTable, &dtd, sizeof(dtd));
    }

    /* Calculate starting value for each rank */
    {   U32 n, nextRankStart = 0;
        for (n=1; n<tableLog+1; n++) {
            U32 const current = nextRankStart;
            nextRankStart += (rankVal[n] << (n-1));
            rankVal[n] = current;
    }   }

    /* fill DTable */
    {   U32 n;
        for (n=0; n<nbSymbols; n++) {
            U32 const w = huffWeight[n];
            U32 const length = (1 << w) >> 1;
            U32 u;
            HUF_DEltX2 D;
            D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
            for (u = rankVal[w]; u < rankVal[w] + length; u++)
                dt[u] = D;
            rankVal[w] += length;
    }   }

    return iSize;
}

size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
{
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_readDTableX2_wksp(DTable, src, srcSize,
                                 workSpace, sizeof(workSpace));
}


static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
{
    size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
    BYTE const c = dt[val].byte;
    BIT_skipBits(Dstream, dt[val].nbBits);
    return c;
}

#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)

#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)

#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
    if (MEM_64bits()) \
        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)

HINT_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
{
    BYTE* const pStart = p;

    /* up to 4 symbols at a time */
    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) {
        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
    }

    /* closer to the end */
    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);

    /* no more data to retrieve from bitstream, hence no need to reload */
    while (p < pEnd)
        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);

    return pEnd-pStart;
}

static size_t HUF_decompress1X2_usingDTable_internal(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + dstSize;
    const void* dtPtr = DTable + 1;
    const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
    BIT_DStream_t bitD;
    DTableDesc const dtd = HUF_getDTableDesc(DTable);
    U32 const dtLog = dtd.tableLog;

    {   size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
        if (HUF_isError(errorCode)) return errorCode; }

    HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);

    /* check */
    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);

    return dstSize;
}

size_t HUF_decompress1X2_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    if (dtd.tableType != 0) return ERROR(GENERIC);
    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}

size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
                                   const void* cSrc, size_t cSrcSize,
                                   void* workSpace, size_t wkspSize)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize; cSrcSize -= hSize;

    return HUF_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
}


size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
                              const void* cSrc, size_t cSrcSize)
{
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
                                       workSpace, sizeof(workSpace));
}

size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
    return HUF_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
}


static size_t HUF_decompress4X2_usingDTable_internal(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    /* Check */
    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */

    {   const BYTE* const istart = (const BYTE*) cSrc;
        BYTE* const ostart = (BYTE*) dst;
        BYTE* const oend = ostart + dstSize;
        const void* const dtPtr = DTable + 1;
        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;

        /* Init */
        BIT_DStream_t bitD1;
        BIT_DStream_t bitD2;
        BIT_DStream_t bitD3;
        BIT_DStream_t bitD4;
        size_t const length1 = MEM_readLE16(istart);
        size_t const length2 = MEM_readLE16(istart+2);
        size_t const length3 = MEM_readLE16(istart+4);
        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
        const BYTE* const istart1 = istart + 6;  /* jumpTable */
        const BYTE* const istart2 = istart1 + length1;
        const BYTE* const istart3 = istart2 + length2;
        const BYTE* const istart4 = istart3 + length3;
        const size_t segmentSize = (dstSize+3) / 4;
        BYTE* const opStart2 = ostart + segmentSize;
        BYTE* const opStart3 = opStart2 + segmentSize;
        BYTE* const opStart4 = opStart3 + segmentSize;
        BYTE* op1 = ostart;
        BYTE* op2 = opStart2;
        BYTE* op3 = opStart3;
        BYTE* op4 = opStart4;
        U32 endSignal;
        DTableDesc const dtd = HUF_getDTableDesc(DTable);
        U32 const dtLog = dtd.tableLog;

        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
        {   size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
            if (HUF_isError(errorCode)) return errorCode; }
        {   size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
            if (HUF_isError(errorCode)) return errorCode; }
        {   size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
            if (HUF_isError(errorCode)) return errorCode; }
        {   size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
            if (HUF_isError(errorCode)) return errorCode; }

        /* 16-32 symbols per loop (4-8 symbols per stream) */
        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) {
            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
        }

        /* check corruption */
        if (op1 > opStart2) return ERROR(corruption_detected);
        if (op2 > opStart3) return ERROR(corruption_detected);
        if (op3 > opStart4) return ERROR(corruption_detected);
        /* note : op4 supposed already verified within main loop */

        /* finish bitStreams one by one */
        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);

        /* check */
        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
        if (!endSignal) return ERROR(corruption_detected);

        /* decoded size */
        return dstSize;
    }
}


size_t HUF_decompress4X2_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    if (dtd.tableType != 0) return ERROR(GENERIC);
    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}


size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
                                   const void* cSrc, size_t cSrcSize,
                                   void* workSpace, size_t wkspSize)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t const hSize = HUF_readDTableX2_wksp (dctx, cSrc, cSrcSize,
                                                workSpace, wkspSize);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize; cSrcSize -= hSize;

    return HUF_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx);
}


size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
                                       workSpace, sizeof(workSpace));
}
size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
    return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}

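A minimal usage sketch for the single-symbol path above (decode_huf_literals is a hypothetical wrapper; note that HUF streams carry no size header, so dstSize must be the exact regenerated size, supplied by the enclosing format):

    #define HUF_STATIC_LINKING_ONLY
    #include "huf.h"

    static size_t decode_huf_literals(void* dst, size_t dstSize,
                                      const void* cSrc, size_t cSrcSize)
    {
        size_t const r = HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);
        return HUF_isError(r) ? 0 : r;   /* r == dstSize on success */
    }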
/* *************************/
/* double-symbols decoding */
/* *************************/
typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */

typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;

/* HUF_fillDTableX4Level2() :
 * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
                           const U32* rankValOrigin, const int minWeight,
                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
                           U32 nbBitsBaseline, U16 baseSeq)
{
    HUF_DEltX4 DElt;
    U32 rankVal[HUF_TABLELOG_MAX + 1];

    /* get pre-calculated rankVal */
    memcpy(rankVal, rankValOrigin, sizeof(rankVal));

    /* fill skipped values */
    if (minWeight>1) {
        U32 i, skipSize = rankVal[minWeight];
        MEM_writeLE16(&(DElt.sequence), baseSeq);
        DElt.nbBits   = (BYTE)(consumed);
        DElt.length   = 1;
        for (i = 0; i < skipSize; i++)
            DTable[i] = DElt;
    }

    /* fill DTable */
    {   U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
            const U32 symbol = sortedSymbols[s].symbol;
            const U32 weight = sortedSymbols[s].weight;
            const U32 nbBits = nbBitsBaseline - weight;
            const U32 length = 1 << (sizeLog-nbBits);
            const U32 start = rankVal[weight];
            U32 i = start;
            const U32 end = start + length;

            MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
            DElt.nbBits = (BYTE)(nbBits + consumed);
            DElt.length = 2;
            do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */

            rankVal[weight] += length;
    }   }
}

typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];

static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
                           const U32 nbBitsBaseline)
{
    U32 rankVal[HUF_TABLELOG_MAX + 1];
    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
    const U32 minBits  = nbBitsBaseline - maxWeight;
    U32 s;

    memcpy(rankVal, rankValOrigin, sizeof(rankVal));

    /* fill DTable */
    for (s=0; s<sortedListSize; s++) {
        const U16 symbol = sortedList[s].symbol;
        const U32 weight = sortedList[s].weight;
        const U32 nbBits = nbBitsBaseline - weight;
        const U32 start = rankVal[weight];
        const U32 length = 1 << (targetLog-nbBits);

        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
            U32 sortedRank;
            int minWeight = nbBits + scaleLog;
            if (minWeight < 1) minWeight = 1;
            sortedRank = rankStart[minWeight];
            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
                           rankValOrigin[nbBits], minWeight,
                           sortedList+sortedRank, sortedListSize-sortedRank,
                           nbBitsBaseline, symbol);
        } else {
            HUF_DEltX4 DElt;
            MEM_writeLE16(&(DElt.sequence), symbol);
            DElt.nbBits = (BYTE)(nbBits);
            DElt.length = 1;
            {   U32 const end = start + length;
                U32 u;
                for (u = start; u < end; u++) DTable[u] = DElt;
        }   }
        rankVal[weight] += length;
    }
}

size_t HUF_readDTableX4_wksp(HUF_DTable* DTable, const void* src,
                             size_t srcSize, void* workSpace,
                             size_t wkspSize)
{
    U32 tableLog, maxW, sizeOfSort, nbSymbols;
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    U32 const maxTableLog = dtd.maxTableLog;
    size_t iSize;
    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
    HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr;
    U32 *rankStart;

    rankValCol_t* rankVal;
    U32* rankStats;
    U32* rankStart0;
    sortedSymbol_t* sortedSymbol;
    BYTE* weightList;
    size_t spaceUsed32 = 0;

    rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32);
    spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
    rankStats = (U32 *)workSpace + spaceUsed32;
    spaceUsed32 += HUF_TABLELOG_MAX + 1;
    rankStart0 = (U32 *)workSpace + spaceUsed32;
    spaceUsed32 += HUF_TABLELOG_MAX + 2;
    sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t);
    spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
    weightList = (BYTE *)((U32 *)workSpace + spaceUsed32);
    spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;

    if ((spaceUsed32 << 2) > wkspSize)
        return ERROR(tableLog_tooLarge);
    workSpace = (U32 *)workSpace + spaceUsed32;
    wkspSize -= (spaceUsed32 << 2);

    rankStart = rankStart0 + 1;
    memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));

    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
    if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
    /* memset(weightList, 0, sizeof(weightList)); */  /* is not necessary, even though some analyzers complain ... */
|
||||
|
||||
iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
|
||||
if (HUF_isError(iSize)) return iSize;
|
||||
|
||||
/* check result */
|
||||
if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
|
||||
|
||||
/* find maxWeight */
|
||||
for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
|
||||
|
||||
/* Get start index of each weight */
|
||||
{ U32 w, nextRankStart = 0;
|
||||
for (w=1; w<maxW+1; w++) {
|
||||
U32 current = nextRankStart;
|
||||
nextRankStart += rankStats[w];
|
||||
rankStart[w] = current;
|
||||
}
|
||||
rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
|
||||
sizeOfSort = nextRankStart;
|
||||
}
|
||||
|
||||
/* sort symbols by weight */
|
||||
{ U32 s;
|
||||
for (s=0; s<nbSymbols; s++) {
|
||||
U32 const w = weightList[s];
|
||||
U32 const r = rankStart[w]++;
|
||||
sortedSymbol[r].symbol = (BYTE)s;
|
||||
sortedSymbol[r].weight = (BYTE)w;
|
||||
}
|
||||
rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
|
||||
}
|
||||
|
||||
/* Build rankVal */
|
||||
{ U32* const rankVal0 = rankVal[0];
|
||||
{ int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
|
||||
U32 nextRankVal = 0;
|
||||
U32 w;
|
||||
for (w=1; w<maxW+1; w++) {
|
||||
U32 current = nextRankVal;
|
||||
nextRankVal += rankStats[w] << (w+rescale);
|
||||
rankVal0[w] = current;
|
||||
} }
|
||||
{ U32 const minBits = tableLog+1 - maxW;
|
||||
U32 consumed;
|
||||
for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
|
||||
U32* const rankValPtr = rankVal[consumed];
|
||||
U32 w;
|
||||
for (w = 1; w < maxW+1; w++) {
|
||||
rankValPtr[w] = rankVal0[w] >> consumed;
|
||||
} } } }
|
||||
|
||||
HUF_fillDTableX4(dt, maxTableLog,
|
||||
sortedSymbol, sizeOfSort,
|
||||
rankStart0, rankVal, maxW,
|
||||
tableLog+1);
|
||||
|
||||
dtd.tableLog = (BYTE)maxTableLog;
|
||||
dtd.tableType = 1;
|
||||
memcpy(DTable, &dtd, sizeof(dtd));
|
||||
return iSize;
|
||||
}
|
||||
|
||||
size_t HUF_readDTableX4(HUF_DTable* DTable, const void* src, size_t srcSize)
|
||||
{
|
||||
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
|
||||
return HUF_readDTableX4_wksp(DTable, src, srcSize,
|
||||
workSpace, sizeof(workSpace));
|
||||
}
|
||||
|
||||
static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
|
||||
{
|
||||
size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
|
||||
memcpy(op, dt+val, 2);
|
||||
BIT_skipBits(DStream, dt[val].nbBits);
|
||||
return dt[val].length;
|
||||
}
|
||||
|
||||
static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
|
||||
{
|
||||
size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
|
||||
memcpy(op, dt+val, 1);
|
||||
if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
|
||||
else {
|
||||
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
|
||||
BIT_skipBits(DStream, dt[val].nbBits);
|
||||
if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
|
||||
/* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
|
||||
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
|
||||
} }
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
|
||||
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
|
||||
|
||||
#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
|
||||
if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
|
||||
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
|
||||
|
||||
#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
|
||||
if (MEM_64bits()) \
|
||||
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
|
||||
|
||||
HINT_INLINE size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
|
||||
{
|
||||
BYTE* const pStart = p;
|
||||
|
||||
/* up to 8 symbols at a time */
|
||||
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
|
||||
HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
|
||||
HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
|
||||
HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
|
||||
HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
|
||||
}
|
||||
|
||||
/* closer to end : up to 2 symbols at a time */
|
||||
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
|
||||
HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
|
||||
|
||||
while (p <= pEnd-2)
|
||||
HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
|
||||
|
||||
if (p < pEnd)
|
||||
p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
|
||||
|
||||
return p-pStart;
|
||||
}
|
||||
|
||||
|
||||
static size_t HUF_decompress1X4_usingDTable_internal(
|
||||
void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
const HUF_DTable* DTable)
|
||||
{
|
||||
BIT_DStream_t bitD;
|
||||
|
||||
/* Init */
|
||||
{ size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
|
||||
if (HUF_isError(errorCode)) return errorCode;
|
||||
}
|
||||
|
||||
/* decode */
|
||||
{ BYTE* const ostart = (BYTE*) dst;
|
||||
BYTE* const oend = ostart + dstSize;
|
||||
const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
|
||||
const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
|
||||
DTableDesc const dtd = HUF_getDTableDesc(DTable);
|
||||
HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
|
||||
}
|
||||
|
||||
/* check */
|
||||
if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
|
||||
|
||||
/* decoded size */
|
||||
return dstSize;
|
||||
}
|
||||
|
||||
size_t HUF_decompress1X4_usingDTable(
|
||||
void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
const HUF_DTable* DTable)
|
||||
{
|
||||
DTableDesc dtd = HUF_getDTableDesc(DTable);
|
||||
if (dtd.tableType != 1) return ERROR(GENERIC);
|
||||
return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
|
||||
}
|
||||
|
||||
size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
void* workSpace, size_t wkspSize)
|
||||
{
|
||||
const BYTE* ip = (const BYTE*) cSrc;
|
||||
|
||||
size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize,
|
||||
workSpace, wkspSize);
|
||||
if (HUF_isError(hSize)) return hSize;
|
||||
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
|
||||
ip += hSize; cSrcSize -= hSize;
|
||||
|
||||
return HUF_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
|
||||
}
|
||||
|
||||
|
||||
size_t HUF_decompress1X4_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize)
|
||||
{
|
||||
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
|
||||
return HUF_decompress1X4_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
|
||||
workSpace, sizeof(workSpace));
|
||||
}
|
||||
|
||||
size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
|
||||
{
|
||||
HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
|
||||
return HUF_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
|
||||
}
|
||||
|
||||
static size_t HUF_decompress4X4_usingDTable_internal(
|
||||
void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
const HUF_DTable* DTable)
|
||||
{
|
||||
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
|
||||
|
||||
{ const BYTE* const istart = (const BYTE*) cSrc;
|
||||
BYTE* const ostart = (BYTE*) dst;
|
||||
BYTE* const oend = ostart + dstSize;
|
||||
const void* const dtPtr = DTable+1;
|
||||
const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
|
||||
|
||||
/* Init */
|
||||
BIT_DStream_t bitD1;
|
||||
BIT_DStream_t bitD2;
|
||||
BIT_DStream_t bitD3;
|
||||
BIT_DStream_t bitD4;
|
||||
size_t const length1 = MEM_readLE16(istart);
|
||||
size_t const length2 = MEM_readLE16(istart+2);
|
||||
size_t const length3 = MEM_readLE16(istart+4);
|
||||
size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
|
||||
const BYTE* const istart1 = istart + 6; /* jumpTable */
|
||||
const BYTE* const istart2 = istart1 + length1;
|
||||
const BYTE* const istart3 = istart2 + length2;
|
||||
const BYTE* const istart4 = istart3 + length3;
|
||||
size_t const segmentSize = (dstSize+3) / 4;
|
||||
BYTE* const opStart2 = ostart + segmentSize;
|
||||
BYTE* const opStart3 = opStart2 + segmentSize;
|
||||
BYTE* const opStart4 = opStart3 + segmentSize;
|
||||
BYTE* op1 = ostart;
|
||||
BYTE* op2 = opStart2;
|
||||
BYTE* op3 = opStart3;
|
||||
BYTE* op4 = opStart4;
|
||||
U32 endSignal;
|
||||
DTableDesc const dtd = HUF_getDTableDesc(DTable);
|
||||
U32 const dtLog = dtd.tableLog;
|
||||
|
||||
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
|
||||
{ size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
|
||||
if (HUF_isError(errorCode)) return errorCode; }
|
||||
{ size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
|
||||
if (HUF_isError(errorCode)) return errorCode; }
|
||||
{ size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
|
||||
if (HUF_isError(errorCode)) return errorCode; }
|
||||
{ size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
|
||||
        if (HUF_isError(errorCode)) return errorCode; }

        /* 16-32 symbols per loop (4-8 symbols per stream) */
        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
        for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);

            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
        }

        /* check corruption */
        if (op1 > opStart2) return ERROR(corruption_detected);
        if (op2 > opStart3) return ERROR(corruption_detected);
        if (op3 > opStart4) return ERROR(corruption_detected);
        /* note : op4 already verified within main loop */

        /* finish bitStreams one by one */
        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);

        /* check */
        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
          if (!endCheck) return ERROR(corruption_detected); }

        /* decoded size */
        return dstSize;
    }
}


size_t HUF_decompress4X4_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    if (dtd.tableType != 1) return ERROR(GENERIC);
    return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}


size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
                                   const void* cSrc, size_t cSrcSize,
                                   void* workSpace, size_t wkspSize)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize,
                                         workSpace, wkspSize);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize; cSrcSize -= hSize;

    return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
}


size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
                              const void* cSrc, size_t cSrcSize)
{
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
                                       workSpace, sizeof(workSpace));
}

size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
    return HUF_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}


/* ********************************/
/* Generic decompression selector */
/* ********************************/

size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    const HUF_DTable* DTable)
{
    DTableDesc const dtd = HUF_getDTableDesc(DTable);
    return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
                           HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
}

size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    const HUF_DTable* DTable)
{
    DTableDesc const dtd = HUF_getDTableDesc(DTable);
    return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
                           HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
}


typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
{
    /* single, double, quad */
    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
};

/** HUF_selectDecoder() :
 *  Tells which decoder is likely to decode faster,
 *  based on a set of pre-determined metrics.
 *  @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
 *  Assumption : 0 < cSrcSize, dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
{
    /* decoder timing evaluation */
    U32 const Q = cSrcSize >= dstSize ? 15 : (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 */
    U32 const D256 = (U32)(dstSize >> 8);
    U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
    U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
    DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, for cache eviction */

    return DTime1 < DTime0;
}
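
/* Worked example (illustrative, not in the original source): for
 * dstSize = 64 KB and cSrcSize = 32 KB, Q = 32*16/64 = 8 and
 * D256 = 65536>>8 = 256, so DTime0 = 926 + 128*256 = 33694 and
 * DTime1 = 1613 + 75*256 = 20813, raised to 23414 by the 1/8th cache
 * penalty. 23414 < 33694, so the double-symbol decoder
 * (HUF_decompress4X4) is selected. */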

typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);

size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    static const decompressionAlgo decompress[2] = { HUF_decompress4X2, HUF_decompress4X4 };

    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */

    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
        return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
    }
}
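
/* Usage sketch (illustrative, not part of the bundled source):
 * HUF_decompress() needs the exact decompressed size up front, since
 * Huffman-only blocks carry no size header; the caller (e.g. the zstd block
 * layer) must track it. example_huf_roundtrip() is a hypothetical helper. */
static size_t example_huf_roundtrip(void* dst, size_t originalSize,
                                    const void* cSrc, size_t cSrcSize)
{
    size_t const dSize = HUF_decompress(dst, originalSize, cSrc, cSrcSize);
    if (HUF_isError(dSize)) return dSize;   /* e.g. corruption_detected */
    return dSize;   /* equals originalSize on success */
}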

size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */

    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
        return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
                        HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
    }
}

size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
                                         workSpace, sizeof(workSpace));
}


size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
                                     size_t dstSize, const void* cSrc,
                                     size_t cSrcSize, void* workSpace,
                                     size_t wkspSize)
{
    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize == 0) return ERROR(corruption_detected);

    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
        return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize):
                        HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
    }
}

size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
                                  const void* cSrc, size_t cSrcSize,
                                  void* workSpace, size_t wkspSize)
{
    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */

    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
        return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                                    cSrcSize, workSpace, wkspSize):
                        HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                                    cSrcSize, workSpace, wkspSize);
    }
}

size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
                             const void* cSrc, size_t cSrcSize)
{
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
                                      workSpace, sizeof(workSpace));
}
2655
src/borg/algorithms/zstd/lib/decompress/zstd_decompress.c
Normal file
File diff suppressed because it is too large
213
src/borg/algorithms/zstd/lib/deprecated/zbuff.h
Normal file
@@ -0,0 +1,213 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* ***************************************************************
*  NOTES/WARNINGS
******************************************************************/
/* The streaming API defined here is deprecated.
 * Consider migrating towards ZSTD_compressStream() API in `zstd.h`
 * See 'lib/README.md'.
 *****************************************************************/


#if defined (__cplusplus)
extern "C" {
#endif

#ifndef ZSTD_BUFFERED_H_23987
#define ZSTD_BUFFERED_H_23987

/* *************************************
*  Dependencies
***************************************/
#include <stddef.h>      /* size_t */
#include "zstd.h"        /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */


/* ***************************************************************
*  Compiler specifics
*****************************************************************/
/* Deprecation warnings */
/* Should these warnings be a problem,
   it is generally possible to disable them,
   typically with -Wno-deprecated-declarations for gcc
   or _CRT_SECURE_NO_WARNINGS in Visual.
   Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS */
#ifdef ZBUFF_DISABLE_DEPRECATE_WARNINGS
#  define ZBUFF_DEPRECATED(message) ZSTDLIB_API  /* disable deprecation warnings */
#else
#  if defined (__cplusplus) && (__cplusplus >= 201402)   /* C++14 or greater */
#    define ZBUFF_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API
#  elif (defined(__GNUC__) && (__GNUC__ >= 5)) || defined(__clang__)
#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message)))
#  elif defined(__GNUC__) && (__GNUC__ >= 3)
#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated))
#  elif defined(_MSC_VER)
#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __declspec(deprecated(message))
#  else
#    pragma message("WARNING: You need to implement ZBUFF_DEPRECATED for this compiler")
#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API
#  endif
#endif /* ZBUFF_DISABLE_DEPRECATE_WARNINGS */


/* *************************************
*  Streaming functions
***************************************/
/* This is the easier "buffered" streaming API,
*  using an internal buffer to lift all restrictions on user-provided buffers
*  which can be any size, any place, for both input and output.
*  ZBUFF and ZSTD are 100% interoperable,
*  frames created by one can be decoded by the other one */

typedef ZSTD_CStream ZBUFF_CCtx;
ZBUFF_DEPRECATED("use ZSTD_createCStream") ZBUFF_CCtx* ZBUFF_createCCtx(void);
ZBUFF_DEPRECATED("use ZSTD_freeCStream")   size_t      ZBUFF_freeCCtx(ZBUFF_CCtx* cctx);

ZBUFF_DEPRECATED("use ZSTD_initCStream")           size_t ZBUFF_compressInit(ZBUFF_CCtx* cctx, int compressionLevel);
ZBUFF_DEPRECATED("use ZSTD_initCStream_usingDict") size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);

ZBUFF_DEPRECATED("use ZSTD_compressStream") size_t ZBUFF_compressContinue(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr);
ZBUFF_DEPRECATED("use ZSTD_flushStream")    size_t ZBUFF_compressFlush(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr);
ZBUFF_DEPRECATED("use ZSTD_endStream")      size_t ZBUFF_compressEnd(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr);

/*-*************************************************
*  Streaming compression - howto
*
*  A ZBUFF_CCtx object is required to track streaming operation.
*  Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources.
*  ZBUFF_CCtx objects can be reused multiple times.
*
*  Start by initializing ZBUFF_CCtx.
*  Use ZBUFF_compressInit() to start a new compression operation.
*  Use ZBUFF_compressInitDictionary() for a compression which requires a dictionary.
*
*  Use ZBUFF_compressContinue() repetitively to consume input stream.
*  *srcSizePtr and *dstCapacityPtr can be any size.
*  The function will report how many bytes were read or written within *srcSizePtr and *dstCapacityPtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to present again remaining data.
*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each call, so save its content if it matters, or change `dst`.
*  @return : a hint to preferred nb of bytes to use as input for next function call (it's just a hint, to improve latency)
*            or an error code, which can be tested using ZBUFF_isError().
*
*  At any moment, it's possible to flush whatever data remains within buffer, using ZBUFF_compressFlush().
*  The nb of bytes written into `dst` will be reported into *dstCapacityPtr.
*  Note that the function cannot output more than *dstCapacityPtr,
*  therefore, some content might still be left into internal buffer if *dstCapacityPtr is too small.
*  @return : nb of bytes still present into internal buffer (0 if it's empty)
*            or an error code, which can be tested using ZBUFF_isError().
*
*  ZBUFF_compressEnd() instructs to finish a frame.
*  It will perform a flush and write frame epilogue.
*  The epilogue is required for decoders to consider a frame completed.
*  Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small.
*  In which case, call again ZBUFF_compressFlush() to complete the flush.
*  @return : nb of bytes still present into internal buffer (0 if it's empty)
*            or an error code, which can be tested using ZBUFF_isError().
*
*  Hint : _recommended buffer_ sizes (not compulsory) : ZBUFF_recommendedCInSize() / ZBUFF_recommendedCOutSize()
*  input : ZBUFF_recommendedCInSize==128 KB block size is the internal unit, use this value to reduce intermediate stages (better latency)
*  output : ZBUFF_recommendedCOutSize==ZSTD_compressBound(128 KB) + 3 + 3 : ensures it's always possible to write/flush/end a full block. Skip some buffering.
*  By using both, it ensures that input will be entirely consumed, and output will always contain the result, reducing intermediate buffering.
* **************************************************/
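
/* Usage sketch of the howto above (illustrative, not part of the bundled
 * source): compress one FILE* stream to another with the deprecated ZBUFF
 * API, using the recommended buffer sizes so input is always fully consumed
 * and ZBUFF_compressEnd() flushes in a single call.
 * example_zbuff_compress() is a hypothetical helper; error handling is
 * reduced to early exits. */
#include <stdio.h>
#include <stdlib.h>

static int example_zbuff_compress(FILE* fin, FILE* fout, int level)
{
    size_t const inSize  = ZBUFF_recommendedCInSize();
    size_t const outSize = ZBUFF_recommendedCOutSize();
    char* const inBuff   = (char*)malloc(inSize);
    char* const outBuff  = (char*)malloc(outSize);
    ZBUFF_CCtx* const cctx = ZBUFF_createCCtx();
    int ret = 1;
    size_t readBytes;
    if (!inBuff || !outBuff || !cctx) goto _cleanup;
    if (ZBUFF_isError(ZBUFF_compressInit(cctx, level))) goto _cleanup;
    while ((readBytes = fread(inBuff, 1, inSize, fin)) > 0) {
        size_t pos = 0;
        while (pos < readBytes) {   /* input may be only partially consumed */
            size_t dstCapacity = outSize;
            size_t srcSize = readBytes - pos;
            size_t const hint = ZBUFF_compressContinue(cctx, outBuff, &dstCapacity,
                                                       inBuff + pos, &srcSize);
            if (ZBUFF_isError(hint)) goto _cleanup;
            fwrite(outBuff, 1, dstCapacity, fout);
            pos += srcSize;
        }
    }
    {   size_t dstCapacity = outSize;
        size_t const remaining = ZBUFF_compressEnd(cctx, outBuff, &dstCapacity);
        if (ZBUFF_isError(remaining)) goto _cleanup;
        fwrite(outBuff, 1, dstCapacity, fout);
        if (remaining == 0) ret = 0;   /* epilogue fully written */
    }
_cleanup:
    ZBUFF_freeCCtx(cctx);
    free(inBuff); free(outBuff);
    return ret;
}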

typedef ZSTD_DStream ZBUFF_DCtx;
ZBUFF_DEPRECATED("use ZSTD_createDStream") ZBUFF_DCtx* ZBUFF_createDCtx(void);
ZBUFF_DEPRECATED("use ZSTD_freeDStream")   size_t      ZBUFF_freeDCtx(ZBUFF_DCtx* dctx);

ZBUFF_DEPRECATED("use ZSTD_initDStream")           size_t ZBUFF_decompressInit(ZBUFF_DCtx* dctx);
ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* dctx, const void* dict, size_t dictSize);

ZBUFF_DEPRECATED("use ZSTD_decompressStream") size_t ZBUFF_decompressContinue(ZBUFF_DCtx* dctx,
                                                                              void* dst, size_t* dstCapacityPtr,
                                                                              const void* src, size_t* srcSizePtr);

/*-***************************************************************************
*  Streaming decompression howto
*
*  A ZBUFF_DCtx object is required to track streaming operations.
*  Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
*  Use ZBUFF_decompressInit() to start a new decompression operation,
*   or ZBUFF_decompressInitDictionary() if decompression requires a dictionary.
*  Note that ZBUFF_DCtx objects can be re-init multiple times.
*
*  Use ZBUFF_decompressContinue() repetitively to consume your input.
*  *srcSizePtr and *dstCapacityPtr can be any size.
*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
*  @return : 0 when a frame is completely decoded and fully flushed,
*            1 when there is still some data left within internal buffer to flush,
*            >1 when more data is expected, with value being a suggested next input size (it's just a hint, which helps latency),
*            or an error code, which can be tested using ZBUFF_isError().
*
*  Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize() and ZBUFF_recommendedDOutSize()
*  output : ZBUFF_recommendedDOutSize== 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded.
*  input  : ZBUFF_recommendedDInSize == 128KB + 3;
*           just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
* *******************************************************************************/
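
/* Usage sketch of the decompression howto above (illustrative, not part of
 * the bundled source): decode one frame from fin to fout, following the
 * @return convention (0 = frame done and flushed, >0 = size hint for the
 * next read). example_zbuff_decompress() is a hypothetical helper; with the
 * recommended output size, a full block can always be flushed per call. */
#include <stdio.h>
#include <stdlib.h>

static int example_zbuff_decompress(FILE* fin, FILE* fout)
{
    size_t const inSize  = ZBUFF_recommendedDInSize();
    size_t const outSize = ZBUFF_recommendedDOutSize();
    char* const inBuff   = (char*)malloc(inSize);
    char* const outBuff  = (char*)malloc(outSize);
    ZBUFF_DCtx* const dctx = ZBUFF_createDCtx();
    int ret = 1;
    size_t toRead = inSize, readBytes;
    if (!inBuff || !outBuff || !dctx) goto _cleanup;
    if (ZBUFF_isError(ZBUFF_decompressInit(dctx))) goto _cleanup;
    while ((readBytes = fread(inBuff, 1, toRead, fin)) > 0) {
        size_t pos = 0, hint = 1;
        while (pos < readBytes) {   /* input may be only partially consumed */
            size_t dstCapacity = outSize;
            size_t srcSize = readBytes - pos;
            hint = ZBUFF_decompressContinue(dctx, outBuff, &dstCapacity,
                                            inBuff + pos, &srcSize);
            if (ZBUFF_isError(hint)) goto _cleanup;
            fwrite(outBuff, 1, dstCapacity, fout);
            pos += srcSize;
        }
        if (hint == 0) { ret = 0; break; }        /* frame completely decoded */
        toRead = (hint < inSize) ? hint : inSize; /* follow the input-size hint */
    }
_cleanup:
    ZBUFF_freeDCtx(dctx);
    free(inBuff); free(outBuff);
    return ret;
}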


/* *************************************
*  Tool functions
***************************************/
ZBUFF_DEPRECATED("use ZSTD_isError")      unsigned    ZBUFF_isError(size_t errorCode);
ZBUFF_DEPRECATED("use ZSTD_getErrorName") const char* ZBUFF_getErrorName(size_t errorCode);

/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
*   These sizes are just hints, they tend to offer better latency */
ZBUFF_DEPRECATED("use ZSTD_CStreamInSize")  size_t ZBUFF_recommendedCInSize(void);
ZBUFF_DEPRECATED("use ZSTD_CStreamOutSize") size_t ZBUFF_recommendedCOutSize(void);
ZBUFF_DEPRECATED("use ZSTD_DStreamInSize")  size_t ZBUFF_recommendedDInSize(void);
ZBUFF_DEPRECATED("use ZSTD_DStreamOutSize") size_t ZBUFF_recommendedDOutSize(void);

#endif  /* ZSTD_BUFFERED_H_23987 */


#ifdef ZBUFF_STATIC_LINKING_ONLY
#ifndef ZBUFF_STATIC_H_30298098432
#define ZBUFF_STATIC_H_30298098432

/* ====================================================================================
 * The definitions in this section are considered experimental.
 * They should never be used in association with a dynamic library, as they may change in the future.
 * They are provided for advanced usages.
 * Use them only in association with static linking.
 * ==================================================================================== */

/*--- Dependency ---*/
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters, ZSTD_customMem */
#include "zstd.h"


/*--- Custom memory allocator ---*/
/*! ZBUFF_createCCtx_advanced() :
 *  Create a ZBUFF compression context using external alloc and free functions */
ZBUFF_DEPRECATED("use ZSTD_createCStream_advanced") ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem);

/*! ZBUFF_createDCtx_advanced() :
 *  Create a ZBUFF decompression context using external alloc and free functions */
ZBUFF_DEPRECATED("use ZSTD_createDStream_advanced") ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem);


/*--- Advanced Streaming Initialization ---*/
ZBUFF_DEPRECATED("use ZSTD_initCStream_advanced") size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc,
                                                  const void* dict, size_t dictSize,
                                                  ZSTD_parameters params, unsigned long long pledgedSrcSize);


#endif    /* ZBUFF_STATIC_H_30298098432 */
#endif    /* ZBUFF_STATIC_LINKING_ONLY */


#if defined (__cplusplus)
}
#endif
26
src/borg/algorithms/zstd/lib/deprecated/zbuff_common.c
Normal file
@@ -0,0 +1,26 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
*  Dependencies
***************************************/
#include "error_private.h"
#include "zbuff.h"

/*-****************************************
*  ZBUFF Error Management  (deprecated)
******************************************/

/*! ZBUFF_isError() :
 *  tells if a return value is an error code */
unsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); }
/*! ZBUFF_getErrorName() :
 *  provides error code string from function result (useful for debugging) */
const char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
146
src/borg/algorithms/zstd/lib/deprecated/zbuff_compress.c
Normal file
@@ -0,0 +1,146 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */



/* *************************************
*  Dependencies
***************************************/
#define ZBUFF_STATIC_LINKING_ONLY
#include "zbuff.h"


/*-***********************************************************
*  Streaming compression
*
*  A ZBUFF_CCtx object is required to track streaming operation.
*  Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources.
*  Use ZBUFF_compressInit() to start a new compression operation.
*  ZBUFF_CCtx objects can be reused multiple times.
*
*  Use ZBUFF_compressContinue() repetitively to consume your input.
*  *srcSizePtr and *dstCapacityPtr can be any size.
*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to call again the function with remaining input.
*  The content of dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change dst.
*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)
*            or an error code, which can be tested using ZBUFF_isError().
*
*  ZBUFF_compressFlush() can be used to instruct ZBUFF to compress and output whatever remains within its buffer.
*  Note that it will not output more than *dstCapacityPtr.
*  Therefore, some content might still be left into its internal buffer if dst buffer is too small.
*  @return : nb of bytes still present into internal buffer (0 if it's empty)
*            or an error code, which can be tested using ZBUFF_isError().
*
*  ZBUFF_compressEnd() instructs to finish a frame.
*  It will perform a flush and write frame epilogue.
*  Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small.
*  @return : nb of bytes still present into internal buffer (0 if it's empty)
*            or an error code, which can be tested using ZBUFF_isError().
*
*  Hint : recommended buffer sizes (not compulsory)
*  input : ZSTD_BLOCKSIZE_MAX (128 KB), internal unit size, it improves latency to use this value.
*  output : ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + ZBUFF_endFrameSize : ensures it's always possible to write/flush/end a full block at best speed.
* ***********************************************************/

ZBUFF_CCtx* ZBUFF_createCCtx(void)
{
    return ZSTD_createCStream();
}

ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem)
{
    return ZSTD_createCStream_advanced(customMem);
}

size_t ZBUFF_freeCCtx(ZBUFF_CCtx* zbc)
{
    return ZSTD_freeCStream(zbc);
}


/* ======   Initialization   ====== */

size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc,
                                   const void* dict, size_t dictSize,
                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
    return ZSTD_initCStream_advanced(zbc, dict, dictSize, params, pledgedSrcSize);
}


size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, int compressionLevel)
{
    return ZSTD_initCStream_usingDict(zbc, dict, dictSize, compressionLevel);
}

size_t ZBUFF_compressInit(ZBUFF_CCtx* zbc, int compressionLevel)
{
    return ZSTD_initCStream(zbc, compressionLevel);
}

/* ======   Compression   ====== */

size_t ZBUFF_compressContinue(ZBUFF_CCtx* zbc,
                              void* dst, size_t* dstCapacityPtr,
                              const void* src, size_t* srcSizePtr)
{
    size_t result;
    ZSTD_outBuffer outBuff;
    ZSTD_inBuffer inBuff;
    outBuff.dst = dst;
    outBuff.pos = 0;
    outBuff.size = *dstCapacityPtr;
    inBuff.src = src;
    inBuff.pos = 0;
    inBuff.size = *srcSizePtr;
    result = ZSTD_compressStream(zbc, &outBuff, &inBuff);
    *dstCapacityPtr = outBuff.pos;
    *srcSizePtr = inBuff.pos;
    return result;
}



/* ======   Finalize   ====== */

size_t ZBUFF_compressFlush(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr)
{
    size_t result;
    ZSTD_outBuffer outBuff;
    outBuff.dst = dst;
    outBuff.pos = 0;
    outBuff.size = *dstCapacityPtr;
    result = ZSTD_flushStream(zbc, &outBuff);
    *dstCapacityPtr = outBuff.pos;
    return result;
}


size_t ZBUFF_compressEnd(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr)
{
    size_t result;
    ZSTD_outBuffer outBuff;
    outBuff.dst = dst;
    outBuff.pos = 0;
    outBuff.size = *dstCapacityPtr;
    result = ZSTD_endStream(zbc, &outBuff);
    *dstCapacityPtr = outBuff.pos;
    return result;
}



/* *************************************
*  Tool functions
***************************************/
size_t ZBUFF_recommendedCInSize(void)  { return ZSTD_CStreamInSize(); }
size_t ZBUFF_recommendedCOutSize(void) { return ZSTD_CStreamOutSize(); }
75
src/borg/algorithms/zstd/lib/deprecated/zbuff_decompress.c
Normal file
@@ -0,0 +1,75 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */



/* *************************************
*  Dependencies
***************************************/
#define ZBUFF_STATIC_LINKING_ONLY
#include "zbuff.h"


ZBUFF_DCtx* ZBUFF_createDCtx(void)
{
    return ZSTD_createDStream();
}

ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem)
{
    return ZSTD_createDStream_advanced(customMem);
}

size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbd)
{
    return ZSTD_freeDStream(zbd);
}


/* ***   Initialization   *** */

size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* zbd, const void* dict, size_t dictSize)
{
    return ZSTD_initDStream_usingDict(zbd, dict, dictSize);
}

size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbd)
{
    return ZSTD_initDStream(zbd);
}


/* ***   Decompression   *** */

size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbd,
                                void* dst, size_t* dstCapacityPtr,
                                const void* src, size_t* srcSizePtr)
{
    ZSTD_outBuffer outBuff;
    ZSTD_inBuffer inBuff;
    size_t result;
    outBuff.dst  = dst;
    outBuff.pos  = 0;
    outBuff.size = *dstCapacityPtr;
    inBuff.src  = src;
    inBuff.pos  = 0;
    inBuff.size = *srcSizePtr;
    result = ZSTD_decompressStream(zbd, &outBuff, &inBuff);
    *dstCapacityPtr = outBuff.pos;
    *srcSizePtr = inBuff.pos;
    return result;
}


/* *************************************
*  Tool functions
***************************************/
size_t ZBUFF_recommendedDInSize(void)  { return ZSTD_DStreamInSize(); }
size_t ZBUFF_recommendedDOutSize(void) { return ZSTD_DStreamOutSize(); }
1045
src/borg/algorithms/zstd/lib/dictBuilder/cover.c
Normal file
File diff suppressed because it is too large
1913
src/borg/algorithms/zstd/lib/dictBuilder/divsufsort.c
Normal file
File diff suppressed because it is too large
67
src/borg/algorithms/zstd/lib/dictBuilder/divsufsort.h
Normal file
@@ -0,0 +1,67 @@
/*
 * divsufsort.h for libdivsufsort-lite
 * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DIVSUFSORT_H
#define _DIVSUFSORT_H 1

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */


/*- Prototypes -*/

/**
 * Constructs the suffix array of a given string.
 * @param T [0..n-1] The input string.
 * @param SA [0..n-1] The output array of suffixes.
 * @param n The length of the given string.
 * @param openMP enables OpenMP optimization.
 * @return 0 if no error occurred, -1 or -2 otherwise.
 */
int
divsufsort(const unsigned char *T, int *SA, int n, int openMP);

/**
 * Constructs the burrows-wheeler transformed string of a given string.
 * @param T [0..n-1] The input string.
 * @param U [0..n-1] The output string. (can be T)
 * @param A [0..n-1] The temporary array. (can be NULL)
 * @param n The length of the given string.
 * @param num_indexes The length of secondary indexes array. (can be NULL)
 * @param indexes The secondary indexes array. (can be NULL)
 * @param openMP enables OpenMP optimization.
 * @return The primary index if no error occurred, -1 or -2 otherwise.
 */
int
divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP);


#ifdef __cplusplus
} /* extern "C" */
#endif /* __cplusplus */

#endif /* _DIVSUFSORT_H */
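
/* Usage sketch (illustrative, not part of the bundled source): build the
 * suffix array of "banana" with divsufsort(); SA[i] holds the start offset
 * of the i-th suffix in lexicographic order, and openMP is disabled. */
#include <stdio.h>
#include <string.h>
#include "divsufsort.h"

static void example_divsufsort(void)
{
    const unsigned char T[] = "banana";
    int SA[6];
    int const n = (int)strlen((const char*)T);
    if (divsufsort(T, SA, n, 0) == 0) {
        int i;
        for (i = 0; i < n; i++)
            printf("SA[%d] = %d (%s)\n", i, SA[i], (const char*)T + SA[i]);
        /* sorted suffixes: a, ana, anana, banana, na, nana
         * => SA = {5, 3, 1, 0, 4, 2} */
    }
}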
1075
src/borg/algorithms/zstd/lib/dictBuilder/zdict.c
Normal file
File diff suppressed because it is too large
211
src/borg/algorithms/zstd/lib/dictBuilder/zdict.h
Normal file
@@ -0,0 +1,211 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef DICTBUILDER_H_001
#define DICTBUILDER_H_001

#if defined (__cplusplus)
extern "C" {
#endif


/*====== Dependencies ======*/
#include <stddef.h>  /* size_t */


/* =====   ZDICTLIB_API : control library symbols visibility   ===== */
#ifndef ZDICTLIB_VISIBILITY
#  if defined(__GNUC__) && (__GNUC__ >= 4)
#    define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default")))
#  else
#    define ZDICTLIB_VISIBILITY
#  endif
#endif
#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
#  define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY
#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
#  define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
#else
#  define ZDICTLIB_API ZDICTLIB_VISIBILITY
#endif


/*! ZDICT_trainFromBuffer():
 *  Train a dictionary from an array of samples.
 *  Uses ZDICT_optimizeTrainFromBuffer_cover() single-threaded, with d=8 and steps=4.
 *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
 *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
 *  The resulting dictionary will be saved into `dictBuffer`.
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 *          or an error code, which can be tested with ZDICT_isError().
 *  Note: ZDICT_trainFromBuffer() requires about 9 bytes of memory for each input byte.
 *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
 *        It's obviously possible to target smaller or larger ones, just by specifying different `dictBufferCapacity`.
 *        In general, it's recommended to provide a few thousands samples, but this can vary a lot.
 *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
 */
ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
                                          const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
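
/* Usage sketch (illustrative, not part of the bundled source): train a
 * dictionary from caller-collected samples. `samples` must hold the sample
 * bodies concatenated back to back, with `sampleSizes[i]` giving each length;
 * example_train_dict() is a hypothetical helper. */
static size_t example_train_dict(void* dictBuffer, size_t dictCapacity,  /* ~100 KB is a reasonable target */
                                 const void* samples, const size_t* sampleSizes,
                                 unsigned nbSamples)                     /* ideally a few thousand samples */
{
    size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, dictCapacity,
                                                  samples, sampleSizes, nbSamples);
    if (ZDICT_isError(dictSize)) {
        /* e.g. too few samples, or dictCapacity too small */
        return dictSize;
    }
    return dictSize;   /* bytes actually used within dictBuffer */
}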


/*====== Helper functions ======*/
ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize);  /**< extracts dictID; @return zero if error (not a valid dictionary) */
ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode);
ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode);



#ifdef ZDICT_STATIC_LINKING_ONLY

/* ====================================================================================
 * The definitions in this section are considered experimental.
 * They should never be used with a dynamic library, as they may change in the future.
 * They are provided for advanced usages.
 * Use them only in association with static linking.
 * ==================================================================================== */

typedef struct {
    int      compressionLevel;   /* 0 means default; target a specific zstd compression level */
    unsigned notificationLevel;  /* Write to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
    unsigned dictID;             /* 0 means auto mode (32-bits random value); other : force dictID value */
} ZDICT_params_t;

/*! ZDICT_cover_params_t:
 *  For all values 0 means default.
 *  k and d are the only required parameters.
 */
typedef struct {
    unsigned k;          /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
    unsigned d;          /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
    unsigned steps;      /* Number of steps : Only used for optimization : 0 means default (32) : Higher means more parameters checked */
    unsigned nbThreads;  /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
    ZDICT_params_t zParams;
} ZDICT_cover_params_t;


/*! ZDICT_trainFromBuffer_cover():
 *  Train a dictionary from an array of samples using the COVER algorithm.
 *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
 *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
 *  The resulting dictionary will be saved into `dictBuffer`.
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 *          or an error code, which can be tested with ZDICT_isError().
 *  Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.
 *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
 *        It's obviously possible to target smaller or larger ones, just by specifying different `dictBufferCapacity`.
 *        In general, it's recommended to provide a few thousands samples, but this can vary a lot.
 *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
 */
ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
    void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
    const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t parameters);

/*! ZDICT_optimizeTrainFromBuffer_cover():
 *  The same requirements as above hold for all the parameters except `parameters`.
 *  This function tries many parameter combinations and picks the best parameters.
 * `*parameters` is filled with the best parameters found, and the dictionary
 *  constructed with those parameters is stored in `dictBuffer`.
 *
 *  All of the parameters d, k, steps are optional.
 *  If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8, 10, 12, 14, 16}.
 *  If steps is zero it defaults to its default value.
 *  If k is non-zero then we don't check multiple values of k, otherwise we check k values in [16, 2048].
 *
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 *          or an error code, which can be tested with ZDICT_isError().
 *          On success `*parameters` contains the parameters selected.
 *  Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte, plus another 5 bytes per input byte for each thread.
 */
ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
    void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
    const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t *parameters);
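
/* Usage sketch (illustrative, not part of the bundled source): let the COVER
 * optimizer search k and d. Zeroed fields mean "default"; on success the
 * selected parameters are written back into `params`. */
#include <string.h>   /* memset */

static size_t example_optimize_cover(void* dictBuffer, size_t dictCapacity,
                                     const void* samples, const size_t* sampleSizes,
                                     unsigned nbSamples)
{
    ZDICT_cover_params_t params;
    memset(&params, 0, sizeof(params));   /* 0 = default for every field */
    params.nbThreads = 1;                 /* single-threaded parameter search */
    /* on success, params.k and params.d hold the values finally selected */
    return ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, dictCapacity,
                                               samples, sampleSizes, nbSamples,
                                               &params);
}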

/*! ZDICT_finalizeDictionary():
 *  Given a custom content as a basis for dictionary, and a set of samples,
 *  finalize dictionary by adding headers and statistics.
 *
 *  Samples must be stored concatenated in a flat buffer `samplesBuffer`,
 *  supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
 *
 *  dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes.
 *  maxDictSize must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes.
 *
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`),
 *          or an error code, which can be tested by ZDICT_isError().
 *  Note: ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0.
 *  Note 2: dictBuffer and dictContent can overlap
 */
#define ZDICT_CONTENTSIZE_MIN 128
#define ZDICT_DICTSIZE_MIN    256
ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
                                             const void* dictContent, size_t dictContentSize,
                                             const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
                                             ZDICT_params_t parameters);

typedef struct {
    unsigned selectivityLevel;   /* 0 means default; larger => select more => larger dictionary */
    ZDICT_params_t zParams;
} ZDICT_legacy_params_t;

/*! ZDICT_trainFromBuffer_legacy():
 *  Train a dictionary from an array of samples.
 *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
 *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
 *  The resulting dictionary will be saved into `dictBuffer`.
 * `parameters` is optional and can be provided with values set to 0 to mean "default".
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 *          or an error code, which can be tested with ZDICT_isError().
 *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
 *        It's obviously possible to target smaller or larger ones, just by specifying different `dictBufferCapacity`.
 *        In general, it's recommended to provide a few thousands samples, but this can vary a lot.
 *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
 *  Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.
 */
ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy(
    void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
    const size_t *samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t parameters);

/* Deprecation warnings */
/* It is generally possible to disable deprecation warnings from compiler,
   for example with -Wno-deprecated-declarations for gcc
   or _CRT_SECURE_NO_WARNINGS in Visual.
   Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */
#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS
#  define ZDICT_DEPRECATED(message) ZDICTLIB_API  /* disable deprecation warnings */
#else
#  define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#  if defined (__cplusplus) && (__cplusplus >= 201402)   /* C++14 or greater */
#    define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API
#  elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__)
#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message)))
#  elif (ZDICT_GCC_VERSION >= 301)
#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated))
#  elif defined(_MSC_VER)
#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message))
#  else
#    pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler")
#    define ZDICT_DEPRECATED(message) ZDICTLIB_API
#  endif
#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */

ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead")
size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
                                        const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);


#endif   /* ZDICT_STATIC_LINKING_ONLY */

#if defined (__cplusplus)
}
#endif

#endif   /* DICTBUILDER_H_001 */
379
src/borg/algorithms/zstd/lib/legacy/zstd_legacy.h
Normal file
@@ -0,0 +1,379 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_LEGACY_H
#define ZSTD_LEGACY_H

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Includes
***************************************/
#include "mem.h"            /* MEM_STATIC */
#include "error_private.h"  /* ERROR */
#include "zstd.h"           /* ZSTD_inBuffer, ZSTD_outBuffer */

#if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0)
#  undef  ZSTD_LEGACY_SUPPORT
#  define ZSTD_LEGACY_SUPPORT 8
#endif

#if (ZSTD_LEGACY_SUPPORT <= 1)
#  include "zstd_v01.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 2)
#  include "zstd_v02.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 3)
#  include "zstd_v03.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 4)
#  include "zstd_v04.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
#  include "zstd_v05.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
#  include "zstd_v06.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
#  include "zstd_v07.h"
#endif

/** ZSTD_isLegacy() :
    @return : > 0 if supported by legacy decoder. 0 otherwise.
              return value is the version.
*/
MEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize)
{
    U32 magicNumberLE;
    if (srcSize<4) return 0;
    magicNumberLE = MEM_readLE32(src);
    switch(magicNumberLE)
    {
#if (ZSTD_LEGACY_SUPPORT <= 1)
        case ZSTDv01_magicNumberLE:return 1;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 2)
        case ZSTDv02_magicNumber : return 2;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 3)
        case ZSTDv03_magicNumber : return 3;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 4)
        case ZSTDv04_magicNumber : return 4;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
        case ZSTDv05_MAGICNUMBER : return 5;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
        case ZSTDv06_MAGICNUMBER : return 6;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
        case ZSTDv07_MAGICNUMBER : return 7;
#endif
        default : return 0;
    }
}


MEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, size_t srcSize)
{
    U32 const version = ZSTD_isLegacy(src, srcSize);
    if (version < 5) return 0;  /* no decompressed size in frame header, or not a legacy format */
#if (ZSTD_LEGACY_SUPPORT <= 5)
    if (version==5) {
        ZSTDv05_parameters fParams;
        size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize);
        if (frResult != 0) return 0;
        return fParams.srcSize;
    }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
    if (version==6) {
        ZSTDv06_frameParams fParams;
        size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize);
        if (frResult != 0) return 0;
        return fParams.frameContentSize;
    }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
    if (version==7) {
        ZSTDv07_frameParams fParams;
        size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize);
        if (frResult != 0) return 0;
        return fParams.frameContentSize;
    }
#endif
    return 0;   /* should not be possible */
}


MEM_STATIC size_t ZSTD_decompressLegacy(
                     void* dst, size_t dstCapacity,
               const void* src, size_t compressedSize,
               const void* dict,size_t dictSize)
{
    U32 const version = ZSTD_isLegacy(src, compressedSize);
    (void)dst; (void)dstCapacity; (void)dict; (void)dictSize;  /* unused when ZSTD_LEGACY_SUPPORT >= 8 */
    switch(version)
    {
#if (ZSTD_LEGACY_SUPPORT <= 1)
        case 1 :
            return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 2)
        case 2 :
            return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 3)
        case 3 :
            return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 4)
        case 4 :
            return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
        case 5 :
            {   size_t result;
                ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx();
                if (zd==NULL) return ERROR(memory_allocation);
                result = ZSTDv05_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
                ZSTDv05_freeDCtx(zd);
                return result;
            }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
        case 6 :
            {   size_t result;
                ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx();
                if (zd==NULL) return ERROR(memory_allocation);
                result = ZSTDv06_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
                ZSTDv06_freeDCtx(zd);
                return result;
            }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
        case 7 :
            {   size_t result;
                ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx();
                if (zd==NULL) return ERROR(memory_allocation);
                result = ZSTDv07_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
                ZSTDv07_freeDCtx(zd);
                return result;
            }
#endif
        default :
            return ERROR(prefix_unknown);
    }
}
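
/* Usage sketch (illustrative, not part of the bundled source): route a frame
 * to the matching v0.x decoder. dstCapacity must be large enough for the
 * decoded content, e.g. obtained from ZSTD_getDecompressedSize_legacy() above
 * for formats that record it; no dictionary is used here. */
static size_t example_decompress_any_legacy(void* dst, size_t dstCapacity,
                                            const void* src, size_t srcSize)
{
    unsigned const version = ZSTD_isLegacy(src, srcSize);
    if (version == 0) return ERROR(prefix_unknown);   /* not a legacy frame */
    return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, NULL, 0);
}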
|
||||
|
||||
MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src,
|
||||
size_t compressedSize)
|
||||
{
|
||||
U32 const version = ZSTD_isLegacy(src, compressedSize);
|
||||
switch(version)
|
||||
{
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 1)
|
||||
case 1 :
|
||||
return ZSTDv01_findFrameCompressedSize(src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 2)
|
||||
case 2 :
|
||||
return ZSTDv02_findFrameCompressedSize(src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 3)
|
||||
case 3 :
|
||||
return ZSTDv03_findFrameCompressedSize(src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 4)
|
||||
case 4 :
|
||||
return ZSTDv04_findFrameCompressedSize(src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 5)
|
||||
case 5 :
|
||||
return ZSTDv05_findFrameCompressedSize(src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 6)
|
||||
case 6 :
|
||||
return ZSTDv06_findFrameCompressedSize(src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 7)
|
||||
case 7 :
|
||||
return ZSTDv07_findFrameCompressedSize(src, compressedSize);
|
||||
#endif
|
||||
default :
|
||||
return ERROR(prefix_unknown);
|
||||
}
|
||||
}
|
||||
|
||||
MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version)
|
||||
{
|
||||
switch(version)
|
||||
{
|
||||
default :
|
||||
case 1 :
|
||||
case 2 :
|
||||
case 3 :
|
||||
(void)legacyContext;
|
||||
return ERROR(version_unsupported);
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 4)
|
||||
case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 5)
|
||||
case 5 : return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 6)
|
||||
case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext);
|
||||
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
        case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext);
#endif
    }
}


MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion,
                                        const void* dict, size_t dictSize)
{
    if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion);
    switch(newVersion)
    {
        default :
        case 1 :
        case 2 :
        case 3 :
            (void)dict; (void)dictSize;
            return 0;
#if (ZSTD_LEGACY_SUPPORT <= 4)
        case 4 :
        {
            ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext;
            if (dctx==NULL) return ERROR(memory_allocation);
            ZBUFFv04_decompressInit(dctx);
            ZBUFFv04_decompressWithDictionary(dctx, dict, dictSize);
            *legacyContext = dctx;
            return 0;
        }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
        case 5 :
        {
            ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext;
            if (dctx==NULL) return ERROR(memory_allocation);
            ZBUFFv05_decompressInitDictionary(dctx, dict, dictSize);
            *legacyContext = dctx;
            return 0;
        }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
        case 6 :
        {
            ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext;
            if (dctx==NULL) return ERROR(memory_allocation);
            ZBUFFv06_decompressInitDictionary(dctx, dict, dictSize);
            *legacyContext = dctx;
            return 0;
        }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
        case 7 :
        {
            ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext;
            if (dctx==NULL) return ERROR(memory_allocation);
            ZBUFFv07_decompressInitDictionary(dctx, dict, dictSize);
            *legacyContext = dctx;
            return 0;
        }
#endif
    }
}


MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
                                              ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
    switch(version)
    {
        default :
        case 1 :
        case 2 :
        case 3 :
            (void)legacyContext; (void)output; (void)input;
            return ERROR(version_unsupported);
#if (ZSTD_LEGACY_SUPPORT <= 4)
        case 4 :
        {
            ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext;
            const void* src = (const char*)input->src + input->pos;
            size_t readSize = input->size - input->pos;
            void* dst = (char*)output->dst + output->pos;
            size_t decodedSize = output->size - output->pos;
            size_t const hintSize = ZBUFFv04_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
            output->pos += decodedSize;
            input->pos += readSize;
            return hintSize;
        }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
        case 5 :
        {
            ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext;
            const void* src = (const char*)input->src + input->pos;
            size_t readSize = input->size - input->pos;
            void* dst = (char*)output->dst + output->pos;
            size_t decodedSize = output->size - output->pos;
            size_t const hintSize = ZBUFFv05_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
            output->pos += decodedSize;
            input->pos += readSize;
            return hintSize;
        }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
        case 6 :
        {
            ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext;
            const void* src = (const char*)input->src + input->pos;
            size_t readSize = input->size - input->pos;
            void* dst = (char*)output->dst + output->pos;
            size_t decodedSize = output->size - output->pos;
            size_t const hintSize = ZBUFFv06_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
            output->pos += decodedSize;
            input->pos += readSize;
            return hintSize;
        }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
        case 7 :
        {
            ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext;
            const void* src = (const char*)input->src + input->pos;
            size_t readSize = input->size - input->pos;
            void* dst = (char*)output->dst + output->pos;
            size_t decodedSize = output->size - output->pos;
            size_t const hintSize = ZBUFFv07_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
            output->pos += decodedSize;
            input->pos += readSize;
            return hintSize;
        }
#endif
    }
}


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_LEGACY_H */
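
Reader's aid (not part of the bundled sources): a minimal sketch of how the two dispatchers above could be driven once a frame has already been identified as legacy. The wrapper name decodeLegacyFrame and the buffer handling are illustrative assumptions; the zstd/ZBUFF calls are the ones declared in this header.

/* Hypothetical illustration only -- not part of zstd_legacy.h. */
static size_t decodeLegacyFrame(U32 version,
                                ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
    void* legacyContext = NULL;
    /* prevVersion==0 forces a fresh context; no dictionary in this sketch */
    size_t const initError = ZSTD_initLegacyStream(&legacyContext, 0, version, NULL, 0);
    if (ZSTD_isError(initError)) return initError;
    while (input->pos < input->size) {
        size_t const hint = ZSTD_decompressLegacyStream(legacyContext, version, output, input);
        if (ZSTD_isError(hint) || hint == 0) {   /* error, or frame fully decoded */
            ZSTD_freeLegacyStreamContext(legacyContext, version);
            return hint;
        }
    }
    return ZSTD_freeLegacyStreamContext(legacyContext, version);
}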
2127 src/borg/algorithms/zstd/lib/legacy/zstd_v01.c Normal file
File diff suppressed because it is too large
89 src/borg/algorithms/zstd/lib/legacy/zstd_v01.h Normal file
@@ -0,0 +1,89 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_V01_H_28739879432
#define ZSTD_V01_H_28739879432

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Includes
***************************************/
#include <stddef.h>   /* size_t */


/* *************************************
*  Simple one-step function
***************************************/
/**
ZSTDv01_decompress() : decompress ZSTD frames compliant with v0.1.x format
    compressedSize : is the exact source size
    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
                      It must be equal to or larger than originalSize, otherwise decompression will fail.
    return : the number of bytes decompressed into the destination buffer (originalSize)
             or an errorCode if it fails (which can be tested using ZSTDv01_isError())
*/
size_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize,
                     const void* src, size_t compressedSize);

/**
ZSTDv01_findFrameCompressedSize() : get the source length of a ZSTD frame compliant with v0.1.x format
    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    return : the number of bytes that would be read to decompress this frame
             or an errorCode if it fails (which can be tested using ZSTDv01_isError())
*/
size_t ZSTDv01_findFrameCompressedSize(const void* src, size_t compressedSize);

/**
ZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error
*/
unsigned ZSTDv01_isError(size_t code);


/* *************************************
*  Advanced functions
***************************************/
typedef struct ZSTDv01_Dctx_s ZSTDv01_Dctx;
ZSTDv01_Dctx* ZSTDv01_createDCtx(void);
size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx);

size_t ZSTDv01_decompressDCtx(void* ctx,
                              void* dst, size_t maxOriginalSize,
                        const void* src, size_t compressedSize);

/* *************************************
*  Streaming functions
***************************************/
size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx);

size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx);
size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
  Use the above functions alternately.
  ZSTDv01_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv01_decompressContinue().
  ZSTDv01_decompressContinue() will use previous data blocks to improve compression if they are located prior to the current block.
  The result is the number of bytes regenerated within 'dst'.
  It can be zero, which is not an error; it just means ZSTDv01_decompressContinue() has decoded some header.
*/

/* *************************************
*  Prefix - version detection
***************************************/
#define ZSTDv01_magicNumber   0xFD2FB51E   /* Big Endian version */
#define ZSTDv01_magicNumberLE 0x1EB52FFD   /* Little Endian version */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_V01_H_28739879432 */
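
Reader's aid (an editor's sketch, not part of the bundled file): the alternating-call protocol documented above could look like this. The driver name streamDecodeV01 and the flat-buffer I/O are illustrative assumptions; the completion and error conventions are taken from the comments above.

/* Hypothetical illustration of the ZSTDv01 block-streaming protocol. */
static size_t streamDecodeV01(ZSTDv01_Dctx* dctx,
                              char* dst, size_t dstCapacity,
                              const char* src, size_t srcSize)
{
    size_t dstPos = 0, srcPos = 0;
    ZSTDv01_resetDCtx(dctx);
    while (1) {
        size_t const toRead = ZSTDv01_nextSrcSizeToDecompress(dctx);
        if (toRead == 0) break;                            /* assumed to signal frame completion */
        if (srcPos + toRead > srcSize) return (size_t)-1;  /* truncated input (error convention assumed) */
        {   size_t const decoded = ZSTDv01_decompressContinue(dctx,
                    dst + dstPos, dstCapacity - dstPos, src + srcPos, toRead);
            if (ZSTDv01_isError(decoded)) return decoded;
            dstPos += decoded;                             /* may be 0 after a header block */
            srcPos += toRead;
        }
    }
    return dstPos;   /* total regenerated bytes */
}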
3556 src/borg/algorithms/zstd/lib/legacy/zstd_v02.c Normal file
File diff suppressed because it is too large
88 src/borg/algorithms/zstd/lib/legacy/zstd_v02.h Normal file
@@ -0,0 +1,88 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_V02_H_4174539423
#define ZSTD_V02_H_4174539423

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Includes
***************************************/
#include <stddef.h>   /* size_t */


/* *************************************
*  Simple one-step function
***************************************/
/**
ZSTDv02_decompress() : decompress ZSTD frames compliant with v0.2.x format
    compressedSize : is the exact source size
    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
                      It must be equal to or larger than originalSize, otherwise decompression will fail.
    return : the number of bytes decompressed into the destination buffer (originalSize)
             or an errorCode if it fails (which can be tested using ZSTDv02_isError())
*/
size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
                     const void* src, size_t compressedSize);

/**
ZSTDv02_findFrameCompressedSize() : get the source length of a ZSTD frame compliant with v0.2.x format
    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    return : the number of bytes that would be read to decompress this frame
             or an errorCode if it fails (which can be tested using ZSTDv02_isError())
*/
size_t ZSTDv02_findFrameCompressedSize(const void* src, size_t compressedSize);

/**
ZSTDv02_isError() : tells if the result of ZSTDv02_decompress() is an error
*/
unsigned ZSTDv02_isError(size_t code);


/* *************************************
*  Advanced functions
***************************************/
typedef struct ZSTDv02_Dctx_s ZSTDv02_Dctx;
ZSTDv02_Dctx* ZSTDv02_createDCtx(void);
size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx);

size_t ZSTDv02_decompressDCtx(void* ctx,
                              void* dst, size_t maxOriginalSize,
                        const void* src, size_t compressedSize);

/* *************************************
*  Streaming functions
***************************************/
size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx);

size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx);
size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
  Use the above functions alternately.
  ZSTDv02_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv02_decompressContinue().
  ZSTDv02_decompressContinue() will use previous data blocks to improve compression if they are located prior to the current block.
  The result is the number of bytes regenerated within 'dst'.
  It can be zero, which is not an error; it just means ZSTDv02_decompressContinue() has decoded some header.
*/

/* *************************************
*  Prefix - version detection
***************************************/
#define ZSTDv02_magicNumber 0xFD2FB522   /* v0.2 */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_V02_H_4174539423 */
3197 src/borg/algorithms/zstd/lib/legacy/zstd_v03.c Normal file
File diff suppressed because it is too large
88 src/borg/algorithms/zstd/lib/legacy/zstd_v03.h Normal file
@@ -0,0 +1,88 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_V03_H_298734209782
#define ZSTD_V03_H_298734209782

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Includes
***************************************/
#include <stddef.h>   /* size_t */


/* *************************************
*  Simple one-step function
***************************************/
/**
ZSTDv03_decompress() : decompress ZSTD frames compliant with v0.3.x format
    compressedSize : is the exact source size
    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
                      It must be equal to or larger than originalSize, otherwise decompression will fail.
    return : the number of bytes decompressed into the destination buffer (originalSize)
             or an errorCode if it fails (which can be tested using ZSTDv03_isError())
*/
size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
                     const void* src, size_t compressedSize);

/**
ZSTDv03_findFrameCompressedSize() : get the source length of a ZSTD frame compliant with v0.3.x format
    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    return : the number of bytes that would be read to decompress this frame
             or an errorCode if it fails (which can be tested using ZSTDv03_isError())
*/
size_t ZSTDv03_findFrameCompressedSize(const void* src, size_t compressedSize);

/**
ZSTDv03_isError() : tells if the result of ZSTDv03_decompress() is an error
*/
unsigned ZSTDv03_isError(size_t code);


/* *************************************
*  Advanced functions
***************************************/
typedef struct ZSTDv03_Dctx_s ZSTDv03_Dctx;
ZSTDv03_Dctx* ZSTDv03_createDCtx(void);
size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx);

size_t ZSTDv03_decompressDCtx(void* ctx,
                              void* dst, size_t maxOriginalSize,
                        const void* src, size_t compressedSize);

/* *************************************
*  Streaming functions
***************************************/
size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx);

size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx);
size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
  Use the above functions alternately.
  ZSTDv03_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv03_decompressContinue().
  ZSTDv03_decompressContinue() will use previous data blocks to improve compression if they are located prior to the current block.
  The result is the number of bytes regenerated within 'dst'.
  It can be zero, which is not an error; it just means ZSTDv03_decompressContinue() has decoded some header.
*/

/* *************************************
*  Prefix - version detection
***************************************/
#define ZSTDv03_magicNumber 0xFD2FB523   /* v0.3 */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_V03_H_298734209782 */
3824 src/borg/algorithms/zstd/lib/legacy/zstd_v04.c Normal file
File diff suppressed because it is too large
137 src/borg/algorithms/zstd/lib/legacy/zstd_v04.h Normal file
@@ -0,0 +1,137 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_V04_H_91868324769238
#define ZSTD_V04_H_91868324769238

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Includes
***************************************/
#include <stddef.h>   /* size_t */


/* *************************************
*  Simple one-step function
***************************************/
/**
ZSTDv04_decompress() : decompress ZSTD frames compliant with v0.4.x format
    compressedSize : is the exact source size
    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
                      It must be equal to or larger than originalSize, otherwise decompression will fail.
    return : the number of bytes decompressed into the destination buffer (originalSize)
             or an errorCode if it fails (which can be tested using ZSTDv04_isError())
*/
size_t ZSTDv04_decompress( void* dst, size_t maxOriginalSize,
                     const void* src, size_t compressedSize);

/**
ZSTDv04_findFrameCompressedSize() : get the source length of a ZSTD frame compliant with v0.4.x format
    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    return : the number of bytes that would be read to decompress this frame
             or an errorCode if it fails (which can be tested using ZSTDv04_isError())
*/
size_t ZSTDv04_findFrameCompressedSize(const void* src, size_t compressedSize);

/**
ZSTDv04_isError() : tells if the result of ZSTDv04_decompress() is an error
*/
unsigned ZSTDv04_isError(size_t code);


/* *************************************
*  Advanced functions
***************************************/
typedef struct ZSTDv04_Dctx_s ZSTDv04_Dctx;
ZSTDv04_Dctx* ZSTDv04_createDCtx(void);
size_t ZSTDv04_freeDCtx(ZSTDv04_Dctx* dctx);

size_t ZSTDv04_decompressDCtx(ZSTDv04_Dctx* dctx,
                              void* dst, size_t maxOriginalSize,
                        const void* src, size_t compressedSize);


/* *************************************
*  Direct Streaming
***************************************/
size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx);

size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx);
size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
  Use the above functions alternately.
  ZSTDv04_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv04_decompressContinue().
  ZSTDv04_decompressContinue() will use previous data blocks to improve compression if they are located prior to the current block.
  The result is the number of bytes regenerated within 'dst'.
  It can be zero, which is not an error; it just means ZSTDv04_decompressContinue() has decoded some header.
*/


/* *************************************
*  Buffered Streaming
***************************************/
typedef struct ZBUFFv04_DCtx_s ZBUFFv04_DCtx;
ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void);
size_t         ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx);

size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx);
size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* dict, size_t dictSize);

size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr);

/** ************************************************
*  Streaming decompression
*
*  A ZBUFF_DCtx object is required to track streaming operations.
*  Use ZBUFFv04_createDCtx() and ZBUFFv04_freeDCtx() to create/release resources.
*  Use ZBUFFv04_decompressInit() to start a new decompression operation.
*  ZBUFF_DCtx objects can be reused multiple times.
*
*  Optionally, a reference to a static dictionary can be set, using ZBUFFv04_decompressWithDictionary().
*  It must be the same content as the one set during the compression phase.
*  Dictionary content must remain accessible during the decompression process.
*
*  Use ZBUFFv04_decompressContinue() repetitively to consume your input.
*  *srcSizePtr and *maxDstSizePtr can be any size.
*  The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to present the remaining input again.
*  The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters, or change dst.
*  @return : a hint to the preferred number of bytes to use as input for the next function call (it's only a hint, to improve latency),
*            or 0 when a frame is completely decoded,
*            or an error code, which can be tested using ZBUFFv04_isError().
*
*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv04_recommendedDInSize / ZBUFFv04_recommendedDOutSize
*  output : ZBUFFv04_recommendedDOutSize == 128 KB; the block size is the internal unit, which ensures it's always possible to write a full block when it's decoded.
*  input  : ZBUFFv04_recommendedDInSize == 128 KB + 3; just follow indications from ZBUFFv04_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
* **************************************************/
unsigned ZBUFFv04_isError(size_t errorCode);
const char* ZBUFFv04_getErrorName(size_t errorCode);


/** The functions below provide recommended buffer sizes for Compression or Decompression operations.
*   These sizes are not compulsory; they just tend to offer better latency */
size_t ZBUFFv04_recommendedDInSize(void);
size_t ZBUFFv04_recommendedDOutSize(void);


/* *************************************
*  Prefix - version detection
***************************************/
#define ZSTDv04_magicNumber 0xFD2FB524   /* v0.4 */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_V04_H_91868324769238 */
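
Reader's aid (an editor's sketch, not part of the bundled file): the buffered-streaming loop documented in the howto above could be driven as follows. The function name zbuffv04_pump, the single-chunk input, and the flushOutput callback are illustrative assumptions; the in/out pointer convention is the one documented above.

/* Hypothetical sketch of the ZBUFFv04 streaming loop. */
static size_t zbuffv04_pump(ZBUFFv04_DCtx* dctx,
                            const char* inBuf, size_t inSize,
                            char* outBuf, size_t outCapacity,
                            void (*flushOutput)(const char*, size_t))
{
    size_t inPos = 0;
    while (inPos < inSize) {
        size_t srcLeft = inSize - inPos;   /* in: bytes available; out: bytes consumed */
        size_t dstLeft = outCapacity;      /* in: buffer capacity; out: bytes produced */
        size_t const hint = ZBUFFv04_decompressContinue(dctx, outBuf, &dstLeft,
                                                        inBuf + inPos, &srcLeft);
        if (ZBUFFv04_isError(hint)) return hint;
        flushOutput(outBuf, dstLeft);      /* dst is overwritten on each call */
        inPos += srcLeft;
        if (hint == 0) break;              /* frame completely decoded */
    }
    return 0;
}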
4083 src/borg/algorithms/zstd/lib/legacy/zstd_v05.c Normal file
File diff suppressed because it is too large
157 src/borg/algorithms/zstd/lib/legacy/zstd_v05.h Normal file
@@ -0,0 +1,157 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTDv05_H
#define ZSTDv05_H

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Dependencies
***************************************/
#include <stddef.h>   /* size_t */
#include "mem.h"      /* U64, U32 */


/* *************************************
*  Simple functions
***************************************/
/*! ZSTDv05_decompress() :
    `compressedSize` : is the _exact_ size of the compressed blob, otherwise decompression will fail.
    `dstCapacity` must be large enough, equal to or larger than originalSize.
    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
              or an errorCode if it fails (which can be tested using ZSTDv05_isError()) */
size_t ZSTDv05_decompress( void* dst, size_t dstCapacity,
                     const void* src, size_t compressedSize);

/**
ZSTDv05_findFrameCompressedSize() : get the source length of a ZSTD frame
    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    return : the number of bytes that would be read to decompress this frame
             or an errorCode if it fails (which can be tested using ZSTDv05_isError())
*/
size_t ZSTDv05_findFrameCompressedSize(const void* src, size_t compressedSize);

/* *************************************
*  Helper functions
***************************************/
/* Error Management */
unsigned    ZSTDv05_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
const char* ZSTDv05_getErrorName(size_t code);     /*!< provides readable string for an error code */


/* *************************************
*  Explicit memory management
***************************************/
/** Decompression context */
typedef struct ZSTDv05_DCtx_s ZSTDv05_DCtx;
ZSTDv05_DCtx* ZSTDv05_createDCtx(void);
size_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx);      /*!< @return : errorCode */

/** ZSTDv05_decompressDCtx() :
*   Same as ZSTDv05_decompress(), but requires an already allocated ZSTDv05_DCtx (see ZSTDv05_createDCtx()) */
size_t ZSTDv05_decompressDCtx(ZSTDv05_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);


/*-***********************
*  Simple Dictionary API
*************************/
/*! ZSTDv05_decompress_usingDict() :
*   Decompression using a pre-defined Dictionary content (see dictBuilder).
*   The dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.
*   Note : dict can be NULL, in which case it's equivalent to ZSTDv05_decompressDCtx() */
size_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx,
                                    void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize,
                              const void* dict, size_t dictSize);

/*-************************
*  Advanced Streaming API
***************************/
typedef enum { ZSTDv05_fast, ZSTDv05_greedy, ZSTDv05_lazy, ZSTDv05_lazy2, ZSTDv05_btlazy2, ZSTDv05_opt, ZSTDv05_btopt } ZSTDv05_strategy;
typedef struct {
    U64 srcSize;
    U32 windowLog;     /* the only useful information to retrieve */
    U32 contentLog; U32 hashLog; U32 searchLog; U32 searchLength; U32 targetLength; ZSTDv05_strategy strategy;
} ZSTDv05_parameters;
size_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize);

size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize);
void   ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx);
size_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx);
size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);


/*-***********************
*  ZBUFF API
*************************/
typedef struct ZBUFFv05_DCtx_s ZBUFFv05_DCtx;
ZBUFFv05_DCtx* ZBUFFv05_createDCtx(void);
size_t         ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* dctx);

size_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* dctx);
size_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* dctx, const void* dict, size_t dictSize);

size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* dctx,
                                   void* dst, size_t* dstCapacityPtr,
                             const void* src, size_t* srcSizePtr);

/*-***************************************************************************
*  Streaming decompression
*
*  A ZBUFFv05_DCtx object is required to track streaming operations.
*  Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources.
*  Use ZBUFFv05_decompressInit() to start a new decompression operation,
*  or ZBUFFv05_decompressInitDictionary() if decompression requires a dictionary.
*  Note that ZBUFFv05_DCtx objects can be reused multiple times.
*
*  Use ZBUFFv05_decompressContinue() repetitively to consume your input.
*  *srcSizePtr and *dstCapacityPtr can be any size.
*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to present the remaining input again.
*  The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change @dst.
*  @return : a hint to the preferred number of bytes to use as input for the next function call (it's only a hint, to help latency),
*            or 0 when a frame is completely decoded,
*            or an error code, which can be tested using ZBUFFv05_isError().
*
*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv05_recommendedDInSize() / ZBUFFv05_recommendedDOutSize()
*  output : ZBUFFv05_recommendedDOutSize == 128 KB; the block size is the internal unit, which ensures it's always possible to write a full block when decoded.
*  input  : ZBUFFv05_recommendedDInSize == 128 KB + 3; just follow indications from ZBUFFv05_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
* *******************************************************************************/


/* *************************************
*  Tool functions
***************************************/
unsigned ZBUFFv05_isError(size_t errorCode);
const char* ZBUFFv05_getErrorName(size_t errorCode);

/** The functions below provide recommended buffer sizes for Compression or Decompression operations.
*   These sizes are just hints, and tend to offer better latency */
size_t ZBUFFv05_recommendedDInSize(void);
size_t ZBUFFv05_recommendedDOutSize(void);


/*-*************************************
*  Constants
***************************************/
#define ZSTDv05_MAGICNUMBER 0xFD2FB525   /* v0.5 */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTDv05_H */
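
Reader's aid (an editor's sketch, not part of the bundled file): a one-shot dictionary decompression per the Simple Dictionary API notes above. The wrapper name decodeWithDictV05 and all buffers are caller-supplied assumptions.

/* Hypothetical sketch: one-shot decompression with an external dictionary. */
size_t decodeWithDictV05(void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                         const void* dict, size_t dictSize)
{
    ZSTDv05_DCtx* const dctx = ZSTDv05_createDCtx();
    if (dctx == NULL) return (size_t)-1;   /* allocation failure (error convention assumed) */
    {   /* dict may be NULL, which degrades to plain ZSTDv05_decompressDCtx() */
        size_t const result = ZSTDv05_decompress_usingDict(dctx, dst, dstCapacity,
                                                           src, srcSize, dict, dictSize);
        ZSTDv05_freeDCtx(dctx);
        return result;   /* decoded size, or an error code (test with ZSTDv05_isError()) */
    }
}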
4200 src/borg/algorithms/zstd/lib/legacy/zstd_v06.c Normal file
File diff suppressed because it is too large
167 src/borg/algorithms/zstd/lib/legacy/zstd_v06.h Normal file
@@ -0,0 +1,167 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTDv06_H
#define ZSTDv06_H

#if defined (__cplusplus)
extern "C" {
#endif

/*====== Dependency ======*/
#include <stddef.h>   /* size_t */


/*====== Export for Windows ======*/
/*!
*  ZSTDv06_DLL_EXPORT :
*  Enable exporting of functions when building a Windows DLL
*/
#if defined(_WIN32) && defined(ZSTDv06_DLL_EXPORT) && (ZSTDv06_DLL_EXPORT==1)
#  define ZSTDLIBv06_API __declspec(dllexport)
#else
#  define ZSTDLIBv06_API
#endif


/* *************************************
*  Simple functions
***************************************/
/*! ZSTDv06_decompress() :
    `compressedSize` : is the _exact_ size of the compressed blob, otherwise decompression will fail.
    `dstCapacity` must be large enough, equal to or larger than originalSize.
    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
              or an errorCode if it fails (which can be tested using ZSTDv06_isError()) */
ZSTDLIBv06_API size_t ZSTDv06_decompress( void* dst, size_t dstCapacity,
                                    const void* src, size_t compressedSize);

/**
ZSTDv06_findFrameCompressedSize() : get the source length of a ZSTD frame
    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    return : the number of bytes that would be read to decompress this frame
             or an errorCode if it fails (which can be tested using ZSTDv06_isError())
*/
size_t ZSTDv06_findFrameCompressedSize(const void* src, size_t compressedSize);

/* *************************************
*  Helper functions
***************************************/
ZSTDLIBv06_API size_t ZSTDv06_compressBound(size_t srcSize); /*!< maximum compressed size (worst case scenario) */

/* Error Management */
ZSTDLIBv06_API unsigned    ZSTDv06_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
ZSTDLIBv06_API const char* ZSTDv06_getErrorName(size_t code);     /*!< provides readable string for an error code */


/* *************************************
*  Explicit memory management
***************************************/
/** Decompression context */
typedef struct ZSTDv06_DCtx_s ZSTDv06_DCtx;
ZSTDLIBv06_API ZSTDv06_DCtx* ZSTDv06_createDCtx(void);
ZSTDLIBv06_API size_t ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx);      /*!< @return : errorCode */

/** ZSTDv06_decompressDCtx() :
*   Same as ZSTDv06_decompress(), but requires an already allocated ZSTDv06_DCtx (see ZSTDv06_createDCtx()) */
ZSTDLIBv06_API size_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);


/*-***********************
*  Dictionary API
*************************/
/*! ZSTDv06_decompress_usingDict() :
*   Decompression using a pre-defined Dictionary content (see dictBuilder).
*   The dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.
*   Note : dict can be NULL, in which case it's equivalent to ZSTDv06_decompressDCtx() */
ZSTDLIBv06_API size_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx,
                                                   void* dst, size_t dstCapacity,
                                             const void* src, size_t srcSize,
                                             const void* dict, size_t dictSize);


/*-************************
*  Advanced Streaming API
***************************/
struct ZSTDv06_frameParams_s { unsigned long long frameContentSize; unsigned windowLog; };
typedef struct ZSTDv06_frameParams_s ZSTDv06_frameParams;

ZSTDLIBv06_API size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
ZSTDLIBv06_API size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIBv06_API void   ZSTDv06_copyDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx);

ZSTDLIBv06_API size_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx);
ZSTDLIBv06_API size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);


/* *************************************
*  ZBUFF API
***************************************/
typedef struct ZBUFFv06_DCtx_s ZBUFFv06_DCtx;
ZSTDLIBv06_API ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void);
ZSTDLIBv06_API size_t         ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* dctx);

ZSTDLIBv06_API size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* dctx);
ZSTDLIBv06_API size_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* dctx, const void* dict, size_t dictSize);

ZSTDLIBv06_API size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* dctx,
                                                  void* dst, size_t* dstCapacityPtr,
                                            const void* src, size_t* srcSizePtr);

/*-***************************************************************************
*  Streaming decompression howto
*
*  A ZBUFFv06_DCtx object is required to track streaming operations.
*  Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources.
*  Use ZBUFFv06_decompressInit() to start a new decompression operation,
*  or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary.
*  Note that ZBUFFv06_DCtx objects can be re-init multiple times.
*
*  Use ZBUFFv06_decompressContinue() repetitively to consume your input.
*  *srcSizePtr and *dstCapacityPtr can be any size.
*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to present the remaining input again.
*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
*  @return : a hint to the preferred number of bytes to use as input for the next function call (it's only a hint, to help latency),
*            or 0 when a frame is completely decoded,
*            or an error code, which can be tested using ZBUFFv06_isError().
*
*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize()
*  output : ZBUFFv06_recommendedDOutSize == 128 KB; the block size is the internal unit, which ensures it's always possible to write a full block when decoded.
*  input  : ZBUFFv06_recommendedDInSize == 128 KB + 3;
*           just follow indications from ZBUFFv06_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
* *******************************************************************************/


/* *************************************
*  Tool functions
***************************************/
ZSTDLIBv06_API unsigned ZBUFFv06_isError(size_t errorCode);
ZSTDLIBv06_API const char* ZBUFFv06_getErrorName(size_t errorCode);

/** The functions below provide recommended buffer sizes for Compression or Decompression operations.
*   These sizes are just hints; they tend to offer better latency */
ZSTDLIBv06_API size_t ZBUFFv06_recommendedDInSize(void);
ZSTDLIBv06_API size_t ZBUFFv06_recommendedDOutSize(void);


/*-*************************************
*  Constants
***************************************/
#define ZSTDv06_MAGICNUMBER 0xFD2FB526   /* v0.6 */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTDv06_H */
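
Reader's aid (an editor's sketch, not part of the bundled file): probing a frame header with ZSTDv06_getFrameParams(), which, per the comment above, does not consume input. The return-value convention (0 on success, a positive byte count when more input is needed, or an error code) is an assumption stated in the comments below.

/* Hypothetical sketch: non-consuming frame-header probe. */
static int probeFrameV06(const void* src, size_t srcSize,
                         unsigned long long* contentSize)
{
    ZSTDv06_frameParams fparams;
    size_t const r = ZSTDv06_getFrameParams(&fparams, src, srcSize);
    if (ZSTDv06_isError(r)) return -1;   /* not a valid v0.6 frame header */
    if (r > 0) return (int)r;            /* assumed: need at least r bytes to decode the header */
    *contentSize = fparams.frameContentSize;   /* 0 is assumed to mean "unknown" */
    return 0;
}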
4578 src/borg/algorithms/zstd/lib/legacy/zstd_v07.c Normal file
File diff suppressed because it is too large
182 src/borg/algorithms/zstd/lib/legacy/zstd_v07.h Normal file
@@ -0,0 +1,182 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTDv07_H_235446
#define ZSTDv07_H_235446

#if defined (__cplusplus)
extern "C" {
#endif

/*====== Dependency ======*/
#include <stddef.h>   /* size_t */


/*====== Export for Windows ======*/
/*!
*  ZSTDv07_DLL_EXPORT :
*  Enable exporting of functions when building a Windows DLL
*/
#if defined(_WIN32) && defined(ZSTDv07_DLL_EXPORT) && (ZSTDv07_DLL_EXPORT==1)
#  define ZSTDLIBv07_API __declspec(dllexport)
#else
#  define ZSTDLIBv07_API
#endif


/* *************************************
*  Simple API
***************************************/
/*! ZSTDv07_getDecompressedSize() :
*   @return : decompressed size if known, 0 otherwise.
       note 1 : if `0`, follow up with ZSTDv07_getFrameParams() to know the precise failure cause.
       note 2 : the decompressed size could be wrong or intentionally modified!
                Always ensure the result fits within the application's authorized limits. */
unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize);

/*! ZSTDv07_decompress() :
    `compressedSize` : must be the _exact_ size of the compressed input, otherwise decompression will fail.
    `dstCapacity` must be equal to or larger than originalSize.
    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
              or an errorCode if it fails (which can be tested using ZSTDv07_isError()) */
ZSTDLIBv07_API size_t ZSTDv07_decompress( void* dst, size_t dstCapacity,
                                    const void* src, size_t compressedSize);

/**
ZSTDv07_findFrameCompressedSize() : get the source length of a ZSTD frame
    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    return : the number of bytes that would be read to decompress this frame
             or an errorCode if it fails (which can be tested using ZSTDv07_isError())
*/
size_t ZSTDv07_findFrameCompressedSize(const void* src, size_t compressedSize);

/*====== Helper functions ======*/
ZSTDLIBv07_API unsigned    ZSTDv07_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
ZSTDLIBv07_API const char* ZSTDv07_getErrorName(size_t code);     /*!< provides readable string from an error code */


/*-*************************************
*  Explicit memory management
***************************************/
/** Decompression context */
typedef struct ZSTDv07_DCtx_s ZSTDv07_DCtx;
ZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx(void);
ZSTDLIBv07_API size_t ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx);      /*!< @return : errorCode */

/** ZSTDv07_decompressDCtx() :
*   Same as ZSTDv07_decompress(), but requires an already allocated ZSTDv07_DCtx (see ZSTDv07_createDCtx()) */
ZSTDLIBv07_API size_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);


/*-************************
*  Simple dictionary API
***************************/
/*! ZSTDv07_decompress_usingDict() :
*   Decompression using a pre-defined Dictionary content (see dictBuilder).
*   The dictionary must be identical to the one used during compression.
*   Note : This function loads the dictionary on every call, which adds significant startup time */
ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx,
                                                   void* dst, size_t dstCapacity,
                                             const void* src, size_t srcSize,
                                             const void* dict, size_t dictSize);


/*-**************************
*  Advanced Dictionary API
****************************/
/*! ZSTDv07_createDDict() :
*   Create a digested dictionary, ready to start a decompression operation without startup delay.
*   `dict` can be released after creation */
typedef struct ZSTDv07_DDict_s ZSTDv07_DDict;
ZSTDLIBv07_API ZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize);
ZSTDLIBv07_API size_t ZSTDv07_freeDDict(ZSTDv07_DDict* ddict);

/*! ZSTDv07_decompress_usingDDict() :
*   Decompression using a pre-digested Dictionary.
*   Faster startup than ZSTDv07_decompress_usingDict(); recommended when the same dictionary is used multiple times. */
ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx,
                                                    void* dst, size_t dstCapacity,
                                              const void* src, size_t srcSize,
                                              const ZSTDv07_DDict* ddict);

typedef struct {
    unsigned long long frameContentSize;
    unsigned windowSize;
    unsigned dictID;
    unsigned checksumFlag;
} ZSTDv07_frameParams;

ZSTDLIBv07_API size_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input */


/* *************************************
*  Streaming functions
***************************************/
typedef struct ZBUFFv07_DCtx_s ZBUFFv07_DCtx;
ZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx(void);
ZSTDLIBv07_API size_t         ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* dctx);

ZSTDLIBv07_API size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* dctx);
ZSTDLIBv07_API size_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* dctx, const void* dict, size_t dictSize);

ZSTDLIBv07_API size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* dctx,
                                                  void* dst, size_t* dstCapacityPtr,
                                            const void* src, size_t* srcSizePtr);

/*-***************************************************************************
*  Streaming decompression howto
*
*  A ZBUFFv07_DCtx object is required to track streaming operations.
*  Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources.
*  Use ZBUFFv07_decompressInit() to start a new decompression operation,
*  or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary.
*  Note that ZBUFFv07_DCtx objects can be re-init multiple times.
*
*  Use ZBUFFv07_decompressContinue() repetitively to consume your input.
*  *srcSizePtr and *dstCapacityPtr can be any size.
*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to present the remaining input again.
*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
*  @return : a hint to the preferred number of bytes to use as input for the next function call (it's only a hint, to help latency),
*            or 0 when a frame is completely decoded,
*            or an error code, which can be tested using ZBUFFv07_isError().
*
*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize()
*  output : ZBUFFv07_recommendedDOutSize == 128 KB; the block size is the internal unit, which ensures it's always possible to write a full block when decoded.
*  input  : ZBUFFv07_recommendedDInSize == 128 KB + 3;
*           just follow indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
* *******************************************************************************/


/* *************************************
*  Tool functions
***************************************/
ZSTDLIBv07_API unsigned ZBUFFv07_isError(size_t errorCode);
ZSTDLIBv07_API const char* ZBUFFv07_getErrorName(size_t errorCode);

/** The functions below provide recommended buffer sizes for Compression or Decompression operations.
*   These sizes are just hints; they tend to offer better latency */
ZSTDLIBv07_API size_t ZBUFFv07_recommendedDInSize(void);
ZSTDLIBv07_API size_t ZBUFFv07_recommendedDOutSize(void);


/*-*************************************
*  Constants
***************************************/
#define ZSTDv07_MAGICNUMBER 0xFD2FB527   /* v0.7 */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTDv07_H_235446 */
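
Reader's aid (an editor's sketch, not part of the bundled file): the Advanced Dictionary API above digests a dictionary once and reuses it across many frames. The wrapper name decodeManyV07, the frame arrays, and the concatenated-output layout are illustrative assumptions.

/* Hypothetical sketch: decode nbFrames frames that share one dictionary. */
static size_t decodeManyV07(void* dst, size_t dstCapacity,
                            const void* const* frames, const size_t* frameSizes, size_t nbFrames,
                            const void* dict, size_t dictSize)
{
    size_t total = 0;
    ZSTDv07_DDict* const ddict = ZSTDv07_createDDict(dict, dictSize);  /* dict buffer can be released after this */
    ZSTDv07_DCtx*  const dctx  = ZSTDv07_createDCtx();
    if (ddict != NULL && dctx != NULL) {
        size_t i;
        for (i = 0; i < nbFrames; i++) {
            size_t const r = ZSTDv07_decompress_usingDDict(dctx,
                                    (char*)dst + total, dstCapacity - total,
                                    frames[i], frameSizes[i], ddict);
            if (ZSTDv07_isError(r)) { total = r; break; }   /* propagate the error code */
            total += r;
        }
    }
    if (dctx)  ZSTDv07_freeDCtx(dctx);
    if (ddict) ZSTDv07_freeDDict(ddict);
    return total;
}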
1386 src/borg/algorithms/zstd/lib/zstd.h Normal file
File diff suppressed because it is too large