Skip to content

Commit

Permalink
first v4 commit: tests are not ready.
Browse files Browse the repository at this point in the history
  • Loading branch information
pierrec committed Apr 20, 2020
1 parent 08cb7fb commit fb4a2ec
Show file tree
Hide file tree
Showing 24 changed files with 1,142 additions and 911 deletions.
6 changes: 3 additions & 3 deletions bench_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ func BenchmarkCompressHC(b *testing.B) {
b.ResetTimer()

for i := 0; i < b.N; i++ {
_, _ = lz4.CompressBlockHC(pg1661, buf, 16)
_, _ = lz4.CompressBlockHC(pg1661, buf, 16, nil)
}
}

Expand Down Expand Up @@ -128,7 +128,7 @@ func BenchmarkSkipBytesRand(b *testing.B) { benchmarkSkipBytes(b, randomLZ4) }

func benchmarkCompress(b *testing.B, uncompressed []byte) {
w := bytes.NewBuffer(nil)
zw := lz4.NewWriter(w)
zw, _ := lz4.NewWriter(w)
r := bytes.NewReader(uncompressed)

// Determine the compressed size of testfile.
Expand Down Expand Up @@ -161,7 +161,7 @@ func BenchmarkCompressRand(b *testing.B) { benchmarkCompress(b, random) }
func BenchmarkWriterReset(b *testing.B) {
b.ReportAllocs()

zw := lz4.NewWriter(nil)
zw, _ := lz4.NewWriter(nil)
src := mustLoadFile("testdata/gettysburg.txt")
var buf bytes.Buffer

Expand Down
42 changes: 26 additions & 16 deletions block.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,15 @@ import (
"sync"
)

// htPool recycles the integer hash tables used by the block compressors,
// so repeated calls do not pay for a fresh htSize-element allocation.
var htPool = sync.Pool{
	New: func() interface{} {
		return make([]int, htSize)
	},
}

// recoverBlock turns a panic raised during block (de)compression into
// ErrInvalidSourceShortBuffer, unless *e already holds an error.
// It is intended to be used as a deferred call: defer recoverBlock(&err).
func recoverBlock(e *error) {
	if recover() == nil {
		// No panic occurred: leave any existing error untouched.
		return
	}
	if *e == nil {
		*e = ErrInvalidSourceShortBuffer
	}
}

// blockHash hashes the lower 6 bytes into a value < htSize.
func blockHash(x uint64) uint32 {
const prime6bytes = 227718039650203
Expand Down Expand Up @@ -56,14 +65,13 @@ func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
// This significantly speeds up incompressible data and usually has very small impact on compression.
// bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
const adaptSkipLog = 7
if len(hashTable) < htSize {
htIface := htPool.Get()
defer htPool.Put(htIface)
hashTable = (*(htIface).(*[htSize]int))[:]
if cap(hashTable) < htSize {
hashTable = htPool.Get().([]int)
defer htPool.Put(hashTable)
} else {
hashTable = hashTable[:htSize]
}
// Prove to the compiler the table has at least htSize elements.
// The compiler can see that "uint32() >> hashShift" cannot be out of bounds.
hashTable = hashTable[:htSize]
_ = hashTable[htSize-1]

// si: Current position of the search.
// anchor: Position of the current literals.
Expand Down Expand Up @@ -225,13 +233,6 @@ lastLiterals:
return di, nil
}

// Pool of hash tables for CompressBlock.
var htPool = sync.Pool{
New: func() interface{} {
return new([htSize]int)
},
}

// blockHash hashes 4 bytes into a value < winSize.
func blockHashHC(x uint32) uint32 {
const hasher uint32 = 2654435761 // Knuth multiplicative hash.
Expand All @@ -249,7 +250,7 @@ func blockHashHC(x uint32) uint32 {
// the compressed size is 0 and no error, then the data is incompressible.
//
// An error is returned if the destination buffer is too small.
func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {
func CompressBlockHC(src, dst []byte, depth CompressionLevel, hashTable []int) (_ int, err error) {
defer recoverBlock(&err)

// Return 0, nil only if the destination buffer size is < CompressBlockBound.
Expand All @@ -264,7 +265,16 @@ func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {

// hashTable: stores the last position found for a given hash
// chainTable: stores previous positions for a given hash
var hashTable, chainTable [winSize]int
if cap(hashTable) < htSize {
hashTable = htPool.Get().([]int)
defer htPool.Put(hashTable)
} else {
hashTable = hashTable[:htSize]
}
_ = hashTable[htSize-1]
chainTable := htPool.Get().([]int)
defer htPool.Put(chainTable)
_ = chainTable[htSize-1]

if depth <= 0 {
depth = winSize
Expand Down
4 changes: 2 additions & 2 deletions block_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ func TestCompressUncompressBlock(t *testing.T) {
t.Run(fmt.Sprintf("%s HC", tc.file), func(t *testing.T) {
// t.Parallel()
nhc = run(t, tc, func(src, dst []byte) (int, error) {
return lz4.CompressBlockHC(src, dst, -1)
return lz4.CompressBlockHC(src, dst, 16, nil)
})
})
})
Expand Down Expand Up @@ -153,7 +153,7 @@ func TestCompressCornerCase_CopyDstUpperBound(t *testing.T) {
t.Run(fmt.Sprintf("%s HC", file), func(t *testing.T) {
t.Parallel()
run(src, func(src, dst []byte) (int, error) {
return lz4.CompressBlockHC(src, dst, -1)
return lz4.CompressBlockHC(src, dst, 16, nil)
})
})
}
Expand Down
23 changes: 0 additions & 23 deletions debug.go

This file was deleted.

7 changes: 0 additions & 7 deletions debug_stub.go

This file was deleted.

30 changes: 0 additions & 30 deletions errors.go

This file was deleted.

5 changes: 2 additions & 3 deletions example_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ func Example() {

// The pipe will uncompress the data from the writer.
pr, pw := io.Pipe()
zw := lz4.NewWriter(pw)
zw, _ := lz4.NewWriter(pw)
zr := lz4.NewReader(pr)

go func() {
Expand All @@ -36,9 +36,8 @@ func ExampleCompressBlock() {
s := "hello world"
data := []byte(strings.Repeat(s, 100))
buf := make([]byte, len(data))
ht := make([]int, 64<<10) // buffer for the compression table

n, err := lz4.CompressBlock(data, buf, ht)
n, err := lz4.CompressBlock(data, buf, nil)
if err != nil {
fmt.Println(err)
}
Expand Down
Loading

0 comments on commit fb4a2ec

Please sign in to comment.