
Commit

Fix typos (#986)
deining authored Aug 12, 2024
1 parent 8b81499 commit 3868468
Showing 17 changed files with 70 additions and 70 deletions.
8 changes: 4 additions & 4 deletions README.md
@@ -81,7 +81,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
- * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+ * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799

* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
@@ -136,7 +136,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
* zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
* zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
@@ -339,7 +339,7 @@ While the release has been extensively tested, it is recommended to testing when
* s2: Fix binaries.

* Feb 25, 2021 (v1.11.8)
- * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.
+ * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
@@ -518,7 +518,7 @@ While the release has been extensively tested, it is recommended to testing when
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
* Feb 19, 2016: Handle small payloads faster in level 1-3.
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
- * Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5.
+ * Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
* Feb 14, 2016: Snappy: Merge upstream changes.
* Feb 14, 2016: Snappy: Fix aggressive skipping.
* Feb 14, 2016: Snappy: Update benchmark.
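One changelog entry above adds zstd's `WithDecodeAllCapLimit` option. A minimal sketch of how it might be exercised, assuming the documented behavior that `DecodeAll` then refuses to grow output beyond `cap(dst)`:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Compress a small payload so there is something to decode.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	compressed := enc.EncodeAll([]byte("hello, zstd"), nil)
	enc.Close()

	// WithDecodeAllCapLimit makes DecodeAll treat cap(dst) as the
	// maximum acceptable output size instead of growing freely.
	dec, err := zstd.NewReader(nil, zstd.WithDecodeAllCapLimit(true))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	dst := make([]byte, 0, 1024) // decoded output must fit in 1 KiB
	out, err := dec.DecodeAll(compressed, dst)
	if err != nil {
		// Expect zstd.ErrDecoderSizeExceeded when the cap is too small.
		panic(err)
	}
	fmt.Printf("decoded %d bytes\n", len(out))
}
```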
32 changes: 16 additions & 16 deletions compressible_test.go
@@ -8,7 +8,7 @@ import (

func BenchmarkEstimate(b *testing.B) {
b.ReportAllocs()
- // (predictable, low entropy distibution)
+ // (predictable, low entropy distribution)
b.Run("zeroes-5k", func(b *testing.B) {
var testData = make([]byte, 5000)
b.SetBytes(int64(len(testData)))
@@ -19,7 +19,7 @@ func BenchmarkEstimate(b *testing.B) {
b.Log(Estimate(testData))
})

- // (predictable, high entropy distibution)
+ // (predictable, high entropy distribution)
b.Run("predictable-5k", func(b *testing.B) {
var testData = make([]byte, 5000)
for i := range testData {
@@ -33,7 +33,7 @@ func BenchmarkEstimate(b *testing.B) {
b.Log(Estimate(testData))
})

- // (not predictable, high entropy distibution)
+ // (not predictable, high entropy distribution)
b.Run("random-500b", func(b *testing.B) {
var testData = make([]byte, 500)
rand.Read(testData)
@@ -45,7 +45,7 @@ func BenchmarkEstimate(b *testing.B) {
b.Log(Estimate(testData))
})

- // (not predictable, high entropy distibution)
+ // (not predictable, high entropy distribution)
b.Run("random-5k", func(b *testing.B) {
var testData = make([]byte, 5000)
rand.Read(testData)
@@ -57,7 +57,7 @@ func BenchmarkEstimate(b *testing.B) {
b.Log(Estimate(testData))
})

- // (not predictable, high entropy distibution)
+ // (not predictable, high entropy distribution)
b.Run("random-50k", func(b *testing.B) {
var testData = make([]byte, 50000)
rand.Read(testData)
@@ -69,7 +69,7 @@ func BenchmarkEstimate(b *testing.B) {
b.Log(Estimate(testData))
})

- // (not predictable, high entropy distibution)
+ // (not predictable, high entropy distribution)
b.Run("random-500k", func(b *testing.B) {
var testData = make([]byte, 500000)
rand.Read(testData)
@@ -81,7 +81,7 @@ func BenchmarkEstimate(b *testing.B) {
b.Log(Estimate(testData))
})

- // (not predictable, medium entropy distibution)
+ // (not predictable, medium entropy distribution)
b.Run("base-32-5k", func(b *testing.B) {
var testData = make([]byte, 5000)
rand.Read(testData)
@@ -95,7 +95,7 @@ func BenchmarkEstimate(b *testing.B) {
}
b.Log(Estimate(testData))
})
- // (medium predictable, medium entropy distibution)
+ // (medium predictable, medium entropy distribution)
b.Run("text", func(b *testing.B) {
var testData = []byte(`If compression is done per-chunk, care should be taken that it doesn't leave restic backups open to watermarking/fingerprinting attacks.
This is essentially the same problem we discussed related to fingerprinting the CDC deduplication process:
@@ -122,7 +122,7 @@ Thoughts?`)

func BenchmarkSnannonEntropyBits(b *testing.B) {
b.ReportAllocs()
- // (predictable, low entropy distibution)
+ // (predictable, low entropy distribution)
b.Run("zeroes-5k", func(b *testing.B) {
var testData = make([]byte, 5000)
b.SetBytes(int64(len(testData)))
@@ -133,7 +133,7 @@ func BenchmarkSnannonEntropyBits(b *testing.B) {
b.Log(ShannonEntropyBits(testData))
})

- // (predictable, high entropy distibution)
+ // (predictable, high entropy distribution)
b.Run("predictable-5k", func(b *testing.B) {
var testData = make([]byte, 5000)
for i := range testData {
@@ -147,7 +147,7 @@ func BenchmarkSnannonEntropyBits(b *testing.B) {
b.Log(ShannonEntropyBits(testData))
})

- // (not predictable, high entropy distibution)
+ // (not predictable, high entropy distribution)
b.Run("random-500b", func(b *testing.B) {
var testData = make([]byte, 500)
rand.Read(testData)
@@ -159,7 +159,7 @@ func BenchmarkSnannonEntropyBits(b *testing.B) {
b.Log(ShannonEntropyBits(testData))
})

- // (not predictable, high entropy distibution)
+ // (not predictable, high entropy distribution)
b.Run("random-5k", func(b *testing.B) {
var testData = make([]byte, 5000)
rand.Read(testData)
@@ -171,7 +171,7 @@ func BenchmarkSnannonEntropyBits(b *testing.B) {
b.Log(ShannonEntropyBits(testData))
})

- // (not predictable, high entropy distibution)
+ // (not predictable, high entropy distribution)
b.Run("random-50k", func(b *testing.B) {
var testData = make([]byte, 50000)
rand.Read(testData)
@@ -183,7 +183,7 @@ func BenchmarkSnannonEntropyBits(b *testing.B) {
b.Log(ShannonEntropyBits(testData))
})

- // (not predictable, high entropy distibution)
+ // (not predictable, high entropy distribution)
b.Run("random-500k", func(b *testing.B) {
var testData = make([]byte, 500000)
rand.Read(testData)
@@ -195,7 +195,7 @@ func BenchmarkSnannonEntropyBits(b *testing.B) {
b.Log(ShannonEntropyBits(testData))
})

- // (not predictable, medium entropy distibution)
+ // (not predictable, medium entropy distribution)
b.Run("base-32-5k", func(b *testing.B) {
var testData = make([]byte, 5000)
rand.Read(testData)
@@ -209,7 +209,7 @@ func BenchmarkSnannonEntropyBits(b *testing.B) {
}
b.Log(ShannonEntropyBits(testData))
})
- // (medium predictable, medium entropy distibution)
+ // (medium predictable, medium entropy distribution)
b.Run("text", func(b *testing.B) {
var testData = []byte(`If compression is done per-chunk, care should be taken that it doesn't leave restic backups open to watermarking/fingerprinting attacks.
This is essentially the same problem we discussed related to fingerprinting the CDC deduplication process:
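The benchmarks above exercise `Estimate` and `ShannonEntropyBits` from the package root on inputs of varying predictability and entropy. A small sketch of calling them directly, mirroring the "zeroes-5k" and "random-5k" cases:

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/klauspost/compress"
)

func main() {
	// Predictable, low-entropy input, like the "zeroes-5k" case.
	zeroes := make([]byte, 5000)

	// Not predictable, high-entropy input, like the "random-5k" case.
	random := make([]byte, 5000)
	rand.Read(random)

	// Estimate returns a normalized compressibility score (values near
	// zero suggest incompressible data); ShannonEntropyBits returns the
	// minimum number of bits needed to entropy-encode the input.
	fmt.Println("zeroes:", compress.Estimate(zeroes), compress.ShannonEntropyBits(zeroes))
	fmt.Println("random:", compress.Estimate(random), compress.ShannonEntropyBits(random))
}
```

The all-zero input should report near-zero entropy, while the random input should approach 8 bits per byte.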
2 changes: 1 addition & 1 deletion flate/deflate.go
@@ -861,7 +861,7 @@ func (d *compressor) reset(w io.Writer) {
}
switch d.compressionLevel.chain {
case 0:
- // level was NoCompression or ConstantCompresssion.
+ // level was NoCompression or ConstantCompression.
d.windowEnd = 0
default:
s := d.state
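The comment fixed above refers to this package's `ConstantCompression` level, which uses no match chain (chain length 0) and therefore takes the cheap reset path. A minimal sketch of selecting that level, assuming the usual `flate.NewWriter` API of this repository:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// ConstantCompression does Huffman-only encoding with no matching,
	// i.e. the chain==0 case handled in the reset code above.
	w, err := flate.NewWriter(&buf, flate.ConstantCompression)
	if err != nil {
		panic(err)
	}
	w.Write(bytes.Repeat([]byte("abcd"), 1000))
	w.Close()

	fmt.Printf("4000 input bytes -> %d compressed bytes\n", buf.Len())
}
```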
2 changes: 1 addition & 1 deletion fse/decompress.go
@@ -15,7 +15,7 @@ const (
// It is possible, but by no way guaranteed that corrupt data will
// return an error.
// It is up to the caller to verify integrity of the returned data.
- // Use a predefined Scrach to set maximum acceptable output size.
+ // Use a predefined Scratch to set maximum acceptable output size.
func Decompress(b []byte, s *Scratch) ([]byte, error) {
s, err := s.prepare(b)
if err != nil {
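The corrected doc comment points at passing a predefined `Scratch` to bound the decoded size. A sketch of a compress/decompress round trip, assuming `Scratch` exposes a `DecompressLimit` field for this purpose:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/fse"
)

func main() {
	in := bytes.Repeat([]byte("abcd"), 1000)

	var s fse.Scratch
	compressed, err := fse.Compress(in, &s)
	if err != nil {
		panic(err) // e.g. fse.ErrIncompressible for high-entropy input
	}

	// A predefined Scratch caps the maximum acceptable decoded size,
	// as the doc comment above describes (assumed field name).
	s.DecompressLimit = len(in)
	out, err := fse.Decompress(compressed, &s)
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(in, out))
}
```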
2 changes: 1 addition & 1 deletion gzhttp/transport_test.go
@@ -127,7 +127,7 @@ func TestTransportInvalid(t *testing.T) {
server := httptest.NewServer(newTestHandler(bin))

c := http.Client{Transport: Transport(http.DefaultTransport)}
- // Serves json as gzippped...
+ // Serves json as gzipped...
resp, err := c.Get(server.URL + "/gzipped")
if err != nil {
t.Fatal(err)
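The test above wraps `http.DefaultTransport` with `gzhttp.Transport`, which transparently requests and decompresses gzip responses. A minimal client sketch (the URL is a placeholder, not a real endpoint):

```go
package main

import (
	"fmt"
	"io"
	"net/http"

	"github.com/klauspost/compress/gzhttp"
)

func main() {
	// Transport adds Accept-Encoding and decompresses the response body
	// transparently when the server answers with gzip.
	c := http.Client{Transport: gzhttp.Transport(http.DefaultTransport)}

	resp, err := c.Get("https://example.com/") // placeholder URL
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("read %d decompressed bytes\n", len(body))
}
```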
4 changes: 2 additions & 2 deletions huff0/decompress.go
@@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
errs++
}
if errs > 0 {
fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
continue
}
// Ensure that all combinations are covered.
@@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
errs++
}
if errs > 20 {
fmt.Fprintf(w, "%d errros, stopping\n", errs)
fmt.Fprintf(w, "%d errors, stopping\n", errs)
break
}
}
6 changes: 3 additions & 3 deletions s2/_generate/gen.go
@@ -1920,7 +1920,7 @@ func (o options) emitLiteral(name string, litLen, retval, dstBase, litBase reg.G
return
}

- // genEmitRepeat generates a standlone emitRepeat.
+ // genEmitRepeat generates a standalone emitRepeat.
func (o options) genEmitRepeat() {
TEXT("emitRepeat", NOSPLIT, "func(dst []byte, offset, length int) int")
Doc("emitRepeat writes a repeat chunk and returns the number of bytes written.",
@@ -2088,7 +2088,7 @@ func (o options) emitRepeat(name string, length reg.GPVirtual, offset reg.GPVirt
// 1 <= offset && offset <= math.MaxUint32
// 4 <= length && length <= 1 << 24

- // genEmitCopy generates a standlone emitCopy
+ // genEmitCopy generates a standalone emitCopy
func (o options) genEmitCopy() {
TEXT("emitCopy", NOSPLIT, "func(dst []byte, offset, length int) int")
Doc("emitCopy writes a copy chunk and returns the number of bytes written.", "",
@@ -2118,7 +2118,7 @@ func (o options) genEmitCopy() {
// 1 <= offset && offset <= math.MaxUint32
// 4 <= length && length <= 1 << 24

- // genEmitCopy generates a standlone emitCopy
+ // genEmitCopy generates a standalone emitCopy
func (o options) genEmitCopyNoRepeat() {
TEXT("emitCopyNoRepeat", NOSPLIT, "func(dst []byte, offset, length int) int")
Doc("emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.", "",
2 changes: 1 addition & 1 deletion s2/cmd/internal/readahead/reader.go
@@ -411,7 +411,7 @@ func (a *seekable) Seek(offset int64, whence int) (res int64, err error) {
}
//Seek the actual Seeker
if res, err = seeker.Seek(offset, whence); err == nil {
- //If the seek was successful, reinitalize ourselves (with the new position).
+ //If the seek was successful, reinitialize ourselves (with the new position).
a.initBuffers(a.in, a.bufs, a.size)
}
return
2 changes: 1 addition & 1 deletion s2/cmd/s2c/main.go
@@ -247,7 +247,7 @@ Options:`)
fmt.Printf(" %d -> %d [%.02f%%]; %v, %.01fMB/s", len(compressed), len(decomp), pct, ms, mbpersec)
}
if !bytes.Equal(decomp, b) {
exitErr(fmt.Errorf("decompresed data mismatch"))
exitErr(fmt.Errorf("decompressed data mismatch"))
}
if !*quiet {
fmt.Print("... Verified ok.")
2 changes: 1 addition & 1 deletion s2/decode_test.go
@@ -124,7 +124,7 @@ func TestDecoderMaxBlockSize(t *testing.T) {
return
}
if enc.pad > 0 && buf.Len()%enc.pad != 0 {
t.Error(fmt.Errorf("wanted size to be mutiple of %d, got size %d with remainder %d", enc.pad, buf.Len(), buf.Len()%enc.pad))
t.Error(fmt.Errorf("wanted size to be multiple of %d, got size %d with remainder %d", enc.pad, buf.Len(), buf.Len()%enc.pad))
return
}
encoded := buf.Bytes()
8 changes: 4 additions & 4 deletions s2/writer_test.go
@@ -165,7 +165,7 @@ func TestEncoderRegression(t *testing.T) {
}
comp := buf.Bytes()
if enc.pad > 0 && len(comp)%enc.pad != 0 {
t.Error(fmt.Errorf("wanted size to be mutiple of %d, got size %d with remainder %d", enc.pad, len(comp), len(comp)%enc.pad))
t.Error(fmt.Errorf("wanted size to be multiple of %d, got size %d with remainder %d", enc.pad, len(comp), len(comp)%enc.pad))
return
}
var got []byte
@@ -203,7 +203,7 @@ func TestEncoderRegression(t *testing.T) {
return
}
if enc.pad > 0 && buf.Len()%enc.pad != 0 {
t.Error(fmt.Errorf("wanted size to be mutiple of %d, got size %d with remainder %d", enc.pad, buf.Len(), buf.Len()%enc.pad))
t.Error(fmt.Errorf("wanted size to be multiple of %d, got size %d with remainder %d", enc.pad, buf.Len(), buf.Len()%enc.pad))
return
}
if !strings.Contains(name, "-snappy") {
@@ -433,7 +433,7 @@ func TestWriterPadding(t *testing.T) {
}

if dst.Len()%padding != 0 {
t.Fatalf("wanted size to be mutiple of %d, got size %d with remainder %d", padding, dst.Len(), dst.Len()%padding)
t.Fatalf("wanted size to be multiple of %d, got size %d with remainder %d", padding, dst.Len(), dst.Len()%padding)
}
var got bytes.Buffer
d.Reset(&dst)
@@ -457,7 +457,7 @@ func TestWriterPadding(t *testing.T) {
t.Fatal(err)
}
if dst.Len()%padding != 0 {
t.Fatalf("wanted size to be mutiple of %d, got size %d with remainder %d", padding, dst.Len(), dst.Len()%padding)
t.Fatalf("wanted size to be multiple of %d, got size %d with remainder %d", padding, dst.Len(), dst.Len()%padding)
}

got.Reset()
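The padding checks in these tests verify that the encoded stream length is a multiple of the configured padding. A sketch of producing padded output with the s2 writer, assuming the `WriterPadding` option exercised by the tests:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	var dst bytes.Buffer

	// WriterPadding pads the stream so the total output length is a
	// multiple of the given size, which is what the tests above assert.
	const padding = 1024
	w := s2.NewWriter(&dst, s2.WriterPadding(padding))
	w.Write([]byte("some payload"))
	if err := w.Close(); err != nil {
		panic(err)
	}

	fmt.Println("size:", dst.Len(), "remainder:", dst.Len()%padding)
}
```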
2 changes: 1 addition & 1 deletion zstd/_generate/gen.go
@@ -220,7 +220,7 @@ func (o options) generateBody(name string, executeSingleTriple func(ctx *execute
ADDQ(tmp, ec.histBasePtr) // Note: we always copy from &hist[len(hist) - v]
}

Comment("Calculate poiter to s.out[cap(s.out)] (a past-end pointer)")
Comment("Calculate pointer to s.out[cap(s.out)] (a past-end pointer)")
ADDQ(ec.outBase, ec.outEndPtr)

Comment("outBase += outPosition")