author     Cherry Zhang <cherryyz@google.com>   2020-10-28 09:12:20 -0400
committer  Cherry Zhang <cherryyz@google.com>   2020-10-28 09:12:20 -0400
commit     a16e30d162c1c7408db7821e7b9513cefa09c6ca (patch)
tree       af752ba9ba44c547df39bb0af9bff79f610ba9d5 /src/compress
parent     91e4d2d57bc341dd82c98247117114c851380aef (diff)
parent     cf6cfba4d5358404dd890f6025e573a4b2156543 (diff)
[dev.link] all: merge branch 'master' into dev.link
Clean merge.
Change-Id: Ia7b2808bc649790198d34c226a61d9e569084dc5
Diffstat (limited to 'src/compress')
-rw-r--r--  src/compress/bzip2/bzip2.go               |  8
-rw-r--r--  src/compress/bzip2/bzip2_test.go          |  6
-rw-r--r--  src/compress/flate/deflate_test.go        | 81
-rw-r--r--  src/compress/flate/deflatefast.go         | 11
-rw-r--r--  src/compress/flate/dict_decoder.go        |  6
-rw-r--r--  src/compress/flate/flate_test.go          |  5
-rw-r--r--  src/compress/flate/huffman_bit_writer.go  |  4
-rw-r--r--  src/compress/flate/inflate_test.go        |  3
-rw-r--r--  src/compress/flate/reader_test.go         |  4
-rw-r--r--  src/compress/flate/writer_test.go         |  9
-rw-r--r--  src/compress/gzip/gunzip_test.go          |  9
-rw-r--r--  src/compress/gzip/gzip_test.go            |  9
-rw-r--r--  src/compress/gzip/issue14937_test.go      |  3
-rw-r--r--  src/compress/lzw/reader_test.go           |  4
-rw-r--r--  src/compress/lzw/writer_test.go           | 10
-rw-r--r--  src/compress/zlib/writer_test.go          |  4
16 files changed, 116 insertions, 60 deletions
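
Most of the hunks below are a mechanical migration from the deprecated io/ioutil helpers to their io equivalents (io.ReadAll and io.Discard, available since Go 1.16). For reference, a minimal standalone sketch of the same pattern against compress/flate; it is not part of the commit, and the payload and compression level are chosen only for illustration:

// Sketch only: shows the io.ReadAll / io.Discard usage that replaces
// ioutil.ReadAll / ioutil.Discard throughout the tests in this diff.
package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"
	"log"
)

func main() {
	// Compress a small payload at level 1 (the level several benchmarks
	// in this diff pass to NewWriter).
	var buf bytes.Buffer
	zw, err := flate.NewWriter(&buf, 1)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write([]byte("hello world\n")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	compressed := buf.Bytes()

	// io.ReadAll replaces ioutil.ReadAll when the output is needed.
	data, err := io.ReadAll(flate.NewReader(bytes.NewReader(compressed)))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", data)

	// io.Discard replaces ioutil.Discard when only the byte count (or the
	// side effect of decoding) matters.
	n, err := io.Copy(io.Discard, flate.NewReader(bytes.NewReader(compressed)))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("decompressed", n, "bytes")
}
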
diff --git a/src/compress/bzip2/bzip2.go b/src/compress/bzip2/bzip2.go
index c40129b982..0d8c286c16 100644
--- a/src/compress/bzip2/bzip2.go
+++ b/src/compress/bzip2/bzip2.go
@@ -29,8 +29,8 @@ type reader struct {
 	setupDone    bool // true if we have parsed the bzip2 header.
 	blockSize    int  // blockSize in bytes, i.e. 900 * 1000.
 	eof          bool
-	c            [256]uint // the `C' array for the inverse BWT.
-	tt           []uint32  // mirrors the `tt' array in the bzip2 source and contains the P array in the upper 24 bits.
+	c            [256]uint // the ``C'' array for the inverse BWT.
+	tt           []uint32  // mirrors the ``tt'' array in the bzip2 source and contains the P array in the upper 24 bits.
 	tPos         uint32    // Index of the next output byte in tt.
 
 	preRLE      []uint32 // contains the RLE data still to be processed.
@@ -447,11 +447,11 @@ func (bz2 *reader) readBlock() (err error) {
 
 // inverseBWT implements the inverse Burrows-Wheeler transform as described in
 // http://www.hpl.hp.com/techreports/Compaq-DEC/SRC-RR-124.pdf, section 4.2.
-// In that document, origPtr is called `I' and c is the `C' array after the
+// In that document, origPtr is called ``I'' and c is the ``C'' array after the
 // first pass over the data. It's an argument here because we merge the first
 // pass with the Huffman decoding.
 //
-// This also implements the `single array' method from the bzip2 source code
+// This also implements the ``single array'' method from the bzip2 source code
 // which leaves the output, still shuffled, in the bottom 8 bits of tt with the
 // index of the next byte in the top 24-bits. The index of the first byte is
 // returned.
diff --git a/src/compress/bzip2/bzip2_test.go b/src/compress/bzip2/bzip2_test.go
index c432bb5226..98477791b3 100644
--- a/src/compress/bzip2/bzip2_test.go
+++ b/src/compress/bzip2/bzip2_test.go
@@ -133,7 +133,7 @@ func TestReader(t *testing.T) {
 
 	for i, v := range vectors {
 		rd := NewReader(bytes.NewReader(v.input))
-		buf, err := ioutil.ReadAll(rd)
+		buf, err := io.ReadAll(rd)
 
 		if fail := bool(err != nil); fail != v.fail {
 			if fail {
@@ -220,7 +220,7 @@ var (
 
 func benchmarkDecode(b *testing.B, compressed []byte) {
 	// Determine the uncompressed size of testfile.
-	uncompressedSize, err := io.Copy(ioutil.Discard, NewReader(bytes.NewReader(compressed)))
+	uncompressedSize, err := io.Copy(io.Discard, NewReader(bytes.NewReader(compressed)))
 	if err != nil {
 		b.Fatal(err)
 	}
@@ -231,7 +231,7 @@ func benchmarkDecode(b *testing.B, compressed []byte) {
 
 	for i := 0; i < b.N; i++ {
 		r := bytes.NewReader(compressed)
-		io.Copy(ioutil.Discard, NewReader(r))
+		io.Copy(io.Discard, NewReader(r))
 	}
 }
 
diff --git a/src/compress/flate/deflate_test.go b/src/compress/flate/deflate_test.go
index 49a0345fd1..6fc5abf4d5 100644
--- a/src/compress/flate/deflate_test.go
+++ b/src/compress/flate/deflate_test.go
@@ -11,6 +11,7 @@ import (
 	"internal/testenv"
 	"io"
 	"io/ioutil"
+	"math/rand"
 	"reflect"
 	"runtime/debug"
 	"sync"
@@ -156,7 +157,7 @@ func TestVeryLongSparseChunk(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping sparse chunk during short test")
 	}
-	w, err := NewWriter(ioutil.Discard, 1)
+	w, err := NewWriter(io.Discard, 1)
 	if err != nil {
 		t.Errorf("NewWriter: %v", err)
 		return
@@ -293,7 +294,7 @@ func testSync(t *testing.T, level int, input []byte, name string) {
 
 	// stream should work for ordinary reader too
 	r = NewReader(buf1)
-	out, err = ioutil.ReadAll(r)
+	out, err = io.ReadAll(r)
 	if err != nil {
 		t.Errorf("testSync: read: %s", err)
 		return
@@ -321,7 +322,7 @@ func testToFromWithLevelAndLimit(t *testing.T, level int, input []byte, name str
 		t.Logf("level: %d, size:%.2f%%, %d b\n", level, float64(buffer.Len()*100)/float64(limit), buffer.Len())
 	}
 	r := NewReader(&buffer)
-	out, err := ioutil.ReadAll(r)
+	out, err := io.ReadAll(r)
 	if err != nil {
 		t.Errorf("read: %s", err)
 		return
@@ -414,7 +415,7 @@ func TestReaderDict(t *testing.T) {
 	w.Close()
 
 	r := NewReaderDict(&b, []byte(dict))
-	data, err := ioutil.ReadAll(r)
+	data, err := io.ReadAll(r)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -455,7 +456,7 @@ func TestRegression2508(t *testing.T) {
 		t.Logf("test disabled with -short")
 		return
 	}
-	w, err := NewWriter(ioutil.Discard, 1)
+	w, err := NewWriter(io.Discard, 1)
 	if err != nil {
 		t.Fatalf("NewWriter: %v", err)
 	}
@@ -474,7 +475,7 @@ func TestWriterReset(t *testing.T) {
 		if testing.Short() && level > 1 {
 			break
 		}
-		w, err := NewWriter(ioutil.Discard, level)
+		w, err := NewWriter(io.Discard, level)
 		if err != nil {
 			t.Fatalf("NewWriter: %v", err)
 		}
@@ -486,9 +487,9 @@ func TestWriterReset(t *testing.T) {
 		for i := 0; i < n; i++ {
 			w.Write(buf)
 		}
-		w.Reset(ioutil.Discard)
+		w.Reset(io.Discard)
 
-		wref, err := NewWriter(ioutil.Discard, level)
+		wref, err := NewWriter(io.Discard, level)
 		if err != nil {
 			t.Fatalf("NewWriter: %v", err)
 		}
@@ -653,7 +654,7 @@ func TestBestSpeed(t *testing.T) {
 			}
 
 			r := NewReader(buf)
-			got, err := ioutil.ReadAll(r)
+			got, err := io.ReadAll(r)
 			if err != nil {
 				t.Errorf("i=%d, firstN=%d, flush=%t: ReadAll: %v", i, firstN, flush, err)
 				continue
@@ -880,7 +881,7 @@ func TestBestSpeedMaxMatchOffset(t *testing.T) {
 			}
 
 			r := NewReader(buf)
-			dst, err := ioutil.ReadAll(r)
+			dst, err := io.ReadAll(r)
 			r.Close()
 			if err != nil {
 				report("ReadAll: ", err)
@@ -896,6 +897,62 @@ func TestBestSpeedMaxMatchOffset(t *testing.T) {
 	}
 }
 
+func TestBestSpeedShiftOffsets(t *testing.T) {
+	// Test if shiftoffsets properly preserves matches and resets out-of-range matches
+	// seen in https://github.com/golang/go/issues/4142
+	enc := newDeflateFast()
+
+	// testData may not generate internal matches.
+	testData := make([]byte, 32)
+	rng := rand.New(rand.NewSource(0))
+	for i := range testData {
+		testData[i] = byte(rng.Uint32())
+	}
+
+	// Encode the testdata with clean state.
+	// Second part should pick up matches from the first block.
+	wantFirstTokens := len(enc.encode(nil, testData))
+	wantSecondTokens := len(enc.encode(nil, testData))
+
+	if wantFirstTokens <= wantSecondTokens {
+		t.Fatalf("test needs matches between inputs to be generated")
+	}
+	// Forward the current indicator to before wraparound.
+	enc.cur = bufferReset - int32(len(testData))
+
+	// Part 1 before wrap, should match clean state.
+	got := len(enc.encode(nil, testData))
+	if wantFirstTokens != got {
+		t.Errorf("got %d, want %d tokens", got, wantFirstTokens)
+	}
+
+	// Verify we are about to wrap.
+	if enc.cur != bufferReset {
+		t.Errorf("got %d, want e.cur to be at bufferReset (%d)", enc.cur, bufferReset)
+	}
+
+	// Part 2 should match clean state as well even if wrapped.
+	got = len(enc.encode(nil, testData))
+	if wantSecondTokens != got {
+		t.Errorf("got %d, want %d token", got, wantSecondTokens)
+	}
+
+	// Verify that we wrapped.
+	if enc.cur >= bufferReset {
+		t.Errorf("want e.cur to be < bufferReset (%d), got %d", bufferReset, enc.cur)
+	}
+
+	// Forward the current buffer, leaving the matches at the bottom.
+	enc.cur = bufferReset
+	enc.shiftOffsets()
+
+	// Ensure that no matches were picked up.
+	got = len(enc.encode(nil, testData))
+	if wantFirstTokens != got {
+		t.Errorf("got %d, want %d tokens", got, wantFirstTokens)
+	}
+}
+
 func TestMaxStackSize(t *testing.T) {
 	// This test must not run in parallel with other tests as debug.SetMaxStack
 	// affects all goroutines.
@@ -911,7 +968,7 @@ func TestMaxStackSize(t *testing.T) {
 		wg.Add(1)
 		go func(level int) {
 			defer wg.Done()
-			zw, err := NewWriter(ioutil.Discard, level)
+			zw, err := NewWriter(io.Discard, level)
 			if err != nil {
 				t.Errorf("level %d, NewWriter() = %v, want nil", level, err)
 			}
@@ -921,7 +978,7 @@ func TestMaxStackSize(t *testing.T) {
 			if err := zw.Close(); err != nil {
 				t.Errorf("level %d, Close() = %v, want nil", level, err)
 			}
-			zw.Reset(ioutil.Discard)
+			zw.Reset(io.Discard)
 		}(level)
 	}
 }
diff --git a/src/compress/flate/deflatefast.go b/src/compress/flate/deflatefast.go
index 24f8be9d5d..6aa439f13d 100644
--- a/src/compress/flate/deflatefast.go
+++ b/src/compress/flate/deflatefast.go
@@ -270,6 +270,7 @@ func (e *deflateFast) matchLen(s, t int32, src []byte) int32 {
 func (e *deflateFast) reset() {
 	e.prev = e.prev[:0]
 	// Bump the offset, so all matches will fail distance check.
+	// Nothing should be >= e.cur in the table.
 	e.cur += maxMatchOffset
 
 	// Protect against e.cur wraparound.
@@ -288,17 +289,21 @@ func (e *deflateFast) shiftOffsets() {
 		for i := range e.table[:] {
 			e.table[i] = tableEntry{}
 		}
-		e.cur = maxMatchOffset
+		e.cur = maxMatchOffset + 1
 		return
 	}
 
 	// Shift down everything in the table that isn't already too far away.
 	for i := range e.table[:] {
-		v := e.table[i].offset - e.cur + maxMatchOffset
+		v := e.table[i].offset - e.cur + maxMatchOffset + 1
 		if v < 0 {
+			// We want to reset e.cur to maxMatchOffset + 1, so we need to shift
+			// all table entries down by (e.cur - (maxMatchOffset + 1)).
+			// Because we ignore matches > maxMatchOffset, we can cap
+			// any negative offsets at 0.
 			v = 0
 		}
 		e.table[i].offset = v
 	}
-	e.cur = maxMatchOffset
+	e.cur = maxMatchOffset + 1
 }
diff --git a/src/compress/flate/dict_decoder.go b/src/compress/flate/dict_decoder.go
index 71c75a065e..3b59d48351 100644
--- a/src/compress/flate/dict_decoder.go
+++ b/src/compress/flate/dict_decoder.go
@@ -160,10 +160,8 @@ func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
 	srcPos := dstPos - dist
 
 	// Copy possibly overlapping section before destination position.
-loop:
-	dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
-	if dstPos < endPos {
-		goto loop // Avoid for-loop so that this function can be inlined
+	for dstPos < endPos {
+		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
 	}
 
 	dd.wrPos = dstPos
diff --git a/src/compress/flate/flate_test.go b/src/compress/flate/flate_test.go
index 1e45077bd5..23f4c47b03 100644
--- a/src/compress/flate/flate_test.go
+++ b/src/compress/flate/flate_test.go
@@ -12,7 +12,6 @@ import (
 	"bytes"
 	"encoding/hex"
 	"io"
-	"io/ioutil"
 	"strings"
 	"testing"
 )
@@ -243,7 +242,7 @@ func TestStreams(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
-		data, err = ioutil.ReadAll(NewReader(bytes.NewReader(data)))
+		data, err = io.ReadAll(NewReader(bytes.NewReader(data)))
 		if tc.want == "fail" {
 			if err == nil {
 				t.Errorf("#%d (%s): got nil error, want non-nil", i, tc.desc)
@@ -266,7 +265,7 @@ func TestTruncatedStreams(t *testing.T) {
 
 	for i := 0; i < len(data)-1; i++ {
 		r := NewReader(strings.NewReader(data[:i]))
-		_, err := io.Copy(ioutil.Discard, r)
+		_, err := io.Copy(io.Discard, r)
 		if err != io.ErrUnexpectedEOF {
 			t.Errorf("io.Copy(%d) on truncated stream: got %v, want %v", i, err, io.ErrUnexpectedEOF)
 		}
diff --git a/src/compress/flate/huffman_bit_writer.go b/src/compress/flate/huffman_bit_writer.go
index f111f9f592..b3ae76d082 100644
--- a/src/compress/flate/huffman_bit_writer.go
+++ b/src/compress/flate/huffman_bit_writer.go
@@ -75,7 +75,8 @@ type huffmanBitWriter struct {
 	writer io.Writer
 
 	// Data waiting to be written is bytes[0:nbytes]
-	// and then the low nbits of bits.
+	// and then the low nbits of bits. Data is always written
+	// sequentially into the bytes array.
 	bits   uint64
 	nbits  uint
 	bytes  [bufferSize]byte
@@ -105,7 +106,6 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
 func (w *huffmanBitWriter) reset(writer io.Writer) {
 	w.writer = writer
 	w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
-	w.bytes = [bufferSize]byte{}
 }
 
 func (w *huffmanBitWriter) flush() {
diff --git a/src/compress/flate/inflate_test.go b/src/compress/flate/inflate_test.go
index 951decd775..9575be1cf2 100644
--- a/src/compress/flate/inflate_test.go
+++ b/src/compress/flate/inflate_test.go
@@ -7,7 +7,6 @@ package flate
 import (
 	"bytes"
 	"io"
-	"io/ioutil"
 	"strings"
 	"testing"
 )
@@ -57,7 +56,7 @@ func TestReaderTruncated(t *testing.T) {
 	for i, v := range vectors {
 		r := strings.NewReader(v.input)
 		zr := NewReader(r)
-		b, err := ioutil.ReadAll(zr)
+		b, err := io.ReadAll(zr)
 		if err != io.ErrUnexpectedEOF {
 			t.Errorf("test %d, error mismatch: got %v, want io.ErrUnexpectedEOF", i, err)
 		}
diff --git a/src/compress/flate/reader_test.go b/src/compress/flate/reader_test.go
index 9d2943a540..eb32c89184 100644
--- a/src/compress/flate/reader_test.go
+++ b/src/compress/flate/reader_test.go
@@ -16,7 +16,7 @@ import (
 func TestNlitOutOfRange(t *testing.T) {
 	// Trying to decode this bogus flate data, which has a Huffman table
 	// with nlit=288, should not panic.
-	io.Copy(ioutil.Discard, NewReader(strings.NewReader(
+	io.Copy(io.Discard, NewReader(strings.NewReader(
 		"\xfc\xfe\x36\xe7\x5e\x1c\xef\xb3\x55\x58\x77\xb6\x56\xb5\x43\xf4"+
 		"\x6f\xf2\xd2\xe6\x3d\x99\xa0\x85\x8c\x48\xeb\xf8\xda\x83\x04\x2a"+
 		"\x75\xc4\xf8\x0f\x12\x11\xb9\xb4\x4b\x09\xa0\xbe\x8b\x91\x4c")))
@@ -54,7 +54,7 @@ func BenchmarkDecode(b *testing.B) {
 		runtime.GC()
 		b.StartTimer()
 		for i := 0; i < b.N; i++ {
-			io.Copy(ioutil.Discard, NewReader(bytes.NewReader(buf1)))
+			io.Copy(io.Discard, NewReader(bytes.NewReader(buf1)))
 		}
 	})
 }
diff --git a/src/compress/flate/writer_test.go b/src/compress/flate/writer_test.go
index 881cb71cc3..c413735cd2 100644
--- a/src/compress/flate/writer_test.go
+++ b/src/compress/flate/writer_test.go
@@ -8,7 +8,6 @@ import (
 	"bytes"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"math/rand"
 	"runtime"
 	"testing"
@@ -27,14 +26,14 @@ func BenchmarkEncode(b *testing.B) {
 			copy(buf1[i:], buf0)
 		}
 		buf0 = nil
-		w, err := NewWriter(ioutil.Discard, level)
+		w, err := NewWriter(io.Discard, level)
 		if err != nil {
 			b.Fatal(err)
 		}
 		runtime.GC()
 		b.StartTimer()
 		for i := 0; i < b.N; i++ {
-			w.Reset(ioutil.Discard)
+			w.Reset(io.Discard)
 			w.Write(buf1)
 			w.Close()
 		}
@@ -96,7 +95,7 @@ func TestWriteError(t *testing.T) {
 			t.Fatal("Level", l, "Expected an error on close")
 		}
 
-		w.Reset(ioutil.Discard)
+		w.Reset(io.Discard)
 		n2, err = w.Write([]byte{1, 2, 3, 4, 5, 6})
 		if err != nil {
 			t.Fatal("Level", l, "Got unexpected error after reset:", err)
@@ -206,7 +205,7 @@ func TestDeflateFast_Reset(t *testing.T) {
 	w.Close()
 
 	for ; offset <= 256; offset *= 2 {
		w, err := NewWriter(ioutil.Discard, level)
-		w, err := NewWriter(ioutil.Discard, level)
+		w, err := NewWriter(io.Discard, level)
 		if err != nil {
 			t.Fatalf("NewWriter: level %d: %v", level, err)
 		}
diff --git a/src/compress/gzip/gunzip_test.go b/src/compress/gzip/gunzip_test.go
index 1b01404169..17c23e8a9b 100644
--- a/src/compress/gzip/gunzip_test.go
+++ b/src/compress/gzip/gunzip_test.go
@@ -9,7 +9,6 @@ import (
 	"compress/flate"
 	"encoding/base64"
 	"io"
-	"io/ioutil"
 	"os"
 	"strings"
 	"testing"
@@ -430,7 +429,7 @@ func TestIssue6550(t *testing.T) {
 	defer gzip.Close()
 	done := make(chan bool, 1)
 	go func() {
-		_, err := io.Copy(ioutil.Discard, gzip)
+		_, err := io.Copy(io.Discard, gzip)
 		if err == nil {
 			t.Errorf("Copy succeeded")
 		} else {
@@ -467,7 +466,7 @@ Found:
 	const hello = "hello world\n"
 
 	r.Multistream(false)
-	data, err := ioutil.ReadAll(&r)
+	data, err := io.ReadAll(&r)
 	if string(data) != hello || err != nil {
 		t.Fatalf("first stream = %q, %v, want %q, %v", string(data), err, hello, nil)
 	}
@@ -476,7 +475,7 @@ Found:
 		t.Fatalf("second reset: %v", err)
 	}
 	r.Multistream(false)
-	data, err = ioutil.ReadAll(&r)
+	data, err = io.ReadAll(&r)
 	if string(data) != hello || err != nil {
 		t.Fatalf("second stream = %q, %v, want %q, %v", string(data), err, hello, nil)
 	}
@@ -507,7 +506,7 @@ func TestTruncatedStreams(t *testing.T) {
 			}
 			continue
 		}
-		_, err = io.Copy(ioutil.Discard, r)
+		_, err = io.Copy(io.Discard, r)
 		if ferr, ok := err.(*flate.ReadError); ok {
 			err = ferr.Err
 		}
diff --git a/src/compress/gzip/gzip_test.go b/src/compress/gzip/gzip_test.go
index f18c5cb454..12c8e18207 100644
--- a/src/compress/gzip/gzip_test.go
+++ b/src/compress/gzip/gzip_test.go
@@ -8,7 +8,6 @@ import (
 	"bufio"
 	"bytes"
 	"io"
-	"io/ioutil"
 	"reflect"
 	"testing"
 	"time"
@@ -29,7 +28,7 @@ func TestEmpty(t *testing.T) {
 	if want := (Header{OS: 255}); !reflect.DeepEqual(r.Header, want) {
 		t.Errorf("Header mismatch:\ngot  %#v\nwant %#v", r.Header, want)
 	}
-	b, err := ioutil.ReadAll(r)
+	b, err := io.ReadAll(r)
 	if err != nil {
 		t.Fatalf("ReadAll: %v", err)
 	}
@@ -62,7 +61,7 @@ func TestRoundTrip(t *testing.T) {
 	if err != nil {
 		t.Fatalf("NewReader: %v", err)
 	}
-	b, err := ioutil.ReadAll(r)
+	b, err := io.ReadAll(r)
 	if err != nil {
 		t.Fatalf("ReadAll: %v", err)
 	}
@@ -147,7 +146,7 @@ func TestLatin1RoundTrip(t *testing.T) {
 			t.Errorf("NewReader: %v", err)
 			continue
 		}
-		_, err = ioutil.ReadAll(r)
+		_, err = io.ReadAll(r)
 		if err != nil {
 			t.Errorf("ReadAll: %v", err)
 			continue
@@ -217,7 +216,7 @@ func TestConcat(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	data, err := ioutil.ReadAll(r)
+	data, err := io.ReadAll(r)
 	if string(data) != "hello world\n" || err != nil {
 		t.Fatalf("ReadAll = %q, %v, want %q, nil", data, err, "hello world")
 	}
diff --git a/src/compress/gzip/issue14937_test.go b/src/compress/gzip/issue14937_test.go
index 7a19672d57..24db3641aa 100644
--- a/src/compress/gzip/issue14937_test.go
+++ b/src/compress/gzip/issue14937_test.go
@@ -2,6 +2,7 @@ package gzip
 
 import (
 	"internal/testenv"
+	"io/fs"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -30,7 +31,7 @@ func TestGZIPFilesHaveZeroMTimes(t *testing.T) {
 		t.Fatal("error evaluating GOROOT: ", err)
 	}
 	var files []string
-	err = filepath.Walk(goroot, func(path string, info os.FileInfo, err error) error {
+	err = filepath.Walk(goroot, func(path string, info fs.FileInfo, err error) error {
 		if err != nil {
 			return err
 		}
diff --git a/src/compress/lzw/reader_test.go b/src/compress/lzw/reader_test.go
index 98bbfbb763..6d91dd806f 100644
--- a/src/compress/lzw/reader_test.go
+++ b/src/compress/lzw/reader_test.go
@@ -206,7 +206,7 @@ func TestNoLongerSavingPriorExpansions(t *testing.T) {
 	in = append(in, 0x80, 0xff, 0x0f, 0x08)
 
 	r := NewReader(bytes.NewReader(in), LSB, 8)
-	nDecoded, err := io.Copy(ioutil.Discard, r)
+	nDecoded, err := io.Copy(io.Discard, r)
 	if err != nil {
 		t.Fatalf("Copy: %v", err)
 	}
@@ -246,7 +246,7 @@ func BenchmarkDecoder(b *testing.B) {
 			runtime.GC()
 			b.StartTimer()
 			for i := 0; i < b.N; i++ {
-				io.Copy(ioutil.Discard, NewReader(bytes.NewReader(buf1), LSB, 8))
+				io.Copy(io.Discard, NewReader(bytes.NewReader(buf1), LSB, 8))
 			}
 		})
 	}
diff --git a/src/compress/lzw/writer_test.go b/src/compress/lzw/writer_test.go
index 4979f8b352..33a28bdd3a 100644
--- a/src/compress/lzw/writer_test.go
+++ b/src/compress/lzw/writer_test.go
@@ -67,8 +67,8 @@ func testFile(t *testing.T, fn string, order Order, litWidth int) {
 	defer lzwr.Close()
 
 	// Compare the two.
-	b0, err0 := ioutil.ReadAll(golden)
-	b1, err1 := ioutil.ReadAll(lzwr)
+	b0, err0 := io.ReadAll(golden)
+	b1, err1 := io.ReadAll(lzwr)
 	if err0 != nil {
 		t.Errorf("%s (order=%d litWidth=%d): %v", fn, order, litWidth, err0)
 		return
@@ -107,7 +107,7 @@ func TestWriter(t *testing.T) {
 }
 
 func TestWriterReturnValues(t *testing.T) {
-	w := NewWriter(ioutil.Discard, LSB, 8)
+	w := NewWriter(io.Discard, LSB, 8)
 	n, err := w.Write([]byte("asdf"))
 	if n != 4 || err != nil {
 		t.Errorf("got %d, %v, want 4, nil", n, err)
@@ -115,7 +115,7 @@ func TestWriterReturnValues(t *testing.T) {
 }
 
 func TestSmallLitWidth(t *testing.T) {
-	w := NewWriter(ioutil.Discard, LSB, 2)
+	w := NewWriter(io.Discard, LSB, 2)
 	if _, err := w.Write([]byte{0x03}); err != nil {
 		t.Fatalf("write a byte < 1<<2: %v", err)
 	}
@@ -148,7 +148,7 @@ func BenchmarkEncoder(b *testing.B) {
 		b.Run(fmt.Sprint("1e", e), func(b *testing.B) {
 			b.SetBytes(int64(n))
 			for i := 0; i < b.N; i++ {
-				w := NewWriter(ioutil.Discard, LSB, 8)
+				w := NewWriter(io.Discard, LSB, 8)
 				w.Write(buf1)
 				w.Close()
 			}
diff --git a/src/compress/zlib/writer_test.go b/src/compress/zlib/writer_test.go
index d501974078..c518729146 100644
--- a/src/compress/zlib/writer_test.go
+++ b/src/compress/zlib/writer_test.go
@@ -34,7 +34,7 @@ func testFileLevelDict(t *testing.T, fn string, level int, d string) {
 		return
 	}
 	defer golden.Close()
-	b0, err0 := ioutil.ReadAll(golden)
+	b0, err0 := io.ReadAll(golden)
 	if err0 != nil {
 		t.Errorf("%s (level=%d, dict=%q): %v", fn, level, d, err0)
 		return
@@ -74,7 +74,7 @@ func testLevelDict(t *testing.T, fn string, b0 []byte, level int, d string) {
 	defer zlibr.Close()
 
 	// Compare the decompressed data.
-	b1, err1 := ioutil.ReadAll(zlibr)
+	b1, err1 := io.ReadAll(zlibr)
 	if err1 != nil {
 		t.Errorf("%s (level=%d, dict=%q): %v", fn, level, d, err1)
 		return
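
The deflatefast.go hunk above rewinds e.cur to maxMatchOffset+1 and rebases every stored table offset by the same amount, clamping anything that falls out of range to zero. A standalone sketch of that rebasing step follows; it mirrors the patched logic but is not the unexported deflateFast implementation, and the maxMatchOffset value is assumed here:

// Illustrative sketch only: rebasing stored offsets when the running
// position counter is rewound, as in the shiftOffsets change above.
package main

import "fmt"

const maxMatchOffset = 1 << 15 // assumed value; the real constant lives in deflatefast.go

type tableEntry struct{ offset int32 }

// shiftOffsets rewinds cur to maxMatchOffset+1 and shifts every table entry
// down by the same amount, so in-range matches keep their relative distance.
func shiftOffsets(table []tableEntry, cur int32) int32 {
	for i := range table {
		v := table[i].offset - cur + maxMatchOffset + 1
		if v < 0 {
			// Matches farther back than maxMatchOffset are ignored anyway,
			// so clamping stale entries to 0 loses nothing.
			v = 0
		}
		table[i].offset = v
	}
	return maxMatchOffset + 1
}

func main() {
	cur := int32(1 << 20) // some large running position
	table := []tableEntry{
		{offset: cur - 10},                  // recent: survives the shift at distance 10
		{offset: cur - maxMatchOffset - 50}, // too old: clamped to 0
	}
	cur = shiftOffsets(table, cur)
	fmt.Println(cur, table[0].offset, table[1].offset)
}
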
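Similarly, the dict_decoder.go hunk replaces a goto-based loop with a plain for loop that grows an overlapping copy until the requested length is filled. A self-contained sketch of that overlapping-copy behaviour, with hypothetical names (writeCopy, hist) standing in for the unexported dictDecoder internals:

// Illustrative sketch only: repeated copy calls extend an overlapping
// LZ77-style back-reference where dist < length.
package main

import "fmt"

// writeCopy copies length bytes starting dist bytes back from dstPos,
// allowing the source and destination ranges to overlap.
func writeCopy(hist []byte, dstPos, dist, length int) int {
	endPos := dstPos + length
	srcPos := dstPos - dist
	for dstPos < endPos {
		// copy never reads past dstPos, so each pass at most doubles the
		// amount of replicated data until endPos is reached.
		dstPos += copy(hist[dstPos:endPos], hist[srcPos:dstPos])
	}
	return dstPos
}

func main() {
	hist := make([]byte, 16)
	copy(hist, "ab") // seed two bytes
	end := writeCopy(hist, 2, 2, 10)
	fmt.Printf("%q\n", hist[:end]) // "abababababab"
}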