replace zxq.co/ripple/hanayo

Alicia
2019-02-23 13:29:15 +00:00
commit c3d206c173
5871 changed files with 1353715 additions and 0 deletions


@@ -0,0 +1,128 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gzip_test
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"log"
"os"
"time"
)
func Example_writerReader() {
var buf bytes.Buffer
zw := gzip.NewWriter(&buf)
// Setting the Header fields is optional.
zw.Name = "a-new-hope.txt"
zw.Comment = "an epic space opera by George Lucas"
zw.ModTime = time.Date(1977, time.May, 25, 0, 0, 0, 0, time.UTC)
_, err := zw.Write([]byte("A long time ago in a galaxy far, far away..."))
if err != nil {
log.Fatal(err)
}
if err := zw.Close(); err != nil {
log.Fatal(err)
}
zr, err := gzip.NewReader(&buf)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Name: %s\nComment: %s\nModTime: %s\n\n", zr.Name, zr.Comment, zr.ModTime.UTC())
if _, err := io.Copy(os.Stdout, zr); err != nil {
log.Fatal(err)
}
if err := zr.Close(); err != nil {
log.Fatal(err)
}
// Output:
// Name: a-new-hope.txt
// Comment: an epic space opera by George Lucas
// ModTime: 1977-05-25 00:00:00 +0000 UTC
//
// A long time ago in a galaxy far, far away...
}
func ExampleReader_Multistream() {
var buf bytes.Buffer
zw := gzip.NewWriter(&buf)
var files = []struct {
name string
comment string
modTime time.Time
data string
}{
{"file-1.txt", "file-header-1", time.Date(2006, time.February, 1, 3, 4, 5, 0, time.UTC), "Hello Gophers - 1"},
{"file-2.txt", "file-header-2", time.Date(2007, time.March, 2, 4, 5, 6, 1, time.UTC), "Hello Gophers - 2"},
}
for _, file := range files {
zw.Name = file.name
zw.Comment = file.comment
zw.ModTime = file.modTime
if _, err := zw.Write([]byte(file.data)); err != nil {
log.Fatal(err)
}
if err := zw.Close(); err != nil {
log.Fatal(err)
}
zw.Reset(&buf)
}
zr, err := gzip.NewReader(&buf)
if err != nil {
log.Fatal(err)
}
for {
zr.Multistream(false)
fmt.Printf("Name: %s\nComment: %s\nModTime: %s\n\n", zr.Name, zr.Comment, zr.ModTime.UTC())
if _, err := io.Copy(os.Stdout, zr); err != nil {
log.Fatal(err)
}
fmt.Println("\n")
err = zr.Reset(&buf)
if err == io.EOF {
break
}
if err != nil {
log.Fatal(err)
}
}
if err := zr.Close(); err != nil {
log.Fatal(err)
}
// Output:
// Name: file-1.txt
// Comment: file-header-1
// ModTime: 2006-02-01 03:04:05 +0000 UTC
//
// Hello Gophers - 1
//
// Name: file-2.txt
// Comment: file-header-2
// ModTime: 2007-03-02 04:05:06 +0000 UTC
//
// Hello Gophers - 2
}

vendor/github.com/klauspost/compress/gzip/gunzip.go generated vendored Normal file

@@ -0,0 +1,344 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gzip implements reading and writing of gzip format compressed files,
// as specified in RFC 1952.
package gzip
import (
"bufio"
"encoding/binary"
"errors"
"io"
"time"
"github.com/klauspost/compress/flate"
"github.com/klauspost/crc32"
)
const (
gzipID1 = 0x1f
gzipID2 = 0x8b
gzipDeflate = 8
flagText = 1 << 0
flagHdrCrc = 1 << 1
flagExtra = 1 << 2
flagName = 1 << 3
flagComment = 1 << 4
)
var (
// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
ErrChecksum = errors.New("gzip: invalid checksum")
// ErrHeader is returned when reading GZIP data that has an invalid header.
ErrHeader = errors.New("gzip: invalid header")
)
var le = binary.LittleEndian
// noEOF converts io.EOF to io.ErrUnexpectedEOF.
func noEOF(err error) error {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
// The gzip file stores a header giving metadata about the compressed file.
// That header is exposed as the fields of the Writer and Reader structs.
//
// Strings must be UTF-8 encoded and may only contain Unicode code points
// U+0001 through U+00FF, due to limitations of the GZIP file format.
type Header struct {
Comment string // comment
Extra []byte // "extra data"
ModTime time.Time // modification time
Name string // file name
OS byte // operating system type
}
// A Reader is an io.Reader that can be read to retrieve
// uncompressed data from a gzip-format compressed file.
//
// In general, a gzip file can be a concatenation of gzip files,
// each with its own header. Reads from the Reader
// return the concatenation of the uncompressed data of each.
// Only the first header is recorded in the Reader fields.
//
// Gzip files store a length and checksum of the uncompressed data.
// The Reader will return an ErrChecksum when Read
// reaches the end of the uncompressed data if it does not
// have the expected length or checksum. Clients should treat data
// returned by Read as tentative until they receive the io.EOF
// marking the end of the data.
type Reader struct {
Header // valid after NewReader or Reader.Reset
r flate.Reader
decompressor io.ReadCloser
digest uint32 // CRC-32, IEEE polynomial (section 8)
size uint32 // Uncompressed size (section 2.3.1)
buf [512]byte
err error
multistream bool
}
// NewReader creates a new Reader reading the given reader.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
//
// It is the caller's responsibility to call Close on the Reader when done.
//
// The Reader.Header fields will be valid in the Reader returned.
func NewReader(r io.Reader) (*Reader, error) {
z := new(Reader)
if err := z.Reset(r); err != nil {
return nil, err
}
return z, nil
}
// Reset discards the Reader z's state and makes it equivalent to the
// result of its original state from NewReader, but reading from r instead.
// This permits reusing a Reader rather than allocating a new one.
func (z *Reader) Reset(r io.Reader) error {
*z = Reader{
decompressor: z.decompressor,
multistream: true,
}
if rr, ok := r.(flate.Reader); ok {
z.r = rr
} else {
z.r = bufio.NewReader(r)
}
z.Header, z.err = z.readHeader()
return z.err
}
// Multistream controls whether the reader supports multistream files.
//
// If enabled (the default), the Reader expects the input to be a sequence
// of individually gzipped data streams, each with its own header and
// trailer, ending at EOF. The effect is that the concatenation of a sequence
// of gzipped files is treated as equivalent to the gzip of the concatenation
// of the sequence. This is standard behavior for gzip readers.
//
// Calling Multistream(false) disables this behavior; disabling the behavior
// can be useful when reading file formats that distinguish individual gzip
// data streams or mix gzip data streams with other data streams.
// In this mode, when the Reader reaches the end of the data stream,
// Read returns io.EOF. If the underlying reader implements io.ByteReader,
// it will be left positioned just after the gzip stream.
// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
// If there is no next stream, z.Reset(r) will return io.EOF.
func (z *Reader) Multistream(ok bool) {
z.multistream = ok
}
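// Illustrative sketch (not part of the vendored file): the pattern described
// in the Multistream comment above, for a format that embeds a single gzip
// member inside a larger stream. Because *bufio.Reader implements
// io.ByteReader, br is left positioned just after the gzip data once Read
// returns io.EOF. The helper name readOneMember is an assumption.
func readOneMember(br *bufio.Reader, dst io.Writer) error {
    zr, err := NewReader(br)
    if err != nil {
        return err
    }
    zr.Multistream(false)
    if _, err := io.Copy(dst, zr); err != nil {
        return err
    }
    // br now points at the first byte after the gzip stream; the caller can
    // continue parsing the surrounding format from here.
    return zr.Close()
}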
// readString reads a NUL-terminated string from z.r.
// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and
// will output a string encoded using UTF-8.
// This method always updates z.digest with the data read.
func (z *Reader) readString() (string, error) {
var err error
needConv := false
for i := 0; ; i++ {
if i >= len(z.buf) {
return "", ErrHeader
}
z.buf[i], err = z.r.ReadByte()
if err != nil {
return "", err
}
if z.buf[i] > 0x7f {
needConv = true
}
if z.buf[i] == 0 {
// Digest covers the NUL terminator.
z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1])
// Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1).
if needConv {
s := make([]rune, 0, i)
for _, v := range z.buf[:i] {
s = append(s, rune(v))
}
return string(s), nil
}
return string(z.buf[:i]), nil
}
}
}
// readHeader reads the GZIP header according to section 2.3.1.
// This method does not set z.err.
func (z *Reader) readHeader() (hdr Header, err error) {
if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil {
// RFC 1952, section 2.2, says the following:
// A gzip file consists of a series of "members" (compressed data sets).
//
// Other than this, the specification does not clarify whether a
// "series" is defined as "one or more" or "zero or more". To err on the
// side of caution, Go interprets this to mean "zero or more".
// Thus, it is okay to return io.EOF here.
return hdr, err
}
if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
return hdr, ErrHeader
}
flg := z.buf[3]
hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0)
// z.buf[8] is XFL and is currently ignored.
hdr.OS = z.buf[9]
z.digest = crc32.ChecksumIEEE(z.buf[:10])
if flg&flagExtra != 0 {
if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
return hdr, noEOF(err)
}
z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2])
data := make([]byte, le.Uint16(z.buf[:2]))
if _, err = io.ReadFull(z.r, data); err != nil {
return hdr, noEOF(err)
}
z.digest = crc32.Update(z.digest, crc32.IEEETable, data)
hdr.Extra = data
}
var s string
if flg&flagName != 0 {
if s, err = z.readString(); err != nil {
return hdr, err
}
hdr.Name = s
}
if flg&flagComment != 0 {
if s, err = z.readString(); err != nil {
return hdr, err
}
hdr.Comment = s
}
if flg&flagHdrCrc != 0 {
if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
return hdr, noEOF(err)
}
digest := le.Uint16(z.buf[:2])
if digest != uint16(z.digest) {
return hdr, ErrHeader
}
}
z.digest = 0
if z.decompressor == nil {
z.decompressor = flate.NewReader(z.r)
} else {
z.decompressor.(flate.Resetter).Reset(z.r, nil)
}
return hdr, nil
}
// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
func (z *Reader) Read(p []byte) (n int, err error) {
if z.err != nil {
return 0, z.err
}
n, z.err = z.decompressor.Read(p)
z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
z.size += uint32(n)
if z.err != io.EOF {
// In the normal case we return here.
return n, z.err
}
// Finished file; check checksum and size.
if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
z.err = noEOF(err)
return n, z.err
}
digest := le.Uint32(z.buf[:4])
size := le.Uint32(z.buf[4:8])
if digest != z.digest || size != z.size {
z.err = ErrChecksum
return n, z.err
}
z.digest, z.size = 0, 0
// File is ok; check if there is another.
if !z.multistream {
return n, io.EOF
}
z.err = nil // Remove io.EOF
if _, z.err = z.readHeader(); z.err != nil {
return n, z.err
}
// Read from next file, if necessary.
if n > 0 {
return n, nil
}
return z.Read(p)
}
// Support the io.WriteTo interface for io.Copy and friends.
func (z *Reader) WriteTo(w io.Writer) (int64, error) {
total := int64(0)
crcWriter := crc32.NewIEEE()
for {
if z.err != nil {
if z.err == io.EOF {
return total, nil
}
return total, z.err
}
// We write both to output and digest.
mw := io.MultiWriter(w, crcWriter)
n, err := z.decompressor.(io.WriterTo).WriteTo(mw)
total += n
z.size += uint32(n)
if err != nil {
z.err = err
return total, z.err
}
// Finished file; check checksum + size.
if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
z.err = err
return total, err
}
z.digest = crcWriter.Sum32()
digest := le.Uint32(z.buf[:4])
size := le.Uint32(z.buf[4:8])
if digest != z.digest || size != z.size {
z.err = ErrChecksum
return total, z.err
}
z.digest, z.size = 0, 0
// File is ok; check if there is another.
if !z.multistream {
return total, nil
}
crcWriter.Reset()
z.err = nil // Remove io.EOF
if _, z.err = z.readHeader(); z.err != nil {
if z.err == io.EOF {
return total, nil
}
return total, z.err
}
}
}
// Close closes the Reader. It does not close the underlying io.Reader.
// In order for the GZIP checksum to be verified, the reader must be
// fully consumed until the io.EOF.
func (z *Reader) Close() error { return z.decompressor.Close() }
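// Illustrative sketch (not part of the vendored file): as the Close comment
// notes, the trailer checksum is only verified once Read has returned io.EOF,
// so a caller that wants verification must drain the Reader before closing
// it. The helper name verifyAndClose is an assumption.
func verifyAndClose(zr *Reader, dst io.Writer) error {
    if _, err := io.Copy(dst, zr); err != nil {
        return err // ErrChecksum, if any, is reported here
    }
    return zr.Close()
}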


@@ -0,0 +1,682 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gzip
import (
"bytes"
oldgz "compress/gzip"
"crypto/rand"
"io"
"io/ioutil"
"os"
"strings"
"testing"
"time"
"github.com/klauspost/compress/flate"
)
type gunzipTest struct {
name string
desc string
raw string
gzip []byte
err error
}
var gunzipTests = []gunzipTest{
{ // has 1 empty fixed-huffman block
"empty.txt",
"empty.txt",
"",
[]byte{
0x1f, 0x8b, 0x08, 0x08, 0xf7, 0x5e, 0x14, 0x4a,
0x00, 0x03, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e,
0x74, 0x78, 0x74, 0x00, 0x03, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
nil,
},
{
"",
"empty - with no file name",
"",
[]byte{
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88,
0x00, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
nil,
},
{ // has 1 non-empty fixed huffman block
"hello.txt",
"hello.txt",
"hello world\n",
[]byte{
0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a,
0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e,
0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9,
0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1,
0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00,
0x00, 0x00,
},
nil,
},
{ // concatenation
"hello.txt",
"hello.txt x2",
"hello world\n" +
"hello world\n",
[]byte{
0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a,
0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e,
0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9,
0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1,
0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00,
0x00, 0x00,
0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a,
0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e,
0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9,
0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1,
0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00,
0x00, 0x00,
},
nil,
},
{ // has a fixed huffman block with some length-distance pairs
"shesells.txt",
"shesells.txt",
"she sells seashells by the seashore\n",
[]byte{
0x1f, 0x8b, 0x08, 0x08, 0x72, 0x66, 0x8b, 0x4a,
0x00, 0x03, 0x73, 0x68, 0x65, 0x73, 0x65, 0x6c,
0x6c, 0x73, 0x2e, 0x74, 0x78, 0x74, 0x00, 0x2b,
0xce, 0x48, 0x55, 0x28, 0x4e, 0xcd, 0xc9, 0x29,
0x06, 0x92, 0x89, 0xc5, 0x19, 0x60, 0x56, 0x52,
0xa5, 0x42, 0x09, 0x58, 0x18, 0x28, 0x90, 0x5f,
0x94, 0xca, 0x05, 0x00, 0x76, 0xb0, 0x3b, 0xeb,
0x24, 0x00, 0x00, 0x00,
},
nil,
},
{ // has dynamic huffman blocks
"gettysburg",
"gettysburg",
" Four score and seven years ago our fathers brought forth on\n" +
"this continent, a new nation, conceived in Liberty, and dedicated\n" +
"to the proposition that all men are created equal.\n" +
" Now we are engaged in a great Civil War, testing whether that\n" +
"nation, or any nation so conceived and so dedicated, can long\n" +
"endure.\n" +
" We are met on a great battle-field of that war.\n" +
" We have come to dedicate a portion of that field, as a final\n" +
"resting place for those who here gave their lives that that\n" +
"nation might live. It is altogether fitting and proper that\n" +
"we should do this.\n" +
" But, in a larger sense, we can not dedicate — we can not\n" +
"consecrate — we can not hallow — this ground.\n" +
" The brave men, living and dead, who struggled here, have\n" +
"consecrated it, far above our poor power to add or detract.\n" +
"The world will little note, nor long remember what we say here,\n" +
"but it can never forget what they did here.\n" +
" It is for us the living, rather, to be dedicated here to the\n" +
"unfinished work which they who fought here have thus far so\n" +
"nobly advanced. It is rather for us to be here dedicated to\n" +
"the great task remaining before us — that from these honored\n" +
"dead we take increased devotion to that cause for which they\n" +
"gave the last full measure of devotion —\n" +
" that we here highly resolve that these dead shall not have\n" +
"died in vain — that this nation, under God, shall have a new\n" +
"birth of freedom — and that government of the people, by the\n" +
"people, for the people, shall not perish from this earth.\n" +
"\n" +
"Abraham Lincoln, November 19, 1863, Gettysburg, Pennsylvania\n",
[]byte{
0x1f, 0x8b, 0x08, 0x08, 0xd1, 0x12, 0x2b, 0x4a,
0x00, 0x03, 0x67, 0x65, 0x74, 0x74, 0x79, 0x73,
0x62, 0x75, 0x72, 0x67, 0x00, 0x65, 0x54, 0xcd,
0x6e, 0xd4, 0x30, 0x10, 0xbe, 0xfb, 0x29, 0xe6,
0x01, 0x42, 0xa5, 0x0a, 0x09, 0xc1, 0x11, 0x90,
0x40, 0x48, 0xa8, 0xe2, 0x80, 0xd4, 0xf3, 0x24,
0x9e, 0x24, 0x56, 0xbd, 0x9e, 0xc5, 0x76, 0x76,
0x95, 0x1b, 0x0f, 0xc1, 0x13, 0xf2, 0x24, 0x7c,
0x63, 0x77, 0x9b, 0x4a, 0x5c, 0xaa, 0x6e, 0x6c,
0xcf, 0x7c, 0x7f, 0x33, 0x44, 0x5f, 0x74, 0xcb,
0x54, 0x26, 0xcd, 0x42, 0x9c, 0x3c, 0x15, 0xb9,
0x48, 0xa2, 0x5d, 0x38, 0x17, 0xe2, 0x45, 0xc9,
0x4e, 0x67, 0xae, 0xab, 0xe0, 0xf7, 0x98, 0x75,
0x5b, 0xd6, 0x4a, 0xb3, 0xe6, 0xba, 0x92, 0x26,
0x57, 0xd7, 0x50, 0x68, 0xd2, 0x54, 0x43, 0x92,
0x54, 0x07, 0x62, 0x4a, 0x72, 0xa5, 0xc4, 0x35,
0x68, 0x1a, 0xec, 0x60, 0x92, 0x70, 0x11, 0x4f,
0x21, 0xd1, 0xf7, 0x30, 0x4a, 0xae, 0xfb, 0xd0,
0x9a, 0x78, 0xf1, 0x61, 0xe2, 0x2a, 0xde, 0x55,
0x25, 0xd4, 0xa6, 0x73, 0xd6, 0xb3, 0x96, 0x60,
0xef, 0xf0, 0x9b, 0x2b, 0x71, 0x8c, 0x74, 0x02,
0x10, 0x06, 0xac, 0x29, 0x8b, 0xdd, 0x25, 0xf9,
0xb5, 0x71, 0xbc, 0x73, 0x44, 0x0f, 0x7a, 0xa5,
0xab, 0xb4, 0x33, 0x49, 0x0b, 0x2f, 0xbd, 0x03,
0xd3, 0x62, 0x17, 0xe9, 0x73, 0xb8, 0x84, 0x48,
0x8f, 0x9c, 0x07, 0xaa, 0x52, 0x00, 0x6d, 0xa1,
0xeb, 0x2a, 0xc6, 0xa0, 0x95, 0x76, 0x37, 0x78,
0x9a, 0x81, 0x65, 0x7f, 0x46, 0x4b, 0x45, 0x5f,
0xe1, 0x6d, 0x42, 0xe8, 0x01, 0x13, 0x5c, 0x38,
0x51, 0xd4, 0xb4, 0x38, 0x49, 0x7e, 0xcb, 0x62,
0x28, 0x1e, 0x3b, 0x82, 0x93, 0x54, 0x48, 0xf1,
0xd2, 0x7d, 0xe4, 0x5a, 0xa3, 0xbc, 0x99, 0x83,
0x44, 0x4f, 0x3a, 0x77, 0x36, 0x57, 0xce, 0xcf,
0x2f, 0x56, 0xbe, 0x80, 0x90, 0x9e, 0x84, 0xea,
0x51, 0x1f, 0x8f, 0xcf, 0x90, 0xd4, 0x60, 0xdc,
0x5e, 0xb4, 0xf7, 0x10, 0x0b, 0x26, 0xe0, 0xff,
0xc4, 0xd1, 0xe5, 0x67, 0x2e, 0xe7, 0xc8, 0x93,
0x98, 0x05, 0xb8, 0xa8, 0x45, 0xc0, 0x4d, 0x09,
0xdc, 0x84, 0x16, 0x2b, 0x0d, 0x9a, 0x21, 0x53,
0x04, 0x8b, 0xd2, 0x0b, 0xbd, 0xa2, 0x4c, 0xa7,
0x60, 0xee, 0xd9, 0xe1, 0x1d, 0xd1, 0xb7, 0x4a,
0x30, 0x8f, 0x63, 0xd5, 0xa5, 0x8b, 0x33, 0x87,
0xda, 0x1a, 0x18, 0x79, 0xf3, 0xe3, 0xa6, 0x17,
0x94, 0x2e, 0xab, 0x6e, 0xa0, 0xe3, 0xcd, 0xac,
0x50, 0x8c, 0xca, 0xa7, 0x0d, 0x76, 0x37, 0xd1,
0x23, 0xe7, 0x05, 0x57, 0x8b, 0xa4, 0x22, 0x83,
0xd9, 0x62, 0x52, 0x25, 0xad, 0x07, 0xbb, 0xbf,
0xbf, 0xff, 0xbc, 0xfa, 0xee, 0x20, 0x73, 0x91,
0x29, 0xff, 0x7f, 0x02, 0x71, 0x62, 0x84, 0xb5,
0xf6, 0xb5, 0x25, 0x6b, 0x41, 0xde, 0x92, 0xb7,
0x76, 0x3f, 0x91, 0x91, 0x31, 0x1b, 0x41, 0x84,
0x62, 0x30, 0x0a, 0x37, 0xa4, 0x5e, 0x18, 0x3a,
0x99, 0x08, 0xa5, 0xe6, 0x6d, 0x59, 0x22, 0xec,
0x33, 0x39, 0x86, 0x26, 0xf5, 0xab, 0x66, 0xc8,
0x08, 0x20, 0xcf, 0x0c, 0xd7, 0x47, 0x45, 0x21,
0x0b, 0xf6, 0x59, 0xd5, 0xfe, 0x5c, 0x8d, 0xaa,
0x12, 0x7b, 0x6f, 0xa1, 0xf0, 0x52, 0x33, 0x4f,
0xf5, 0xce, 0x59, 0xd3, 0xab, 0x66, 0x10, 0xbf,
0x06, 0xc4, 0x31, 0x06, 0x73, 0xd6, 0x80, 0xa2,
0x78, 0xc2, 0x45, 0xcb, 0x03, 0x65, 0x39, 0xc9,
0x09, 0xd1, 0x06, 0x04, 0x33, 0x1a, 0x5a, 0xf1,
0xde, 0x01, 0xb8, 0x71, 0x83, 0xc4, 0xb5, 0xb3,
0xc3, 0x54, 0x65, 0x33, 0x0d, 0x5a, 0xf7, 0x9b,
0x90, 0x7c, 0x27, 0x1f, 0x3a, 0x58, 0xa3, 0xd8,
0xfd, 0x30, 0x5f, 0xb7, 0xd2, 0x66, 0xa2, 0x93,
0x1c, 0x28, 0xb7, 0xe9, 0x1b, 0x0c, 0xe1, 0x28,
0x47, 0x26, 0xbb, 0xe9, 0x7d, 0x7e, 0xdc, 0x96,
0x10, 0x92, 0x50, 0x56, 0x7c, 0x06, 0xe2, 0x27,
0xb4, 0x08, 0xd3, 0xda, 0x7b, 0x98, 0x34, 0x73,
0x9f, 0xdb, 0xf6, 0x62, 0xed, 0x31, 0x41, 0x13,
0xd3, 0xa2, 0xa8, 0x4b, 0x3a, 0xc6, 0x1d, 0xe4,
0x2f, 0x8c, 0xf8, 0xfb, 0x97, 0x64, 0xf4, 0xb6,
0x2f, 0x80, 0x5a, 0xf3, 0x56, 0xe0, 0x40, 0x50,
0xd5, 0x19, 0xd0, 0x1e, 0xfc, 0xca, 0xe5, 0xc9,
0xd4, 0x60, 0x00, 0x81, 0x2e, 0xa3, 0xcc, 0xb6,
0x52, 0xf0, 0xb4, 0xdb, 0x69, 0x99, 0xce, 0x7a,
0x32, 0x4c, 0x08, 0xed, 0xaa, 0x10, 0x10, 0xe3,
0x6f, 0xee, 0x99, 0x68, 0x95, 0x9f, 0x04, 0x71,
0xb2, 0x49, 0x2f, 0x62, 0xa6, 0x5e, 0xb4, 0xef,
0x02, 0xed, 0x4f, 0x27, 0xde, 0x4a, 0x0f, 0xfd,
0xc1, 0xcc, 0xdd, 0x02, 0x8f, 0x08, 0x16, 0x54,
0xdf, 0xda, 0xca, 0xe0, 0x82, 0xf1, 0xb4, 0x31,
0x7a, 0xa9, 0x81, 0xfe, 0x90, 0xb7, 0x3e, 0xdb,
0xd3, 0x35, 0xc0, 0x20, 0x80, 0x33, 0x46, 0x4a,
0x63, 0xab, 0xd1, 0x0d, 0x29, 0xd2, 0xe2, 0x84,
0xb8, 0xdb, 0xfa, 0xe9, 0x89, 0x44, 0x86, 0x7c,
0xe8, 0x0b, 0xe6, 0x02, 0x6a, 0x07, 0x9b, 0x96,
0xd0, 0xdb, 0x2e, 0x41, 0x4c, 0xa1, 0xd5, 0x57,
0x45, 0x14, 0xfb, 0xe3, 0xa6, 0x72, 0x5b, 0x87,
0x6e, 0x0c, 0x6d, 0x5b, 0xce, 0xe0, 0x2f, 0xe2,
0x21, 0x81, 0x95, 0xb0, 0xe8, 0xb6, 0x32, 0x0b,
0xb2, 0x98, 0x13, 0x52, 0x5d, 0xfb, 0xec, 0x63,
0x17, 0x8a, 0x9e, 0x23, 0x22, 0x36, 0xee, 0xcd,
0xda, 0xdb, 0xcf, 0x3e, 0xf1, 0xc7, 0xf1, 0x01,
0x12, 0x93, 0x0a, 0xeb, 0x6f, 0xf2, 0x02, 0x15,
0x96, 0x77, 0x5d, 0xef, 0x9c, 0xfb, 0x88, 0x91,
0x59, 0xf9, 0x84, 0xdd, 0x9b, 0x26, 0x8d, 0x80,
0xf9, 0x80, 0x66, 0x2d, 0xac, 0xf7, 0x1f, 0x06,
0xba, 0x7f, 0xff, 0xee, 0xed, 0x40, 0x5f, 0xa5,
0xd6, 0xbd, 0x8c, 0x5b, 0x46, 0xd2, 0x7e, 0x48,
0x4a, 0x65, 0x8f, 0x08, 0x42, 0x60, 0xf7, 0x0f,
0xb9, 0x16, 0x0b, 0x0c, 0x1a, 0x06, 0x00, 0x00,
},
nil,
},
{ // has 1 non-empty fixed huffman block then garbage
"hello.txt",
"hello.txt + garbage",
"hello world\n",
[]byte{
0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a,
0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e,
0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9,
0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1,
0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00,
0x00, 0x00, 'g', 'a', 'r', 'b', 'a', 'g', 'e', '!', '!', '!',
},
ErrHeader,
},
{ // has 1 non-empty fixed huffman block not enough header
"hello.txt",
"hello.txt + garbage",
"hello world\n",
[]byte{
0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a,
0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e,
0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9,
0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1,
0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00,
0x00, 0x00, gzipID1,
},
io.ErrUnexpectedEOF,
},
{ // has 1 non-empty fixed huffman block but corrupt checksum
"hello.txt",
"hello.txt + corrupt checksum",
"hello world\n",
[]byte{
0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a,
0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e,
0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9,
0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1,
0x02, 0x00, 0xff, 0xff, 0xff, 0xff, 0x0c, 0x00,
0x00, 0x00,
},
ErrChecksum,
},
{ // has 1 non-empty fixed huffman block but corrupt size
"hello.txt",
"hello.txt + corrupt size",
"hello world\n",
[]byte{
0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a,
0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e,
0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9,
0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1,
0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0xff, 0x00,
0x00, 0x00,
},
ErrChecksum,
},
{
"f1l3n4m3.tXt",
"header with all fields used",
"",
[]byte{
0x1f, 0x8b, 0x08, 0x1e, 0x70, 0xf0, 0xf9, 0x4a,
0x00, 0xaa, 0x09, 0x00, 0x7a, 0x7a, 0x05, 0x00,
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x31, 0x6c,
0x33, 0x6e, 0x34, 0x6d, 0x33, 0x2e, 0x74, 0x58,
0x74, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16,
0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e,
0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26,
0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e,
0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36,
0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e,
0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46,
0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e,
0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56,
0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e,
0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e,
0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e,
0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86,
0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e,
0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e,
0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6,
0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae,
0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe,
0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6,
0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce,
0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6,
0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde,
0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6,
0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee,
0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6,
0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe,
0xff, 0x00, 0x92, 0xfd, 0x01, 0x00, 0x00, 0xff,
0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00,
},
nil,
},
{
"",
"truncated gzip file amid raw-block",
"hello",
[]byte{
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0x00, 0x0c, 0x00, 0xf3, 0xff, 0x68, 0x65, 0x6c, 0x6c, 0x6f,
},
io.ErrUnexpectedEOF,
},
{
"",
"truncated gzip file amid fixed-block",
"He",
[]byte{
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xf2, 0x48, 0xcd,
},
io.ErrUnexpectedEOF,
},
}
func TestDecompressor(t *testing.T) {
b := new(bytes.Buffer)
for _, tt := range gunzipTests {
in := bytes.NewReader(tt.gzip)
gzip, err := NewReader(in)
if err != nil {
t.Errorf("%s: NewReader: %s", tt.name, err)
continue
}
defer gzip.Close()
if tt.name != gzip.Name {
t.Errorf("%s: got name %s", tt.name, gzip.Name)
}
b.Reset()
n, err := io.Copy(b, gzip)
if err != tt.err {
t.Errorf("%s: io.Copy: %v want %v", tt.name, err, tt.err)
}
s := b.String()
if s != tt.raw {
t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.name, n, s, len(tt.raw), tt.raw)
}
// Test Reader Reset.
in = bytes.NewReader(tt.gzip)
err = gzip.Reset(in)
if err != nil {
t.Errorf("%s: Reset: %s", tt.name, err)
continue
}
if tt.name != gzip.Name {
t.Errorf("%s: got name %s", tt.name, gzip.Name)
}
b.Reset()
n, err = io.Copy(b, gzip)
if err != tt.err {
t.Errorf("%s: io.Copy: %v want %v", tt.name, err, tt.err)
}
s = b.String()
if s != tt.raw {
t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.name, n, s, len(tt.raw), tt.raw)
}
}
}
func TestIssue6550(t *testing.T) {
f, err := os.Open("testdata/issue6550.gz")
if err != nil {
t.Fatal(err)
}
gzip, err := NewReader(f)
if err != nil {
t.Fatalf("NewReader(testdata/issue6550.gz): %v", err)
}
defer gzip.Close()
done := make(chan bool, 1)
go func() {
_, err := io.Copy(ioutil.Discard, gzip)
if err == nil {
t.Errorf("Copy succeeded")
} else {
t.Logf("Copy failed (correctly): %v", err)
}
done <- true
}()
select {
case <-time.After(1 * time.Second):
t.Errorf("Copy hung")
case <-done:
// ok
}
}
func TestInitialReset(t *testing.T) {
var r Reader
if err := r.Reset(bytes.NewReader(gunzipTests[1].gzip)); err != nil {
t.Error(err)
}
var buf bytes.Buffer
if _, err := io.Copy(&buf, &r); err != nil {
t.Error(err)
}
if s := buf.String(); s != gunzipTests[1].raw {
t.Errorf("got %q want %q", s, gunzipTests[1].raw)
}
}
func TestMultistreamFalse(t *testing.T) {
// Find concatenation test.
var tt gunzipTest
for _, tt = range gunzipTests {
if strings.HasSuffix(tt.desc, " x2") {
goto Found
}
}
t.Fatal("cannot find hello.txt x2 in gunzip tests")
Found:
br := bytes.NewReader(tt.gzip)
var r Reader
if err := r.Reset(br); err != nil {
t.Fatalf("first reset: %v", err)
}
// Expect two streams with "hello world\n", then real EOF.
const hello = "hello world\n"
r.Multistream(false)
data, err := ioutil.ReadAll(&r)
if string(data) != hello || err != nil {
t.Fatalf("first stream = %q, %v, want %q, %v", string(data), err, hello, nil)
}
if err := r.Reset(br); err != nil {
t.Fatalf("second reset: %v", err)
}
r.Multistream(false)
data, err = ioutil.ReadAll(&r)
if string(data) != hello || err != nil {
t.Fatalf("second stream = %q, %v, want %q, %v", string(data), err, hello, nil)
}
if err := r.Reset(br); err != io.EOF {
t.Fatalf("third reset: err=%v, want io.EOF", err)
}
}
func TestWriteTo(t *testing.T) {
input := make([]byte, 100000)
n, err := rand.Read(input)
if err != nil {
t.Fatal(err)
}
if n != len(input) {
t.Fatal("did not fill buffer")
}
compressed := &bytes.Buffer{}
// Do it twice to test MultiStream functionality
for i := 0; i < 2; i++ {
w, err := NewWriterLevel(compressed, -2)
if err != nil {
t.Fatal(err)
}
n, err = w.Write(input)
if err != nil {
t.Fatal(err)
}
if n != len(input) {
t.Fatal("did not fill buffer")
}
w.Close()
}
input = append(input, input...)
buf := compressed.Bytes()
dec, err := NewReader(bytes.NewBuffer(buf))
if err != nil {
t.Fatal(err)
}
// ReadAll does not use WriteTo, but we wrap it in a NopCloser to be sure.
readall, err := ioutil.ReadAll(ioutil.NopCloser(dec))
if err != nil {
t.Fatal(err)
}
if len(readall) != len(input) {
t.Errorf("did not decompress everything, want %d, got %d", len(input), len(readall))
}
if bytes.Compare(readall, input) != 0 {
t.Error("output did not match input")
}
dec, err = NewReader(bytes.NewBuffer(buf))
if err != nil {
t.Fatal(err)
}
wtbuf := &bytes.Buffer{}
written, err := dec.WriteTo(wtbuf)
if err != nil {
t.Fatal(err)
}
if written != int64(len(input)) {
t.Error("Returned length did not match, expected", len(input), "got", written)
}
if wtbuf.Len() != len(input) {
t.Error("Actual Length did not match, expected", len(input), "got", wtbuf.Len())
}
if bytes.Compare(wtbuf.Bytes(), input) != 0 {
t.Fatal("output did not match input")
}
}
func TestNilStream(t *testing.T) {
// Go liberally interprets RFC 1952 section 2.2 to mean that a gzip file
// consists of zero or more members. Thus, we test that a nil stream is okay.
_, err := NewReader(bytes.NewReader(nil))
if err != io.EOF {
t.Fatalf("NewReader(nil) on empty stream: got %v, want io.EOF", err)
}
}
func TestTruncatedStreams(t *testing.T) {
const data = "\x1f\x8b\b\x04\x00\tn\x88\x00\xff\a\x00foo bar\xcbH\xcd\xc9\xc9\xd7Q(\xcf/\xcaI\x01\x04:r\xab\xff\f\x00\x00\x00"
// Intentionally iterate starting with at least one byte in the stream.
for i := 1; i < len(data)-1; i++ {
r, err := NewReader(strings.NewReader(data[:i]))
if err != nil {
if err != io.ErrUnexpectedEOF {
t.Errorf("NewReader(%d) on truncated stream: got %v, want %v", i, err, io.ErrUnexpectedEOF)
}
continue
}
_, err = io.Copy(ioutil.Discard, r)
if ferr, ok := err.(*flate.ReadError); ok {
err = ferr.Err
}
if err != io.ErrUnexpectedEOF {
t.Errorf("io.Copy(%d) on truncated stream: got %v, want %v", i, err, io.ErrUnexpectedEOF)
}
}
}
func BenchmarkGunzipCopy(b *testing.B) {
dat, _ := ioutil.ReadFile("testdata/test.json")
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
dst := &bytes.Buffer{}
w, _ := NewWriterLevel(dst, 1)
_, err := w.Write(dat)
if err != nil {
b.Fatal(err)
}
w.Close()
input := dst.Bytes()
b.SetBytes(int64(len(dat)))
b.ResetTimer()
for n := 0; n < b.N; n++ {
r, err := NewReader(bytes.NewBuffer(input))
if err != nil {
b.Fatal(err)
}
_, err = io.Copy(ioutil.Discard, r)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkGunzipNoWriteTo(b *testing.B) {
dat, _ := ioutil.ReadFile("testdata/test.json")
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
dst := &bytes.Buffer{}
w, _ := NewWriterLevel(dst, 1)
_, err := w.Write(dat)
if err != nil {
b.Fatal(err)
}
w.Close()
input := dst.Bytes()
r, err := NewReader(bytes.NewBuffer(input))
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(len(dat)))
b.ResetTimer()
for n := 0; n < b.N; n++ {
err := r.Reset(bytes.NewBuffer(input))
if err != nil {
b.Fatal(err)
}
_, err = io.Copy(ioutil.Discard, ioutil.NopCloser(r))
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkGunzipStdlib(b *testing.B) {
dat, _ := ioutil.ReadFile("testdata/test.json")
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
dst := &bytes.Buffer{}
w, _ := NewWriterLevel(dst, 1)
_, err := w.Write(dat)
if err != nil {
b.Fatal(err)
}
w.Close()
input := dst.Bytes()
r, err := oldgz.NewReader(bytes.NewBuffer(input))
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(len(dat)))
b.ResetTimer()
for n := 0; n < b.N; n++ {
err := r.Reset(bytes.NewBuffer(input))
if err != nil {
b.Fatal(err)
}
_, err = io.Copy(ioutil.Discard, r)
if err != nil {
b.Fatal(err)
}
}
}

vendor/github.com/klauspost/compress/gzip/gzip.go generated vendored Normal file

@@ -0,0 +1,251 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gzip
import (
"errors"
"fmt"
"io"
"github.com/klauspost/compress/flate"
"github.com/klauspost/crc32"
)
// These constants are copied from the flate package, so that code that imports
// "compress/gzip" does not also have to import "compress/flate".
const (
NoCompression = flate.NoCompression
BestSpeed = flate.BestSpeed
BestCompression = flate.BestCompression
DefaultCompression = flate.DefaultCompression
ConstantCompression = flate.ConstantCompression
HuffmanOnly = flate.HuffmanOnly
)
// A Writer is an io.WriteCloser.
// Writes to a Writer are compressed and written to w.
type Writer struct {
Header // written at first call to Write, Flush, or Close
w io.Writer
level int
wroteHeader bool
compressor *flate.Writer
digest uint32 // CRC-32, IEEE polynomial (section 8)
size uint32 // Uncompressed size (section 2.3.1)
closed bool
buf [10]byte
err error
}
// NewWriter returns a new Writer.
// Writes to the returned writer are compressed and written to w.
//
// It is the caller's responsibility to call Close on the WriteCloser when done.
// Writes may be buffered and not flushed until Close.
//
// Callers that wish to set the fields in Writer.Header must do so before
// the first call to Write, Flush, or Close.
func NewWriter(w io.Writer) *Writer {
z, _ := NewWriterLevel(w, DefaultCompression)
return z
}
// NewWriterLevel is like NewWriter but specifies the compression level instead
// of assuming DefaultCompression.
//
// The compression level can be DefaultCompression, NoCompression,
// ConstantCompression (HuffmanOnly), or any integer value between BestSpeed
// and BestCompression inclusive. The error returned will be nil if the level
// is valid.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
if level < HuffmanOnly || level > BestCompression {
return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
}
z := new(Writer)
z.init(w, level)
return z, nil
}
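// Illustrative sketch (not part of the vendored file): besides the standard
// levels, this fork accepts ConstantCompression (alias HuffmanOnly, level -2),
// which Huffman-encodes the input without searching for backreferences,
// trading ratio for speed. The helper name newHuffmanOnlyWriter is an
// assumption.
func newHuffmanOnlyWriter(w io.Writer) (*Writer, error) {
    return NewWriterLevel(w, ConstantCompression)
}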
func (z *Writer) init(w io.Writer, level int) {
compressor := z.compressor
if compressor != nil {
compressor.Reset(w)
}
*z = Writer{
Header: Header{
OS: 255, // unknown
},
w: w,
level: level,
compressor: compressor,
}
}
// Reset discards the Writer z's state and makes it equivalent to the
// result of its original state from NewWriter or NewWriterLevel, but
// writing to w instead. This permits reusing a Writer rather than
// allocating a new one.
func (z *Writer) Reset(w io.Writer) {
z.init(w, z.level)
}
// writeBytes writes a length-prefixed byte slice to z.w.
func (z *Writer) writeBytes(b []byte) error {
if len(b) > 0xffff {
return errors.New("gzip.Write: Extra data is too large")
}
le.PutUint16(z.buf[:2], uint16(len(b)))
_, err := z.w.Write(z.buf[:2])
if err != nil {
return err
}
_, err = z.w.Write(b)
return err
}
// writeString writes a UTF-8 string s in GZIP's format to z.w.
// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
func (z *Writer) writeString(s string) (err error) {
// GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII.
needconv := false
for _, v := range s {
if v == 0 || v > 0xff {
return errors.New("gzip.Write: non-Latin-1 header string")
}
if v > 0x7f {
needconv = true
}
}
if needconv {
b := make([]byte, 0, len(s))
for _, v := range s {
b = append(b, byte(v))
}
_, err = z.w.Write(b)
} else {
_, err = io.WriteString(z.w, s)
}
if err != nil {
return err
}
// GZIP strings are NUL-terminated.
z.buf[0] = 0
_, err = z.w.Write(z.buf[:1])
return err
}
// Write writes a compressed form of p to the underlying io.Writer. The
// compressed bytes are not necessarily flushed until the Writer is closed.
func (z *Writer) Write(p []byte) (int, error) {
if z.err != nil {
return 0, z.err
}
var n int
// Write the GZIP header lazily.
if !z.wroteHeader {
z.wroteHeader = true
z.buf[0] = gzipID1
z.buf[1] = gzipID2
z.buf[2] = gzipDeflate
z.buf[3] = 0
if z.Extra != nil {
z.buf[3] |= 0x04
}
if z.Name != "" {
z.buf[3] |= 0x08
}
if z.Comment != "" {
z.buf[3] |= 0x10
}
le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix()))
if z.level == BestCompression {
z.buf[8] = 2
} else if z.level == BestSpeed {
z.buf[8] = 4
} else {
z.buf[8] = 0
}
z.buf[9] = z.OS
n, z.err = z.w.Write(z.buf[:10])
if z.err != nil {
return n, z.err
}
if z.Extra != nil {
z.err = z.writeBytes(z.Extra)
if z.err != nil {
return n, z.err
}
}
if z.Name != "" {
z.err = z.writeString(z.Name)
if z.err != nil {
return n, z.err
}
}
if z.Comment != "" {
z.err = z.writeString(z.Comment)
if z.err != nil {
return n, z.err
}
}
if z.compressor == nil {
z.compressor, _ = flate.NewWriter(z.w, z.level)
}
}
z.size += uint32(len(p))
z.digest = crc32.Update(z.digest, crc32.IEEETable, p)
n, z.err = z.compressor.Write(p)
return n, z.err
}
// Flush flushes any pending compressed data to the underlying writer.
//
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet. Flush does
// not return until the data has been written. If the underlying
// writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (z *Writer) Flush() error {
if z.err != nil {
return z.err
}
if z.closed {
return nil
}
if !z.wroteHeader {
z.Write(nil)
if z.err != nil {
return z.err
}
}
z.err = z.compressor.Flush()
return z.err
}
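// Illustrative sketch (not part of the vendored file): Flush in a streaming
// protocol, where each message must be decodable by the peer without closing
// the gzip stream. The helper name writeMessage is an assumption.
func writeMessage(zw *Writer, msg []byte) error {
    if _, err := zw.Write(msg); err != nil {
        return err
    }
    // Like Z_SYNC_FLUSH: pushes buffered compressed data to the underlying
    // writer so the reader can decode msg now; the stream stays open.
    return zw.Flush()
}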
// Close closes the Writer, flushing any unwritten data to the underlying
// io.Writer, but does not close the underlying io.Writer.
func (z *Writer) Close() error {
if z.err != nil {
return z.err
}
if z.closed {
return nil
}
z.closed = true
if !z.wroteHeader {
z.Write(nil)
if z.err != nil {
return z.err
}
}
z.err = z.compressor.Close()
if z.err != nil {
return z.err
}
le.PutUint32(z.buf[:4], z.digest)
le.PutUint32(z.buf[4:8], z.size)
_, z.err = z.w.Write(z.buf[:8])
return z.err
}
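// Illustrative sketch (not part of the vendored file): Close writes the gzip
// trailer but, as documented above, does not close the underlying writer, so
// both need closing when writing to a file or connection. The helper name
// compressTo is an assumption.
func compressTo(dst io.WriteCloser, data []byte) error {
    zw := NewWriter(dst)
    if _, err := zw.Write(data); err != nil {
        dst.Close()
        return err
    }
    if err := zw.Close(); err != nil { // flushes compressed data and writes the trailer
        dst.Close()
        return err
    }
    return dst.Close() // the underlying writer remains the caller's responsibility
}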

vendor/github.com/klauspost/compress/gzip/gzip_test.go generated vendored Normal file

@@ -0,0 +1,519 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gzip
import (
"bufio"
"bytes"
oldgz "compress/gzip"
"io"
"io/ioutil"
"math/rand"
"testing"
"time"
)
// TestEmpty tests that an empty payload still forms a valid GZIP stream.
func TestEmpty(t *testing.T) {
buf := new(bytes.Buffer)
if err := NewWriter(buf).Close(); err != nil {
t.Fatalf("Writer.Close: %v", err)
}
r, err := NewReader(buf)
if err != nil {
t.Fatalf("NewReader: %v", err)
}
b, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("ReadAll: %v", err)
}
if len(b) != 0 {
t.Fatalf("got %d bytes, want 0", len(b))
}
if err := r.Close(); err != nil {
t.Fatalf("Reader.Close: %v", err)
}
}
// TestRoundTrip tests that gzipping and then gunzipping is the identity
// function.
func TestRoundTrip(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
w.Comment = "comment"
w.Extra = []byte("extra")
w.ModTime = time.Unix(1e8, 0)
w.Name = "name"
if _, err := w.Write([]byte("payload")); err != nil {
t.Fatalf("Write: %v", err)
}
if err := w.Close(); err != nil {
t.Fatalf("Writer.Close: %v", err)
}
r, err := NewReader(buf)
if err != nil {
t.Fatalf("NewReader: %v", err)
}
b, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("ReadAll: %v", err)
}
if string(b) != "payload" {
t.Fatalf("payload is %q, want %q", string(b), "payload")
}
if r.Comment != "comment" {
t.Fatalf("comment is %q, want %q", r.Comment, "comment")
}
if string(r.Extra) != "extra" {
t.Fatalf("extra is %q, want %q", r.Extra, "extra")
}
if r.ModTime.Unix() != 1e8 {
t.Fatalf("mtime is %d, want %d", r.ModTime.Unix(), uint32(1e8))
}
if r.Name != "name" {
t.Fatalf("name is %q, want %q", r.Name, "name")
}
if err := r.Close(); err != nil {
t.Fatalf("Reader.Close: %v", err)
}
}
// TestLatin1 tests the internal functions for converting to and from Latin-1.
func TestLatin1(t *testing.T) {
latin1 := []byte{0xc4, 'u', 0xdf, 'e', 'r', 'u', 'n', 'g', 0}
utf8 := "Äußerung"
z := Reader{r: bufio.NewReader(bytes.NewReader(latin1))}
s, err := z.readString()
if err != nil {
t.Fatalf("readString: %v", err)
}
if s != utf8 {
t.Fatalf("read latin-1: got %q, want %q", s, utf8)
}
buf := bytes.NewBuffer(make([]byte, 0, len(latin1)))
c := Writer{w: buf}
if err = c.writeString(utf8); err != nil {
t.Fatalf("writeString: %v", err)
}
s = buf.String()
if s != string(latin1) {
t.Fatalf("write utf-8: got %q, want %q", s, string(latin1))
}
}
// TestLatin1RoundTrip tests that metadata that is representable in Latin-1
// survives a round trip.
func TestLatin1RoundTrip(t *testing.T) {
testCases := []struct {
name string
ok bool
}{
{"", true},
{"ASCII is OK", true},
{"unless it contains a NUL\x00", false},
{"no matter where \x00 occurs", false},
{"\x00\x00\x00", false},
{"Látin-1 also passes (U+00E1)", true},
{"but LĀtin Extended-A (U+0100) does not", false},
{"neither does 日本語", false},
{"invalid UTF-8 also \xffails", false},
{"\x00 as does Látin-1 with NUL", false},
}
for _, tc := range testCases {
buf := new(bytes.Buffer)
w := NewWriter(buf)
w.Name = tc.name
err := w.Close()
if (err == nil) != tc.ok {
t.Errorf("Writer.Close: name = %q, err = %v", tc.name, err)
continue
}
if !tc.ok {
continue
}
r, err := NewReader(buf)
if err != nil {
t.Errorf("NewReader: %v", err)
continue
}
_, err = ioutil.ReadAll(r)
if err != nil {
t.Errorf("ReadAll: %v", err)
continue
}
if r.Name != tc.name {
t.Errorf("name is %q, want %q", r.Name, tc.name)
continue
}
if err := r.Close(); err != nil {
t.Errorf("Reader.Close: %v", err)
continue
}
}
}
func TestWriterFlush(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
w.Comment = "comment"
w.Extra = []byte("extra")
w.ModTime = time.Unix(1e8, 0)
w.Name = "name"
n0 := buf.Len()
if n0 != 0 {
t.Fatalf("buffer size = %d before writes; want 0", n0)
}
if err := w.Flush(); err != nil {
t.Fatal(err)
}
n1 := buf.Len()
if n1 == 0 {
t.Fatal("no data after first flush")
}
w.Write([]byte("x"))
n2 := buf.Len()
if n1 != n2 {
t.Fatalf("after writing a single byte, size changed from %d to %d; want no change", n1, n2)
}
if err := w.Flush(); err != nil {
t.Fatal(err)
}
n3 := buf.Len()
if n2 == n3 {
t.Fatal("Flush didn't flush any data")
}
}
// Multiple gzip files concatenated form a valid gzip file.
func TestConcat(t *testing.T) {
var buf bytes.Buffer
w := NewWriter(&buf)
w.Write([]byte("hello "))
w.Close()
w = NewWriter(&buf)
w.Write([]byte("world\n"))
w.Close()
r, err := NewReader(&buf)
if err != nil {
t.Fatal(err)
}
data, err := ioutil.ReadAll(r)
if string(data) != "hello world\n" || err != nil {
t.Fatalf("ReadAll = %q, %v, want %q, nil", data, err, "hello world")
}
}
func TestWriterReset(t *testing.T) {
buf := new(bytes.Buffer)
buf2 := new(bytes.Buffer)
z := NewWriter(buf)
msg := []byte("hello world")
z.Write(msg)
z.Close()
z.Reset(buf2)
z.Write(msg)
z.Close()
if buf.String() != buf2.String() {
t.Errorf("buf2 %q != original buf of %q", buf2.String(), buf.String())
}
}
var testbuf []byte
func testFile(i, level int, t *testing.T) {
dat, _ := ioutil.ReadFile("testdata/test.json")
dl := len(dat)
if len(testbuf) != i*dl {
// Make results predictable
testbuf = make([]byte, i*dl)
for j := 0; j < i; j++ {
copy(testbuf[j*dl:j*dl+dl], dat)
}
}
br := bytes.NewBuffer(testbuf)
var buf bytes.Buffer
w, err := NewWriterLevel(&buf, level)
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(w, br)
if err != nil {
t.Fatal(err)
}
if int(n) != len(testbuf) {
t.Fatal("Short write:", n, "!=", testbuf)
}
err = w.Close()
if err != nil {
t.Fatal(err)
}
r, err := NewReader(&buf)
if err != nil {
t.Fatal(err.Error())
}
decoded, err := ioutil.ReadAll(r)
if err != nil {
t.Fatal(err.Error())
}
if !bytes.Equal(testbuf, decoded) {
t.Errorf("decoded content does not match.")
}
}
func TestFile1xM2(t *testing.T) { testFile(1, -2, t) }
func TestFile1xM1(t *testing.T) { testFile(1, -1, t) }
func TestFile1x0(t *testing.T) { testFile(1, 0, t) }
func TestFile1x1(t *testing.T) { testFile(1, 1, t) }
func TestFile1x2(t *testing.T) { testFile(1, 2, t) }
func TestFile1x3(t *testing.T) { testFile(1, 3, t) }
func TestFile1x4(t *testing.T) { testFile(1, 4, t) }
func TestFile1x5(t *testing.T) { testFile(1, 5, t) }
func TestFile1x6(t *testing.T) { testFile(1, 6, t) }
func TestFile1x7(t *testing.T) { testFile(1, 7, t) }
func TestFile1x8(t *testing.T) { testFile(1, 8, t) }
func TestFile1x9(t *testing.T) { testFile(1, 9, t) }
func TestFile10(t *testing.T) { testFile(10, DefaultCompression, t) }
func TestFile50(t *testing.T) {
if testing.Short() {
t.Skip("skipping during short test")
}
testFile(50, DefaultCompression, t)
}
func TestFile200(t *testing.T) {
if testing.Short() {
t.Skip("skipping during short test")
}
testFile(200, BestSpeed, t)
}
func testBigGzip(i int, t *testing.T) {
if len(testbuf) != i {
// Make results predictable
rand.Seed(1337)
testbuf = make([]byte, i)
for idx := range testbuf {
testbuf[idx] = byte(65 + rand.Intn(20))
}
}
c := BestCompression
if testing.Short() {
c = BestSpeed
}
br := bytes.NewBuffer(testbuf)
var buf bytes.Buffer
w, err := NewWriterLevel(&buf, c)
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(w, br)
if err != nil {
t.Fatal(err)
}
if int(n) != len(testbuf) {
t.Fatal("Short write:", n, "!=", len(testbuf))
}
err = w.Close()
if err != nil {
t.Fatal(err.Error())
}
r, err := NewReader(&buf)
if err != nil {
t.Fatal(err.Error())
}
decoded, err := ioutil.ReadAll(r)
if err != nil {
t.Fatal(err.Error())
}
if !bytes.Equal(testbuf, decoded) {
t.Errorf("decoded content does not match.")
}
}
func TestGzip1K(t *testing.T) { testBigGzip(1000, t) }
func TestGzip100K(t *testing.T) { testBigGzip(100000, t) }
func TestGzip1M(t *testing.T) {
if testing.Short() {
t.Skip("skipping during short test")
}
testBigGzip(1000000, t)
}
func TestGzip10M(t *testing.T) {
if testing.Short() {
t.Skip("skipping during short test")
}
testBigGzip(10000000, t)
}
// Test if two runs produce identical results.
func TestDeterministicLM2(t *testing.T) { testDeterm(-2, t) }
// Level 0 is not deterministic since it depends on the size of each write.
// func TestDeterministicL0(t *testing.T) { testDeterm(0, t) }
func TestDeterministicL1(t *testing.T) { testDeterm(1, t) }
func TestDeterministicL2(t *testing.T) { testDeterm(2, t) }
func TestDeterministicL3(t *testing.T) { testDeterm(3, t) }
func TestDeterministicL4(t *testing.T) { testDeterm(4, t) }
func TestDeterministicL5(t *testing.T) { testDeterm(5, t) }
func TestDeterministicL6(t *testing.T) { testDeterm(6, t) }
func TestDeterministicL7(t *testing.T) { testDeterm(7, t) }
func TestDeterministicL8(t *testing.T) { testDeterm(8, t) }
func TestDeterministicL9(t *testing.T) { testDeterm(9, t) }
func testDeterm(i int, t *testing.T) {
var length = 500000
if testing.Short() {
length = 100000
}
rand.Seed(1337)
t1 := make([]byte, length)
for idx := range t1 {
t1[idx] = byte(65 + rand.Intn(8))
}
br := bytes.NewBuffer(t1)
var b1 bytes.Buffer
w, err := NewWriterLevel(&b1, i)
if err != nil {
t.Fatal(err)
}
_, err = io.Copy(w, br)
if err != nil {
t.Fatal(err)
}
w.Flush()
w.Close()
// We recreate the buffer, so we have a good chance of getting a
// different memory address.
rand.Seed(1337)
t2 := make([]byte, length)
for idx := range t2 {
t2[idx] = byte(65 + rand.Intn(8))
}
br2 := bytes.NewBuffer(t2)
var b2 bytes.Buffer
w2, err := NewWriterLevel(&b2, i)
if err != nil {
t.Fatal(err)
}
// We write the same data, but with a different size than
// the default copy.
for {
_, err = io.CopyN(w2, br2, 1234)
if err == io.EOF {
err = nil
break
} else if err != nil {
break
}
}
if err != nil {
t.Fatal(err)
}
w2.Flush()
w2.Close()
b1b := b1.Bytes()
b2b := b2.Bytes()
if bytes.Compare(b1b, b2b) != 0 {
t.Fatalf("Level %d did not produce deterministric result, len(a) = %d, len(b) = %d", i, len(b1b), len(b2b))
}
}
func BenchmarkGzipLM2(b *testing.B) { benchmarkGzipN(b, -2) }
func BenchmarkGzipL1(b *testing.B) { benchmarkGzipN(b, 1) }
func BenchmarkGzipL2(b *testing.B) { benchmarkGzipN(b, 2) }
func BenchmarkGzipL3(b *testing.B) { benchmarkGzipN(b, 3) }
func BenchmarkGzipL4(b *testing.B) { benchmarkGzipN(b, 4) }
func BenchmarkGzipL5(b *testing.B) { benchmarkGzipN(b, 5) }
func BenchmarkGzipL6(b *testing.B) { benchmarkGzipN(b, 6) }
func BenchmarkGzipL7(b *testing.B) { benchmarkGzipN(b, 7) }
func BenchmarkGzipL8(b *testing.B) { benchmarkGzipN(b, 8) }
func BenchmarkGzipL9(b *testing.B) { benchmarkGzipN(b, 9) }
func benchmarkGzipN(b *testing.B, level int) {
dat, _ := ioutil.ReadFile("testdata/test.json")
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
b.SetBytes(int64(len(dat)))
w, _ := NewWriterLevel(ioutil.Discard, level)
b.ResetTimer()
for n := 0; n < b.N; n++ {
w.Reset(ioutil.Discard)
n, err := w.Write(dat)
if n != len(dat) {
panic("short write")
}
if err != nil {
panic(err)
}
err = w.Close()
if err != nil {
panic(err)
}
}
}
func BenchmarkOldGzipL1(b *testing.B) { benchmarkOldGzipN(b, 1) }
func BenchmarkOldGzipL2(b *testing.B) { benchmarkOldGzipN(b, 2) }
func BenchmarkOldGzipL3(b *testing.B) { benchmarkOldGzipN(b, 3) }
func BenchmarkOldGzipL4(b *testing.B) { benchmarkOldGzipN(b, 4) }
func BenchmarkOldGzipL5(b *testing.B) { benchmarkOldGzipN(b, 5) }
func BenchmarkOldGzipL6(b *testing.B) { benchmarkOldGzipN(b, 6) }
func BenchmarkOldGzipL7(b *testing.B) { benchmarkOldGzipN(b, 7) }
func BenchmarkOldGzipL8(b *testing.B) { benchmarkOldGzipN(b, 8) }
func BenchmarkOldGzipL9(b *testing.B) { benchmarkOldGzipN(b, 9) }
func benchmarkOldGzipN(b *testing.B, level int) {
dat, _ := ioutil.ReadFile("testdata/test.json")
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
dat = append(dat, dat...)
b.SetBytes(int64(len(dat)))
w, _ := oldgz.NewWriterLevel(ioutil.Discard, level)
b.ResetTimer()
for n := 0; n < b.N; n++ {
w.Reset(ioutil.Discard)
n, err := w.Write(dat)
if n != len(dat) {
panic("short write")
}
if err != nil {
panic(err)
}
err = w.Close()
if err != nil {
panic(err)
}
}
}
