package fasthttp

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"sync"

	"github.com/klauspost/compress/flate"
	"github.com/klauspost/compress/gzip"
	"github.com/klauspost/compress/zlib"
	"github.com/valyala/bytebufferpool"
	"github.com/valyala/fasthttp/stackless"
)

// Supported compression levels.
const (
	CompressNoCompression      = flate.NoCompression
	CompressBestSpeed          = flate.BestSpeed
	CompressBestCompression    = flate.BestCompression
	CompressDefaultCompression = 6  // flate.DefaultCompression
	CompressHuffmanOnly        = -2 // flate.HuffmanOnly
)

func acquireGzipReader(r io.Reader) (*gzip.Reader, error) {
	v := gzipReaderPool.Get()
	if v == nil {
		return gzip.NewReader(r)
	}
	zr := v.(*gzip.Reader)
	if err := zr.Reset(r); err != nil {
		return nil, err
	}
	return zr, nil
}

func releaseGzipReader(zr *gzip.Reader) {
	zr.Close()
	gzipReaderPool.Put(zr)
}

var gzipReaderPool sync.Pool

func acquireFlateReader(r io.Reader) (io.ReadCloser, error) {
	v := flateReaderPool.Get()
	if v == nil {
		zr, err := zlib.NewReader(r)
		if err != nil {
			return nil, err
		}
		return zr, nil
	}
	zr := v.(io.ReadCloser)
	if err := resetFlateReader(zr, r); err != nil {
		return nil, err
	}
	return zr, nil
}

func releaseFlateReader(zr io.ReadCloser) {
	zr.Close()
	flateReaderPool.Put(zr)
}

func resetFlateReader(zr io.ReadCloser, r io.Reader) error {
	zrr, ok := zr.(zlib.Resetter)
	if !ok {
		// sanity check. should only be called with a zlib.Reader
		panic("BUG: zlib.Reader doesn't implement zlib.Resetter???")
	}
	return zrr.Reset(r, nil)
}

var flateReaderPool sync.Pool

func acquireStacklessGzipWriter(w io.Writer, level int) stackless.Writer {
	nLevel := normalizeCompressLevel(level)
	p := stacklessGzipWriterPoolMap[nLevel]
	v := p.Get()
	if v == nil {
		return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
			return acquireRealGzipWriter(w, level)
		})
	}
	sw := v.(stackless.Writer)
	sw.Reset(w)
	return sw
}

func releaseStacklessGzipWriter(sw stackless.Writer, level int) {
	sw.Close()
	nLevel := normalizeCompressLevel(level)
	p := stacklessGzipWriterPoolMap[nLevel]
	p.Put(sw)
}

func acquireRealGzipWriter(w io.Writer, level int) *gzip.Writer {
	nLevel := normalizeCompressLevel(level)
	p := realGzipWriterPoolMap[nLevel]
	v := p.Get()
	if v == nil {
		zw, err := gzip.NewWriterLevel(w, level)
		if err != nil {
			// gzip.NewWriterLevel only errors for invalid
			// compression levels. Clamp it to be min or max.
			if level < gzip.HuffmanOnly {
				level = gzip.HuffmanOnly
			} else {
				level = gzip.BestCompression
			}
			zw, _ = gzip.NewWriterLevel(w, level)
		}
		return zw
	}
	zw := v.(*gzip.Writer)
	zw.Reset(w)
	return zw
}

func releaseRealGzipWriter(zw *gzip.Writer, level int) {
	zw.Close()
	nLevel := normalizeCompressLevel(level)
	p := realGzipWriterPoolMap[nLevel]
	p.Put(zw)
}

var (
	stacklessGzipWriterPoolMap = newCompressWriterPoolMap()
	realGzipWriterPoolMap      = newCompressWriterPoolMap()
)

// AppendGzipBytesLevel appends gzipped src to dst using the given
// compression level and returns the resulting dst.
//
// Supported compression levels are:
//
//   - CompressNoCompression
//   - CompressBestSpeed
//   - CompressBestCompression
//   - CompressDefaultCompression
//   - CompressHuffmanOnly
func AppendGzipBytesLevel(dst, src []byte, level int) []byte {
	w := &byteSliceWriter{b: dst}
	WriteGzipLevel(w, src, level) //nolint:errcheck
	return w.b
}
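
// appendGzipBytesLevelExample is an illustrative sketch, not part of the
// original fasthttp API; the function name and variables are hypothetical.
// It shows the Append* pattern: pass a destination slice (possibly nil)
// and keep the returned slice, since it may point to newly grown memory.
func appendGzipBytesLevelExample(src []byte) []byte {
	dst := make([]byte, 0, 256) // optional pre-allocated buffer
	dst = AppendGzipBytesLevel(dst, src, CompressBestSpeed)
	return dst
}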

// WriteGzipLevel writes gzipped p to w using the given compression level
// and returns the number of compressed bytes written to w.
//
// Supported compression levels are:
//
//   - CompressNoCompression
//   - CompressBestSpeed
//   - CompressBestCompression
//   - CompressDefaultCompression
//   - CompressHuffmanOnly
func WriteGzipLevel(w io.Writer, p []byte, level int) (int, error) {
	switch w.(type) {
	case *byteSliceWriter,
		*bytes.Buffer,
		*bytebufferpool.ByteBuffer:
		// These writers don't block, so we can just use stacklessWriteGzip
		ctx := &compressCtx{
			w:     w,
			p:     p,
			level: level,
		}
		stacklessWriteGzip(ctx)
		return len(p), nil
	default:
		zw := acquireStacklessGzipWriter(w, level)
		n, err := zw.Write(p)
		releaseStacklessGzipWriter(zw, level)
		return n, err
	}
}
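
// writeGzipLevelExample is an illustrative sketch, not part of the original
// fasthttp API; the name is hypothetical. Writing into a *bytes.Buffer hits
// the non-blocking fast path above (stacklessWriteGzip); any other io.Writer
// goes through a pooled stackless writer instead.
func writeGzipLevelExample(p []byte) ([]byte, error) {
	var buf bytes.Buffer
	if _, err := WriteGzipLevel(&buf, p, CompressDefaultCompression); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}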

var stacklessWriteGzip = stackless.NewFunc(nonblockingWriteGzip)

func nonblockingWriteGzip(ctxv interface{}) {
	ctx := ctxv.(*compressCtx)
	zw := acquireRealGzipWriter(ctx.w, ctx.level)

	zw.Write(ctx.p) //nolint:errcheck // no way to handle this error anyway

	releaseRealGzipWriter(zw, ctx.level)
}

// WriteGzip writes gzipped p to w and returns the number of compressed
// bytes written to w.
func WriteGzip(w io.Writer, p []byte) (int, error) {
	return WriteGzipLevel(w, p, CompressDefaultCompression)
}

// AppendGzipBytes appends gzipped src to dst and returns the resulting dst.
func AppendGzipBytes(dst, src []byte) []byte {
	return AppendGzipBytesLevel(dst, src, CompressDefaultCompression)
}

// WriteGunzip writes ungzipped p to w and returns the number of uncompressed
// bytes written to w.
func WriteGunzip(w io.Writer, p []byte) (int, error) {
	r := &byteSliceReader{b: p}
	zr, err := acquireGzipReader(r)
	if err != nil {
		return 0, err
	}
	n, err := copyZeroAlloc(w, zr)
	releaseGzipReader(zr)
	nn := int(n)
	if int64(nn) != n {
		return 0, fmt.Errorf("too much data gunzipped: %d", n)
	}
	return nn, err
}

// AppendGunzipBytes appends gunzipped src to dst and returns the resulting dst.
func AppendGunzipBytes(dst, src []byte) ([]byte, error) {
	w := &byteSliceWriter{b: dst}
	_, err := WriteGunzip(w, src)
	return w.b, err
}
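
// gzipRoundTripExample is an illustrative sketch, not part of the original
// fasthttp API; the name is hypothetical. It demonstrates that
// AppendGzipBytes and AppendGunzipBytes are inverses of each other.
func gzipRoundTripExample(src []byte) ([]byte, error) {
	compressed := AppendGzipBytes(nil, src)
	// plain should equal src on success.
	plain, err := AppendGunzipBytes(nil, compressed)
	return plain, err
}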

// AppendDeflateBytesLevel appends deflated src to dst using the given
// compression level and returns the resulting dst.
//
// Supported compression levels are:
//
//   - CompressNoCompression
//   - CompressBestSpeed
//   - CompressBestCompression
//   - CompressDefaultCompression
//   - CompressHuffmanOnly
func AppendDeflateBytesLevel(dst, src []byte, level int) []byte {
	w := &byteSliceWriter{b: dst}
	WriteDeflateLevel(w, src, level) //nolint:errcheck
	return w.b
}

// WriteDeflateLevel writes deflated p to w using the given compression level
// and returns the number of compressed bytes written to w.
//
// Supported compression levels are:
//
//   - CompressNoCompression
//   - CompressBestSpeed
//   - CompressBestCompression
//   - CompressDefaultCompression
//   - CompressHuffmanOnly
func WriteDeflateLevel(w io.Writer, p []byte, level int) (int, error) {
	switch w.(type) {
	case *byteSliceWriter,
		*bytes.Buffer,
		*bytebufferpool.ByteBuffer:
		// These writers don't block, so we can just use stacklessWriteDeflate
		ctx := &compressCtx{
			w:     w,
			p:     p,
			level: level,
		}
		stacklessWriteDeflate(ctx)
		return len(p), nil
	default:
		zw := acquireStacklessDeflateWriter(w, level)
		n, err := zw.Write(p)
		releaseStacklessDeflateWriter(zw, level)
		return n, err
	}
}

var stacklessWriteDeflate = stackless.NewFunc(nonblockingWriteDeflate)

func nonblockingWriteDeflate(ctxv interface{}) {
	ctx := ctxv.(*compressCtx)
	zw := acquireRealDeflateWriter(ctx.w, ctx.level)

	zw.Write(ctx.p) //nolint:errcheck // no way to handle this error anyway

	releaseRealDeflateWriter(zw, ctx.level)
}

type compressCtx struct {
	w     io.Writer
	p     []byte
	level int
}

// WriteDeflate writes deflated p to w and returns the number of compressed
// bytes written to w.
func WriteDeflate(w io.Writer, p []byte) (int, error) {
	return WriteDeflateLevel(w, p, CompressDefaultCompression)
}

// AppendDeflateBytes appends deflated src to dst and returns the resulting dst.
func AppendDeflateBytes(dst, src []byte) []byte {
	return AppendDeflateBytesLevel(dst, src, CompressDefaultCompression)
}

// WriteInflate writes inflated p to w and returns the number of uncompressed
// bytes written to w.
func WriteInflate(w io.Writer, p []byte) (int, error) {
	r := &byteSliceReader{b: p}
	zr, err := acquireFlateReader(r)
	if err != nil {
		return 0, err
	}
	n, err := copyZeroAlloc(w, zr)
	releaseFlateReader(zr)
	nn := int(n)
	if int64(nn) != n {
		return 0, fmt.Errorf("too much data inflated: %d", n)
	}
	return nn, err
}

// AppendInflateBytes appends inflated src to dst and returns the resulting dst.
func AppendInflateBytes(dst, src []byte) ([]byte, error) {
	w := &byteSliceWriter{b: dst}
	_, err := WriteInflate(w, src)
	return w.b, err
}
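
// deflateRoundTripExample is an illustrative sketch, not part of the
// original fasthttp API; the name is hypothetical. The deflate helpers in
// this file write zlib-framed data (see acquireRealDeflateWriter), so the
// matching decoder is AppendInflateBytes/WriteInflate rather than a raw
// flate reader.
func deflateRoundTripExample(src []byte) ([]byte, error) {
	compressed := AppendDeflateBytes(nil, src)
	return AppendInflateBytes(nil, compressed)
}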

type byteSliceWriter struct {
	b []byte
}

func (w *byteSliceWriter) Write(p []byte) (int, error) {
	w.b = append(w.b, p...)
	return len(p), nil
}

type byteSliceReader struct {
	b []byte
}

func (r *byteSliceReader) Read(p []byte) (int, error) {
	if len(r.b) == 0 {
		return 0, io.EOF
	}
	n := copy(p, r.b)
	r.b = r.b[n:]
	return n, nil
}

func (r *byteSliceReader) ReadByte() (byte, error) {
	if len(r.b) == 0 {
		return 0, io.EOF
	}
	n := r.b[0]
	r.b = r.b[1:]
	return n, nil
}

func acquireStacklessDeflateWriter(w io.Writer, level int) stackless.Writer {
	nLevel := normalizeCompressLevel(level)
	p := stacklessDeflateWriterPoolMap[nLevel]
	v := p.Get()
	if v == nil {
		return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
			return acquireRealDeflateWriter(w, level)
		})
	}
	sw := v.(stackless.Writer)
	sw.Reset(w)
	return sw
}

func releaseStacklessDeflateWriter(sw stackless.Writer, level int) {
	sw.Close()
	nLevel := normalizeCompressLevel(level)
	p := stacklessDeflateWriterPoolMap[nLevel]
	p.Put(sw)
}

func acquireRealDeflateWriter(w io.Writer, level int) *zlib.Writer {
	nLevel := normalizeCompressLevel(level)
	p := realDeflateWriterPoolMap[nLevel]
	v := p.Get()
	if v == nil {
		zw, err := zlib.NewWriterLevel(w, level)
		if err != nil {
			// zlib.NewWriterLevel only errors for invalid
			// compression levels. Clamp it to be min or max.
			if level < zlib.HuffmanOnly {
				level = zlib.HuffmanOnly
			} else {
				level = zlib.BestCompression
			}
			zw, _ = zlib.NewWriterLevel(w, level)
		}
		return zw
	}
	zw := v.(*zlib.Writer)
	zw.Reset(w)
	return zw
}

func releaseRealDeflateWriter(zw *zlib.Writer, level int) {
	zw.Close()
	nLevel := normalizeCompressLevel(level)
	p := realDeflateWriterPoolMap[nLevel]
	p.Put(zw)
}

var (
	stacklessDeflateWriterPoolMap = newCompressWriterPoolMap()
	realDeflateWriterPoolMap      = newCompressWriterPoolMap()
)

func newCompressWriterPoolMap() []*sync.Pool {
	// Initialize pools for all the compression levels defined
	// in https://pkg.go.dev/compress/flate#pkg-constants .
	// Compression levels are normalized with normalizeCompressLevel,
	// so they fit [0..11].
	var m []*sync.Pool
	for i := 0; i < 12; i++ {
		m = append(m, &sync.Pool{})
	}
	return m
}

func isFileCompressible(f *os.File, minCompressRatio float64) bool {
	// Try compressing the first 4kb of the file
	// and see if it can be compressed by more than
	// the given minCompressRatio.
	b := bytebufferpool.Get()
	zw := acquireStacklessGzipWriter(b, CompressDefaultCompression)
	lr := &io.LimitedReader{
		R: f,
		N: 4096,
	}
	_, err := copyZeroAlloc(zw, lr)
	releaseStacklessGzipWriter(zw, CompressDefaultCompression)
	f.Seek(0, 0) //nolint:errcheck
	if err != nil {
		return false
	}

	n := 4096 - lr.N
	zn := len(b.B)
	bytebufferpool.Put(b)
	return float64(zn) < float64(n)*minCompressRatio
}
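
// isFileCompressibleExample is an illustrative sketch, not part of the
// original fasthttp API; the name and the 0.8 threshold are hypothetical.
// A ratio of 0.8 means the first 4KB must gzip to less than 80% of its
// original size for the file to be considered worth compressing.
func isFileCompressibleExample(f *os.File) bool {
	return isFileCompressible(f, 0.8)
}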

// normalizes compression level into [0..11], so it could be used as an index
// in *PoolMap.
func normalizeCompressLevel(level int) int {
	// -2 is the lowest compression level - CompressHuffmanOnly
	// 9 is the highest compression level - CompressBestCompression
	if level < -2 || level > 9 {
		level = CompressDefaultCompression
	}
	return level + 2
}
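
// normalizeCompressLevelExample is an illustrative sketch, not part of the
// original fasthttp API; the name is hypothetical. It spells out the index
// mapping used by the writer pool maps above: CompressHuffmanOnly (-2)
// maps to 0, CompressBestCompression (9) maps to 11, and any out-of-range
// level falls back to CompressDefaultCompression (6), i.e. index 8.
func normalizeCompressLevelExample() [3]int {
	return [3]int{
		normalizeCompressLevel(CompressHuffmanOnly),     // 0
		normalizeCompressLevel(CompressBestCompression), // 11
		normalizeCompressLevel(100),                     // 8 (out of range -> default)
	}
}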