// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Modified for deflate by Klaus Post (c) 2015.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
	"encoding/binary"
	"fmt"
)

// fastEnc is the interface implemented by the level 1-6 fast encoders.
// Encode appends tokens for src to dst; Reset prepares the encoder
// for a new, unrelated stream.
type fastEnc interface {
	Encode(dst *tokens, src []byte)
	Reset()
}

// newFastEnc returns the fastEnc implementation for the given
// compression level. Only levels 1 through 6 have fast encoders;
// any other value is a programmer error and panics.
func newFastEnc(level int) fastEnc {
	switch level {
	case 1:
		return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 2:
		return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 3:
		return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 4:
		return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 5:
		return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 6:
		return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
	default:
		panic("invalid level specified")
	}
}

// Sizing constants for the match-finder hash tables and history buffer.
const (
	tableBits       = 15             // Bits used in the table
	tableSize       = 1 << tableBits // Size of the table
	tableShift      = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
	baseMatchOffset = 1              // The smallest match offset
	baseMatchLength = 3              // The smallest match length per the RFC section 3.2.5
	maxMatchOffset  = 1 << 15        // The largest match offset

	bTableBits   = 17                                               // Bits used in the big tables
	bTableSize   = 1 << bTableBits                                  // Size of the table
	allocHistory = maxStoreBlockSize * 5                            // Size to preallocate for history.
	bufferReset  = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
)

// Multiplicative hashing primes, one per input width (3-8 bytes).
// Used by hashLen/hash7 below to mix the loaded bytes before shifting
// down to the table index width.
const (
	prime3bytes = 506832829
	prime4bytes = 2654435761
	prime5bytes = 889523592379
	prime6bytes = 227718039650203
	prime7bytes = 58295818150454627
	prime8bytes = 0xcf1bbcdcb7a56463
)

// load3232 returns the little-endian uint32 starting at offset i in b.
// The caller must guarantee at least 4 readable bytes at b[i:].
func load3232(b []byte, i int32) uint32 {
	return binary.LittleEndian.Uint32(b[i:])
}

// load6432 returns the little-endian uint64 starting at offset i in b.
// The caller must guarantee at least 8 readable bytes at b[i:].
func load6432(b []byte, i int32) uint64 {
	return binary.LittleEndian.Uint64(b[i:])
}

// tableEntry is a single hash-table slot holding the absolute offset
// (relative to fastGen.cur) of a previously seen position.
type tableEntry struct {
	offset int32
}

// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastGen struct {
	hist []byte // history window; match sources are offsets into this buffer
	cur  int32  // offset of hist[0] in the "absolute" coordinate space used by the tables
}

func ( *fastGen) ( []byte) int32 {
	// check if we have space already
	if len(.hist)+len() > cap(.hist) {
		if cap(.hist) == 0 {
			.hist = make([]byte, 0, allocHistory)
		} else {
			if cap(.hist) < maxMatchOffset*2 {
				panic("unexpected buffer size")
			}
			// Move down
			 := int32(len(.hist)) - maxMatchOffset
			// copy(e.hist[0:maxMatchOffset], e.hist[offset:])
			*(*[maxMatchOffset]byte)(.hist) = *(*[maxMatchOffset]byte)(.hist[:])
			.cur += 
			.hist = .hist[:maxMatchOffset]
		}
	}
	 := int32(len(.hist))
	.hist = append(.hist, ...)
	return 
}

// tableEntryPrev is a two-deep hash chain: the current entry plus the
// entry it displaced, giving the encoder a second match candidate.
type tableEntryPrev struct {
	Cur  tableEntry
	Prev tableEntry
}

// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7( uint64,  uint8) uint32 {
	return uint32((( << (64 - 56)) * prime7bytes) >> ((64 - ) & reg8SizeMask64))
}

// hashLen returns a hash of the lowest mls bytes of with length output bits.
// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
// length should always be < 32.
// Preferably length and mls should be a constant for inlining.
func hashLen( uint64, ,  uint8) uint32 {
	switch  {
	case 3:
		return (uint32(<<8) * prime3bytes) >> (32 - )
	case 5:
		return uint32((( << (64 - 40)) * prime5bytes) >> (64 - ))
	case 6:
		return uint32((( << (64 - 48)) * prime6bytes) >> (64 - ))
	case 7:
		return uint32((( << (64 - 56)) * prime7bytes) >> (64 - ))
	case 8:
		return uint32(( * prime8bytes) >> (64 - ))
	default:
		return (uint32() * prime4bytes) >> (32 - )
	}
}

// matchlen will return the match length between offsets and t in src.
// The maximum length returned is maxMatchLength - 4.
// It is assumed that s > t, that t >=0 and s < len(src).
func ( *fastGen) (,  int32,  []byte) int32 {
	if debugDecode {
		if  >=  {
			panic(fmt.Sprint("t >=s:", , ))
		}
		if int() >= len() {
			panic(fmt.Sprint("s >= len(src):", , len()))
		}
		if  < 0 {
			panic(fmt.Sprint("t < 0:", ))
		}
		if - > maxMatchOffset {
			panic(fmt.Sprint(, "-", , "(", -, ") > maxMatchLength (", maxMatchOffset, ")"))
		}
	}
	 := int() + maxMatchLength - 4
	if  > len() {
		 = len()
	}

	// Extend the match to be as long as possible.
	return int32(matchLen([:], [:]))
}

// matchlenLong will return the match length between offsets and t in src.
// It is assumed that s > t, that t >=0 and s < len(src).
func ( *fastGen) (,  int32,  []byte) int32 {
	if debugDeflate {
		if  >=  {
			panic(fmt.Sprint("t >=s:", , ))
		}
		if int() >= len() {
			panic(fmt.Sprint("s >= len(src):", , len()))
		}
		if  < 0 {
			panic(fmt.Sprint("t < 0:", ))
		}
		if - > maxMatchOffset {
			panic(fmt.Sprint(, "-", , "(", -, ") > maxMatchLength (", maxMatchOffset, ")"))
		}
	}
	// Extend the match to be as long as possible.
	return int32(matchLen([:], [:]))
}

// Reset the encoding table.
func ( *fastGen) () {
	if cap(.hist) < allocHistory {
		.hist = make([]byte, 0, allocHistory)
	}
	// We offset current position so everything will be out of reach.
	// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
	if .cur <= bufferReset {
		.cur += maxMatchOffset + int32(len(.hist))
	}
	.hist = .hist[:0]
}