package flate

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// fastEncL1 maintains the table for matches,
// and the previous byte block so matches can reach across block boundaries.
// This is the generic implementation.
type fastEncL1 struct {
	fastGen
	table [tableSize]tableEntry
}
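
// Positions are stored in the table as (position in e.hist + e.cur), so
// entries written for earlier blocks remain distinguishable and can be
// rebased or discarded when e.cur is reset below.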

// Encode compresses a block using a Snappy-like algorithm (compression level 1).
func (e *fastEncL1) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		hashBytes              = 5
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)
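	// s is the offset at which this block starts inside e.hist; addBlock
	// has appended src to the history buffer kept by fastGen.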

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// cv is the next chunk of src to hash, loaded as a 64-bit little-endian word.
	cv := load6432(src, s)

	for {
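		// Scan forward looking for a 4-byte match. The step size,
		// doEvery + (s-nextEmit)>>skipLog, grows the longer nothing has
		// matched, so incompressible data is skipped quickly.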
		const skipLog = 5
		const doEvery = 2

		nextS := s
		var candidate tableEntry
		for {
			nextHash := hashLen(cv, tableBits, hashBytes)
			candidate = e.table[nextHash]
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}

			now := load6432(src, nextS)
			e.table[nextHash] = tableEntry{offset: s + e.cur}
			nextHash = hashLen(now, tableBits, hashBytes)

			offset := s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}

			// No match: do one more probe right away at nextS, reusing the
			// value and hash already computed.
			cv = now
			s = nextS
			nextS++
			candidate = e.table[nextHash]
			now >>= 8
			e.table[nextHash] = tableEntry{offset: s + e.cur}

			offset = s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}
			cv = now
			s = nextS
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			t := candidate.offset - e.cur
			var l = int32(4)
			if false {
				l = e.matchlenLong(s+4, t+4, src) + 4
			} else {
				// Inlined matchlenLong: compare 8 bytes at a time.
				a := src[s+4:]
				b := src[t+4:]
				for len(a) >= 8 {
					if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
						l += int32(bits.TrailingZeros64(diff) >> 3)
						break
					}
					l += 8
					a = a[8:]
					b = b[8:]
				}
				if len(a) < 8 {
					b = b[:len(a)]
					for i := range a {
						if a[i] != b[i] {
							break
						}
						l++
					}
				}
			}
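
			// l now holds the total forward match length, including the
			// initial 4 matching bytes.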

			// Extend backwards
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
			if nextEmit < s {
				if false {
					emitLiteral(dst, src[nextEmit:s])
				} else {
					for _, v := range src[nextEmit:s] {
						dst.tokens[dst.n] = token(v)
						dst.litHist[v]++
						dst.n++
					}
				}
			}

			// Save the match found
			if false {
				dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
			} else {
				// Inlined version of dst.AddMatchLong:
				xoffset := uint32(s - t - baseMatchOffset)
				xlength := l
				oc := offsetCode(xoffset)
				xoffset |= oc << 16
				for xlength > 0 {
					xl := xlength
					if xl > 258 {
						// Keep at least baseMatchLength for the next loop iteration.
						if xl > 258+baseMatchLength {
							xl = 258
						} else {
							xl = 258 - baseMatchLength
						}
					}
					xlength -= xl
					xl -= baseMatchLength
					dst.extraHist[lengthCodes1[uint8(xl)]]++
					dst.offHist[oc]++
					dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
					dst.n++
				}
			}
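
			// Skip ahead past the emitted match; if the scanning loop had
			// already advanced nextS beyond the match end, resume just
			// after nextS instead.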
			s += l
			nextEmit = s
			if nextS >= s {
				s = nextS + 1
			}
			if s >= sLimit {
				// Index first pair after match end.
				if int(s+l+8) < len(src) {
					cv := load6432(src, s)
					e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-2)
			o := e.cur + s - 2
			prevHash := hashLen(x, tableBits, hashBytes)
			e.table[prevHash] = tableEntry{offset: o}
			x >>= 16
			currHash := hashLen(x, tableBits, hashBytes)
			candidate = e.table[currHash]
			e.table[currHash] = tableEntry{offset: o + 2}

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
				cv = x >> 8
				s++
				break
			}
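			// Otherwise the entry just read from currHash is itself a
			// 4-byte match at s; loop and extend it without emitting
			// any literals in between.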
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}
		emitLiteral(dst, src[nextEmit:])
	}
}