package flate

import "fmt"

// fastEncL2 maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastEncL2 struct {
	fastGen
	table [bTableSize]tableEntry
}

// Encode uses a similar algorithm to level 1, but is capable of matching
// across blocks, giving better compression at the cost of a small slowdown.
func (e *fastEncL2) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		hashBytes              = 5
	)

	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		e.cur = maxMatchOffset
	}

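	// Append src to the history buffer; s is the offset where this block starts in e.hist.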
	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// cv holds the next 8 bytes to be hashed.
	cv := load6432(src, s)
	for {
		// When should we start skipping if we haven't found matches in a long while.
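		// The step between probes is doEvery plus (s-nextEmit)>>skipLog, so
		// skipping grows the longer we go without finding a match.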
		const skipLog = 5
		const doEvery = 2

		nextS := s
		var candidate tableEntry
		for {
			nextHash := hashLen(cv, bTableBits, hashBytes)
			s = nextS
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = e.table[nextHash]
			now := load6432(src, nextS)
			e.table[nextHash] = tableEntry{offset: s + e.cur}
			nextHash = hashLen(now, bTableBits, hashBytes)

			offset := s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}

			// Do one right away...
			cv = now
			s = nextS
			nextS++
			candidate = e.table[nextHash]
			now >>= 8
			e.table[nextHash] = tableEntry{offset: s + e.cur}

			offset = s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
				break
			}
			cv = now
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			t := candidate.offset - e.cur
			l := e.matchlenLong(s+4, t+4, src) + 4

			// Extend backwards
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
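			// Emit the unmatched bytes src[nextEmit:s] before the match as literals.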
			if nextEmit < s {
				if false {
					emitLiteral(dst, src[nextEmit:s])
				} else {
					for _, v := range src[nextEmit:s] {
						dst.tokens[dst.n] = token(v)
						dst.litHist[v]++
						dst.n++
					}
				}
			}

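			// Emit a match of length l at distance s-t.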
			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
			s += l
			nextEmit = s
			if nextS >= s {
				s = nextS + 1
			}

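			// Stop looking for matches once we get close to the end of the block.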
			if s >= sLimit {
				// Index first pair after match end.
				if int(s+l+8) < len(src) {
					cv := load6432(src, s)
					e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
				}
				goto emitRemainder
			}

			// Store every second hash in-between, but offset by 1.
			for i := s - l + 2; i < s-5; i += 7 {
				x := load6432(src, i)
				nextHash := hashLen(x, bTableBits, hashBytes)
				e.table[nextHash] = tableEntry{offset: e.cur + i}
				// Skip one
				x >>= 16
				nextHash = hashLen(x, bTableBits, hashBytes)
				e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
				// Skip one
				x >>= 16
				nextHash = hashLen(x, bTableBits, hashBytes)
				e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 to s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-2)
			o := e.cur + s - 2
			prevHash := hashLen(x, bTableBits, hashBytes)
			prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
			e.table[prevHash] = tableEntry{offset: o}
			e.table[prevHash2] = tableEntry{offset: o + 1}
			currHash := hashLen(x>>16, bTableBits, hashBytes)
			candidate = e.table[currHash]
			e.table[currHash] = tableEntry{offset: o + 2}

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
				cv = x >> 24
				s++
				break
			}
		}
	}

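// Emit any bytes remaining after the last match as literals.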
emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}

		emitLiteral(dst, src[nextEmit:])
	}
}