// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package html

import (
	"bytes"
	"errors"
	"io"
	"strconv"
	"strings"

	"golang.org/x/net/html/atom"
)

// A TokenType is the type of a Token.
type TokenType uint32

const (
	// ErrorToken means that an error occurred during tokenization.
	ErrorToken TokenType = iota
	// TextToken means a text node.
	TextToken
	// A StartTagToken looks like <a>.
	StartTagToken
	// An EndTagToken looks like </a>.
	EndTagToken
	// A SelfClosingTagToken looks like <br/>.
	SelfClosingTagToken
	// A CommentToken looks like <!--x-->.
	CommentToken
	// A DoctypeToken looks like <!DOCTYPE x>.
	DoctypeToken
)

// ErrBufferExceeded means that the buffering limit was exceeded.
var ErrBufferExceeded = errors.New("max buffer exceeded")

// String returns a string representation of the TokenType.
func (t TokenType) String() string {
	switch t {
	case ErrorToken:
		return "Error"
	case TextToken:
		return "Text"
	case StartTagToken:
		return "StartTag"
	case EndTagToken:
		return "EndTag"
	case SelfClosingTagToken:
		return "SelfClosingTag"
	case CommentToken:
		return "Comment"
	case DoctypeToken:
		return "Doctype"
	}
	return "Invalid(" + strconv.Itoa(int()) + ")"
}

// An Attribute is an attribute namespace-key-value triple. Namespace is
// non-empty for foreign attributes like xlink, Key is alphabetic (and hence
// does not contain escapable characters like '&', '<' or '>'), and Val is
// unescaped (it looks like "a<b" rather than "a&lt;b").
//
// Namespace is only used by the parser, not the tokenizer.
type Attribute struct {
	Namespace, Key, Val string
}

// A Token consists of a TokenType and some Data (tag name for start and end
// tags, content for text, comments and doctypes). A tag Token may also contain
// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b"
// rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for Data, or
// zero if Data is not a known tag name.
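//
// For example (an illustrative sketch, not normative), tokenizing the input
// `<a HREF="a&lt;b">` and calling Tokenizer.Token would yield approximately:
//
//	Token{
//		Type:     StartTagToken,
//		DataAtom: atom.A,
//		Data:     "a",
//		Attr:     []Attribute{{Key: "href", Val: "a<b"}},
//	}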
type Token struct {
	Type     TokenType
	DataAtom atom.Atom
	Data     string
	Attr     []Attribute
}

// tagString returns a string representation of a tag Token's Data and Attr.
func (t Token) tagString() string {
	if len(t.Attr) == 0 {
		return t.Data
	}
	buf := bytes.NewBufferString(t.Data)
	for _, a := range t.Attr {
		buf.WriteByte(' ')
		buf.WriteString(a.Key)
		buf.WriteString(`="`)
		escape(buf, a.Val)
		buf.WriteByte('"')
	}
	return buf.String()
}

// String returns a string representation of the Token.
func (t Token) String() string {
	switch t.Type {
	case ErrorToken:
		return ""
	case TextToken:
		return EscapeString(t.Data)
	case StartTagToken:
		return "<" + t.tagString() + ">"
	case EndTagToken:
		return "</" + t.tagString() + ">"
	case SelfClosingTagToken:
		return "<" + t.tagString() + "/>"
	case CommentToken:
		return "<!--" + escapeCommentString(t.Data) + "-->"
	case DoctypeToken:
		return "<!DOCTYPE " + EscapeString(t.Data) + ">"
	}
	return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
}

// span is a range of bytes in a Tokenizer's buffer. The start is inclusive,
// the end is exclusive.
type span struct {
	start, end int
}

// A Tokenizer returns a stream of HTML Tokens.
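//
// A minimal usage sketch (r is assumed to be any io.Reader of UTF-8 HTML):
//
//	z := NewTokenizer(r)
//	for {
//		if z.Next() == ErrorToken {
//			// z.Err() is io.EOF at a clean end of input.
//			break
//		}
//		token := z.Token()
//		_ = token // ... process the token ...
//	}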
type Tokenizer struct {
	// r is the source of the HTML text.
	r io.Reader
	// tt is the TokenType of the current token.
	tt TokenType
	// err is the first error encountered during tokenization. It is possible
	// for tt != Error && err != nil to hold: this means that Next returned a
	// valid token but the subsequent Next call will return an error token.
	// For example, if the HTML text input was just "plain", then the first
	// Next call would set z.err to io.EOF but return a TextToken, and all
	// subsequent Next calls would return an ErrorToken.
	// err is never reset. Once it becomes non-nil, it stays non-nil.
	err error
	// readErr is the error returned by the io.Reader r. It is separate from
	// err because it is valid for an io.Reader to return (n int, err1 error)
	// such that n > 0 && err1 != nil, and callers should always process the
	// n > 0 bytes before considering the error err1.
	readErr error
	// buf[raw.start:raw.end] holds the raw bytes of the current token.
	// buf[raw.end:] is buffered input that will yield future tokens.
	raw span
	buf []byte
	// maxBuf limits the data buffered in buf. A value of 0 means unlimited.
	maxBuf int
	// buf[data.start:data.end] holds the raw bytes of the current token's data:
	// a text token's text, a tag token's tag name, etc.
	data span
	// pendingAttr is the attribute key and value currently being tokenized.
	// When complete, pendingAttr is pushed onto attr. nAttrReturned is
	// incremented on each call to TagAttr.
	pendingAttr   [2]span
	attr          [][2]span
	nAttrReturned int
	// rawTag is the "script" in "</script>" that closes the next token. If
	// non-empty, the subsequent call to Next will return a raw or RCDATA text
	// token: one that treats "<p>" as text instead of an element.
	// rawTag's contents are lower-cased.
	rawTag string
	// textIsRaw is whether the current text token's data is not escaped.
	textIsRaw bool
	// convertNUL is whether NUL bytes in the current token's data should
	// be converted into \ufffd replacement characters.
	convertNUL bool
	// allowCDATA is whether CDATA sections are allowed in the current context.
	allowCDATA bool
}

// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as
// the text "foo". The default value is false, which means to recognize it as
// a bogus comment "<!-- [CDATA[foo]] -->" instead.
//
// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and
// only if tokenizing foreign content, such as MathML and SVG. However,
// tracking foreign-contentness is difficult to do purely in the tokenizer,
// as opposed to the parser, due to HTML integration points: an <svg> element
// can contain a <foreignObject> that is foreign-to-SVG but not foreign-to-
// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the
// responsibility of the user of a tokenizer to call AllowCDATA as appropriate.
// In practice, if using the tokenizer without caring whether MathML or SVG
// CDATA is text or comments, such as tokenizing HTML to find all the anchor
// text, it is acceptable to ignore this responsibility.
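//
// A sketch of enabling CDATA before tokenizing a known-foreign fragment:
//
//	z := NewTokenizer(strings.NewReader("<svg><![CDATA[foo]]></svg>"))
//	z.AllowCDATA(true) // "<![CDATA[foo]]>" now yields a text token "foo"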
func (z *Tokenizer) AllowCDATA(allowCDATA bool) {
	z.allowCDATA = allowCDATA
}

// NextIsNotRawText instructs the tokenizer that the next token should not be
// considered as 'raw text'. Some elements, such as script and title elements,
// normally require the next token after the opening tag to be 'raw text' that
// has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>"
// yields a start tag token for "<title>", a text token for "a<b>c</b>d", and
// an end tag token for "</title>". There are no distinct start tag or end tag
// tokens for the "<b>" and "</b>".
//
// This tokenizer implementation will generally look for raw text at the right
// times. Strictly speaking, an HTML5 compliant tokenizer should not look for
// raw text if in foreign content: <title> generally needs raw text, but a
// <title> inside an <svg> does not. Another example is that a <textarea>
// generally needs raw text, but a <textarea> is not allowed as an immediate
// child of a <select>; in normal parsing, a <textarea> implies </select>, but
// one cannot close the implicit element when parsing a <select>'s InnerHTML.
// Similarly to AllowCDATA, tracking the correct moment to override raw-text-
// ness is difficult to do purely in the tokenizer, as opposed to the parser.
// For strict compliance with the HTML5 tokenization algorithm, it is the
// responsibility of the user of a tokenizer to call NextIsNotRawText as
// appropriate. In practice, like AllowCDATA, it is acceptable to ignore this
// responsibility for basic usage.
//
// Note that this 'raw text' concept is different from the one offered by the
// Tokenizer.Raw method.
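//
// A sketch (inSVG is an assumed parser-level flag, not part of this package):
//
//	if tt == StartTagToken && inSVG {
//		if name, _ := z.TagName(); string(name) == "title" {
//			z.NextIsNotRawText() // the <title>'s children tokenize as elements
//		}
//	}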
func (z *Tokenizer) NextIsNotRawText() {
	z.rawTag = ""
}

// Err returns the error associated with the most recent ErrorToken token.
// This is typically io.EOF, meaning the end of tokenization.
func (z *Tokenizer) Err() error {
	if z.tt != ErrorToken {
		return nil
	}
	return z.err
}

// readByte returns the next byte from the input stream, doing a buffered read
// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte
// slice that holds all the bytes read so far for the current token.
// It sets z.err if the underlying reader returns an error.
// Pre-condition: z.err == nil.
func (z *Tokenizer) readByte() byte {
	if z.raw.end >= len(z.buf) {
		// Our buffer is exhausted and we have to read from z.r. Check if the
		// previous read resulted in an error.
		if z.readErr != nil {
			z.err = z.readErr
			return 0
		}
		// We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length
		// z.raw.end - z.raw.start is more than half the capacity of z.buf, then we
		// allocate a new buffer before the copy.
		c := cap(z.buf)
		d := z.raw.end - z.raw.start
		var buf1 []byte
		if 2*d > c {
			buf1 = make([]byte, d, 2*c)
		} else {
			buf1 = z.buf[:d]
		}
		copy(buf1, z.buf[z.raw.start:z.raw.end])
		if x := z.raw.start; x != 0 {
			// Adjust the data/attr spans to refer to the same contents after the copy.
			z.data.start -= x
			z.data.end -= x
			z.pendingAttr[0].start -= x
			z.pendingAttr[0].end -= x
			z.pendingAttr[1].start -= x
			z.pendingAttr[1].end -= x
			for i := range z.attr {
				z.attr[i][0].start -= x
				z.attr[i][0].end -= x
				z.attr[i][1].start -= x
				z.attr[i][1].end -= x
			}
		}
		z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d]
		// Now that we have copied the live bytes to the start of the buffer,
		// we read from z.r into the remainder.
		var n int
		n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)])
		if n == 0 {
			z.err = z.readErr
			return 0
		}
		z.buf = buf1[:d+n]
	}
	x := z.buf[z.raw.end]
	z.raw.end++
	if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf {
		z.err = ErrBufferExceeded
		return 0
	}
	return x
}

// Buffered returns a slice containing data buffered but not yet tokenized.
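//
// A sketch of recovering unconsumed input, e.g. after ErrBufferExceeded:
//
//	if z.Next() == ErrorToken && z.Err() == ErrBufferExceeded {
//		rest := z.Buffered() // bytes read from r but not yet tokenized
//		_ = rest
//	}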
func (z *Tokenizer) Buffered() []byte {
	return z.buf[z.raw.end:]
}

// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil).
// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil)
// too many times in succession.
func readAtLeastOneByte(r io.Reader, b []byte) (int, error) {
	for i := 0; i < 100; i++ {
		if n, err := r.Read(b); n != 0 || err != nil {
			return n, err
		}
	}
	return 0, io.ErrNoProgress
}

// skipWhiteSpace skips past any white space.
func (z *Tokenizer) skipWhiteSpace() {
	if z.err != nil {
		return
	}
	for {
		c := z.readByte()
		if z.err != nil {
			return
		}
		switch c {
		case ' ', '\n', '\r', '\t', '\f':
			// No-op.
		default:
			z.raw.end--
			return
		}
	}
}

// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and
// is typically something like "script" or "textarea".
func (z *Tokenizer) readRawOrRCDATA() {
	if z.rawTag == "script" {
		z.readScript()
		z.textIsRaw = true
		z.rawTag = ""
		return
	}
loop:
	for {
		c := z.readByte()
		if z.err != nil {
			break loop
		}
		if c != '<' {
			continue loop
		}
		c = z.readByte()
		if z.err != nil {
			break loop
		}
		if c != '/' {
			z.raw.end--
			continue loop
		}
		if z.readRawEndTag() || z.err != nil {
			break loop
		}
	}
	z.data.end = z.raw.end
	// A textarea's or title's RCDATA can contain escaped entities.
	z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title"
	z.rawTag = ""
}

// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag.
// If it succeeds, it backs up the input position to reconsume the tag and
// returns true. Otherwise it returns false. The opening "</" has already been
// consumed.
func (z *Tokenizer) readRawEndTag() bool {
	for i := 0; i < len(z.rawTag); i++ {
		c := z.readByte()
		if z.err != nil {
			return false
		}
		if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') {
			z.raw.end--
			return false
		}
	}
	c := z.readByte()
	if z.err != nil {
		return false
	}
	switch c {
	case ' ', '\n', '\r', '\t', '\f', '/', '>':
		// The 3 is 2 for the leading "</" plus 1 for the trailing character c.
		z.raw.end -= 3 + len(z.rawTag)
		return true
	}
	z.raw.end--
	return false
}

// readScript reads until the next </script> tag, following the byzantine
// rules for escaping/hiding the closing tag.
func (z *Tokenizer) readScript() {
	defer func() {
		z.data.end = z.raw.end
	}()
	var c byte

scriptData:
	c = z.readByte()
	if z.err != nil {
		return
	}
	if c == '<' {
		goto scriptDataLessThanSign
	}
	goto scriptData

scriptDataLessThanSign:
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case '/':
		goto scriptDataEndTagOpen
	case '!':
		goto scriptDataEscapeStart
	}
	z.raw.end--
	goto scriptData

scriptDataEndTagOpen:
	if z.readRawEndTag() || z.err != nil {
		return
	}
	goto scriptData

scriptDataEscapeStart:
	c = z.readByte()
	if z.err != nil {
		return
	}
	if c == '-' {
		goto scriptDataEscapeStartDash
	}
	z.raw.end--
	goto scriptData

scriptDataEscapeStartDash:
	c = z.readByte()
	if z.err != nil {
		return
	}
	if c == '-' {
		goto scriptDataEscapedDashDash
	}
	z.raw.end--
	goto scriptData

scriptDataEscaped:
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case '-':
		goto scriptDataEscapedDash
	case '<':
		goto scriptDataEscapedLessThanSign
	}
	goto scriptDataEscaped

scriptDataEscapedDash:
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case '-':
		goto scriptDataEscapedDashDash
	case '<':
		goto scriptDataEscapedLessThanSign
	}
	goto scriptDataEscaped

scriptDataEscapedDashDash:
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case '-':
		goto scriptDataEscapedDashDash
	case '<':
		goto scriptDataEscapedLessThanSign
	case '>':
		goto scriptData
	}
	goto scriptDataEscaped

scriptDataEscapedLessThanSign:
	c = z.readByte()
	if z.err != nil {
		return
	}
	if c == '/' {
		goto scriptDataEscapedEndTagOpen
	}
	if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
		goto scriptDataDoubleEscapeStart
	}
	z.raw.end--
	goto scriptData

scriptDataEscapedEndTagOpen:
	if z.readRawEndTag() || z.err != nil {
		return
	}
	goto scriptDataEscaped

scriptDataDoubleEscapeStart:
	z.raw.end--
	for i := 0; i < len("script"); i++ {
		c = z.readByte()
		if z.err != nil {
			return
		}
		if c != "script"[i] && c != "SCRIPT"[i] {
			z.raw.end--
			goto scriptDataEscaped
		}
	}
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case ' ', '\n', '\r', '\t', '\f', '/', '>':
		goto scriptDataDoubleEscaped
	}
	z.raw.end--
	goto scriptDataEscaped

scriptDataDoubleEscaped:
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case '-':
		goto scriptDataDoubleEscapedDash
	case '<':
		goto scriptDataDoubleEscapedLessThanSign
	}
	goto scriptDataDoubleEscaped

scriptDataDoubleEscapedDash:
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case '-':
		goto scriptDataDoubleEscapedDashDash
	case '<':
		goto scriptDataDoubleEscapedLessThanSign
	}
	goto scriptDataDoubleEscaped

scriptDataDoubleEscapedDashDash:
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case '-':
		goto scriptDataDoubleEscapedDashDash
	case '<':
		goto scriptDataDoubleEscapedLessThanSign
	case '>':
		goto scriptData
	}
	goto scriptDataDoubleEscaped

scriptDataDoubleEscapedLessThanSign:
	c = z.readByte()
	if z.err != nil {
		return
	}
	if c == '/' {
		goto scriptDataDoubleEscapeEnd
	}
	z.raw.end--
	goto scriptDataDoubleEscaped

scriptDataDoubleEscapeEnd:
	if z.readRawEndTag() {
		z.raw.end += len("</script>")
		goto scriptData
	}
	if z.err != nil {
		return
	}
	goto scriptDataDoubleEscaped
}

// readComment reads the next comment token starting with "<!--". The opening
// "<!--" has already been consumed.
func (z *Tokenizer) readComment() {
	// When modifying this function, consider manually increasing the
	// maxSuffixLen constant in func TestComments, from 6 to e.g. 9 or more.
	// That increase should only be temporary, not committed, as it
	// exponentially affects the test running time.

	z.data.start = z.raw.end
	defer func() {
		if z.data.end < z.data.start {
			// It's a comment with no data, like <!-->.
			z.data.end = z.data.start
		}
	}()

	var dashCount int
	beginning := true
	for {
		c := z.readByte()
		if z.err != nil {
			z.data.end = z.calculateAbruptCommentDataEnd()
			return
		}
		switch c {
		case '-':
			dashCount++
			continue
		case '>':
			if dashCount >= 2 || beginning {
				z.data.end = z.raw.end - len("-->")
				return
			}
		case '!':
			if dashCount >= 2 {
				c = z.readByte()
				if z.err != nil {
					z.data.end = z.calculateAbruptCommentDataEnd()
					return
				} else if c == '>' {
					z.data.end = z.raw.end - len("--!>")
					return
				} else if c == '-' {
					dashCount = 1
					beginning = false
					continue
				}
			}
		}
		dashCount = 0
		beginning = false
	}
}

func (z *Tokenizer) calculateAbruptCommentDataEnd() int {
	raw := z.Raw()
	const prefixLen = len("<!--")
	if len(raw) >= prefixLen {
		raw = raw[prefixLen:]
		if hasSuffix(raw, "--!") {
			return z.raw.end - 3
		} else if hasSuffix(raw, "--") {
			return z.raw.end - 2
		} else if hasSuffix(raw, "-") {
			return z.raw.end - 1
		}
	}
	return z.raw.end
}

func hasSuffix(b []byte, suffix string) bool {
	if len(b) < len(suffix) {
		return false
	}
	b = b[len(b)-len(suffix):]
	for i := range b {
		if b[i] != suffix[i] {
			return false
		}
	}
	return true
}

// readUntilCloseAngle reads until the next ">".
func (z *Tokenizer) readUntilCloseAngle() {
	z.data.start = z.raw.end
	for {
		c := z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return
		}
		if c == '>' {
			z.data.end = z.raw.end - len(">")
			return
		}
	}
}

// readMarkupDeclaration reads the next token starting with "<!". It might be
// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or
// "<!a bogus comment". The opening "<!" has already been consumed.
func (z *Tokenizer) readMarkupDeclaration() TokenType {
	z.data.start = z.raw.end
	var c [2]byte
	for i := 0; i < 2; i++ {
		c[i] = z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return CommentToken
		}
	}
	if c[0] == '-' && c[1] == '-' {
		z.readComment()
		return CommentToken
	}
	z.raw.end -= 2
	if z.readDoctype() {
		return DoctypeToken
	}
	if z.allowCDATA && z.readCDATA() {
		z.convertNUL = true
		return TextToken
	}
	// It's a bogus comment.
	z.readUntilCloseAngle()
	return CommentToken
}

// readDoctype attempts to read a doctype declaration and returns true if
// successful. The opening "<!" has already been consumed.
func (z *Tokenizer) readDoctype() bool {
	const s = "DOCTYPE"
	for i := 0; i < len(s); i++ {
		c := z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return false
		}
		if c != s[i] && c != s[i]+('a'-'A') {
			// Back up to read the fragment of "DOCTYPE" again.
			z.raw.end = z.data.start
			return false
		}
	}
	if z.skipWhiteSpace(); z.err != nil {
		z.data.start = z.raw.end
		z.data.end = z.raw.end
		return true
	}
	z.readUntilCloseAngle()
	return true
}

// readCDATA attempts to read a CDATA section and returns true if
// successful. The opening "<!" has already been consumed.
func (z *Tokenizer) readCDATA() bool {
	const s = "[CDATA["
	for i := 0; i < len(s); i++ {
		c := z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return false
		}
		if c != s[i] {
			// Back up to read the fragment of "[CDATA[" again.
			z.raw.end = z.data.start
			return false
		}
	}
	z.data.start = z.raw.end
	brackets := 0
	for {
		c := z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return true
		}
		switch c {
		case ']':
			brackets++
		case '>':
			if brackets >= 2 {
				z.data.end = z.raw.end - len("]]>")
				return true
			}
			brackets = 0
		default:
			brackets = 0
		}
	}
}

// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end]
// case-insensitively matches any element of ss.
func (z *Tokenizer) startTagIn(ss ...string) bool {
loop:
	for _, s := range ss {
		if z.data.end-z.data.start != len(s) {
			continue loop
		}
		for i := 0; i < len(s); i++ {
			c := z.buf[z.data.start+i]
			if 'A' <= c && c <= 'Z' {
				c += 'a' - 'A'
			}
			if c != s[i] {
				continue loop
			}
		}
		return true
	}
	return false
}

// readStartTag reads the next start tag token. The opening "<a" has already
// been consumed, where 'a' means anything in [A-Za-z].
func (z *Tokenizer) readStartTag() TokenType {
	z.readTag(true)
	if z.err != nil {
		return ErrorToken
	}
	// Several tags flag the tokenizer's next token as raw.
	c, raw := z.buf[z.data.start], false
	if 'A' <= c && c <= 'Z' {
		c += 'a' - 'A'
	}
	switch c {
	case 'i':
		raw = z.startTagIn("iframe")
	case 'n':
		raw = z.startTagIn("noembed", "noframes", "noscript")
	case 'p':
		raw = z.startTagIn("plaintext")
	case 's':
		raw = z.startTagIn("script", "style")
	case 't':
		raw = z.startTagIn("textarea", "title")
	case 'x':
		raw = z.startTagIn("xmp")
	}
	if raw {
		z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end]))
	}
	// Look for a self-closing token like "<br/>".
	if z.err == nil && z.buf[z.raw.end-2] == '/' {
		return SelfClosingTagToken
	}
	return StartTagToken
}

// readTag reads the next tag token and its attributes. If saveAttr, those
// attributes are saved in z.attr, otherwise z.attr is set to an empty slice.
// The opening "<a" or "</a" has already been consumed, where 'a' means anything
// in [A-Za-z].
func (z *Tokenizer) readTag(saveAttr bool) {
	z.attr = z.attr[:0]
	z.nAttrReturned = 0
	// Read the tag name and attribute key/value pairs.
	z.readTagName()
	if z.skipWhiteSpace(); z.err != nil {
		return
	}
	for {
		c := z.readByte()
		if z.err != nil || c == '>' {
			break
		}
		z.raw.end--
		z.readTagAttrKey()
		z.readTagAttrVal()
		// Save pendingAttr if saveAttr and that attribute has a non-empty key.
		if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end {
			z.attr = append(z.attr, z.pendingAttr)
		}
		if z.skipWhiteSpace(); z.err != nil {
			break
		}
	}
}

// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end)
// is positioned such that the first byte of the tag name (the "d" in "<div")
// has already been consumed.
func (z *Tokenizer) readTagName() {
	z.data.start = z.raw.end - 1
	for {
		c := z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return
		}
		switch c {
		case ' ', '\n', '\r', '\t', '\f':
			z.data.end = z.raw.end - 1
			return
		case '/', '>':
			z.raw.end--
			z.data.end = z.raw.end
			return
		}
	}
}

// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>".
// Precondition: z.err == nil.
func (z *Tokenizer) readTagAttrKey() {
	z.pendingAttr[0].start = z.raw.end
	for {
		c := z.readByte()
		if z.err != nil {
			z.pendingAttr[0].end = z.raw.end
			return
		}
		switch c {
		case ' ', '\n', '\r', '\t', '\f', '/':
			z.pendingAttr[0].end = z.raw.end - 1
			return
		case '=':
			if z.pendingAttr[0].start+1 == z.raw.end {
				// WHATWG 13.2.5.32, if we see an equals sign before the attribute name
				// begins, we treat it as a character in the attribute name and continue.
				continue
			}
			fallthrough
		case '>':
			z.raw.end--
			z.pendingAttr[0].end = z.raw.end
			return
		}
	}
}

// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>".
func (z *Tokenizer) readTagAttrVal() {
	z.pendingAttr[1].start = z.raw.end
	z.pendingAttr[1].end = z.raw.end
	if z.skipWhiteSpace(); z.err != nil {
		return
	}
	c := z.readByte()
	if z.err != nil {
		return
	}
	if c != '=' {
		z.raw.end--
		return
	}
	if z.skipWhiteSpace(); z.err != nil {
		return
	}
	quote := z.readByte()
	if z.err != nil {
		return
	}
	switch quote {
	case '>':
		z.raw.end--
		return

	case '\'', '"':
		z.pendingAttr[1].start = z.raw.end
		for {
			c := z.readByte()
			if z.err != nil {
				z.pendingAttr[1].end = z.raw.end
				return
			}
			if c == quote {
				z.pendingAttr[1].end = z.raw.end - 1
				return
			}
		}

	default:
		z.pendingAttr[1].start = z.raw.end - 1
		for {
			c := z.readByte()
			if z.err != nil {
				z.pendingAttr[1].end = z.raw.end
				return
			}
			switch c {
			case ' ', '\n', '\r', '\t', '\f':
				z.pendingAttr[1].end = z.raw.end - 1
				return
			case '>':
				z.raw.end--
				z.pendingAttr[1].end = z.raw.end
				return
			}
		}
	}
}

// Next scans the next token and returns its type.
func (z *Tokenizer) Next() TokenType {
	z.raw.start = z.raw.end
	z.data.start = z.raw.end
	z.data.end = z.raw.end
	if z.err != nil {
		z.tt = ErrorToken
		return z.tt
	}
	if z.rawTag != "" {
		if z.rawTag == "plaintext" {
			// Read everything up to EOF.
			for z.err == nil {
				z.readByte()
			}
			z.data.end = z.raw.end
			z.textIsRaw = true
		} else {
			z.readRawOrRCDATA()
		}
		if z.data.end > z.data.start {
			z.tt = TextToken
			z.convertNUL = true
			return z.tt
		}
	}
	z.textIsRaw = false
	z.convertNUL = false

loop:
	for {
		c := z.readByte()
		if z.err != nil {
			break loop
		}
		if c != '<' {
			continue loop
		}

		// Check if the '<' we have just read is part of a tag, comment
		// or doctype. If not, it's part of the accumulated text token.
		c = z.readByte()
		if z.err != nil {
			break loop
		}
		var tokenType TokenType
		switch {
		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
			tokenType = StartTagToken
		case c == '/':
			tokenType = EndTagToken
		case c == '!' || c == '?':
			// We use CommentToken to mean any of "<!--actual comments-->",
			// "<!DOCTYPE declarations>" and "<?xml processing instructions?>".
			tokenType = CommentToken
		default:
			// Reconsume the current character.
			z.raw.end--
			continue
		}

		// We have a non-text token, but we might have accumulated some text
		// before that. If so, we return the text first, and return the non-
		// text token on the subsequent call to Next.
		if x := z.raw.end - len("<a"); z.raw.start < x {
			z.raw.end = x
			z.data.end = x
			z.tt = TextToken
			return z.tt
		}
		switch tokenType {
		case StartTagToken:
			z.tt = z.readStartTag()
			return z.tt
		case EndTagToken:
			c = z.readByte()
			if z.err != nil {
				break loop
			}
			if c == '>' {
				// "</>" does not generate a token at all. Generate an empty comment
				// to allow passthrough clients to pick up the data using Raw.
				// Reset the tokenizer state and start again.
				z.tt = CommentToken
				return z.tt
			}
			if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
				z.readTag(false)
				if z.err != nil {
					z.tt = ErrorToken
				} else {
					z.tt = EndTagToken
				}
				return z.tt
			}
			z.raw.end--
			z.readUntilCloseAngle()
			z.tt = CommentToken
			return z.tt
		case CommentToken:
			if c == '!' {
				z.tt = z.readMarkupDeclaration()
				return z.tt
			}
			z.raw.end--
			z.readUntilCloseAngle()
			z.tt = CommentToken
			return z.tt
		}
	}
	if z.raw.start < z.raw.end {
		z.data.end = z.raw.end
		z.tt = TextToken
		return z.tt
	}
	z.tt = ErrorToken
	return z.tt
}

// Raw returns the unmodified text of the current token. Calling Next, Token,
// Text, TagName or TagAttr may change the contents of the returned slice.
//
// The token stream's raw bytes partition the byte stream (up until an
// ErrorToken). There are no overlaps or gaps between two consecutive tokens'
// raw bytes. One implication is that the byte offset of the current token is
// the sum of the lengths of all previous tokens' raw bytes.
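//
// A sketch of tracking byte offsets using that property:
//
//	offset := 0
//	for z.Next() != ErrorToken {
//		// The current token starts at byte offset `offset` of the input.
//		offset += len(z.Raw())
//	}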
func (z *Tokenizer) Raw() []byte {
	return z.buf[z.raw.start:z.raw.end]
}

// convertNewlines converts "\r" and "\r\n" in s to "\n".
// The conversion happens in place, but the resulting slice may be shorter.
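// For example, convertNewlines([]byte("a\r\nb\rc")) returns []byte("a\nb\nc").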
func convertNewlines(s []byte) []byte {
	for i, c := range s {
		if c != '\r' {
			continue
		}

		src := i + 1
		if src >= len(s) || s[src] != '\n' {
			s[i] = '\n'
			continue
		}

		dst := i
		for src < len(s) {
			if s[src] == '\r' {
				if src+1 < len(s) && s[src+1] == '\n' {
					src++
				}
				s[dst] = '\n'
			} else {
				s[dst] = s[src]
			}
			src++
			dst++
		}
		return s[:dst]
	}
	return s
}

var (
	nul         = []byte("\x00")
	replacement = []byte("\ufffd")
)

// Text returns the unescaped text of a text, comment or doctype token. The
// contents of the returned slice may change on the next call to Next.
func (z *Tokenizer) Text() []byte {
	switch z.tt {
	case TextToken, CommentToken, DoctypeToken:
		s := z.buf[z.data.start:z.data.end]
		z.data.start = z.raw.end
		z.data.end = z.raw.end
		s = convertNewlines(s)
		if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) {
			s = bytes.Replace(s, nul, replacement, -1)
		}
		if !z.textIsRaw {
			s = unescape(s, false)
		}
		return s
	}
	return nil
}

// TagName returns the lower-cased name of a tag token (the `img` out of
// `<IMG SRC="foo">`) and whether the tag has attributes.
// The contents of the returned slice may change on the next call to Next.
func (z *Tokenizer) TagName() (name []byte, hasAttr bool) {
	if z.data.start < z.data.end {
		switch z.tt {
		case StartTagToken, EndTagToken, SelfClosingTagToken:
			s := z.buf[z.data.start:z.data.end]
			z.data.start = z.raw.end
			z.data.end = z.raw.end
			return lower(s), z.nAttrReturned < len(z.attr)
		}
	}
	return nil, false
}

// TagAttr returns the lower-cased key and unescaped value of the next unparsed
// attribute for the current tag token and whether there are more attributes.
// The contents of the returned slices may change on the next call to Next.
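//
// A sketch of draining a start tag's attributes with the lower-level API:
//
//	if tt == StartTagToken {
//		name, hasAttr := z.TagName()
//		_ = name
//		for hasAttr {
//			var key, val []byte
//			key, val, hasAttr = z.TagAttr()
//			// ... use key and val ...
//		}
//	}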
func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {
	if z.nAttrReturned < len(z.attr) {
		switch z.tt {
		case StartTagToken, SelfClosingTagToken:
			x := z.attr[z.nAttrReturned]
			z.nAttrReturned++
			key = z.buf[x[0].start:x[0].end]
			val = z.buf[x[1].start:x[1].end]
			return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr)
		}
	}
	return nil, nil, false
}

// Token returns the current Token. The result's Data and Attr values remain
// valid after subsequent Next calls.
func (z *Tokenizer) Token() Token {
	t := Token{Type: z.tt}
	switch z.tt {
	case TextToken, CommentToken, DoctypeToken:
		t.Data = string(z.Text())
	case StartTagToken, SelfClosingTagToken, EndTagToken:
		name, moreAttr := z.TagName()
		for moreAttr {
			var key, val []byte
			key, val, moreAttr = z.TagAttr()
			t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)})
		}
		if a := atom.Lookup(name); a != 0 {
			t.DataAtom, t.Data = a, a.String()
		} else {
			t.DataAtom, t.Data = 0, string(name)
		}
	}
	return t
}

// SetMaxBuf sets a limit on the amount of data buffered during tokenization.
// A value of 0 means unlimited.
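//
// A sketch of bounding memory when tokenizing untrusted input:
//
//	z := NewTokenizer(r)
//	z.SetMaxBuf(1 << 20) // Next reports ErrBufferExceeded via Err() if exceeded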
func (z *Tokenizer) SetMaxBuf(n int) {
	z.maxBuf = n
}

// NewTokenizer returns a new HTML Tokenizer for the given Reader.
// The input is assumed to be UTF-8 encoded.
func NewTokenizer(r io.Reader) *Tokenizer {
	return NewTokenizerFragment(r, "")
}

// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for
// tokenizing an existing element's InnerHTML fragment. contextTag is that
// element's tag, such as "div" or "iframe".
//
// For example, how the InnerHTML "a<b" is tokenized depends on whether it is
// for a <p> tag or a <script> tag.
//
// The input is assumed to be UTF-8 encoded.
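//
// A sketch of the difference:
//
//	z := NewTokenizerFragment(strings.NewReader("a<b"), "script")
//	z.Next() // one raw TextToken covering all of "a<b"
//
//	z = NewTokenizerFragment(strings.NewReader("a<b"), "p")
//	z.Next() // TextToken "a"; "<b" is treated as the start of a tag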
func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer {
	z := &Tokenizer{
		r:   r,
		buf: make([]byte, 0, 4096),
	}
	if contextTag != "" {
		switch s := strings.ToLower(contextTag); s {
		case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp":
			z.rawTag = s
		}
	}
	return z
}