author     ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4>  2011-10-26 23:57:58 +0000
committer  ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4>  2011-10-26 23:57:58 +0000
commit     fa5d125b5cfa5c935e46d27a2cbcd71ae37687ac (patch)
tree       19d182df05ead7ff8ba7ee00a7d57555e1383fdf /libgo/go/html/token.go
parent     e3d46e67996cf20ca3a75fccbb5a0007bfa3f992 (diff)
download   gcc-fa5d125b5cfa5c935e46d27a2cbcd71ae37687ac.tar.gz
Update the Go library to the last weekly release.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@180552 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libgo/go/html/token.go')
-rw-r--r--  libgo/go/html/token.go  707
1 file changed, 436 insertions(+), 271 deletions(-)
diff --git a/libgo/go/html/token.go b/libgo/go/html/token.go
index d266b3a300b..2826f95f17f 100644
--- a/libgo/go/html/token.go
+++ b/libgo/go/html/token.go
@@ -9,6 +9,7 @@ import (
 	"io"
 	"os"
 	"strconv"
+	"strings"
 )
 
 // A TokenType is the type of a Token.
@@ -100,13 +101,19 @@ func (t Token) String() string {
 	case SelfClosingTagToken:
 		return "<" + t.tagString() + "/>"
 	case CommentToken:
-		return "<!--" + EscapeString(t.Data) + "-->"
+		return "<!--" + t.Data + "-->"
 	case DoctypeToken:
-		return "<!DOCTYPE " + EscapeString(t.Data) + ">"
+		return "<!DOCTYPE " + t.Data + ">"
 	}
 	return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
 }
 
+// span is a range of bytes in a Tokenizer's buffer. The start is inclusive,
+// the end is exclusive.
+type span struct {
+	start, end int
+}
+
 // A Tokenizer returns a stream of HTML Tokens.
 type Tokenizer struct {
 	// If ReturnComments is set, Next returns comment tokens;
@@ -115,7 +122,7 @@ type Tokenizer struct {
 	// r is the source of the HTML text.
 	r io.Reader
-	// tt is the TokenType of the most recently read token.
+	// tt is the TokenType of the current token.
 	tt TokenType
 	// err is the first error encountered during tokenization. It is possible
 	// for tt != Error && err != nil to hold: this means that Next returned a
@@ -125,10 +132,26 @@ type Tokenizer struct {
 	// subsequent Next calls would return an ErrorToken.
 	// err is never reset. Once it becomes non-nil, it stays non-nil.
 	err os.Error
-	// buf[p0:p1] holds the raw data of the most recent token.
-	// buf[p1:] is buffered input that will yield future tokens.
-	p0, p1 int
-	buf []byte
+	// buf[raw.start:raw.end] holds the raw bytes of the current token.
+	// buf[raw.end:] is buffered input that will yield future tokens.
+	raw span
+	buf []byte
+	// buf[data.start:data.end] holds the raw bytes of the current token's data:
+	// a text token's text, a tag token's tag name, etc.
+	data span
+	// pendingAttr is the attribute key and value currently being tokenized.
+	// When complete, pendingAttr is pushed onto attr. nAttrReturned is
+	// incremented on each call to TagAttr.
+	pendingAttr [2]span
+	attr [][2]span
+	nAttrReturned int
+	// rawTag is the "script" in "</script>" that closes the next token. If
+	// non-empty, the subsequent call to Next will return a raw or RCDATA text
+	// token: one that treats "<p>" as text instead of an element.
+	// rawTag's contents are lower-cased.
+	rawTag string
+	// textIsRaw is whether the current text token's data is not escaped.
+	textIsRaw bool
 }
 
 // Error returns the error associated with the most recent ErrorToken token.
@@ -140,33 +163,42 @@ func (z *Tokenizer) Error() os.Error {
 	return z.err
 }
 
-// Raw returns the unmodified text of the current token. Calling Next, Token,
-// Text, TagName or TagAttr may change the contents of the returned slice.
-func (z *Tokenizer) Raw() []byte {
-	return z.buf[z.p0:z.p1]
-}
-
 // readByte returns the next byte from the input stream, doing a buffered read
-// from z.r into z.buf if necessary. z.buf[z.p0:z.p1] remains a contiguous byte
+// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte
 // slice that holds all the bytes read so far for the current token.
 // It sets z.err if the underlying reader returns an error.
 // Pre-condition: z.err == nil.
 func (z *Tokenizer) readByte() byte {
-	if z.p1 >= len(z.buf) {
+	if z.raw.end >= len(z.buf) {
 		// Our buffer is exhausted and we have to read from z.r.
-		// We copy z.buf[z.p0:z.p1] to the beginning of z.buf. If the length
-		// z.p1 - z.p0 is more than half the capacity of z.buf, then we
+		// We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length
+		// z.raw.end - z.raw.start is more than half the capacity of z.buf, then we
 		// allocate a new buffer before the copy.
 		c := cap(z.buf)
-		d := z.p1 - z.p0
+		d := z.raw.end - z.raw.start
 		var buf1 []byte
 		if 2*d > c {
 			buf1 = make([]byte, d, 2*c)
 		} else {
 			buf1 = z.buf[:d]
 		}
-		copy(buf1, z.buf[z.p0:z.p1])
-		z.p0, z.p1, z.buf = 0, d, buf1[:d]
+		copy(buf1, z.buf[z.raw.start:z.raw.end])
+		if x := z.raw.start; x != 0 {
+			// Adjust the data/attr spans to refer to the same contents after the copy.
+			z.data.start -= x
+			z.data.end -= x
+			z.pendingAttr[0].start -= x
+			z.pendingAttr[0].end -= x
+			z.pendingAttr[1].start -= x
+			z.pendingAttr[1].end -= x
+			for i := range z.attr {
+				z.attr[i][0].start -= x
+				z.attr[i][0].end -= x
+				z.attr[i][1].start -= x
+				z.attr[i][1].end -= x
+			}
+		}
+		z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d]
 		// Now that we have copied the live bytes to the start of the buffer,
 		// we read from z.r into the remainder.
 		n, err := z.r.Read(buf1[d:cap(buf1)])
@@ -176,297 +208,467 @@ func (z *Tokenizer) readByte() byte {
 		}
 		z.buf = buf1[:d+n]
 	}
-	x := z.buf[z.p1]
-	z.p1++
+	x := z.buf[z.raw.end]
+	z.raw.end++
 	return x
 }
 
-// readTo keeps reading bytes until x is found or a read error occurs. If an
-// error does occur, z.err is set to that error.
-// Pre-condition: z.err == nil.
-func (z *Tokenizer) readTo(x uint8) {
+// skipWhiteSpace skips past any white space.
+func (z *Tokenizer) skipWhiteSpace() {
+	if z.err != nil {
+		return
+	}
 	for {
 		c := z.readByte()
 		if z.err != nil {
 			return
 		}
 		switch c {
-		case x:
+		case ' ', '\n', '\r', '\t', '\f':
+			// No-op.
+		default:
+			z.raw.end--
 			return
-		case '\\':
-			z.readByte()
+		}
+	}
+}
+
+// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and
+// is typically something like "script" or "textarea".
+func (z *Tokenizer) readRawOrRCDATA() {
+loop:
+	for {
+		c := z.readByte()
+		if z.err != nil {
+			break loop
+		}
+		if c != '<' {
+			continue loop
+		}
+		c = z.readByte()
+		if z.err != nil {
+			break loop
+		}
+		if c != '/' {
+			continue loop
+		}
+		for i := 0; i < len(z.rawTag); i++ {
+			c = z.readByte()
 			if z.err != nil {
-				return
+				break loop
+			}
+			if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') {
+				continue loop
 			}
 		}
+		c = z.readByte()
+		if z.err != nil {
+			break loop
+		}
+		switch c {
+		case ' ', '\n', '\r', '\t', '\f', '/', '>':
+			// The 3 is 2 for the leading "</" plus 1 for the trailing character c.
+			z.raw.end -= 3 + len(z.rawTag)
+			break loop
+		case '<':
+			// Step back one, to catch "</foo</foo>".
+			z.raw.end--
+		}
 	}
+	z.data.end = z.raw.end
+	// A textarea's or title's RCDATA can contain escaped entities.
+	z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title"
+	z.rawTag = ""
 }
 
-// nextComment reads the next token starting with "<!--".
-// The opening "<!--" has already been consumed.
-// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 4 <= z.p1.
-func (z *Tokenizer) nextComment() {
-	// <!--> is a valid comment.
+// readComment reads the next comment token starting with "<!--". The opening
+// "<!--" has already been consumed.
+func (z *Tokenizer) readComment() {
+	z.data.start = z.raw.end
+	defer func() {
+		if z.data.end < z.data.start {
+			// It's a comment with no data, like <!-->.
+			z.data.end = z.data.start
+		}
+	}()
 	for dashCount := 2; ; {
 		c := z.readByte()
 		if z.err != nil {
+			z.data.end = z.raw.end
 			return
 		}
 		switch c {
 		case '-':
			dashCount++
+			continue
 		case '>':
 			if dashCount >= 2 {
-				z.tt = CommentToken
+				z.data.end = z.raw.end - len("-->")
 				return
 			}
-			dashCount = 0
-		default:
-			dashCount = 0
+		case '!':
+			if dashCount >= 2 {
+				c = z.readByte()
+				if z.err != nil {
+					z.data.end = z.raw.end
+					return
+				}
+				if c == '>' {
+					z.data.end = z.raw.end - len("--!>")
+					return
+				}
+			}
 		}
+		dashCount = 0
 	}
 }
 
-// nextMarkupDeclaration reads the next token starting with "<!".
-// It might be a "<!--comment-->", a "<!DOCTYPE foo>", or "<!malformed text".
-// The opening "<!" has already been consumed.
-// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 2 <= z.p1.
-func (z *Tokenizer) nextMarkupDeclaration() {
+// readUntilCloseAngle reads until the next ">".
+func (z *Tokenizer) readUntilCloseAngle() {
+	z.data.start = z.raw.end
+	for {
+		c := z.readByte()
+		if z.err != nil {
+			z.data.end = z.raw.end
+			return
+		}
+		if c == '>' {
+			z.data.end = z.raw.end - len(">")
+			return
+		}
+	}
+}
+
+// readMarkupDeclaration reads the next token starting with "<!". It might be
+// a "<!--comment-->", a "<!DOCTYPE foo>", or "<!a bogus comment". The opening
+// "<!" has already been consumed.
+func (z *Tokenizer) readMarkupDeclaration() TokenType {
+	z.data.start = z.raw.end
 	var c [2]byte
 	for i := 0; i < 2; i++ {
 		c[i] = z.readByte()
 		if z.err != nil {
-			return
+			z.data.end = z.raw.end
+			return CommentToken
 		}
 	}
 	if c[0] == '-' && c[1] == '-' {
-		z.nextComment()
-		return
+		z.readComment()
+		return CommentToken
 	}
-	z.p1 -= 2
-	const s = "DOCTYPE "
-	for i := 0; ; i++ {
+	z.raw.end -= 2
+	const s = "DOCTYPE"
+	for i := 0; i < len(s); i++ {
 		c := z.readByte()
 		if z.err != nil {
-			return
+			z.data.end = z.raw.end
+			return CommentToken
 		}
-		// Capitalize c.
-		if 'a' <= c && c <= 'z' {
-			c = 'A' + (c - 'a')
-		}
-		if i < len(s) && c != s[i] {
-			z.nextText()
-			return
-		}
-		if c == '>' {
-			if i >= len(s) {
-				z.tt = DoctypeToken
-			}
-			return
+		if c != s[i] && c != s[i]+('a'-'A') {
+			// Back up to read the fragment of "DOCTYPE" again.
+			z.raw.end = z.data.start
+			z.readUntilCloseAngle()
+			return CommentToken
 		}
 	}
+	if z.skipWhiteSpace(); z.err != nil {
+		z.data.start = z.raw.end
+		z.data.end = z.raw.end
+		return DoctypeToken
+	}
+	z.readUntilCloseAngle()
+	return DoctypeToken
 }
 
-// nextTag reads the next token starting with "<". It might be a "<startTag>",
-// an "</endTag>", a "<!markup declaration>", or "<malformed text".
-// The opening "<" has already been consumed.
-// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 1 <= z.p1.
-func (z *Tokenizer) nextTag() {
-	c := z.readByte()
-	if z.err != nil {
-		return
-	}
-	switch {
-	case c == '/':
-		z.tt = EndTagToken
-	// Lower-cased characters are more common in tag names, so we check for them first.
-	case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
-		z.tt = StartTagToken
-	case c == '!':
-		z.nextMarkupDeclaration()
-		return
-	case c == '?':
-		z.tt, z.err = ErrorToken, os.NewError("html: TODO: implement XML processing instructions")
-		return
-	default:
-		z.tt, z.err = ErrorToken, os.NewError("html: TODO: handle malformed tags")
-		return
+// readStartTag reads the next start tag token. The opening "<a" has already
+// been consumed, where 'a' means anything in [A-Za-z].
+func (z *Tokenizer) readStartTag() TokenType {
+	z.attr = z.attr[:0]
+	z.nAttrReturned = 0
+	// Read the tag name and attribute key/value pairs.
+	z.readTagName()
+	if z.skipWhiteSpace(); z.err != nil {
+		return ErrorToken
 	}
 	for {
 		c := z.readByte()
-		if z.err != nil {
-			return
+		if z.err != nil || c == '>' {
+			break
 		}
-		switch c {
-		case '"', '\'':
-			z.readTo(c)
-			if z.err != nil {
-				return
-			}
-		case '>':
-			if z.buf[z.p1-2] == '/' && z.tt == StartTagToken {
-				z.tt = SelfClosingTagToken
+		z.raw.end--
+		z.readTagAttrKey()
+		z.readTagAttrVal()
+		// Save pendingAttr if it has a non-empty key.
+		if z.pendingAttr[0].start != z.pendingAttr[0].end {
+			z.attr = append(z.attr, z.pendingAttr)
+		}
+		if z.skipWhiteSpace(); z.err != nil {
+			break
+		}
+	}
+	// Any "<noembed>", "<noframes>", "<noscript>", "<script>", "<style>",
+	// "<textarea>" or "<title>" tag flags the tokenizer's next token as raw.
+	// The tag name lengths of these special cases ranges in [5, 8].
+	if x := z.data.end - z.data.start; 5 <= x && x <= 8 {
+		switch z.buf[z.data.start] {
+		case 'n', 's', 't', 'N', 'S', 'T':
+			switch s := strings.ToLower(string(z.buf[z.data.start:z.data.end])); s {
+			case "noembed", "noframes", "noscript", "script", "style", "textarea", "title":
+				z.rawTag = s
 			}
+		}
+	}
+	// Look for a self-closing token like "<br/>".
+	if z.err == nil && z.buf[z.raw.end-2] == '/' {
+		return SelfClosingTagToken
+	}
+	return StartTagToken
+}
+
+// readEndTag reads the next end tag token. The opening "</a" has already
+// been consumed, where 'a' means anything in [A-Za-z].
+func (z *Tokenizer) readEndTag() {
+	z.attr = z.attr[:0]
+	z.nAttrReturned = 0
+	z.readTagName()
+	for {
+		c := z.readByte()
+		if z.err != nil || c == '>' {
 			return
 		}
 	}
 }
 
-// nextText reads all text up until an '<'.
-// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 1 <= z.p1.
-func (z *Tokenizer) nextText() {
+// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end)
+// is positioned such that the first byte of the tag name (the "d" in "<div")
+// has already been consumed.
+func (z *Tokenizer) readTagName() {
+	z.data.start = z.raw.end - 1
 	for {
 		c := z.readByte()
 		if z.err != nil {
+			z.data.end = z.raw.end
 			return
 		}
-		if c == '<' {
-			z.p1--
+		switch c {
+		case ' ', '\n', '\r', '\t', '\f':
+			z.data.end = z.raw.end - 1
+			return
+		case '/', '>':
+			z.raw.end--
+			z.data.end = z.raw.end
 			return
 		}
 	}
 }
 
-// Next scans the next token and returns its type.
-func (z *Tokenizer) Next() TokenType {
+// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>".
+// Precondition: z.err == nil.
+func (z *Tokenizer) readTagAttrKey() {
+	z.pendingAttr[0].start = z.raw.end
 	for {
-		if z.err != nil {
-			z.tt = ErrorToken
-			return z.tt
-		}
-		z.p0 = z.p1
 		c := z.readByte()
 		if z.err != nil {
-			z.tt = ErrorToken
-			return z.tt
+			z.pendingAttr[0].end = z.raw.end
+			return
 		}
-		// We assume that the next token is text unless proven otherwise.
-		z.tt = TextToken
-		if c != '<' {
-			z.nextText()
-		} else {
-			z.nextTag()
-			if z.tt == CommentToken && !z.ReturnComments {
-				continue
-			}
+		switch c {
+		case ' ', '\n', '\r', '\t', '\f', '/':
+			z.pendingAttr[0].end = z.raw.end - 1
+			return
+		case '=', '>':
+			z.raw.end--
+			z.pendingAttr[0].end = z.raw.end
+			return
 		}
-		return z.tt
 	}
-	panic("unreachable")
 }
 
-// trim returns the largest j such that z.buf[i:j] contains only white space,
-// or only white space plus the final ">" or "/>" of the raw data.
-func (z *Tokenizer) trim(i int) int {
-	k := z.p1
-	for ; i < k; i++ {
-		switch z.buf[i] {
-		case ' ', '\n', '\t', '\f':
-			continue
-		case '>':
-			if i == k-1 {
-				return k
+// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>".
+func (z *Tokenizer) readTagAttrVal() {
+	z.pendingAttr[1].start = z.raw.end
+	z.pendingAttr[1].end = z.raw.end
+	if z.skipWhiteSpace(); z.err != nil {
+		return
+	}
+	c := z.readByte()
+	if z.err != nil {
+		return
+	}
+	if c != '=' {
+		z.raw.end--
+		return
+	}
+	if z.skipWhiteSpace(); z.err != nil {
+		return
+	}
+	quote := z.readByte()
+	if z.err != nil {
+		return
+	}
+	switch quote {
+	case '>':
+		z.raw.end--
+		return
+
+	case '\'', '"':
+		z.pendingAttr[1].start = z.raw.end
+		for {
+			c := z.readByte()
+			if z.err != nil {
+				z.pendingAttr[1].end = z.raw.end
+				return
 			}
-		case '/':
-			if i == k-2 {
-				return k
+			if c == quote {
+				z.pendingAttr[1].end = z.raw.end - 1
+				return
 			}
 		}
-		return i
-	}
-	return k
-}
 
-// tagName finds the tag name at the start of z.buf[i:] and returns that name
-// lower-cased, as well as the trimmed cursor location afterwards.
-func (z *Tokenizer) tagName(i int) ([]byte, int) {
-	i0 := i
-loop:
-	for ; i < z.p1; i++ {
-		c := z.buf[i]
-		switch c {
-		case ' ', '\n', '\t', '\f', '/', '>':
-			break loop
-		}
-		if 'A' <= c && c <= 'Z' {
-			z.buf[i] = c + 'a' - 'A'
+	default:
+		z.pendingAttr[1].start = z.raw.end - 1
+		for {
+			c := z.readByte()
+			if z.err != nil {
+				z.pendingAttr[1].end = z.raw.end
+				return
+			}
+			switch c {
+			case ' ', '\n', '\r', '\t', '\f':
+				z.pendingAttr[1].end = z.raw.end - 1
+				return
+			case '>':
+				z.raw.end--
+				z.pendingAttr[1].end = z.raw.end
+				return
+			}
 		}
 	}
-	return z.buf[i0:i], z.trim(i)
 }
 
-// unquotedAttrVal finds the unquoted attribute value at the start of z.buf[i:]
-// and returns that value, as well as the trimmed cursor location afterwards.
-func (z *Tokenizer) unquotedAttrVal(i int) ([]byte, int) {
-	i0 := i
+// next scans the next token and returns its type.
+func (z *Tokenizer) next() TokenType {
+	if z.err != nil {
+		return ErrorToken
+	}
+	z.raw.start = z.raw.end
+	z.data.start = z.raw.end
+	z.data.end = z.raw.end
+	if z.rawTag != "" {
+		z.readRawOrRCDATA()
+		return TextToken
+	}
+	z.textIsRaw = false
+
 loop:
-	for ; i < z.p1; i++ {
-		switch z.buf[i] {
-		case ' ', '\n', '\t', '\f', '>':
+	for {
+		c := z.readByte()
+		if z.err != nil {
 			break loop
-		case '&':
-			// TODO: unescape the entity.
 		}
-	}
-	return z.buf[i0:i], z.trim(i)
-}
-
-// attrName finds the largest attribute name at the start
-// of z.buf[i:] and returns it lower-cased, as well
-// as the trimmed cursor location after that name.
-//
-// http://dev.w3.org/html5/spec/Overview.html#syntax-attribute-name
-// TODO: unicode characters
-func (z *Tokenizer) attrName(i int) ([]byte, int) {
-	for z.buf[i] == '/' {
-		i++
-		if z.buf[i] == '>' {
-			return nil, z.trim(i)
+		if c != '<' {
+			continue loop
 		}
-	}
-	i0 := i
-loop:
-	for ; i < z.p1; i++ {
-		c := z.buf[i]
-		switch c {
-		case '>', '/', '=':
+
+		// Check if the '<' we have just read is part of a tag, comment
+		// or doctype. If not, it's part of the accumulated text token.
+		c = z.readByte()
+		if z.err != nil {
 			break loop
 		}
+		var tokenType TokenType
 		switch {
-		case 'A' <= c && c <= 'Z':
-			z.buf[i] = c + 'a' - 'A'
-		case c > ' ' && c < 0x7f:
-			// No-op.
+		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
+			tokenType = StartTagToken
+		case c == '/':
+			tokenType = EndTagToken
+		case c == '!' || c == '?':
+			// We use CommentToken to mean any of "<!--actual comments-->",
+			// "<!DOCTYPE declarations>" and "<?xml processing instructions?>".
+			tokenType = CommentToken
 		default:
-			break loop
+			continue
 		}
+
+		// We have a non-text token, but we might have accumulated some text
+		// before that. If so, we return the text first, and return the non-
+		// text token on the subsequent call to Next.
+		if x := z.raw.end - len("<a"); z.raw.start < x {
+			z.raw.end = x
+			z.data.end = x
+			return TextToken
+		}
+		switch tokenType {
+		case StartTagToken:
+			return z.readStartTag()
+		case EndTagToken:
+			c = z.readByte()
+			if z.err != nil {
+				break loop
+			}
+			if c == '>' {
+				// "</>" does not generate a token at all.
+				// Reset the tokenizer state and start again.
+				z.raw.start = z.raw.end
+				z.data.start = z.raw.end
+				z.data.end = z.raw.end
+				continue loop
+			}
+			if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
+				z.readEndTag()
+				return EndTagToken
+			}
+			z.raw.end--
+			z.readUntilCloseAngle()
+			return CommentToken
+		case CommentToken:
+			if c == '!' {
+				return z.readMarkupDeclaration()
+			}
+			z.raw.end--
+			z.readUntilCloseAngle()
+			return CommentToken
+		}
 	}
-	return z.buf[i0:i], z.trim(i)
+	if z.raw.start < z.raw.end {
+		z.data.end = z.raw.end
+		return TextToken
+	}
+	return ErrorToken
+}
+
+// Next scans the next token and returns its type.
+func (z *Tokenizer) Next() TokenType {
+	for {
+		z.tt = z.next()
+		// TODO: remove the ReturnComments option. A tokenizer should
+		// always return comment tags.
+		if z.tt == CommentToken && !z.ReturnComments {
+			continue
+		}
+		return z.tt
+	}
+	panic("unreachable")
+}
+
+// Raw returns the unmodified text of the current token. Calling Next, Token,
+// Text, TagName or TagAttr may change the contents of the returned slice.
+func (z *Tokenizer) Raw() []byte {
+	return z.buf[z.raw.start:z.raw.end]
 }
 
 // Text returns the unescaped text of a text, comment or doctype token. The
 // contents of the returned slice may change on the next call to Next.
 func (z *Tokenizer) Text() []byte {
-	var i0, i1 int
 	switch z.tt {
-	case TextToken:
-		i0 = z.p0
-		i1 = z.p1
-	case CommentToken:
-		// Trim the "<!--" from the left and the "-->" from the right.
-		// "<!-->" is a valid comment, so the adjusted endpoints might overlap.
-		i0 = z.p0 + 4
-		i1 = z.p1 - 3
-	case DoctypeToken:
-		// Trim the "<!DOCTYPE " from the left and the ">" from the right.
-		i0 = z.p0 + 10
-		i1 = z.p1 - 1
-	default:
-		return nil
-	}
-	z.p0 = z.p1
-	if i0 < i1 {
-		return unescape(z.buf[i0:i1])
+	case TextToken, CommentToken, DoctypeToken:
+		s := z.buf[z.data.start:z.data.end]
+		z.data.start = z.raw.end
+		z.data.end = z.raw.end
+		if !z.textIsRaw {
+			s = unescape(s)
+		}
+		return s
 	}
 	return nil
 }
@@ -475,73 +677,33 @@ func (z *Tokenizer) Text() []byte {
 // `<IMG SRC="foo">`) and whether the tag has attributes.
 // The contents of the returned slice may change on the next call to Next.
 func (z *Tokenizer) TagName() (name []byte, hasAttr bool) {
-	i := z.p0 + 1
-	if i >= z.p1 {
-		z.p0 = z.p1
-		return nil, false
-	}
-	if z.buf[i] == '/' {
-		i++
+	if z.data.start < z.data.end {
+		switch z.tt {
+		case StartTagToken, EndTagToken, SelfClosingTagToken:
+			s := z.buf[z.data.start:z.data.end]
+			z.data.start = z.raw.end
+			z.data.end = z.raw.end
+			return lower(s), z.nAttrReturned < len(z.attr)
+		}
 	}
-	name, z.p0 = z.tagName(i)
-	hasAttr = z.p0 != z.p1
-	return
+	return nil, false
 }
 
 // TagAttr returns the lower-cased key and unescaped value of the next unparsed
 // attribute for the current tag token and whether there are more attributes.
 // The contents of the returned slices may change on the next call to Next.
 func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {
-	key, i := z.attrName(z.p0)
-	// Check for an empty attribute value.
-	if i == z.p1 {
-		z.p0 = i
-		return
-	}
-	// Get past the equals and quote characters.
-	if z.buf[i] != '=' {
-		z.p0, moreAttr = i, true
-		return
-	}
-	i = z.trim(i + 1)
-	if i == z.p1 {
-		z.p0 = i
-		return
-	}
-	closeQuote := z.buf[i]
-	if closeQuote != '\'' && closeQuote != '"' {
-		val, z.p0 = z.unquotedAttrVal(i)
-		moreAttr = z.p0 != z.p1
-		return
-	}
-	i = z.trim(i + 1)
-	// Copy and unescape everything up to the closing quote.
-	dst, src := i, i
-loop:
-	for src < z.p1 {
-		c := z.buf[src]
-		switch c {
-		case closeQuote:
-			src++
-			break loop
-		case '&':
-			dst, src = unescapeEntity(z.buf, dst, src, true)
-		case '\\':
-			if src == z.p1 {
-				z.buf[dst] = '\\'
-				dst++
-			} else {
-				z.buf[dst] = z.buf[src+1]
-				dst, src = dst+1, src+2
-			}
-		default:
-			z.buf[dst] = c
-			dst, src = dst+1, src+1
+	if z.nAttrReturned < len(z.attr) {
+		switch z.tt {
+		case StartTagToken, SelfClosingTagToken:
+			x := z.attr[z.nAttrReturned]
+			z.nAttrReturned++
+			key = z.buf[x[0].start:x[0].end]
+			val = z.buf[x[1].start:x[1].end]
+			return lower(key), unescape(val), z.nAttrReturned < len(z.attr)
 		}
 	}
-	val, z.p0 = z.buf[i:dst], z.trim(src)
-	moreAttr = z.p0 != z.p1
-	return
+	return nil, nil, false
 }
 
 // Token returns the next Token. The result's Data and Attr values remain valid
@@ -551,7 +713,7 @@ func (z *Tokenizer) Token() Token {
 	switch z.tt {
 	case TextToken, CommentToken, DoctypeToken:
 		t.Data = string(z.Text())
-	case StartTagToken, EndTagToken, SelfClosingTagToken:
+	case StartTagToken, SelfClosingTagToken:
 		var attr []Attribute
 		name, moreAttr := z.TagName()
 		for moreAttr {
@@ -561,6 +723,9 @@ func (z *Tokenizer) Token() Token {
 		}
 		t.Data = string(name)
 		t.Attr = attr
+	case EndTagToken:
+		name, _ := z.TagName()
+		t.Data = string(name)
 	}
 	return t
 }
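
The public surface (Next, Token, Raw, Text, TagName, TagAttr) keeps its shape after this rewrite, so existing callers drive the tokenizer the same way. A minimal usage sketch against the pre-Go-1 API of this package follows; it assumes the package's NewTokenizer constructor and token-type constants, which belong to the package but are not shown in this diff:

package main

import (
	"fmt"
	"html" // the pre-Go-1 standard-library html package this patch updates
	"strings"
)

func main() {
	z := html.NewTokenizer(strings.NewReader(`<p class="x">Hi<br/></p>`))
	z.ReturnComments = true // without this, Next skips comment tokens
	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			// At end of input, z.Error() reports os.EOF in this API era.
			return
		}
		t := z.Token()
		fmt.Printf("%v %q\n", tt, t.Data)
	}
}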
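The subtlest part of the patch is in readByte: token boundaries are now recorded as spans (index pairs) into z.buf, so when the buffer is compacted every live span must be rebased by the old raw.start offset, which is exactly what the new block in readByte does for data, pendingAttr and attr. A self-contained sketch of that compact-and-rebase idea, using hypothetical helper names that are not part of the patch:

package main

import "fmt"

// span mirrors the patch's index-pair type: start inclusive, end exclusive.
type span struct{ start, end int }

// compact slides the live bytes buf[raw.start:raw.end] to the front of buf
// and rebases every recorded span by the same offset, so each span still
// names the same bytes afterwards.
func compact(buf []byte, raw span, spans []span) ([]byte, span, []span) {
	d := raw.end - raw.start
	copy(buf, buf[raw.start:raw.end])
	if x := raw.start; x != 0 {
		for i := range spans {
			spans[i].start -= x
			spans[i].end -= x
		}
	}
	return buf[:d], span{0, d}, spans
}

func main() {
	buf := []byte("......<div id=x")
	raw := span{6, 15}          // the partially read "<div id=x"
	attrKey := []span{{11, 13}} // "id"
	buf, raw, attrKey = compact(buf, raw, attrKey)
	fmt.Printf("%q %v %q\n", buf, raw, buf[attrKey[0].start:attrKey[0].end])
	// Prints: "<div id=x" {0 9} "id"
}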
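readRawOrRCDATA matches the closing tag with c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A'). Because rawTag is stored lower-cased, subtracting 'a'-'A' (0x20) from each of its bytes yields the upper-case form, so "</SCRIPT>" and "</Script>" both close a <script> element without allocating a lower-cased copy. A sketch of the same comparison as a hypothetical standalone function (not part of the patch):

package main

import "fmt"

// closesRawTag reports whether name matches tag ASCII-case-insensitively,
// using the byte arithmetic from readRawOrRCDATA. tag must already be
// lower-case, as z.rawTag's contents are.
func closesRawTag(tag, name string) bool {
	if len(name) != len(tag) {
		return false
	}
	for i := 0; i < len(tag); i++ {
		if name[i] != tag[i] && name[i] != tag[i]-('a'-'A') {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(closesRawTag("script", "SCRIPT")) // true
	fmt.Println(closesRawTag("script", "Script")) // true
	fmt.Println(closesRawTag("script", "style"))  // false
}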
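In next(), once a tag, comment or doctype is recognized, the reader has already consumed any accumulated text plus the two lookahead bytes ("<a", "</", "<!" or "<?"). Stepping z.raw.end back by len("<a") flushes the pending text as its own token; the "<" and the byte after it stay buffered and are re-read on the following call. A small worked example of that arithmetic (standalone, with made-up variable names):

package main

import "fmt"

func main() {
	input := []byte("abc<div>")
	rawStart, rawEnd := 0, 5 // the tokenizer has consumed "abc<d"
	if x := rawEnd - len("<a"); rawStart < x {
		rawEnd = x // flush "abc" as a TextToken; "<d" is still buffered
	}
	fmt.Printf("pending text: %q\n", input[rawStart:rawEnd]) // "abc"
}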