-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtokennexter.go
80 lines (75 loc) · 1.57 KB
/
tokennexter.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
package lexer
import (
"errors"
"io"
"github.com/tekwizely/go-parsing/lexer/token"
)
// tokenNexter is the internal structure that backs the lexer's token.Nexter.
// It pulls tokens from the lexer on demand and caches a single token of
// lookahead between hasNext() and Next().
//
type tokenNexter struct {
	lexer *Lexer // owning lexer whose output queue we drain
	next token.Token // one-token lookahead cache, filled by hasNext(), cleared by Next()
	eof bool // set once the lexer's EOF marker has been consumed; nexter is exhausted
}
// Next implements token.Nexter.Next().
//
// It returns io.EOF once the token stream is exhausted, and converts
// lex-error tokens (TLexErr) into Go errors instead of returning them
// as tokens.
//
func (t *tokenNexter) Next() (token.Token, error) {
	// hasNext() primes the lookahead cache; false means stream exhausted.
	if !t.hasNext() {
		return nil, io.EOF
	}
	// Hand off the cached token, clearing the cache for the next call.
	fetched := t.next
	t.next = nil
	switch fetched.Type() {
	case TLexErr:
		// Lexing errors surface as errors, not tokens.
		return nil, errors.New(fetched.Value())
	default:
		return fetched, nil
	}
}
// hasNext Initiates calls to lexer.Fn functions and is the primary entry point for retrieving tokens from the lexer.
//
// It returns true when a token has been cached in t.next for Next() to
// pick up, and false once the lexer's EOF marker has been reached.
//
func (t *tokenNexter) hasNext() bool {
	// A previously-fetched token is still waiting — nothing to do.
	//
	if t.next != nil {
		return true
	}
	// Once EOF has been seen, the stream is permanently exhausted.
	//
	if t.eof {
		return false
	}
	// Pump the lexer state machine until its output queue is non-empty.
	//
	for t.lexer.output.Len() == 0 {
		switch {
		// Still have a state fn and input to scan? Advance the machine.
		//
		case t.lexer.nextFn != nil && t.lexer.CanPeek(1):
			t.lexer.nextFn = t.lexer.nextFn(t.lexer)
		// Lexer terminated or input exhausted — emit EOF if not done already.
		//
		case !t.lexer.eofOut:
			t.lexer.EmitEOF()
		}
	}
	// Pop the front of the output queue; it is either cached or,
	// if it is the EOF marker, discarded.
	//
	fetched := t.lexer.output.Remove(t.lexer.output.Front()).(*_token)
	if fetched.eof() {
		// Record end-of-stream and drop the marker token.
		//
		t.eof = true
		return false
	}
	// Cache the token for Next() to pick up.
	//
	t.next = fetched
	return true
}