@@ -1,72 +1,104 @@
 use regexp::Regexp;
-use regexp::NFA;
+use std::vec::Vec;
+use std::slice::CloneableVector;
 
 /// A single regexp for a token.
+#[deriving(Clone)]
 struct LexerRule {
-    matcher: NFA,
+    matcher: Regexp,
     token: Token,
 }
 
+#[deriving(Eq, Clone)]
 pub enum Token {
-    WhiteSpace,
+    // Whitespace
+    WS,
+
+    // Reserved words
+    Let,
+
+    // Symbols
+    LParen,
+    RParen,
+    LBrace,
+    RBrace,
+    LBracket,
+    RBracket,
+    Less,
+    Greater,
+    And,
+    Or,
+    Xor,
+    AndAnd,
+    OrOr,
+    Add,
+    Sub,
+    Mul,
+    Div,
+    Lsh,
+    Rsh,
+    Colon,
+    Semi,
+    Eq,
+    Bang,
+
+    // Literals
     Ident,
     Number,
     HexNumber,
     String,
-    Let,
-    LogicalAnd,
-    LogicalOr,
-    Character(char),
 }
 
 pub struct Lexer {
     rules: ~[LexerRule],
 }
 
 impl Lexer {
-    fn add_rule(&mut self, regexp: &str, token: Token) {
-        let nfa = Regexp::parse(regexp).compile();
-        self.rules.push(LexerRule {
-            matcher: nfa,
-            token: token,
-        }
-        )
-    }
-
-    fn add_char_rule(&mut self, c: char) {
-        if c == '@' || c == '#' || c == 'a' || c == ' ' {
-            self.add_rule(format!("{:c}", c), Character(c));
-        } else {
-            self.add_rule(format!("\\\\{:c}", c), Character(c));
-        }
-    }
-
-    fn add_char_rules(&mut self, s: &str) {
-        for c in s.chars() {
-            self.add_char_rule(c);
+    pub fn new() -> Lexer {
+        macro_rules! lexer {
+            ( $( $t:expr => $r:expr ),*) => (
+                Lexer { rules: ~[ $( LexerRule { matcher: regexp!(concat!("^(?:", $r, ")")), token: $t } ),* ] }
+            )
         }
-    }
 
-    pub fn new() -> Lexer {
-        let mut l: Lexer = Lexer { rules: ~[] };
         // Note: rules are in decreasing order of priority if there's a
         // conflict. In particular, reserved words must go before Ident.
+        lexer! {
+            // Whitespace
+            WS => r"\s|//.*|(?s)/\*.*\*/",
+
+            // Reserved words
+            Let => r"let",
 
-        l.add_rule("let", Let);
-        l.add_rule("*(\\ )", WhiteSpace);
-        l.add_rule("\\a*(\\@)", Ident);
-        l.add_rule("\\#*(\\#)", Number);
-        l.add_rule("0x|(\\#,a,b,c,d,e,f,A,B,C,D,E,F)"+
-                   "*(|(\\#,a,b,c,d,e,f,A,B,C,D,E,F))", HexNumber);
-        // TODO: this needs to be improved.
-        l.add_rule("\"*(|(\\@,\\ ,\\\\\"))\"", String);
-        // TODO: this too.
-        l.add_rule("/\\**(|(\\@,\\ ,\\\\\"))\\*/", WhiteSpace);
-        l.add_rule("&&", LogicalAnd);
-        l.add_rule("\\|\\|", LogicalOr);
-
-        // All individual characters that are valid on their own as tokens.
-        l.add_char_rules("()+-*/;:=!%^&|");
-
-        l
+            // Symbols
+            LParen => r"\(",
+            RParen => r"\)",
+            LBrace => r"\{",
+            RBrace => r"\}",
+            LBracket => r"\[",
+            RBracket => r"\]",
+            Less => r"<",
+            Greater => r">",
+            And => r"&",
+            Or => r"\|",
+            Xor => r"\^",
+            AndAnd => r"&&",
+            OrOr => r"\|\|",
+            Add => r"\+",
+            Sub => r"-",
+            Mul => r"\*",
+            Div => r"/",
+            Lsh => r"<<",
+            Rsh => r">>",
+            Colon => r":",
+            Semi => r";",
+            Eq => r"=",
+            Bang => r"!",
+
+            // Literals
+            Ident => r"[a-zA-Z_]\w*",
+            Number => r"\d",
+            HexNumber => r"0[xX][:xdigit:]+",
+            String => r#""(?:\\"|[^"])*""#
+        }
    }
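
A note on the `lexer!` macro above, since it carries most of this hunk: each `$t => $r` pair becomes one `LexerRule`, and `concat!` wraps the pattern in `^(?:...)` so a rule can only match at the very start of the slice it is handed. As a rough hand-expansion (for illustration only, not part of the commit), a two-rule invocation like `lexer! { Let => r"let", Bang => r"!" }` would produce something like:

    // Hand-expanded sketch of a two-rule `lexer!` invocation.
    // The `^(?:...)` anchor pins every match to the start of the input slice.
    Lexer {
        rules: ~[
            LexerRule { matcher: regexp!(concat!("^(?:", r"let", ")")), token: Let },
            LexerRule { matcher: regexp!(concat!("^(?:", r"!", ")")), token: Bang },
        ],
    }

Since `regexp!` is the regexp crate's compile-time extension, a malformed pattern in the table should surface as a build error rather than a runtime failure.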
@@ -73,30 +105,31 @@
 
     pub fn tokenize(&self, s: &str) -> (~[(Token, ~str)]) {
         let mut pos = 0u;
-        let mut result = ~[];
+        let mut result = vec!();
         while pos < s.len() {
             let mut longest = 0u;
-            let mut best_token = None;
-            let mut best_str = ~"";
+            let mut best = None;
             for rule in self.rules.iter() {
-                let m = rule.matcher.match_string(s.slice_from(pos));
+                let m = rule.matcher.find(s.slice_from(pos));
                 match m {
-                    Some(ref s) if s.len() > longest => {
-                        best_token = Some(rule.token);
-                        best_str = s.clone();
-                        longest = s.len();
+                    Some((begin, end)) if begin == 0 => {
+                        let s = s.slice(pos, pos + end);
+                        if s.len() > longest {
+                            best = Some((rule.token, s));
+                            longest = s.len();
+                        }
                     },
                     _ => {},
                 }
             }
             pos += longest;
-            match best_token.unwrap() {
-                WhiteSpace => {},
-                x => result.push((x, best_str))
+            match best.unwrap() {
+                (WS, _) => {},
+                (t, s) => result.push((t, s.to_owned()))
             }
         }
 
-        result
+        result.as_slice().to_owned()
     }
 }
 
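The rewritten `tokenize` is a maximal-munch scanner: at each position every rule is tried and the longest match wins, and since `best` is only replaced on a strictly longer match (`s.len() > longest`), ties go to the rule listed first. That tie-breaking is exactly what the "decreasing order of priority" comment relies on: `Let` beats `Ident` on the input `let` only because it comes earlier in the table. A usage sketch, with the output hand-derived from the rules above:

    let lexer = Lexer::new();
    let toks = lexer.tokenize("let x = y << 5;");
    // "let" matches Let and Ident at the same length; Let is listed
    // first, so the tie goes to the reserved word.
    // "<<" matches Lsh (2 chars) and Less (1 char); the longer match wins.
    // WS matches are consumed but discarded, so `toks` should be:
    // ~[(Let, ~"let"), (Ident, ~"x"), (Eq, ~"="), (Ident, ~"y"),
    //   (Lsh, ~"<<"), (Number, ~"5"), (Semi, ~";")]

Note that `Number => r"\d"` matches exactly one digit, so a literal like `10` currently lexes as two `Number` tokens; the pattern presumably wants a `+`.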
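One more caveat, visible in the hunk above: when no rule matches at the current position, `longest` stays `0` and `best` stays `None`, so `best.unwrap()` fails the task; and even if the unwrap were skipped, `pos` would never advance. Any character outside the rules' alphabet therefore aborts lexing outright (hypothetical input, not from the commit):

    let lexer = Lexer::new();
    // No rule above matches '?', so `best` is None and
    // `best.unwrap()` fails once pos reaches it.
    lexer.tokenize("let x?");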