tokenizer.py
import re


class tokenizer(object):
    def __init__(self, tokens, filename):
        # tokens is a sequence of (name, pattern) pairs; combine them
        # into a single alternation of named groups, e.g.
        # (?P<NUMBER>\d+)|(?P<NAME>\w+)
        self.tokens = tokens
        self.stream = None
        parts = []
        for name, part in tokens:
            parts.append("(?P<%s>%s)" % (name, part))
        self.regexpString = "|".join(parts)
        self.regexp = re.compile(self.regexpString, re.MULTILINE)
        try:
            self.stream = open(filename, "r")
        except IOError as e:
            print(e)

    def cleanup(self):
        if self.stream is not None:
            self.stream.close()

    def next(self):
        # Generator: yield (name, lexeme) pairs, scanning the input
        # line by line.
        for text in self.stream:
            for match in self.regexp.finditer(text):
                # lastgroup is the name of the alternative that matched
                name = match.lastgroup
                yield (name, match.group(name))
        self.stream.close()
        yield ('eof', 'eof')
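

# --- Usage sketch (not part of the original file) ---
# A minimal example of driving the tokenizer. The token names,
# patterns, and the filename "input.txt" are illustrative
# assumptions, not values taken from the original source.
if __name__ == "__main__":
    demo_tokens = [
        ("NUMBER", r"\d+"),
        ("NAME",   r"[A-Za-z_]\w*"),
        ("OP",     r"[+\-*/=]"),
    ]
    t = tokenizer(demo_tokens, "input.txt")  # hypothetical input file
    if t.stream is not None:
        for name, lexeme in t.next():
            print(name, lexeme)  # ends after the ('eof', 'eof') pair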