# lexer.py
import ply.lex as lex
class MyLexer:
    # Regular expression rules for simple tokens. PLY sorts string rules by
    # decreasing regex length, so '<=' is tried before '<'.
    t_PLUS       = r'\+'
    t_MINUS      = r'-'
    t_TIMES      = r'\*'
    t_DIVIDE     = r'/'
    t_LPAREN     = r'\('
    t_RPAREN     = r'\)'
    t_LBRACE     = r'\{'
    t_RBRACE     = r'\}'
    t_LBRACKET   = r'\['
    t_RBRACKET   = r'\]'
    t_SEMI       = r';'
    t_COMMA      = r','
    t_LESSTHAN   = r'<'
    t_GREATHAN   = r'>'
    t_GREATEQUAL = r'>='
    t_LESSEQUAL  = r'<='
    t_DIFFERENT  = r'<>'
    t_EQUAL      = r'='
    t_TWOEQUAL   = r'=='
    t_AND        = r'&&'
    t_OR         = r'\|\|'
    t_STRING     = r'"[^"]*"'  # stop at the first closing quote, not the last
    # All of the reserved words
    reserved = {
        'if'     : 'IF',
        'else'   : 'ELSE',
        'loop'   : 'LOOP',
        'int'    : 'INT',
        'float'  : 'FLOAT',
        'bool'   : 'BOOL',
        'char'   : 'CHAR',
        'main'   : 'MAIN',
        'print'  : 'PRINT',
        'brush'  : 'BRUSH',
        'pd'     : 'PD',
        'pu'     : 'PU',
        'fd'     : 'FD',
        'rt'     : 'RT',
        'read'   : 'READ',
        'circle' : 'CIRCLE',
        'square' : 'SQUARE',
        'arc'    : 'ARC',
        'void'   : 'VOID',
        'color'  : 'COLOR',
        'true'   : 'TRUE',
        'false'  : 'FALSE',
        'home'   : 'HOME',
        'return' : 'RETURN'
    }
    # List of token names. This is always required.
    tokens = [
        'GREATEQUAL', 'LESSEQUAL', 'INTEGER', 'PLUS', 'MINUS', 'TIMES',
        'DIVIDE', 'LPAREN', 'RPAREN', 'LBRACE', 'RBRACE', 'LBRACKET',
        'RBRACKET', 'SEMI', 'ID', 'COMMA', 'GREATHAN', 'LESSTHAN',
        'DIFFERENT', 'EQUAL', 'TWOEQUAL', 'AND', 'OR', 'STRING',
    ] + list(reserved.values())
    listOfTokens = []  # note: class-level, shared across instances
    # Identifiers: a letter or underscore followed by letters, digits, or underscores
    def t_ID(self, t):
        r'[a-zA-Z_][a-zA-Z_0-9]*'
        t.type = self.reserved.get(t.value, 'ID')  # Check for reserved words
        self.listOfTokens.append(t)
        return t
    # Discard comments, which start with '#' and run to the end of the line
    def t_COMMENT(self, t):
        r'\#.*'
        pass  # No return value; the token is discarded
    # Floating-point literals; defined before t_INTEGER so '22.22' is not
    # split into two INTEGER tokens (PLY tries function rules in definition order)
    def t_FLOAT(self, t):
        r'\d+\.\d+'
        t.value = float(t.value)
        self.listOfTokens.append(t)
        return t
    # Integer literals: a regular expression rule with some action code
    def t_INTEGER(self, t):
        r'\d+'
        t.value = int(t.value)
        self.listOfTokens.append(t)
        return t
    # Define a rule so we can track line numbers
    def t_newline(self, t):
        r'\n+'
        t.lexer.lineno += len(t.value)
    # Compute a token's column.
    #   input is the input text string
    #   token is a token instance
    def find_column(self, input, token):
        line_start = input.rfind('\n', 0, token.lexpos) + 1
        return (token.lexpos - line_start) + 1
    # A string containing ignored characters (spaces and tabs)
    t_ignore = ' \t'
    def getAllTokens(self):
        return self.listOfTokens
    # Error handling rule
    def t_error(self, t):
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)
    # Build the lexer
    def build(self, **kwargs):
        self.lexer = lex.lex(module=self, **kwargs)
    # Test its output
    def test(self, data):
        self.lexer.input(data)
        while True:
            tok = self.lexer.token()
            if not tok:
                break
            print(tok)
# Build the lexer and try it out
m = MyLexer()
m.build()
#m.test('''+ 234 oo 22.22 {} [] main == <> < > >= <= && || if " yolo" ''') # Test it
#print(m.getAllTokens())
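
# A minimal usage sketch (an illustrative addition, assuming PLY is installed
# via `pip install ply`): it builds a fresh lexer, feeds it a small sample
# program, and prints each token with the line/column that find_column()
# reports. The sample source string below is made up for demonstration.
if __name__ == '__main__':
    demo = MyLexer()
    demo.build()
    data = 'int x = 3; float y = 2.5; if (x <= y) { print("hi"); }'
    demo.lexer.input(data)
    while True:
        tok = demo.lexer.token()
        if not tok:
            break
        print('%-10s %-8r line %d col %d' %
              (tok.type, tok.value, tok.lineno, demo.find_column(data, tok)))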