# HOSEIN MIRHOSEINI / NIRVANA NIROOMAND
# A simple lexer built on PLY (ply.lex)
import ply.lex as lex
class LEXER(object):
    # Build the lexer from the rules defined in this class
    def __init__(self):
        self.lexer = lex.lex(module=self)

    # List of token names. This is always required.
    tokens = [
        'NUMBER',
        'PLUS',
        'MINUS',
        'TIMES',
        'DIVIDE',
        'LPAREN',
        'RPAREN',
        'ID',
        'ASSIGN',
        'LBRACKET',
        'RBRACKET',
        'LT',
        'GT',
        'LE',
        'GE',
        'NE',
        'EQUAL',
    ]

    # Reserved words: t_ID looks matched text up in this table, so keywords
    # (including 'not' and ':') are re-tagged instead of being emitted as IDs
    reserved = {
        'if': 'IF',
        'elif': 'ELIF',
        'else': 'ELSE',
        'while': 'WHILE',
        'for': 'FOR',
        'in': 'IN',
        'range': 'RANGE',
        'not': 'NOT',
        ':': 'TWOP',
    }

    # Merge the keyword token names into the token list
    tokens = tokens + list(reserved.values())
    # Identifiers (and ':'); reserved words are re-tagged via the table above
    def t_ID(self, t):
        r':|[a-zA-Z_][a-zA-Z_0-9]*'
        t.type = self.reserved.get(t.value, 'ID')  # Check for reserved words
        return t

    # Regular expression rules for simple tokens
    t_PLUS = r'\+'
    t_MINUS = r'-'
    t_TIMES = r'\*'
    t_DIVIDE = r'/'
    t_LPAREN = r'\('
    t_RPAREN = r'\)'

    def printList(self):
        print(self.reserved)
    # Multi-character operators must be defined before their single-character
    # prefixes ('==' before '=', '>=' before '>'), since PLY tries function
    # rules in definition order. PLY derives each token's type from the rule name.
    def t_EQUAL(self, t):
        r'=='
        return t

    def t_GE(self, t):
        r'>='
        return t

    def t_LE(self, t):
        r'<='
        return t

    def t_NE(self, t):
        r'!='
        return t

    def t_LT(self, t):
        r'<'
        return t

    def t_GT(self, t):
        r'>'
        return t

    def t_ASSIGN(self, t):
        r'='
        return t

    def t_LBRACKET(self, t):
        r'{'
        return t

    def t_RBRACKET(self, t):
        r'}'
        return t
    # A regular expression rule with some action code
    def t_NUMBER(self, t):
        r'\d+'
        t.value = int(t.value)
        return t

    # Define a rule so we can track line numbers
    @staticmethod
    def t_newline(t):
        r'\n+'
        t.lexer.lineno += len(t.value)

    # A string containing ignored characters (spaces and tabs)
    t_ignore = ' \t'

    # Error handling rule: report the offending character and skip it
    @staticmethod
    def t_error(t):
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)
    def append_multiple_lines(self, file_name, lines_to_append):
        # Open the file in append & read mode ('a+')
        with open(file_name, "a+") as file_object:
            appendEOL = False
            # Move the read cursor to the start of the file
            file_object.seek(0)
            # Check whether the file already has content
            data = file_object.read(100)
            if len(data) > 0:
                appendEOL = True
            # Iterate over each string in the list
            for line in lines_to_append:
                # If the file is non-empty, prepend '\n' before the first line;
                # every subsequent line is always preceded by '\n'
                if appendEOL:
                    file_object.write("\n")
                else:
                    appendEOL = True
                # Append the element at the end of the file
                file_object.write(line)
    def test(self):
        # Clear the output file first ('w' creates it if it does not exist)
        with open("TokList.txt", "w"):
            pass
        with open("p.txt", "r") as file:
            data = file.read().rstrip()
        # Give the lexer some input
        self.lexer.input(data)
        # Tokenize
        TOKENS = []
        while True:
            tok = self.lexer.token()
            if not tok:
                break  # No more input
            TOKENS.append(tok.type + " " + str(tok.value))
        self.append_multiple_lines("TokList.txt", TOKENS)


if __name__ == "__main__":
    lexer = LEXER()
    lexer.test()
    print("Check out TokList.txt")
#######################################################################################################
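
# A minimal sketch (not part of the original program) of driving the same lexer
# from an in-memory string instead of the p.txt / TokList.txt files; the helper
# name and the sample input below are hypothetical.
def demo_tokens(source):
    demo = LEXER()
    demo.lexer.input(source)
    toks = []
    while True:
        tok = demo.lexer.token()
        if not tok:
            break
        toks.append((tok.type, tok.value))
    return toks

# Example: demo_tokens("x = 1 + 2") returns
# [('ID', 'x'), ('ASSIGN', '='), ('NUMBER', 1), ('PLUS', '+'), ('NUMBER', 2)]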