I have a lexer:
import re
from sly import Lexer

class BasicLexer(Lexer):
    tokens = {OBJECT, FUNCTION}
    ignore = '.'

    # SLY attaches these patterns to the same-named methods below
    OBJECT = r'object\("(.*?)"\)'
    FUNCTION = r'function\("(.*?)"\)'

    def OBJECT(self, t):
        # strip the wrapper, keeping only the quoted name
        match = re.search(r'object\("(.*?)"\)', t.value)
        t.value = match.group(1)
        return t

    def FUNCTION(self, t):
        match = re.search(r'function\("(.*?)"\)', t.value)
        t.value = match.group(1)
        return t
When I run it, it returns two tokens (the dot separating the two calls is discarded by ignore = '.'):
if __name__ == '__main__':
    data = '''object("cars").function("work")'''
    lexer = BasicLexer()
    for tok in lexer.tokenize(data):
        print('type=%r, value=%r' % (tok.type, tok.value))
type='OBJECT', value='cars'
type='FUNCTION', value='work'
Now, creating the parser:
from sly import Parser

class BasicParser(Parser):
    tokens = BasicLexer.tokens

    def __init__(self):
        self.env = { }

    @_('')
    def statement(self, p):
        pass

    @_('OBJECT')
    def statement(self, p):
        return ('object', p.OBJECT)

    @_('FUNCTION')
    def statement(self, p):
        return ('function', p.FUNCTION)
if __name__ == '__main__':
    lexer = BasicLexer()
    parser = BasicParser()
    text = '''object("cars").function("work")'''
    result = parser.parse(lexer.tokenize(text))
    print(result)
This returns the following error:
sly: Syntax error at line 1, token=FUNCTION
None
For some reason, it can't parse when lexer.tokenize(text) returns a generator that yields multiple tokens. Any idea why?
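
To rule out the generator itself, I would expect buffering the tokens into a list and handing parse a fresh iterator over it to behave identically (a minimal, untested sketch; toks is just my name for the buffer):

toks = list(lexer.tokenize(text))
print(toks)                        # confirm the stream really is OBJECT then FUNCTION
result = parser.parse(iter(toks))  # same token sequence, no live generator involved
print(result)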