| # ----------------------------------------------------------------------------- |
| # lex_module_import.py |
| # |
| # A lexer defined in a module, but built in lex_module.py |
| # ----------------------------------------------------------------------------- |
| |
# Every token name this lexer can emit; PLY reads this module-level
# tuple when it builds the lexer from this module.
tokens = (
    'NAME',
    'NUMBER',
    'PLUS',
    'MINUS',
    'TIMES',
    'DIVIDE',
    'EQUALS',
    'LPAREN',
    'RPAREN',
)
| |
# Simple tokens: each is a fixed regular expression string that PLY
# compiles directly (no action function needed).

# Operators and assignment.
t_EQUALS = r'='
t_PLUS   = r'\+'
t_MINUS  = r'-'
t_TIMES  = r'\*'
t_DIVIDE = r'/'

# Grouping delimiters.
t_LPAREN = r'\('
t_RPAREN = r'\)'

# Identifiers: a letter or underscore, then letters, digits, or underscores.
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
| |
def t_NUMBER(t):
    r'\d+'
    # Convert the matched digit string to an int in-place on the token.
    # No overflow guard is needed: Python 3 ints are arbitrary precision,
    # so int() on a \d+ match can never raise (the old try/except
    # ValueError was a Python-2-era leftover and was unreachable).
    t.value = int(t.value)
    return t
| |
| t_ignore = " \t" |
| |
def t_newline(t):
    r'\n+'
    # Count newlines on the lexer object itself. PLY does not copy
    # t.lineno back after a rule runs, so the old "t.lineno += ..."
    # never advanced the lexer's line counter; t.lexer.lineno is the
    # documented way (and matches the t.lexer usage in t_error).
    t.lexer.lineno += t.value.count("\n")
| |
def t_error(t):
    # Report the offending character, then advance the lexer one
    # character past it so scanning can resume.
    bad_char = t.value[0]
    print("Illegal character '%s'" % bad_char)
    t.lexer.skip(1)
| |