From 753f1f72fb972d254485344e96b44e09a00bf6f9 Mon Sep 17 00:00:00 2001
From: Ricardo Fernández Serrata <76864299+Rudxain@users.noreply.github.com>
Date: Wed, 23 Nov 2022 20:08:28 -0400
Subject: [PATCH] final commit? I guess

---
 src-py/Lexer.py       | 14 ++++++++++----
 src-py/RickRoll.py    |  3 ++-
 src-py/helpers.py     |  7 +++----
 src-py/interpreter.py | 18 ++++++++++--------
 src-py/pyrickroll.py  |  2 +-
 src-py/rickvm.py      |  4 ++--
 6 files changed, 28 insertions(+), 20 deletions(-)

diff --git a/src-py/Lexer.py b/src-py/Lexer.py
index 6cf1525..c75a5da 100644
--- a/src-py/Lexer.py
+++ b/src-py/Lexer.py
@@ -1,9 +1,15 @@
+# The type-checker complains about `Final` being re-exported from `Keywords`
+# via the star-import, so we must explicitly import it here.
+from typing import Final
+
 from Keywords import *
 from helpers import remove_all
 
-ALL_KW_STR = ','.join(KEYWORDS)
+ALL_KW_STR: Final = ','.join(KEYWORDS)
 
 def lexicalize(stmt: str):
+    SP_LN: Final = {' ', '\n'}
+
     current_token = ''
     quote_count = 0
     tokens: list[str] = []
@@ -14,9 +20,9 @@ def lexicalize(stmt: str):
             continue
 
         if char in SEPARATORS and quote_count % 2 == 0:
-            if current_token not in {' ', '\n'}:
+            if current_token not in SP_LN:
                 tokens.append(current_token)
-            if char not in {' ', '\n'}:
+            if char not in SP_LN:
                 tokens.append(char)
 
             current_token = ''
@@ -30,7 +36,7 @@ def order_words(tokens: list[str]):
     if current `token+kw_in_statement` not in all keyword string, add `kw_in_statement` to `final_token`
     if statement is ended, add `kw_in_statement` to `final_token`
     """
-    final_token: list[str] = []
+    final_token: Final[list[str]] = []
     kw_in_statement = ''
     temp = False
     for tok in tokens:
diff --git a/src-py/RickRoll.py b/src-py/RickRoll.py
index e707a32..a64238f 100755
--- a/src-py/RickRoll.py
+++ b/src-py/RickRoll.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+from typing import Final
 
 from traceback import format_exc
 
@@ -33,7 +34,7 @@ def main():
     args = arg_parser.parse_args()
 
     # excludes `def`s, `import`s and `argparse` times
-    start = time()
+    start: Final = time()
     # Run the RickRoll program
     if args.file:
         # Convert .rickroll to C++
diff --git a/src-py/helpers.py b/src-py/helpers.py
index f196b54..75862a5 100644
--- a/src-py/helpers.py
+++ b/src-py/helpers.py
@@ -5,10 +5,9 @@
 
 from typing import Callable, Final
 
-
-def starts_ends(container: str | list, x):
+def starts_ends(s: str, char: str):
     """
-    Check if it starts and ends with the same value.
+    Check if `s` starts and ends with `char`.
 
     Examples:
     ```
@@ -19,7 +18,7 @@ def starts_ends(container: str | list, x):
     """
     # for some reason, type-inference only works if it explicitly returns a bool
     # WTF?
-    return True if container[0] == x and container[-1] == x else False
+    return True if s[0] == char and s[-1] == char else False
 
 
 join_list: Final[Callable[[list], str]] = lambda l: ''.join(map(str, l))
diff --git a/src-py/interpreter.py b/src-py/interpreter.py
index cfa2b15..a863218 100644
--- a/src-py/interpreter.py
+++ b/src-py/interpreter.py
@@ -1,3 +1,5 @@
+from typing import Final  # explanation in Lexer.py
+
 from sys import stdout
 from time import time
 
@@ -5,7 +7,7 @@
 from Lexer import lexicalize
 from helpers import filter_str, precedence, starts_ends
 
-start = time()
+start: Final = time()
 
 class AST:
     def print_node(Node: list, args):
@@ -47,7 +49,7 @@ def __init__(self, tokens: list[list[str]], Node: list):
         self.tokens = tokens
         self.pos = 0
 
-        self.stmt = []
+        self.stmt: Final = []
 
         while self.pos < len(self.tokens):
             self.parse()
@@ -98,7 +100,7 @@ def parse(self):
             Parser(tokens=child_stmts, Node=while_nodes)
             AST.while_node(self.Node, cond, while_nodes)
 
-def applyOp(a, b, op: str):
+def applyOp(a: int | str, b: int | str, op: str) -> int | str:
     if op == '+': return a + b
     if op == '-': return a - b
     if op == '*': return a * b
@@ -109,12 +111,12 @@ def applyOp(a, b, op: str):
         or op==KW.GOE_OP.value and a>=b or op==KW.LOE_OP.value and a<=b \
         else 'False'
 
 def evaluate(tokens: list[str]):
     if len(tokens) == 1 and starts_ends(tokens[0], '"'):
         return filter_str(tokens[0])
 
-    values = []
-    ops: list[str] = []
+    values: Final[list[int | str]] = []
+    ops: Final[list[str]] = []
     for i in range(len(tokens)):
         if not tokens[i]:
             return
@@ -153,13 +155,13 @@
         values.append(applyOp(val1, val2, op))
     return values[-1]
 
-variables = {}
+variables: Final[dict[str, int | str | None]] = {}
 
 class Interpreter:
     def __init__(self):
         self.idx = 0
 
-    def interpret(self, nodes: list[list]):
+    def interpret(self, nodes: list | str):
         for node in nodes:
             self.idx += 1
             if node[0] == "print_node":
@@ -179,7 +181,7 @@ def interpret(self, nodes: list[list]):
 
 def run_in_interpreter(src_file_name: str):
     intpr = Interpreter()
-    Node: list[list] = []
+    Node = []
 
     with open(src_file_name, mode='r', encoding='utf-8') as src:
         content = src.readlines()
diff --git a/src-py/pyrickroll.py b/src-py/pyrickroll.py
index 5d9730f..d841499 100644
--- a/src-py/pyrickroll.py
+++ b/src-py/pyrickroll.py
@@ -63,7 +63,7 @@ def __make_token(self, tok: str):
 class TranslateToPython:
     def __init__(self):
         # tokens
-        self.values = []
+        self.values: list[str] = []
         self.is_main = False
         self.is_function = False
         self.indent_count = 0
diff --git a/src-py/rickvm.py b/src-py/rickvm.py
index 35e2b65..502a19d 100644
--- a/src-py/rickvm.py
+++ b/src-py/rickvm.py
@@ -10,7 +10,7 @@
 
 from helpers import filter_str, precedence, starts_ends
 
-def applyOp(a: float | str, b: float | str, op: str):
+def applyOp(a: int | str, b: int | str, op: str) -> int | str:
     if op == '+': return a + b
     if op == '-': return a - b
     if op == '*': return a * b
@@ -27,7 +27,7 @@ def evaluate(tokens: list[str]):
             return filter_str(tokens[0])
         return tokens[0]
 
-    values: Final[list[float | str]] = []
+    values: Final[list[int | str]] = []
     ops: Final[list[str]] = []
     for i in range(len(tokens)):
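
Note for reviewers: most of these hunks only add `typing.Final` annotations, which are checker-only hints with no runtime effect, so none of them change behavior. A minimal sketch of what the annotation buys us (the keywords below are placeholders, not the real `KEYWORDS`; the mypy message is indicative):

```
from typing import Final

KEYWORDS: Final = ('never gonna give', 'never gonna let')  # placeholder values
ALL_KW_STR: Final = ','.join(KEYWORDS)  # type is inferred as plain `str`

# A type-checker rejects any rebinding; mypy, for example, reports:
#   error: Cannot assign to final name "ALL_KW_STR"
# ALL_KW_STR = 'overwritten'  # runs fine at runtime: `Final` is inert there

def lexicalize(stmt: str) -> list[str]:
    # Locals may be Final too (compare the SP_LN hunk in Lexer.py);
    # it merely documents single assignment, at zero runtime cost.
    SP_LN: Final = {' ', '\n'}
    return [ch for ch in stmt if ch not in SP_LN]

print(lexicalize('a b\nc'))  # -> ['a', 'b', 'c']
```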
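The docstring in the `order_words` hunk is terse, so here is a hedged reconstruction of the regrouping it describes: glue consecutive tokens back together while the glued phrase is still a substring of `ALL_KW_STR`, flushing to `final_token` once it no longer is, or when the statement ends. This illustrates the idea only; it is not the shipped implementation, and it omits the `temp` flag visible in the hunk context:

```
from typing import Final

KEYWORDS: Final = ('never gonna give', 'never gonna run')  # placeholders
ALL_KW_STR: Final = ','.join(KEYWORDS)

def order_words_sketch(tokens: list[str]) -> list[str]:
    final_token: list[str] = []
    kw_in_statement = ''  # the multi-word keyword being accumulated
    for tok in tokens:
        glued = f'{kw_in_statement} {tok}'.strip()
        if glued in ALL_KW_STR:
            kw_in_statement = glued  # still part of some keyword: keep gluing
        else:
            if kw_in_statement:      # flush the keyword that just ended
                final_token.append(kw_in_statement)
            # start over: `tok` may begin a new keyword, or stand alone
            kw_in_statement = tok if tok in ALL_KW_STR else ''
            if not kw_in_statement:
                final_token.append(tok)
    if kw_in_statement:              # statement ended: flush the remainder
        final_token.append(kw_in_statement)
    return final_token

print(order_words_sketch(['never', 'gonna', 'give', 'x']))
# -> ['never gonna give', 'x']
```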