| author | 2023-09-27 17:31:16 +0800 |
|---|---|
| committer | 2023-09-27 17:31:16 +0800 |
| commit | ba4129933cdb6d91e695b2de900b8753652ec385 (patch) |
| tree | c520d508bf50cd22ea3123840f4aff77f148256b /psi |
| parent | 3ad303968524f6dc57b7d5900e33963c77342552 (diff) |
| download | TRPGNivis-ba4129933cdb6d91e695b2de900b8753652ec385.tar.gz, TRPGNivis-ba4129933cdb6d91e695b2de900b8753652ec385.zip |
feat(pyproject): optimize the Python package management structure
Diffstat (limited to 'psi')
| mode | file | lines deleted |
|---|---|---|
| -rw-r--r-- | psi/__init__.py | 26 |
| -rw-r--r-- | psi/exception.py | 0 |
| -rw-r--r-- | psi/execution.py | 17 |
| -rw-r--r-- | psi/interpreter.py | 26 |
| -rw-r--r-- | psi/lexer.py | 153 |
| -rw-r--r-- | psi/mathmatics.py | 0 |
| -rw-r--r-- | psi/parsers.py | 65 |
| -rw-r--r-- | psi/type.py | 0 |
8 files changed, 0 insertions, 287 deletions
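For context, the removed modules formed a small reply-script pipeline: `Psi` (in `psi/__init__.py`) wrapped an `Execution`, which ran `Parser` (lexing via `Lexer`) and then `Interpreter`. Below is a minimal sketch of the entry-point usage this commit deletes, reconstructed from the removed `psi/__init__.py` and `psi/execution.py`; the script literal is taken from the docstring example in the removed `psi/lexer.py` and is illustrative only, not a verified end-to-end run:

```python
from psi import Psi  # entry point removed by this commit

# Script syntax as shown in the deleted psi/lexer.py docstring example.
script = """
@newMessage: {
    ? message == 1: reply: hi
}
"""

engine = Psi(script)        # stores the input and builds Execution(script)
result = engine.execute()   # Execution: Parser(script).parse() -> Interpreter(ast).interpret()
print(engine.get_result())  # cached result of the last execute()
```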
diff --git a/psi/__init__.py b/psi/__init__.py
deleted file mode 100644
index 0c9f577..0000000
--- a/psi/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Psi
-@TODO Lexical analyzer
-@BODY There is apparently quite a lot to write, so start with a few TODO list items
-"""
-
-__all__ = ['Psi']
-
-from psi.execution import Execution
-
-class Psi:
-    def __init__(self, input):
-        self.input = input
-        self.execution = Execution(input)
-        self.result = None
-
-    def execute(self):
-        self.result = self.execution.execute()
-        return self.result
-
-    def get_result(self):
-        return self.result
-
-    def set_input(self, input):
-        self.input = input
-        self.execution = Execution(input)
-        self.result = None
diff --git a/psi/exception.py b/psi/exception.py
deleted file mode 100644
index e69de29..0000000
--- a/psi/exception.py
+++ /dev/null
diff --git a/psi/execution.py b/psi/execution.py
deleted file mode 100644
index 052f7ab..0000000
--- a/psi/execution.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from psi.parsers import Parser
-from psi.interpreter import Interpreter
-
-__all__ = ['Execution']
-
-class Execution:
-    def __init__(self, input):
-        self.input = input
-
-    def execute(self):
-        parser = Parser(self.input)
-        ast = parser.parse()
-
-        interpreter = Interpreter(ast)
-        result = interpreter.interpret()
-
-        return result
diff --git a/psi/interpreter.py b/psi/interpreter.py
deleted file mode 100644
index f98a777..0000000
--- a/psi/interpreter.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from psi.lexer import Token
-
-
-__all__ = ['Interpreter']
-
-class Interpreter:
-    def __init__(self, ast):
-        self.ast = ast
-
-    def interpret(self):
-        return self.interpret_expr(self.ast)
-
-    def interpret_expr(self, node):
-        if isinstance(node, Token):
-            return node.value
-        elif isinstance(node, list):
-            for expr in node:
-                result = self.interpret_expr(expr)
-                if result is not None:
-                    return result
-
-    def interpret_condition(self, node):
-        variable = self.interpret_expr(node[0])
-        value = self.interpret_expr(node[2])
-
-        return variable == value
diff --git a/psi/lexer.py b/psi/lexer.py
deleted file mode 100644
index 2fce0eb..0000000
--- a/psi/lexer.py
+++ /dev/null
@@ -1,153 +0,0 @@
-"""
-Token and Lexer Documentation
-=============================
-
-This module provides the `Token` and `Lexer` classes for tokenizing input strings.
-
-Token Class
------------
-
-The `Token` class represents a token with a type, value, and position in the input string. It is a subclass of the built-in `dict` class.
-
-Attributes:
-- `type` (str): The type of the token.
-- `value` (str or int): The value of the token.
-- `position` (int): The position of the token in the input string.
-
-Methods:
-- `__getattr__(self, name)`: Retrieves the value of an attribute by name. Raises an `AttributeError` if the attribute does not exist.
-
-Lexer Class
------------
-
-The `Lexer` class tokenizes an input string using a set of rules.
-
-Attributes:
-- `input` (str): The input string to tokenize.
-- `position` (int): The current position in the input string.
-- `tokens` (list): The list of tokens generated by the lexer.
-
-Methods:
-- `get_next_token(self)`: Retrieves the next token from the input string.
-- `__iter__(self)`: Returns an iterator over the tokens.
-- `__getitem__(self, index)`: Retrieves a token by index.
-- `__len__(self)`: Returns the number of tokens.
-
-Usage Example
--------------
-
-```python
-lexer = Lexer('''
-@newMessage: {
-    ? message == 1: reply: hi
-    ! reply: no
-}
-''')
-
-token = lexer.get_next_token()
-while token['type'] != 'EOF':
-    print(f'Type: {token["type"]}, Value: {token["value"]}, Position: {token["position"]}')
-    token = lexer.get_next_token()
-
-print("\nAll tokens:")
-print([t['type'] for t in lexer])
-"""
-
-__all__ = ['Token', 'Lexer']
-
-class Token(dict):
-    def __init__(self, type, value, position):
-        super().__init__(type=type, value=value, position=position)
-
-    def __getattr__(self, name):
-        try:
-            return self[name]
-        except KeyError:
-            raise AttributeError(f"'Token' object has no attribute '{name}'")
-
-class Lexer:
-    def __init__(self, input):
-        self.input = input
-        self.position = 0
-        self.tokens = []
-
-    def get_next_token(self):
-        while self.position < len(self.input):
-            current_char = self.input[self.position]
-
-            if current_char.isspace():
-                self.position += 1
-                continue
-
-            if current_char == '#':
-                self.position += 1
-                while (self.position < len(self.input) and
-                       self.input[self.position] != '\n'):
-                    self.position += 1
-                continue
-
-            if current_char == '/' and self.position + 1 < len(self.input) and self.input[self.position + 1] == '*':
-                self.position += 2
-                while (self.position < len(self.input) - 1 and
-                       (self.input[self.position] != '*' or self.input[self.position + 1] != '/')):
-                    self.position += 1
-                if self.position < len(self.input) - 1:
-                    self.position += 2
-                continue
-
-            if current_char.isalpha():
-                start_position = self.position
-                while (self.position < len(self.input) and
-                       self.input[self.position].isalnum()):
-                    self.position += 1
-                token = Token('IDENTIFIER', self.input[start_position:self.position], start_position)
-                self.tokens.append(token)
-                return token
-
-            if current_char.isdigit():
-                start_position = self.position
-                while (self.position < len(self.input) and
-                       self.input[self.position].isdigit()):
-                    self.position += 1
-                token = Token('INTEGER', int(self.input[start_position:self.position]), start_position)
-                self.tokens.append(token)
-                return token
-
-            if current_char in {'<', '>', '=', '!', '&', '|', '@'}:
-                if (self.position + 1 < len(self.input) and
-                        self.input[self.position + 1] in {'=', '&', '|'}):
-                    token = Token('OPERATOR', current_char + self.input[self.position + 1], self.position)
-                    self.position += 2
-                else:
-                    token = Token('OPERATOR', current_char, self.position)
-                    self.position += 1
-                self.tokens.append(token)
-                return token
-
-            if current_char in {'{', '}', '(', ')', '[', ']', ';', ',', '.', ':'}:
-                token = Token('SEPARATOR', current_char, self.position)
-                self.position += 1
-                self.tokens.append(token)
-                return token
-
-            if current_char in {'?', '!', '|'}:
-                token = Token('CONTROL', current_char, self.position)
-                self.position += 1
-                self.tokens.append(token)
-                return token
-
-            self.position += 1
-            raise Exception(f'Unknown character: {current_char}')
-
-        token = Token('EOF', None, self.position)
-        self.tokens.append(token)
-        return token
-
-    def __iter__(self):
-        return iter(self.tokens)
-
-    def __getitem__(self, index):
-        return self.tokens[index]
-
-    def __len__(self):
-        return len(self.tokens)
\ No newline at end of file
diff --git a/psi/mathmatics.py b/psi/mathmatics.py
deleted file mode 100644
index e69de29..0000000
--- a/psi/mathmatics.py
+++ /dev/null
diff --git a/psi/parsers.py b/psi/parsers.py
deleted file mode 100644
index db3e52c..0000000
--- a/psi/parsers.py
+++ /dev/null
@@ -1,65 +0,0 @@
-from psi.lexer import Lexer, Token
-
-
-__all__ = ['Parser']
-
-class Parser:
-    def __init__(self, input):
-        self.lexer = Lexer(input)
-        self.tokens = iter(self.lexer)
-        self.current_token = next(self.tokens)
-
-    def parse(self):
-        return self.parse_expr()
-
-    def parse_expr(self):
-        token = self.current_token
-        if token.value == '?':
-            self.eat('?')
-
-            condition = self.parse_condition()
-
-            self.eat(':')
-
-            if condition:
-                result = self.parse_reply()
-            else:
-                result = None
-
-            return result
-
-    def parse_condition(self):
-        variable = self.parse_variable()
-        self.eat('==')
-        value = self.parse_value()
-
-        return variable == value
-
-    def parse_variable(self):
-        token = self.current_token
-        self.eat('IDENTIFIER')
-        return token.value
-
-    def parse_value(self):
-        token = self.current_token
-        if token.type == 'INTEGER':
-            self.eat('INTEGER')
-            return token.value
-        else:
-            raise Exception(f'Invalid value: {token.value}')
-
-    def parse_reply(self):
-        self.eat('reply')
-        self.eat(':')
-
-        token = self.current_token
-        if token.type != 'SEPARATOR':
-            raise Exception(f'Invalid reply: {token.value}')
-
-        return token.value
-
-    def eat(self, expected_type):
-        if self.current_token.type == expected_type:
-            self.current_token = next(self.tokens)
-        else:
-            raise Exception(f'Unexpected token: {self.current_token.value}')
diff --git a/psi/type.py b/psi/type.py
deleted file mode 100644
index e69de29..0000000
--- a/psi/type.py
+++ /dev/null
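Most of the deleted code is the hand-rolled lexer above; one detail worth noting is that its `Token` was a `dict` subclass that also allowed attribute-style access. A self-contained sketch of that pattern, restating the removed class outside the package (the demo values are hypothetical):

```python
class Token(dict):
    """Dict-backed token with attribute access, as in the removed psi/lexer.py."""

    def __init__(self, type, value, position):
        super().__init__(type=type, value=value, position=position)

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(f"'Token' object has no attribute '{name}'")


tok = Token('INTEGER', 42, 7)                # hypothetical token: type, value, position
print(tok.type, tok['value'], tok.position)  # attribute and key access are interchangeable
```

Because the tokens remain plain dicts, they print and compare like ordinary mappings, which is presumably why the removed lexer favored this over a dedicated token class.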
