aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/src/psi
diff options
context:
space:
mode:
author简律纯 <i@jyunko.cn>2023-09-27 17:31:16 +0800
committer简律纯 <i@jyunko.cn>2023-09-27 17:31:16 +0800
commitba4129933cdb6d91e695b2de900b8753652ec385 (patch)
treec520d508bf50cd22ea3123840f4aff77f148256b /src/psi
parent3ad303968524f6dc57b7d5900e33963c77342552 (diff)
downloadTRPGNivis-ba4129933cdb6d91e695b2de900b8753652ec385.tar.gz
TRPGNivis-ba4129933cdb6d91e695b2de900b8753652ec385.zip
feat(pyproject): 优化python包管理结构
Diffstat (limited to 'src/psi')
-rw-r--r--src/psi/__init__.py26
-rw-r--r--src/psi/exception.py0
-rw-r--r--src/psi/execution.py17
-rw-r--r--src/psi/interpreter.py26
-rw-r--r--src/psi/lexer.py153
-rw-r--r--src/psi/mathmatics.py0
-rw-r--r--src/psi/parsers.py65
-rw-r--r--src/psi/type.py0
8 files changed, 287 insertions, 0 deletions
diff --git a/src/psi/__init__.py b/src/psi/__init__.py
new file mode 100644
index 0000000..1c274e2
--- /dev/null
+++ b/src/psi/__init__.py
@@ -0,0 +1,26 @@
"""Psi

@TODO lexical analyzer
@BODY there seems to be quite a lot to write, so start with a few TODO list items
"""

# Fix: the module defines (and means to export) the class `Psi`;
# the original `__all__ = ['psi']` listed a name that does not exist.
__all__ = ['Psi']

from psi.execution import Execution
+
class Psi:
    """Facade tying an input program to its Execution pipeline.

    Tracks three pieces of state: the raw input string, an Execution
    built for it, and the most recent result (None until execute()).
    """

    def __init__(self, input):
        # set_input performs exactly the initialisation we need, so
        # delegate to it to keep construction and reset in sync.
        self.set_input(input)

    def execute(self):
        """Run the pipeline, cache the outcome, and return it."""
        self.result = self.execution.execute()
        return self.result

    def get_result(self):
        """Return the cached result of the last execute() call."""
        return self.result

    def set_input(self, input):
        """Swap in a new input and reset the execution state."""
        self.input = input
        self.execution = Execution(input)
        self.result = None
diff --git a/src/psi/exception.py b/src/psi/exception.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/psi/exception.py
diff --git a/src/psi/execution.py b/src/psi/execution.py
new file mode 100644
index 0000000..052f7ab
--- /dev/null
+++ b/src/psi/execution.py
@@ -0,0 +1,17 @@
+from psi.parsers import Parser
+from psi.interpreter import Interpreter
+
+__all__ = ['Execution']
+
class Execution:
    """Drives the full parse-then-interpret pipeline for one input."""

    def __init__(self, input):
        self.input = input

    def execute(self):
        """Parse the stored input into an AST, interpret it, and
        return the interpreter's result."""
        tree = Parser(self.input).parse()
        return Interpreter(tree).interpret()
diff --git a/src/psi/interpreter.py b/src/psi/interpreter.py
new file mode 100644
index 0000000..f98a777
--- /dev/null
+++ b/src/psi/interpreter.py
@@ -0,0 +1,26 @@
+from psi.lexer import Token
+
+
+__all__ = ['Interpreter']
+
class Interpreter:
    """Evaluates an AST produced by the parser."""

    def __init__(self, ast):
        self.ast = ast

    def interpret(self):
        """Evaluate the whole tree and return its value."""
        return self.interpret_expr(self.ast)

    def interpret_expr(self, node):
        """Evaluate one node.

        A Token evaluates to its value; a list evaluates to the first
        non-None result among its children; anything else yields None.
        """
        if isinstance(node, Token):
            return node.value
        if isinstance(node, list):
            for child in node:
                value = self.interpret_expr(child)
                if value is not None:
                    return value
        return None

    def interpret_condition(self, node):
        """Evaluate children 0 and 2 of *node* and compare for equality.

        NOTE(review): node[1] (presumably the operator) is ignored, so
        only '==' semantics are supported — confirm against the grammar.
        """
        left = self.interpret_expr(node[0])
        right = self.interpret_expr(node[2])
        return left == right
diff --git a/src/psi/lexer.py b/src/psi/lexer.py
new file mode 100644
index 0000000..2fce0eb
--- /dev/null
+++ b/src/psi/lexer.py
@@ -0,0 +1,153 @@
+"""
+Token and Lexer Documentation
+=============================
+
+This module provides the `Token` and `Lexer` classes for tokenizing input strings.
+
+Token Class
+-----------
+
+The `Token` class represents a token with a type, value, and position in the input string. It is a subclass of the built-in `dict` class.
+
+Attributes:
+- `type` (str): The type of the token.
+- `value` (str or int): The value of the token.
+- `position` (int): The position of the token in the input string.
+
+Methods:
+- `__getattr__(self, name)`: Retrieves the value of an attribute by name. Raises an `AttributeError` if the attribute does not exist.
+
+Lexer Class
+-----------
+
+The `Lexer` class tokenizes an input string using a set of rules.
+
+Attributes:
+- `input` (str): The input string to tokenize.
+- `position` (int): The current position in the input string.
+- `tokens` (list): The list of tokens generated by the lexer.
+
+Methods:
+- `get_next_token(self)`: Retrieves the next token from the input string.
+- `__iter__(self)`: Returns an iterator over the tokens.
+- `__getitem__(self, index)`: Retrieves a token by index.
+- `__len__(self)`: Returns the number of tokens.
+
+Usage Example
+-------------
+
+```python
+lexer = Lexer('''
+@newMessage: {
+ ? message == 1: reply: hi
+ ! reply: no
+}
+''')
+
+token = lexer.get_next_token()
+while token['type'] != 'EOF':
+ print(f'Type: {token["type"]}, Value: {token["value"]}, Position: {token["position"]}')
+ token = lexer.get_next_token()
+
+print("\nAll tokens:")
+print([t['type'] for t in lexer])
+"""
+
+__all__ = ['Token', 'Lexer']
+
class Token(dict):
    """A lexical token stored as a dict with 'type', 'value' and
    'position' keys, also readable as attributes (token.type, ...)."""

    def __init__(self, type, value, position):
        super().__init__(type=type, value=value, position=position)

    def __getattr__(self, name):
        # Mirror dict lookup so token.type works like token['type'].
        try:
            return self[name]
        except KeyError:
            raise AttributeError(f"'Token' object has no attribute '{name}'")

class Lexer:
    """Tokenizes an input string on demand via get_next_token().

    Token types: IDENTIFIER, INTEGER, OPERATOR, SEPARATOR, CONTROL and a
    final EOF. '#' starts a line comment; '/* ... */' is a block comment.
    """

    def __init__(self, input):
        self.input = input
        self.position = 0  # current scan offset into self.input
        self.tokens = []   # every token handed out so far, in order

    def get_next_token(self):
        """Scan forward, record and return the next token.

        Raises:
            Exception: on a character no lexing rule recognizes.
        """
        while self.position < len(self.input):
            current_char = self.input[self.position]

            # Skip whitespace.
            if current_char.isspace():
                self.position += 1
                continue

            # Line comment: '#' up to (not including) the newline.
            if current_char == '#':
                self.position += 1
                while (self.position < len(self.input) and
                       self.input[self.position] != '\n'):
                    self.position += 1
                continue

            # Block comment '/* ... */'; an unterminated comment
            # consumes the rest of the input.
            if current_char == '/' and self.position + 1 < len(self.input) and self.input[self.position + 1] == '*':
                self.position += 2
                while (self.position < len(self.input) - 1 and
                       (self.input[self.position] != '*' or self.input[self.position + 1] != '/')):
                    self.position += 1
                if self.position < len(self.input) - 1:
                    self.position += 2
                continue

            # Identifier: a letter followed by letters/digits.
            if current_char.isalpha():
                start_position = self.position
                while (self.position < len(self.input) and
                       self.input[self.position].isalnum()):
                    self.position += 1
                token = Token('IDENTIFIER', self.input[start_position:self.position], start_position)
                self.tokens.append(token)
                return token

            # Integer literal.
            if current_char.isdigit():
                start_position = self.position
                while (self.position < len(self.input) and
                       self.input[self.position].isdigit()):
                    self.position += 1
                token = Token('INTEGER', int(self.input[start_position:self.position]), start_position)
                self.tokens.append(token)
                return token

            # Operators, possibly two characters wide ('==', '!=', ...).
            if current_char in {'<', '>', '=', '!', '&', '|', '@'}:
                next_char = (self.input[self.position + 1]
                             if self.position + 1 < len(self.input) else '')
                if next_char in {'=', '&', '|'}:
                    token = Token('OPERATOR', current_char + next_char, self.position)
                    self.position += 2
                elif current_char in {'!', '|'}:
                    # Bug fix: a lone '!' or '|' is a CONTROL character in
                    # the grammar (e.g. '! reply: no' in the module
                    # docstring); previously this OPERATOR branch swallowed
                    # them, making the CONTROL branch below unreachable
                    # for these two characters.
                    token = Token('CONTROL', current_char, self.position)
                    self.position += 1
                else:
                    token = Token('OPERATOR', current_char, self.position)
                    self.position += 1
                self.tokens.append(token)
                return token

            if current_char in {'{', '}', '(', ')', '[', ']', ';', ',', '.', ':'}:
                token = Token('SEPARATOR', current_char, self.position)
                self.position += 1
                self.tokens.append(token)
                return token

            # Control characters. '!' and '|' are routed here from the
            # operator branch above so that '!=' and '||' still lex as
            # operators; only '?' normally reaches this test directly.
            if current_char in {'?', '!', '|'}:
                token = Token('CONTROL', current_char, self.position)
                self.position += 1
                self.tokens.append(token)
                return token

            # Bug fix: dropped the dead `self.position += 1` that
            # preceded this unconditional raise.
            raise Exception(f'Unknown character: {current_char}')

        # End of input. Bug fix: guard the append so that repeated calls
        # at EOF do not pile duplicate EOF tokens into self.tokens.
        token = Token('EOF', None, self.position)
        if not self.tokens or self.tokens[-1]['type'] != 'EOF':
            self.tokens.append(token)
        return token

    def __iter__(self):
        # Iterates only the tokens produced so far.
        return iter(self.tokens)

    def __getitem__(self, index):
        return self.tokens[index]

    def __len__(self):
        return len(self.tokens)
diff --git a/src/psi/mathmatics.py b/src/psi/mathmatics.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/psi/mathmatics.py
diff --git a/src/psi/parsers.py b/src/psi/parsers.py
new file mode 100644
index 0000000..db3e52c
--- /dev/null
+++ b/src/psi/parsers.py
@@ -0,0 +1,65 @@
+from psi.lexer import Lexer, Token
+
+
+__all__ = ['Parser']
+
class Parser:
    """Parses (and partially evaluates) the '? cond: reply: x' grammar.

    Bug fixes versus the original:
    - Tokens are pulled lazily with Lexer.get_next_token(); previously
      the parser did ``iter(self.lexer)`` over Lexer.tokens, which is
      empty before any scanning, so ``next()`` in __init__ raised
      StopIteration and the class could never be constructed.
    - eat() now matches either the token's type or its literal value;
      previously it compared literals like '?', ':' and 'reply' against
      the token *type*, which never matched any produced type.
    - parse_reply() raised on every non-SEPARATOR token, i.e. on every
      valid reply word (inverted check); it now requires the reply to
      be a word or a number.
    """

    def __init__(self, input):
        self.lexer = Lexer(input)
        self.current_token = self.lexer.get_next_token()

    def parse(self):
        return self.parse_expr()

    def parse_expr(self):
        """Parse '? condition: reply: value'.

        Returns the reply value when the condition holds, otherwise
        None (also None when the input does not start with '?').
        """
        token = self.current_token
        if token.value == '?':
            self.eat('?')
            condition = self.parse_condition()
            self.eat(':')
            if condition:
                result = self.parse_reply()
            else:
                result = None
            return result

    def parse_condition(self):
        # NOTE(review): as in the original, this compares the variable
        # *name* against the literal value — there is no environment
        # lookup yet, so the comparison is almost always False.
        variable = self.parse_variable()
        self.eat('==')
        value = self.parse_value()
        return variable == value

    def parse_variable(self):
        token = self.current_token
        self.eat('IDENTIFIER')
        return token.value

    def parse_value(self):
        token = self.current_token
        if token.type == 'INTEGER':
            self.eat('INTEGER')
            return token.value
        else:
            raise Exception(f'Invalid value: {token.value}')

    def parse_reply(self):
        self.eat('reply')
        self.eat(':')
        token = self.current_token
        # The reply payload must be a word or a number.
        if token.type not in ('IDENTIFIER', 'INTEGER'):
            raise Exception(f'Invalid reply: {token.value}')
        return token.value

    def eat(self, expected_type):
        """Advance past the current token when its type OR literal
        value equals *expected_type*; raise otherwise."""
        if (self.current_token.type == expected_type or
                self.current_token.value == expected_type):
            self.current_token = self.lexer.get_next_token()
        else:
            raise Exception(f'Unexpected token: {self.current_token.value}')
diff --git a/src/psi/type.py b/src/psi/type.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/psi/type.py