| author | 2024-02-24 13:04:40 +0800 |
|---|---|
| committer | 2024-02-24 13:04:40 +0800 |
| commit | 57b2835ecc6c9b30920e929985b9d7cafcb7c457 (patch) |
| tree | df0c1221d4766365c231e8bd02e6d8e7bdb63420 |
| parent | 45e2f3631bc8d13dacba57e705c7591e7e707b2a (diff) |
| download | TRPGNivis-57b2835ecc6c9b30920e929985b9d7cafcb7c457.tar.gz, TRPGNivis-57b2835ecc6c9b30920e929985b9d7cafcb7c457.zip |
chore(project): add ruff deps
chore(lint): format code with ruff
chore(project): add tool.ruff format section
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | docs/source/conf.py | 4 |
| -rw-r--r-- | nivis-python/__init__.py | 4 |
| -rw-r--r-- | nivis-python/exception.py | 3 |
| -rw-r--r-- | nivis-python/execution.py | 3 |
| -rw-r--r-- | nivis-python/interpreter.py | 3 |
| -rw-r--r-- | nivis-python/lexer.py | 77 |
| -rw-r--r-- | nivis-python/parsers.py | 29 |
| -rw-r--r-- | pdm.lock | 29 |
| -rw-r--r-- | pyproject.toml | 84 |
| -rw-r--r-- | tests/__init__.py | 8 |
| -rw-r--r-- | tests/get_next_token.py | 2 |
11 files changed, 182 insertions, 64 deletions
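
The commit pulls ruff 0.2.2 into a new pdm `lint` dependency group, adds a `[tool.ruff]` section to pyproject.toml, and mechanically reformats the Python sources; the per-file diffs follow below. As a rough sketch only — the repository does not ship this helper, and both the paths and the availability of the `ruff` binary from the lint group are assumptions — the same two commands could be driven from Python like this:

```python
# Hypothetical helper, not part of the repository: runs the same ruff commands
# the new "lint" dependency group makes available (ruff>=0.2.2 must be on PATH).
import subprocess


def run_lint(paths: list[str]) -> int:
    """Lint and check formatting for the given paths; return a combined exit code."""
    check = subprocess.run(["ruff", "check", *paths])
    fmt = subprocess.run(["ruff", "format", "--check", *paths])
    return check.returncode or fmt.returncode


if __name__ == "__main__":
    raise SystemExit(run_lint(["nivis-python", "tests", "docs"]))
```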
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 9890225..fab6f31 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -26,9 +26,7 @@ AUTHORS = ",".join([f"{aut['name']}" for aut in AUTHOR_TABLE])
 
 project = PROJECT_NAME  # "Infini"
 release = PROJECT_VERSION  # "latest"
-copyright = (
-    "2023-PRESENT, HydroRoll-Team & 浊莲."
-)
+copyright = "2023-PRESENT, HydroRoll-Team & 浊莲."
 author = AUTHORS  # "Hsiang Nianian"
 
 html_title = "INFINI GENERATOR II"

diff --git a/nivis-python/__init__.py b/nivis-python/__init__.py
index b3e08df..f19e06b 100644
--- a/nivis-python/__init__.py
+++ b/nivis-python/__init__.py
@@ -3,10 +3,10 @@
 @BODY 似乎要写的还蛮多的,所以先写几个TODO List
 """
 
-__all__ = ['psi', 'Exception', 'interpreter', 'lexer', 'Parser']
+__all__ = ["psi", "Exception", "interpreter", "lexer", "Parser"]
 
 from .psi import psi
 from .execution import Execution
 from .interpreter import Interpreter
 from .lexer import Lexer
-from .parsers import Parser
\ No newline at end of file
+from .parsers import Parser

diff --git a/nivis-python/exception.py b/nivis-python/exception.py
index a8b7201..0ec22e5 100644
--- a/nivis-python/exception.py
+++ b/nivis-python/exception.py
@@ -10,7 +10,7 @@ class PsiException(Exception):
     ```
     """
 
-    
+
 class ValueError(PsiException):
     """
     An exception class for value-related errors in Psi code.
@@ -23,6 +23,7 @@ class ValueError(PsiException):
     ```
     """
 
+
 class GrammarError(PsiException):
     """
     An exception class for grammar-related errors in Psi code.

diff --git a/nivis-python/execution.py b/nivis-python/execution.py
index 0abdf2c..a11d55b 100644
--- a/nivis-python/execution.py
+++ b/nivis-python/execution.py
@@ -1,7 +1,8 @@
 from psi.parsers import Parser
 from psi.interpreter import Interpreter
 
-__all__ = ['Execution']
+__all__ = ["Execution"]
+
 
 class Execution:
     """

diff --git a/nivis-python/interpreter.py b/nivis-python/interpreter.py
index 8aa8fad..0817274 100644
--- a/nivis-python/interpreter.py
+++ b/nivis-python/interpreter.py
@@ -1,7 +1,8 @@
 from psi.lexer import Token
 
-__all__ = ['Interpreter']
+__all__ = ["Interpreter"]
+
 
 class Interpreter:
     """

diff --git a/nivis-python/lexer.py b/nivis-python/lexer.py
index 474891b..f4dafc4 100644
--- a/nivis-python/lexer.py
+++ b/nivis-python/lexer.py
@@ -54,7 +54,8 @@ print([t['type'] for t in lexer])
 """
 
 from psi.exception import ValueError
-__all__ = ['Token', 'Lexer']
+__all__ = ["Token", "Lexer"]
+
 
 class Token(dict):
     """
@@ -124,6 +125,7 @@ class Lexer:
         print(token)
     ```
     """
+
    def __init__(self, input):
        """
        Initializes a Lexer object.
@@ -155,17 +157,25 @@
                 self.position += 1
                 continue
 
-            if current_char == '#':
+            if current_char == "#":
                 self.position += 1
-                while (self.position < len(self.input) and
-                       self.input[self.position] != '\n'):
+                while (
+                    self.position < len(self.input)
+                    and self.input[self.position] != "\n"
+                ):
                     self.position += 1
                 continue
 
-            if current_char == '/' and self.position + 1 < len(self.input) and self.input[self.position + 1] == '*':
+            if (
+                current_char == "/"
+                and self.position + 1 < len(self.input)
+                and self.input[self.position + 1] == "*"
+            ):
                 self.position += 2
-                while (self.position < len(self.input) - 1 and
-                       (self.input[self.position] != '*' or self.input[self.position + 1] != '/')):
+                while self.position < len(self.input) - 1 and (
+                    self.input[self.position] != "*"
+                    or self.input[self.position + 1] != "/"
+                ):
                     self.position += 1
                 if self.position < len(self.input) - 1:
                     self.position += 2
@@ -173,41 +183,58 @@
             if current_char.isalpha():
                 start_position = self.position
-                while (self.position < len(self.input) and
-                       self.input[self.position].isalnum()):
+                while (
+                    self.position < len(self.input)
+                    and self.input[self.position].isalnum()
+                ):
                     self.position += 1
-                token = Token('IDENTIFIER', self.input[start_position:self.position], start_position)
+                token = Token(
+                    "IDENTIFIER",
+                    self.input[start_position : self.position],
+                    start_position,
+                )
                 self.tokens.append(token)
                 return token
 
             if current_char.isdigit():
                 start_position = self.position
-                while (self.position < len(self.input) and
-                       self.input[self.position].isdigit()):
+                while (
+                    self.position < len(self.input)
+                    and self.input[self.position].isdigit()
+                ):
                     self.position += 1
-                token = Token('INTEGER', int(self.input[start_position:self.position]), start_position)
+                token = Token(
+                    "INTEGER",
+                    int(self.input[start_position : self.position]),
+                    start_position,
+                )
                 self.tokens.append(token)
                 return token
 
-            if current_char in {'<', '>', '=', '!', '&', '|', '@'}:
-                if (self.position + 1 < len(self.input) and
-                        self.input[self.position + 1] in {'=', '&', '|'}):
-                    token = Token('OPERATOR', current_char + self.input[self.position + 1], self.position)
+            if current_char in {"<", ">", "=", "!", "&", "|", "@"}:
+                if self.position + 1 < len(self.input) and self.input[
+                    self.position + 1
+                ] in {"=", "&", "|"}:
+                    token = Token(
+                        "OPERATOR",
+                        current_char + self.input[self.position + 1],
+                        self.position,
+                    )
                     self.position += 2
                 else:
-                    token = Token('OPERATOR', current_char, self.position)
+                    token = Token("OPERATOR", current_char, self.position)
                     self.position += 1
                 self.tokens.append(token)
                 return token
 
-            if current_char in {'{', '}', '(', ')', '[', ']', ';', ',', '.', ':'}:
-                return self._extracted_from_get_next_token_64('SEPARATOR', current_char)
-            if current_char in {'?', '!', '|'}:
-                return self._extracted_from_get_next_token_64('CONTROL', current_char)
+            if current_char in {"{", "}", "(", ")", "[", "]", ";", ",", ".", ":"}:
+                return self._extracted_from_get_next_token_64("SEPARATOR", current_char)
+            if current_char in {"?", "!", "|"}:
+                return self._extracted_from_get_next_token_64("CONTROL", current_char)
 
             self.position += 1
-            raise ValueError(f'Unknown character: {current_char}')
+            raise ValueError(f"Unknown character: {current_char}")
 
 
-        token = Token('EOF', None, self.position)
+        token = Token("EOF", None, self.position)
         self.tokens.append(token)
         return token
@@ -246,4 +273,4 @@
         Returns:
             The number of tokens.
         """
-        return len(self.tokens)
\ No newline at end of file
+        return len(self.tokens)
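
For orientation, here is a minimal usage sketch of the lexer being reformatted above, pieced together from the docstrings visible in the diff (`Lexer` is iterable and yields dict-based `Token` objects with a `'type'` field). The import path and the sample input are assumptions — the sources live in `nivis-python/` but import one another as the `psi` package:

```python
# Sketch only: assumes the package is importable as `psi`, as the repo's own
# imports (`from psi.lexer import Token`) suggest.
from psi.lexer import Lexer

source = "? hp == 3 : reply : ok"  # hypothetical Psi-style input
lexer = Lexer(source)

for token in lexer:
    print(token)  # Token subclasses dict, e.g. {'type': 'IDENTIFIER', ...}
    if token["type"] == "EOF":  # the 'type' key is shown in the module docstring
        break
```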
diff --git a/nivis-python/parsers.py b/nivis-python/parsers.py
index f68f95f..08c3a2c 100644
--- a/nivis-python/parsers.py
+++ b/nivis-python/parsers.py
@@ -1,7 +1,8 @@
 from psi.lexer import Lexer, Token
 
-__all__ = ['Parser']
+__all__ = ["Parser"]
+
 
 class Parser:
     """
@@ -51,12 +52,12 @@
            The result of the parsing.
        """
        token = self.current_token
 
-        if token.value == '?':
-            self.eat('?')
+        if token.value == "?":
+            self.eat("?")
 
             condition = self.parse_condition()
-            self.eat(':')
+            self.eat(":")
 
             if condition:
                 result = self.parse_reply()
@@ -73,7 +74,7 @@
            The result of the parsing.
        """
        variable = self.parse_variable()
-        self.eat('==')
+        self.eat("==")
        value = self.parse_value()
 
        return variable == value
@@ -86,7 +87,7 @@
            The result of the parsing.
        """
        token = self.current_token
-        self.eat('IDENTIFIER')
+        self.eat("IDENTIFIER")
        return token.value
 
    def parse_value(self):
@@ -100,11 +101,11 @@
        Raises:
            Exception: Raised when an invalid value is encountered.
        """
        token = self.current_token
-        if token.type == 'INTEGER':
-            self.eat('INTEGER')
+        if token.type == "INTEGER":
+            self.eat("INTEGER")
            return token.value
        else:
-            raise Exception(f'Invalid value: {token.value}')
+            raise Exception(f"Invalid value: {token.value}")
 
    def parse_reply(self):
@@ -116,12 +117,12 @@
        Raises:
            Exception: Raised when an invalid reply is encountered.
        """
-        self.eat('reply')
-        self.eat(':')
+        self.eat("reply")
+        self.eat(":")
 
        token = self.current_token
-        if token.type != 'SEPARATOR':
-            raise Exception(f'Invalid reply: {token.value}')
+        if token.type != "SEPARATOR":
+            raise Exception(f"Invalid reply: {token.value}")
 
        return token.value
 
@@ -141,4 +142,4 @@
        if self.current_token.type == expected_type:
            self.current_token = next(self.tokens)
        else:
-            raise Exception(f'Unexpected token: {self.current_token.value}')
+            raise Exception(f"Unexpected token: {self.current_token.value}")
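
The grammar that `Parser` walks is a conditional reply of the form `? <identifier> == <integer> : reply : ...`. The snippet below is a self-contained illustration of that flow over plain tuples — it is not the project's `Parser` API (whose constructor is not shown in this diff), and the token values are made up:

```python
# Illustration only, not the repository's Parser class.
Token = tuple[str, object]  # (type, value), mirroring the lexer's token fields


def parse_condition(tokens: list[Token], env: dict[str, int]) -> bool:
    """Consume IDENTIFIER '==' INTEGER and evaluate the comparison against env."""
    (_, name), (_, operator), (_, number) = tokens[:3]
    if operator != "==":
        raise Exception(f"Unexpected token: {operator}")
    return env.get(name) == number


tokens = [("IDENTIFIER", "hp"), ("OPERATOR", "=="), ("INTEGER", 3)]
print(parse_condition(tokens, {"hp": 3}))  # -> True
```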
diff --git a/pdm.lock b/pdm.lock
--- a/pdm.lock
+++ b/pdm.lock
@@ -2,11 +2,11 @@
 # It is not intended for manual editing.
 
 [metadata]
-groups = ["default", "docs"]
+groups = ["default", "docs", "lint"]
 cross_platform = true
 static_urls = false
 lock_version = "4.3"
-content_hash = "sha256:a3b2ce1d5bb6047c665fe5b29b48981225c6d7af9751e16ded436071d9eea8fc"
+content_hash = "sha256:29b63ff483b818541b61b1dabd6f9220b0a920a8e6186396fe6b9d98d9113716"
 
 [[package]]
 name = "alabaster"
@@ -339,6 +339,31 @@ files = [
 ]
 
 [[package]]
+name = "ruff"
+version = "0.2.2"
+requires_python = ">=3.7"
+summary = "An extremely fast Python linter and code formatter, written in Rust."
+files = [
+    {file = "ruff-0.2.2-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0a9efb032855ffb3c21f6405751d5e147b0c6b631e3ca3f6b20f917572b97eb6"},
+    {file = "ruff-0.2.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:d450b7fbff85913f866a5384d8912710936e2b96da74541c82c1b458472ddb39"},
+    {file = "ruff-0.2.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecd46e3106850a5c26aee114e562c329f9a1fbe9e4821b008c4404f64ff9ce73"},
+    {file = "ruff-0.2.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e22676a5b875bd72acd3d11d5fa9075d3a5f53b877fe7b4793e4673499318ba"},
+    {file = "ruff-0.2.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1695700d1e25a99d28f7a1636d85bafcc5030bba9d0578c0781ba1790dbcf51c"},
+    {file = "ruff-0.2.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:b0c232af3d0bd8f521806223723456ffebf8e323bd1e4e82b0befb20ba18388e"},
+    {file = "ruff-0.2.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f63d96494eeec2fc70d909393bcd76c69f35334cdbd9e20d089fb3f0640216ca"},
+    {file = "ruff-0.2.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a61ea0ff048e06de273b2e45bd72629f470f5da8f71daf09fe481278b175001"},
+    {file = "ruff-0.2.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1439c8f407e4f356470e54cdecdca1bd5439a0673792dbe34a2b0a551a2fe3"},
+    {file = "ruff-0.2.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:940de32dc8853eba0f67f7198b3e79bc6ba95c2edbfdfac2144c8235114d6726"},
+    {file = "ruff-0.2.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:0c126da55c38dd917621552ab430213bdb3273bb10ddb67bc4b761989210eb6e"},
+    {file = "ruff-0.2.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3b65494f7e4bed2e74110dac1f0d17dc8e1f42faaa784e7c58a98e335ec83d7e"},
+    {file = "ruff-0.2.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1ec49be4fe6ddac0503833f3ed8930528e26d1e60ad35c2446da372d16651ce9"},
+    {file = "ruff-0.2.2-py3-none-win32.whl", hash = "sha256:d920499b576f6c68295bc04e7b17b6544d9d05f196bb3aac4358792ef6f34325"},
+    {file = "ruff-0.2.2-py3-none-win_amd64.whl", hash = "sha256:cc9a91ae137d687f43a44c900e5d95e9617cb37d4c989e462980ba27039d239d"},
+    {file = "ruff-0.2.2-py3-none-win_arm64.whl", hash = "sha256:c9d15fc41e6054bfc7200478720570078f0b41c9ae4f010bcc16bd6f4d1aacdd"},
+    {file = "ruff-0.2.2.tar.gz", hash = "sha256:e62ed7f36b3068a30ba39193a14274cd706bc486fad521276458022f7bccb31d"},
+]
+
+[[package]]
 name = "setuptools"
 version = "69.1.1"
 requires_python = ">=3.8"
diff --git a/pyproject.toml b/pyproject.toml
index 16df8fe..79925d4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,19 +2,11 @@
 name = "nivis-python"
 version = "0.1.0"
 description = "Python implementation for Nivis."
-authors = [
-    {name = "简律纯", email = "i@jyunko.cn"},
-]
-dependencies = [
-    "ply>=3.11",
-]
+authors = [{ name = "简律纯", email = "i@jyunko.cn" }]
+dependencies = ["ply>=3.11"]
 requires-python = ">=3.11"
 readme = "README.rst"
-license = {text = "MIT"}
-
-[build-system]
-requires = ["pdm-backend"]
-build-backend = "pdm.backend"
+license = { text = "MIT" }
 
 [tool.pdm.dev-dependencies]
 docs = [
@@ -26,3 +18,73 @@ docs = [
     "sphinx-design>=0.5.0",
     "sphinx-intl>=2.1.0",
 ]
+lint = [
+    "ruff>=0.2.2",
+]
+
+[tool.ruff]
+# Exclude a variety of commonly ignored directories.
+exclude = [
+    ".bzr",
+    ".direnv",
+    ".eggs",
+    ".git",
+    ".git-rewrite",
+    ".hg",
+    ".ipynb_checkpoints",
+    ".mypy_cache",
+    ".nox",
+    ".pants.d",
+    ".pyenv",
+    ".pytest_cache",
+    ".pytype",
+    ".ruff_cache",
+    ".svn",
+    ".tox",
+    ".venv",
+    ".vscode",
+    "__pypackages__",
+    "_build",
+    "buck-out",
+    "build",
+    "dist",
+    "node_modules",
+    "site-packages",
+    "venv",
+]
+
+# Same as Black.
+line-length = 88
+indent-width = 4
+
+# Assume Python 3.8
+target-version = "py38"
+
+[tool.ruff.lint]
+# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
+select = ["E4", "E7", "E9", "F"]
+ignore = []
+
+# Allow fix for all enabled rules (when `--fix`) is provided.
+fixable = ["ALL"]
+unfixable = []
+
+# Allow unused variables when underscore-prefixed.
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
+
+[tool.ruff.format]
+# Like Black, use double quotes for strings.
+quote-style = "double"
+
+# Like Black, indent with spaces, rather than tabs.
+indent-style = "space"
+
+# Like Black, respect magic trailing commas.
+skip-magic-trailing-comma = false
+
+# Like Black, automatically detect the appropriate line ending.
+line-ending = "auto"
+
+[build-system]
+requires = ["pdm-backend"]
+build-backend = "pdm.backend"
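
The `[tool.ruff.format]` block above is what produced the mechanical changes in the Python diffs: double quotes everywhere, 88-column wrapping, and respected magic trailing commas. A hand-written sketch (not code from the repository) of the before/after effect:

```python
# Before `ruff format`:
#   names = ['psi', 'Exception', 'interpreter', 'lexer', 'Parser']
#   point = {
#       'x': 1, 'y': 2,
#   }
# After `ruff format` (quote-style = "double"; the magic trailing comma in
# `point` keeps it expanded, one item per line):
names = ["psi", "Exception", "interpreter", "lexer", "Parser"]
point = {
    "x": 1,
    "y": 2,
}
```

Note that `target-version = "py38"` is the ruff template default, while the project itself declares `requires-python = ">=3.11"` a few lines earlier in the same file.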
diff --git a/tests/__init__.py b/tests/__init__.py
index c9a0e8f..97989dc 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -4,12 +4,14 @@
 DIR = dirname(abspath(__file__))
 
 token_dict = {}  # 创建一个空字典
-with open(join(DIR, '..', 'psi', 'Grammar', 'Token'), 'r') as file:
+with open(join(DIR, "..", "psi", "Grammar", "Token"), "r") as file:
     for line in file:
         if line := line.strip():
             values = line.split()  # 使用空格分割行,得到值列表
             code = values[0]  # 第一个值为代码
-            symbol = values[1] if len(values) > 1 else None  # 第二个值为符号,如果没有第二个值,则设置为None
+            symbol = (
+                values[1] if len(values) > 1 else None
+            )  # 第二个值为符号,如果没有第二个值,则设置为None
             token_dict[code] = symbol  # 将代码和符号添加到字典中
 
 # 将字典中的键值对转换为多个变量及其对应的值
@@ -20,4 +22,4 @@ for code, symbol in token_dict.items():
 print(LPAR)
 print(RPAR)
 print(AWAIT)
-# 其他变量...
\ No newline at end of file
+# 其他变量...

diff --git a/tests/get_next_token.py b/tests/get_next_token.py
index 2801337..741ad72 100644
--- a/tests/get_next_token.py
+++ b/tests/get_next_token.py
@@ -1,2 +1,2 @@
 class ABC:
-    ...
\ No newline at end of file
+    ...
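
tests/__init__.py reads the psi/Grammar/Token table (whitespace-separated `CODE [SYMBOL]` lines) and exposes each code as a module-level variable such as `LPAR`, `RPAR` or `AWAIT`. A standalone sketch of that parsing step — the sample rows are hypothetical, the real table ships with the repository:

```python
def parse_token_table(text: str) -> dict[str, str | None]:
    """Map each token code to its literal symbol (None when a line has no symbol)."""
    table: dict[str, str | None] = {}
    for line in text.splitlines():
        if line := line.strip():
            values = line.split()  # whitespace-separated: CODE [SYMBOL]
            table[values[0]] = values[1] if len(values) > 1 else None
    return table


sample = """
LPAR (
RPAR )
AWAIT
"""  # hypothetical rows; the real file lives at psi/Grammar/Token
print(parse_token_table(sample))  # {'LPAR': '(', 'RPAR': ')', 'AWAIT': None}
```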