mypy: add trivial annotations to functions that don't change logic
commit 36d23c37bd
parent 9756025e2d
@@ -1,4 +1,4 @@
 try:
-    from .version import __version__
+    from .version import __version__  # type: ignore
 except ImportError:
     __version__ = "master"
@@ -2,12 +2,13 @@ import argparse
 import dataclasses
 import inspect
 import subprocess
 import typing

 from .options import ParserOptions
-from .simple import parse_string
+from .simple import parse_string, ParsedData


-def nondefault_repr(data):
+def nondefault_repr(data: ParsedData) -> str:
     """
     Similar to the default dataclass repr, but exclude any
     default parameters or parameters with compare=False
@@ -17,7 +18,7 @@ def nondefault_repr(data):
     get_fields = dataclasses.fields
     MISSING = dataclasses.MISSING

-    def _inner_repr(o) -> str:
+    def _inner_repr(o: typing.Any) -> str:
         if is_dataclass(o):
             vals = []
             for f in get_fields(o):
@@ -42,6 +42,9 @@ class LexToken(Protocol):
     #: Location token was found at
     location: Location

+    #: private
+    lexer: "Lexer"
+

 PhonyEnding: LexToken = lex.LexToken()  # type: ignore
 PhonyEnding.type = "PLACEHOLDER"
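A minimal sketch of the pattern this hunk extends, using simplified stand-in types rather than the parser's real ones: a typing.Protocol declares the attributes that PLY sets dynamically on its token objects, so annotating a parameter as LexToken gives mypy concrete attributes to check.

    import typing

    class Location(typing.NamedTuple):
        filename: str
        lineno: int

    class LexToken(typing.Protocol):
        type: str          # token kind, e.g. "NAME"
        value: str         # matched text
        lineno: int        # line the token was found on
        location: Location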
@@ -183,13 +186,13 @@ class Lexer:
     t_NUMBER = r"[0-9][0-9XxA-Fa-f]*"
     t_FLOAT_NUMBER = r"[-+]?[0-9]*\.[0-9]+([eE][-+]?[0-9]+)?"

-    def t_NAME(self, t):
+    def t_NAME(self, t: LexToken) -> LexToken:
         r"[A-Za-z_~][A-Za-z0-9_]*"
         if t.value in self.keywords:
             t.type = t.value
         return t

-    def t_PRECOMP_MACRO(self, t):
+    def t_PRECOMP_MACRO(self, t: LexToken) -> typing.Optional[LexToken]:
         r"\#.*"
         m = _line_re.match(t.value)
         if m:
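t_PRECOMP_MACRO gets an Optional return type because a PLY token rule that returns None discards the token. The same idea in a self-contained sketch, where Token is a hypothetical stand-in for LexToken:

    import typing

    Token = str  # hypothetical stand-in for LexToken

    def keep_token(t: Token) -> typing.Optional[Token]:
        # Returning None signals "discard"; mypy only allows it
        # because the declared return type is Optional[Token].
        if t.startswith("#"):
            return None
        return t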
@@ -200,11 +203,11 @@ class Lexer:
             self.filename = filename

             self.line_offset = 1 + self.lex.lineno - int(m.group(1))

             return None
         else:
             return t

-    def t_COMMENT_SINGLELINE(self, t):
+    def t_COMMENT_SINGLELINE(self, t: LexToken) -> LexToken:
         r"\/\/.*\n?"
         if t.value.startswith("///") or t.value.startswith("//!"):
             self.comments.append(t.value.lstrip("\t ").rstrip("\n"))
@@ -227,7 +230,7 @@ class Lexer:
     t_STRING_LITERAL = r'"([^"\\]|\\.)*"'

     # Found at http://ostermiller.org/findcomment.html
-    def t_COMMENT_MULTILINE(self, t):
+    def t_COMMENT_MULTILINE(self, t: LexToken) -> LexToken:
         r"/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/\n?"
         if t.value.startswith("/**") or t.value.startswith("/*!"):
             # not sure why, but get double new lines
@@ -238,19 +241,20 @@ class Lexer:
         t.lexer.lineno += t.value.count("\n")
         return t

-    def t_NEWLINE(self, t):
+    def t_NEWLINE(self, t: LexToken) -> LexToken:
         r"\n+"
         t.lexer.lineno += len(t.value)
         del self.comments[:]
         return t

-    def t_error(self, v):
-        print("Lex error: ", v)
+    def t_error(self, t: LexToken) -> None:
+        print("Lex error: ", t)

     _lexer = None
+    lex: lex.Lexer
+    lineno: int

-    def __new__(cls, *args, **kwargs):
+    def __new__(cls, *args, **kwargs) -> "Lexer":
         # only build the lexer once
         inst = super().__new__(cls)
         if cls._lexer is None:
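The bare `lex: lex.Lexer` and `lineno: int` declarations carry no runtime value; they only tell mypy about attributes assigned elsewhere (here, in __new__). A sketch of that pattern with invented names:

    class Counter:
        total: int  # declared for mypy; no value assigned at class level

        def __init__(self) -> None:
            self.total = 0  # the actual assignment happens here

    c = Counter()
    print(c.total)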
@@ -261,14 +265,14 @@ class Lexer:
         return inst

     def __init__(self, filename: typing.Optional[str] = None):
-        self.input = self.lex.input
+        self.input: typing.Callable[[str], None] = self.lex.input

         # For tracking current file/line position
         self.filename = filename
         self.line_offset = 0

-        self.filenames = []
-        self._filenames_set = set()
+        self.filenames: typing.List[str] = []
+        self._filenames_set: typing.Set[str] = set()

         if self.filename:
             self.filenames.append(filename)
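Assigning a bound method (`self.lex.input`) to an attribute leaves mypy without a declared type for it, hence the explicit Callable annotation. A sketch with invented names:

    import typing

    class Reader:
        def _consume(self, data: str) -> None:
            print(len(data))

        def __init__(self) -> None:
            # the annotation fixes the attribute's type once, for all assignments
            self.consume: typing.Callable[[str], None] = self._consume

    Reader().consume("hello")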
@@ -339,13 +343,15 @@ class Lexer:

     _discard_types = {"NEWLINE", "COMMENT_SINGLELINE", "COMMENT_MULTILINE"}

-    def _token_limit_exceeded(self):
+    def _token_limit_exceeded(self) -> typing.NoReturn:
         from .errors import CxxParseError

         raise CxxParseError("no more tokens left in this group")

     @contextlib.contextmanager
-    def set_group_of_tokens(self, toks: typing.List[LexToken]):
+    def set_group_of_tokens(
+        self, toks: typing.List[LexToken]
+    ) -> typing.Generator[typing.Deque[LexToken], None, None]:
         # intended for use when you have a set of tokens that you know
         # must be consumed, such as a paren grouping or some type of
         # lookahead case
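Two annotations worth noting here: typing.NoReturn marks a function that always raises, letting mypy treat code after a call to it as unreachable, and a @contextlib.contextmanager function is annotated as Generator[YieldType, SendType, ReturnType]. A runnable sketch with invented names:

    import collections
    import contextlib
    import typing

    def fail(msg: str) -> typing.NoReturn:
        raise ValueError(msg)  # always raises, never returns normally

    @contextlib.contextmanager
    def token_group(
        toks: typing.List[str],
    ) -> typing.Generator[typing.Deque[str], None, None]:
        yield collections.deque(toks)  # the "with ... as q" value is the Deque

    with token_group(["a", "b"]) as q:
        print(q.popleft())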
@@ -93,9 +93,9 @@ class CxxParser:
         self.verbose = True if self.options.verbose else False
         if self.verbose:

-            def debug_print(fmt: str, *args: typing.Any):
+            def debug_print(fmt: str, *args: typing.Any) -> None:
                 fmt = f"[%4d] {fmt}"
-                args = (inspect.currentframe().f_back.f_lineno,) + args
+                args = (inspect.currentframe().f_back.f_lineno,) + args  # type: ignore
                 print(fmt % args)

             self.debug_print = debug_print
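The `# type: ignore` is needed because inspect.currentframe() is typed as Optional[FrameType], so chaining `.f_back.f_lineno` is a mypy error even though it works in CPython. The stricter alternative is an explicit None check, sketched here:

    import inspect

    def caller_lineno() -> int:
        frame = inspect.currentframe()
        if frame is None or frame.f_back is None:
            return -1  # e.g. on implementations without frame support
        return frame.f_back.f_lineno

    print(caller_lineno())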
@@ -135,7 +135,7 @@ class CxxParser:
     #

     def _parse_error(
-        self, tok: typing.Optional[LexToken], expected=""
+        self, tok: typing.Optional[LexToken], expected: str = ""
     ) -> CxxParseError:
         if not tok:
             # common case after a failed token_if
@@ -982,7 +982,7 @@ class CxxParser:
         template: typing.Optional[TemplateDecl],
         typedef: bool,
         location: Location,
-        props: typing.Dict[str, LexToken],
+        mods: ParsedTypeModifiers,
     ) -> None:
         """
         class_specifier: class_head "{" [member_specification] "}"
@@ -1038,7 +1038,7 @@ class CxxParser:
             typename, bases, template, explicit, final, doxygen, self._current_access
         )
         state = self._push_state(
-            ClassBlockState, clsdecl, default_access, typedef, props
+            ClassBlockState, clsdecl, default_access, typedef, mods
         )
         state.location = location
         self.visitor.on_class_start(state)
@@ -1691,6 +1691,7 @@ class CxxParser:
         if not isinstance(pqname.segments[-1], NameSpecifier):
             raise self._parse_error(None)

+        props: typing.Dict
         props = dict.fromkeys(mods.both.keys(), True)
         if msvc_convention:
             props["msvc_convention"] = msvc_convention.value
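Without the bare `props: typing.Dict` declaration, mypy infers Dict[str, bool] from `dict.fromkeys(..., True)` and rejects the later string assignment. A minimal reproduction of the same situation:

    import typing

    props: typing.Dict = dict.fromkeys(["const", "static"], True)
    # accepted only because the value type was left unparameterized above:
    props["msvc_convention"] = "__cdecl"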
@@ -2011,6 +2012,7 @@ class CxxParser:
         toks = []

         # On entry we only have the base type, decorate it
+        dtype: typing.Optional[DecoratedType]
         dtype = self._parse_cv_ptr(parsed_type)

         state = self.state
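Declaring `dtype: typing.Optional[DecoratedType]` ahead of the assignment fixes the variable's type up front, so later branches may assign None without an error. The same idea in a self-contained sketch:

    import typing

    def first_even(nums: typing.List[int]) -> typing.Optional[int]:
        result: typing.Optional[int] = None  # declared wide enough for both cases
        for n in nums:
            if n % 2 == 0:
                result = n
                break
        return result

    print(first_even([1, 3, 4]))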
@@ -2145,7 +2147,7 @@ class CxxParser:
         self._next_token_must_be("(")

         # make our own pqname/op here
-        segments = [NameSpecifier("operator")]
+        segments: typing.List[PQNameSegment] = [NameSpecifier("operator")]
         pqname = PQName(segments)
         op = "conversion"
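The annotation on `segments` likely matters because List is invariant: from `[NameSpecifier(...)]` alone mypy infers List[NameSpecifier], which then cannot be used where the wider List[PQNameSegment] is expected. A sketch with invented stand-in classes, not the parser's real ones:

    import typing

    class Segment:
        pass

    class NameSegment(Segment):
        pass

    class OperatorSegment(Segment):
        pass

    segments: typing.List[Segment] = [NameSegment()]
    segments.append(OperatorSegment())  # ok thanks to the wider annotation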
@@ -298,7 +298,7 @@ class SimpleCxxVisitor:
 def parse_string(
     content: str,
     *,
-    filename="<str>",
+    filename: str = "<str>",
     options: typing.Optional[ParserOptions] = None,
     cleandoc: bool = False,
 ) -> ParsedData:
@@ -56,7 +56,7 @@ if __name__ == "__main__":

     lexer = Lexer(args.header)
     with open(lexer.filename) as fp:
-        lexer.input(fp.read())
+        lexer.input(fp.read())  # type: ignore

     toks: typing.List[Token] = []
     while True: