mypy: add trivial annotations to functions that don't change logic
parent 9756025e2d
commit 36d23c37bd
@@ -1,4 +1,4 @@
 try:
-    from .version import __version__
+    from .version import __version__  # type: ignore
 except ImportError:
     __version__ = "master"
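The ignore is needed because `version.py` is generated at build time, so mypy cannot see it in a plain source checkout even though the runtime fallback handles the missing module. A minimal runnable sketch of the same pattern (`generated_version` is a hypothetical module name, not from this repo):

    try:
        # Hypothetical build-generated module; mypy cannot find it
        # in a clean checkout, hence the ignore.
        from generated_version import version  # type: ignore
    except ImportError:
        version = "master"

    print(version)  # "master" when the generated module is absent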
@@ -2,12 +2,13 @@ import argparse
 import dataclasses
 import inspect
 import subprocess
+import typing
 
 from .options import ParserOptions
-from .simple import parse_string
+from .simple import parse_string, ParsedData
 
 
-def nondefault_repr(data):
+def nondefault_repr(data: ParsedData) -> str:
     """
     Similar to the default dataclass repr, but exclude any
     default parameters or parameters with compare=False
@@ -17,7 +18,7 @@ def nondefault_repr(data):
     get_fields = dataclasses.fields
     MISSING = dataclasses.MISSING
 
-    def _inner_repr(o) -> str:
+    def _inner_repr(o: typing.Any) -> str:
         if is_dataclass(o):
             vals = []
             for f in get_fields(o):
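`typing.Any` fits `_inner_repr` because it recurses over arbitrary dataclass field values of mixed types. A runnable sketch of that idea, with illustrative names:

    import dataclasses
    import typing

    @dataclasses.dataclass
    class Point:
        x: int = 0
        y: int = 0

    def shallow_repr(o: typing.Any) -> str:
        # Any value may arrive here: dataclasses, lists, ints, strings...
        if dataclasses.is_dataclass(o):
            vals = ", ".join(
                f"{f.name}={shallow_repr(getattr(o, f.name))}"
                for f in dataclasses.fields(o)
            )
            return f"{type(o).__name__}({vals})"
        return repr(o)

    print(shallow_repr(Point(1, 2)))  # Point(x=1, y=2)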
@@ -42,6 +42,9 @@ class LexToken(Protocol):
     #: Location token was found at
     location: Location
 
+    #: private
+    lexer: "Lexer"
+
 
 PhonyEnding: LexToken = lex.LexToken()  # type: ignore
 PhonyEnding.type = "PLACEHOLDER"
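`LexToken` being a `typing.Protocol` is what lets the commit give static structure to token objects that ply builds dynamically; the `# type: ignore` on `PhonyEnding` exists because a freshly constructed `lex.LexToken()` has not had those attributes assigned yet. A minimal sketch of structural typing via Protocol, independent of ply:

    import typing

    class TokenLike(typing.Protocol):
        # Any object with these attributes satisfies the protocol;
        # no inheritance is required (structural typing).
        type: str
        value: str

    class DynamicToken:
        def __init__(self) -> None:
            self.type = "NAME"
            self.value = "foo"

    def describe(tok: TokenLike) -> str:
        return f"{tok.type}={tok.value!r}"

    print(describe(DynamicToken()))  # NAME='foo'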
@@ -183,13 +186,13 @@ class Lexer:
     t_NUMBER = r"[0-9][0-9XxA-Fa-f]*"
     t_FLOAT_NUMBER = r"[-+]?[0-9]*\.[0-9]+([eE][-+]?[0-9]+)?"
 
-    def t_NAME(self, t):
+    def t_NAME(self, t: LexToken) -> LexToken:
         r"[A-Za-z_~][A-Za-z0-9_]*"
         if t.value in self.keywords:
             t.type = t.value
         return t
 
-    def t_PRECOMP_MACRO(self, t):
+    def t_PRECOMP_MACRO(self, t: LexToken) -> typing.Optional[LexToken]:
         r"\#.*"
         m = _line_re.match(t.value)
         if m:
@@ -200,11 +203,11 @@ class Lexer:
             self.filename = filename
 
             self.line_offset = 1 + self.lex.lineno - int(m.group(1))
+            return None
         else:
            return t
 
-    def t_COMMENT_SINGLELINE(self, t):
+    def t_COMMENT_SINGLELINE(self, t: LexToken) -> LexToken:
         r"\/\/.*\n?"
         if t.value.startswith("///") or t.value.startswith("//!"):
             self.comments.append(t.value.lstrip("\t ").rstrip("\n"))
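The `typing.Optional[LexToken]` return and the now-explicit `return None` match ply's contract: a token rule that returns None discards the token. The same shape, sketched without ply:

    import typing

    def filter_token(value: str) -> typing.Optional[str]:
        # Returning None tells the caller to discard the token,
        # mirroring how ply treats t_* rules that return nothing.
        if value.startswith("#"):
            return None
        return value

    print(filter_token("#line 1"))  # None
    print(filter_token("ident"))    # ident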
@@ -227,7 +230,7 @@ class Lexer:
     t_STRING_LITERAL = r'"([^"\\]|\\.)*"'
 
     # Found at http://ostermiller.org/findcomment.html
-    def t_COMMENT_MULTILINE(self, t):
+    def t_COMMENT_MULTILINE(self, t: LexToken) -> LexToken:
         r"/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/\n?"
         if t.value.startswith("/**") or t.value.startswith("/*!"):
             # not sure why, but get double new lines
@@ -238,19 +241,20 @@ class Lexer:
             t.lexer.lineno += t.value.count("\n")
         return t
 
-    def t_NEWLINE(self, t):
+    def t_NEWLINE(self, t: LexToken) -> LexToken:
         r"\n+"
         t.lexer.lineno += len(t.value)
         del self.comments[:]
         return t
 
-    def t_error(self, v):
-        print("Lex error: ", v)
+    def t_error(self, t: LexToken) -> None:
+        print("Lex error: ", t)
 
     _lexer = None
     lex: lex.Lexer
+    lineno: int
 
-    def __new__(cls, *args, **kwargs):
+    def __new__(cls, *args, **kwargs) -> "Lexer":
        # only build the lexer once
         inst = super().__new__(cls)
         if cls._lexer is None:
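Two small techniques appear here: the quoted `"Lexer"` return type on `__new__` is a forward reference, since the class name is not yet bound inside its own body, and the bare `lineno: int` declaration tells mypy the attribute exists without assigning anything at class level. A runnable sketch with illustrative names:

    class Counter:
        # Bare annotation: declares the attribute for mypy,
        # creates nothing at runtime.
        total: int

        def __new__(cls, *args, **kwargs) -> "Counter":
            # Quoted return type is a forward reference to the
            # class being defined.
            inst = super().__new__(cls)
            inst.total = 0
            return inst

    c = Counter()
    print(c.total)  # 0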
@@ -261,14 +265,14 @@ class Lexer:
         return inst
 
     def __init__(self, filename: typing.Optional[str] = None):
-        self.input = self.lex.input
+        self.input: typing.Callable[[str], None] = self.lex.input
 
         # For tracking current file/line position
         self.filename = filename
         self.line_offset = 0
 
-        self.filenames = []
-        self._filenames_set = set()
+        self.filenames: typing.List[str] = []
+        self._filenames_set: typing.Set[str] = set()
 
         if self.filename:
             self.filenames.append(filename)
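Annotating the attributes at their first assignment matters because mypy cannot infer an element type from an empty literal like `[]` or `set()`. The pattern in isolation (names are illustrative):

    import typing

    class FileTracker:
        def __init__(self) -> None:
            # Without the annotations mypy cannot infer element
            # types from the empty containers.
            self.names: typing.List[str] = []
            self.seen: typing.Set[str] = set()

        def add(self, name: str) -> None:
            if name not in self.seen:
                self.seen.add(name)
                self.names.append(name)

    t = FileTracker()
    t.add("a.h")
    print(t.names)  # ['a.h']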
@@ -339,13 +343,15 @@ class Lexer:
 
     _discard_types = {"NEWLINE", "COMMENT_SINGLELINE", "COMMENT_MULTILINE"}
 
-    def _token_limit_exceeded(self):
+    def _token_limit_exceeded(self) -> typing.NoReturn:
         from .errors import CxxParseError
 
         raise CxxParseError("no more tokens left in this group")
 
     @contextlib.contextmanager
-    def set_group_of_tokens(self, toks: typing.List[LexToken]):
+    def set_group_of_tokens(
+        self, toks: typing.List[LexToken]
+    ) -> typing.Generator[typing.Deque[LexToken], None, None]:
         # intended for use when you have a set of tokens that you know
         # must be consumed, such as a paren grouping or some type of
         # lookahead case
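`typing.NoReturn` tells mypy the helper always raises, so code paths that call it are treated as unreachable, and a generator-based context manager is annotated by what it yields: `Generator[YieldType, SendType, ReturnType]`. A compact runnable sketch of both annotations:

    import collections
    import contextlib
    import typing

    def fail(msg: str) -> typing.NoReturn:
        # NoReturn: mypy will not demand a return value on
        # branches that end in a call to fail().
        raise RuntimeError(msg)

    @contextlib.contextmanager
    def scratch_queue() -> typing.Generator[typing.Deque[int], None, None]:
        q: typing.Deque[int] = collections.deque()
        try:
            yield q
        finally:
            q.clear()

    with scratch_queue() as q:
        q.append(1)
        print(list(q))  # [1]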
@@ -93,9 +93,9 @@ class CxxParser:
         self.verbose = True if self.options.verbose else False
         if self.verbose:
 
-            def debug_print(fmt: str, *args: typing.Any):
+            def debug_print(fmt: str, *args: typing.Any) -> None:
                 fmt = f"[%4d] {fmt}"
-                args = (inspect.currentframe().f_back.f_lineno,) + args
+                args = (inspect.currentframe().f_back.f_lineno,) + args  # type: ignore
                 print(fmt % args)
 
             self.debug_print = debug_print
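The new ignore covers `inspect.currentframe().f_back`, which mypy flags because both `currentframe()` and `f_back` are `Optional` in typeshed. A sketch of the same situation using explicit narrowing instead of an ignore:

    import inspect

    def caller_lineno() -> int:
        frame = inspect.currentframe()
        # Narrow the Optionals explicitly rather than silencing mypy.
        assert frame is not None and frame.f_back is not None
        return frame.f_back.f_lineno

    def report() -> None:
        print("called from line", caller_lineno())

    report()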
@@ -135,7 +135,7 @@ class CxxParser:
     #
 
     def _parse_error(
-        self, tok: typing.Optional[LexToken], expected=""
+        self, tok: typing.Optional[LexToken], expected: str = ""
     ) -> CxxParseError:
         if not tok:
             # common case after a failed token_if
@@ -982,7 +982,7 @@ class CxxParser:
         template: typing.Optional[TemplateDecl],
         typedef: bool,
         location: Location,
-        props: typing.Dict[str, LexToken],
+        mods: ParsedTypeModifiers,
     ) -> None:
         """
         class_specifier: class_head "{" [member_specification] "}"
@@ -1038,7 +1038,7 @@ class CxxParser:
             typename, bases, template, explicit, final, doxygen, self._current_access
         )
         state = self._push_state(
-            ClassBlockState, clsdecl, default_access, typedef, props
+            ClassBlockState, clsdecl, default_access, typedef, mods
         )
         state.location = location
         self.visitor.on_class_start(state)
@@ -1691,6 +1691,7 @@ class CxxParser:
         if not isinstance(pqname.segments[-1], NameSpecifier):
             raise self._parse_error(None)
 
+        props: typing.Dict
         props = dict.fromkeys(mods.both.keys(), True)
         if msvc_convention:
             props["msvc_convention"] = msvc_convention.value
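The bare `props: typing.Dict` declaration widens the type before the assignment; without it mypy infers `Dict[str, bool]` from `dict.fromkeys(..., True)` and then rejects the later string value. The pattern in isolation (names are illustrative):

    import typing

    flags = {"const": None, "static": None}

    # Declared first so the inferred type is not locked to
    # Dict[str, bool] by the fromkeys() call below.
    props: typing.Dict[str, typing.Any]
    props = dict.fromkeys(flags.keys(), True)
    props["convention"] = "__cdecl"
    print(props)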
@@ -2011,6 +2012,7 @@ class CxxParser:
         toks = []
 
         # On entry we only have the base type, decorate it
+        dtype: typing.Optional[DecoratedType]
         dtype = self._parse_cv_ptr(parsed_type)
 
         state = self.state
@@ -2145,7 +2147,7 @@ class CxxParser:
         self._next_token_must_be("(")
 
         # make our own pqname/op here
-        segments = [NameSpecifier("operator")]
+        segments: typing.List[PQNameSegment] = [NameSpecifier("operator")]
         pqname = PQName(segments)
         op = "conversion"
 
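Annotating `segments` at creation matters because mypy would otherwise infer `List[NameSpecifier]` from the literal, and `List` is invariant, so appending any other segment type would be rejected. Sketched with stand-in classes:

    import typing

    class Segment:                    # stand-in base type
        pass

    class NameSegment(Segment):
        pass

    class OperatorSegment(Segment):
        pass

    # Annotated at creation; an unannotated literal would be
    # inferred as List[NameSegment] and reject the append below.
    segments: typing.List[Segment] = [NameSegment()]
    segments.append(OperatorSegment())
    print(len(segments))  # 2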
@@ -298,7 +298,7 @@ class SimpleCxxVisitor:
 def parse_string(
     content: str,
     *,
-    filename="<str>",
+    filename: str = "<str>",
     options: typing.Optional[ParserOptions] = None,
     cleandoc: bool = False,
 ) -> ParsedData:
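For context, `parse_string` is the library's simple entry point, and the annotated signature above matches how it is called. A usage sketch, assuming the package is installed as cxxheaderparser (the exact contents of `ParsedData` are not shown in this diff):

    from cxxheaderparser.simple import parse_string

    content = """
    struct Point {
      int x;
      int y;
    };
    """

    data = parse_string(content, filename="point.h")
    print(type(data).__name__)  # ParsedData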
@@ -56,7 +56,7 @@ if __name__ == "__main__":
 
     lexer = Lexer(args.header)
     with open(lexer.filename) as fp:
-        lexer.input(fp.read())
+        lexer.input(fp.read())  # type: ignore
 
     toks: typing.List[Token] = []
     while True: