id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
185,652 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
PR_ATOM = 15
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
def _format_attribute(node: ast.Attribute, level: int) -> str:
    """Render `value.attr`, spacing the dot after an integer literal.

    `1.attr` would lex as a float literal, so integer constants are
    rendered with a separating space: `1 .attr`.
    """
    rendered = to_expr(node.value, PR_ATOM)
    target = node.value
    is_int_literal = (
        isinstance(target, ast.Constant) and isinstance(target.value, int)
    ) or type(target) is ast.Num
    dot = " ." if is_int_literal else "."
    return rendered + dot + node.attr
185,653 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
PR_TUPLE = 0
def parens(level: int, target_lvl: int, value: str) -> str:
    """Wrap value in parentheses when the surrounding context binds tighter."""
    return f"({value})" if level > target_lvl else value
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
    """Render an AST expression node back to source text.

    None renders as the empty string; unknown node types produce a
    placeholder rather than raising.
    """
    if node is None:
        return ""
    fmt = _FORMATTERS.get(type(node))
    if fmt is None:
        return f"<unsupported node: {type(node).__name__}>"
    return fmt(node, level)
def _format_tuple(node: ast.Tuple, level: int) -> str:
    """Render a tuple display; a singleton gets its trailing comma."""
    elts = node.elts
    if not elts:
        return "()"
    if len(elts) == 1:
        return parens(level, PR_TUPLE, to_expr(elts[0]) + ",")
    body = ", ".join(to_expr(e) for e in elts)
    return parens(level, PR_TUPLE, body)
185,654 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
List = _alias(list, 1, inst=False, name='List')
def _format_list(node: ast.List, level: int) -> str:
    """Render a list display: [a, b, ...]."""
    inner = ", ".join(to_expr(e) for e in node.elts)
    return f"[{inner}]"
185,655 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
PR_TEST = 1
def _format_kw(node: ast.keyword):
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
def _format_call(node: ast.Call, level: int) -> str:
    """Render a call expression: positional args first, then keywords."""
    rendered = [to_expr(a) for a in node.args]
    rendered.extend(_format_kw(kw) for kw in node.keywords)
    return f"{to_expr(node.func, PR_TEST)}({', '.join(rendered)})"
185,656 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
PR_NOT = 4
PR_FACTOR = 12
def get_unop(node: ast.unaryop) -> str:
    """Return the source token for a unary operator node."""
    for op_type, token in (
        (ast.UAdd, "+"),
        (ast.USub, "-"),
        (ast.Not, "not "),
        (ast.Invert, "~"),
    ):
        if isinstance(node, op_type):
            return token
    return "<unknown unary op>"
def parens(level: int, target_lvl: int, value: str) -> str:
if level > target_lvl:
return f"({value})"
return value
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
def _format_unaryop(node: ast.UnaryOp, level: int) -> str:
    """Render a unary operation; `not` binds looser than +/-/~."""
    tgt = PR_NOT if isinstance(node.op, ast.Not) else PR_FACTOR
    text = get_unop(node.op) + to_expr(node.operand, tgt)
    return parens(level, tgt, text)
185,657 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
PR_FACTOR = 12
# Maps a BinOp operator node type to its rendered token (with surrounding
# spaces) and the precedence level at which the operation binds.
BIN_OPS = {
    ast.Add: (" + ", PR_ARITH),
    ast.Sub: (" - ", PR_ARITH),
    ast.Mult: (" * ", PR_TERM),
    ast.MatMult: (" @ ", PR_TERM),
    ast.Div: (" / ", PR_TERM),
    ast.Mod: (" % ", PR_TERM),
    ast.LShift: (" << ", PR_SHIFT),
    ast.RShift: (" >> ", PR_SHIFT),
    ast.BitOr: (" | ", PR_BOR),
    ast.BitXor: (" ^ ", PR_BXOR),
    ast.BitAnd: (" & ", PR_BAND),
    ast.FloorDiv: (" // ", PR_TERM),
    ast.Pow: (" ** ", PR_POWER),
}
def parens(level: int, target_lvl: int, value: str) -> str:
if level > target_lvl:
return f"({value})"
return value
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
def _format_binaryop(node: ast.BinOp, level: int) -> str:
    """Render a binary operation with precedence-aware parenthesization.

    Fix: the original assigned `tgt_level = PR_FACTOR` and then
    immediately overwrote it from BIN_OPS; the dead assignment is removed.
    """
    op, tgt_level = BIN_OPS[type(node.op)]
    # `**` is right-associative, so the extra precedence requirement is
    # shifted onto the left operand instead of the right one.
    rassoc = 1 if isinstance(node.op, ast.Pow) else 0
    left = to_expr(node.left, tgt_level + rassoc)
    right = to_expr(node.right, tgt_level + (1 - rassoc))
    return parens(level, tgt_level, left + op + right)
185,658 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
PR_TUPLE = 0
PR_ATOM = 15
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
def _format_subscript(node: ast.Subscript, level: int) -> str:
    """Render value[slice]; the slice renders at tuple level so bare
    commas inside the brackets need no extra parentheses."""
    base = to_expr(node.value, PR_ATOM)
    index = to_expr(node.slice, PR_TUPLE)
    return base + "[" + index + "]"
185,659 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
def _format_yield(node: ast.Yield, level: int) -> str:
raise SyntaxError("'yield expression' can not be used within an annotation") | null |
185,660 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
def _format_yield_from(node: ast.YieldFrom, level: int) -> str:
raise SyntaxError("'yield expression' can not be used within an annotation") | null |
185,661 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
Dict = _alias(dict, 2, inst=False, name='Dict')
def _format_dict(node: ast.Dict, level: int) -> str:
    """Render a dict display: {k: v, ...}."""
    items = (
        f"{to_expr(k)}: {to_expr(v)}" for k, v in zip(node.keys, node.values)
    )
    return "{" + ", ".join(items) + "}"
185,662 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
PR_AWAIT = 14
PR_ATOM = 15
def parens(level: int, target_lvl: int, value: str) -> str:
if level > target_lvl:
return f"({value})"
return value
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
def _format_await(node: ast.Await, level: int):
    """Render an await expression at await precedence."""
    inner = to_expr(node.value, PR_ATOM)
    return parens(level, PR_AWAIT, "await " + inner)
185,663 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
PR_EXPR = 6
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
def _format_starred(node: ast.Starred, level: int):
    """Render a *value unpacking expression."""
    return f"*{to_expr(node.value, PR_EXPR)}"
185,664 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
PR_OR = 2
PR_AND = 3
def parens(level: int, target_lvl: int, value: str) -> str:
if level > target_lvl:
return f"({value})"
return value
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
def _format_boolop(node: ast.BoolOp, level: int) -> str:
    """Render an and/or chain; operands render one level tighter."""
    if isinstance(node.op, ast.And):
        joiner, tgt = " and ", PR_AND
    else:
        joiner, tgt = " or ", PR_OR
    parts = (to_expr(v, tgt + 1) for v in node.values)
    return parens(level, tgt, joiner.join(parts))
185,665 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
PR_TEST = 1
def parens(level: int, target_lvl: int, value: str) -> str:
if level > target_lvl:
return f"({value})"
return value
def _format_arguments(node: ast.arguments) -> str:
    """Render an ast.arguments node as a parameter list (no parens).

    Handles positional args with defaults, *vararg, and keyword-only
    args with their defaults.

    Fix: `defaults` apply to the *last* len(defaults) positional args
    (Python semantics); the original paired defaults[i] with args[i],
    mis-rendering e.g. `lambda a, b=1: ...` as `lambda a=1, b: ...`.
    """
    res = []
    # Index of the first positional arg that carries a default.
    first_default = len(node.args) - len(node.defaults)
    for i, arg in enumerate(node.args):
        if i:
            res.append(", ")
        res.append(arg.arg)
        if i >= first_default:
            res.append("=")
            res.append(to_expr(node.defaults[i - first_default]))
    if node.vararg or node.kwonlyargs:
        if node.args:
            res.append(", ")
        # Bare "*" marks keyword-only args when there is no vararg.
        res.append("*")
        if node.vararg:
            res.append(node.vararg.arg)
    for i, arg in enumerate(node.kwonlyargs):
        if res:
            res.append(", ")
        res.append(arg.arg)
        # kw_defaults holds None entries for kwonly args without defaults.
        if i < len(node.kw_defaults) and node.kw_defaults[i]:
            res.append("=")
            res.append(to_expr(node.kw_defaults[i]))
    return "".join(res)
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
def _format_lambda(node: ast.Lambda, level: int) -> str:
    """Render a lambda expression, parenthesizing at test precedence.

    Fix: a space after "lambda" is needed whenever there is any
    parameter text at all; the original only checked positional args,
    producing invalid output like `lambda*args: ...` for vararg-only
    or kwonly-only lambdas.
    """
    args = node.args
    has_params = bool(args.args or args.vararg or args.kwonlyargs)
    value = "lambda " if has_params else "lambda"
    value += _format_arguments(args)
    value += ": " + to_expr(node.body, PR_TEST)
    return parens(level, PR_TEST, value)
185,666 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
PR_TEST = 1
def parens(level: int, target_lvl: int, value: str) -> str:
if level > target_lvl:
return f"({value})"
return value
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
def _format_if_exp(node: ast.IfExp, level: int) -> str:
    """Render a conditional expression: body if test else orelse.

    body and test render one level tighter; orelse keeps test level
    because the construct is right-associative.
    """
    rendered = (
        f"{to_expr(node.body, PR_TEST + 1)}"
        f" if {to_expr(node.test, PR_TEST + 1)}"
        f" else {to_expr(node.orelse, PR_TEST)}"
    )
    return parens(level, PR_TEST, rendered)
185,667 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
PR_TEST = 1
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
def _format_set(node: ast.Set, level: int) -> str:
    """Render a set display: {a, b, ...}."""
    inner = ", ".join(to_expr(e, PR_TEST) for e in node.elts)
    return "{" + inner + "}"
185,668 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
def _format_comprehensions(nodes: List[ast.comprehension]) -> str:
    """Concatenate the rendered `for ... in ...` clauses of a comprehension."""
    return "".join(map(_format_comprehension, nodes))
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
def _format_set_comp(node: ast.SetComp, level: int) -> str:
    """Render a set comprehension."""
    return f"{{{to_expr(node.elt)}{_format_comprehensions(node.generators)}}}"
185,669 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
def _format_comprehensions(nodes: List[ast.comprehension]) -> str:
return "".join(_format_comprehension(n) for n in nodes)
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
def _format_list_comp(node: ast.ListComp, level: int) -> str:
    """Render a list comprehension."""
    return f"[{to_expr(node.elt)}{_format_comprehensions(node.generators)}]"
185,670 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
def _format_comprehensions(nodes: List[ast.comprehension]) -> str:
return "".join(_format_comprehension(n) for n in nodes)
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
def _format_dict_comp(node: ast.DictComp, level: int) -> str:
    """Render a dict comprehension."""
    key = to_expr(node.key)
    value = to_expr(node.value)
    clauses = _format_comprehensions(node.generators)
    return f"{{{key}: {value}{clauses}}}"
185,671 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
def _format_comprehensions(nodes: List[ast.comprehension]) -> str:
return "".join(_format_comprehension(n) for n in nodes)
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
def _format_gen_exp(node: ast.GeneratorExp, level: int) -> str:
    """Render a generator expression (always parenthesized)."""
    return f"({to_expr(node.elt)}{_format_comprehensions(node.generators)})"
185,672 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str:
if node is None:
return ""
formatter = _FORMATTERS.get(type(node))
if formatter is not None:
return formatter(node, level)
return "<unsupported node: " + type(node).__name__ + ">"
def _format_slice(node: ast.Slice, level: int):
    """Render a slice as lower:upper[:step], omitting absent pieces."""
    res = ""
    if node.lower is not None:
        res += to_expr(node.lower)
    res += ":"
    if node.upper is not None:
        res += to_expr(node.upper)
    # Consistency fix: use `is not None` like lower/upper above (AST
    # nodes are always truthy, so behavior is unchanged).
    if node.step is not None:
        res += ":"
        res += to_expr(node.step)
    return res
185,673 | import ast
from typing import Any, Callable, Dict, List, Optional, Type
def _format_constant(node: ast.Constant, level: int):
if node.value is Ellipsis:
return "..."
return repr(node.value) | null |
185,674 | from .opcode_cinder import opcode as opcode_cinder
from .opcodebase import Opcode
def _load_mapping_arg_effect(oparg: int, _jmp: int = 0) -> int:
if oparg == 2:
return -1
elif oparg == 3:
return -2
return 1 | null |
185,675 | from __future__ import annotations
import ast
import importlib.util
import itertools
import marshal
import os
import sys
from ast import AST, ClassDef
from builtins import compile as builtin_compile
from contextlib import contextmanager
from typing import Dict, Union
from . import consts, future, misc, pyassem, symbols
from .consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_VARARGS,
CO_VARKEYWORDS,
PyCF_MASK_OBSOLETE,
PyCF_ONLY_AST,
PyCF_SOURCE_IS_UTF8,
SC_CELL,
SC_FREE,
SC_GLOBAL_EXPLICIT,
SC_GLOBAL_IMPLICIT,
SC_LOCAL,
)
from .optimizer import AstOptimizer
from .pyassem import Block, PyFlowGraph
from .symbols import Scope, SymbolVisitor
from .unparse import to_expr
from .visitor import ASTVisitor, walk
def _set_qualname(code, qualname):
    """Intentional no-op placeholder; both arguments are ignored."""
    pass
185,676 | from __future__ import annotations
import ast
import importlib.util
import itertools
import marshal
import os
import sys
from ast import AST, ClassDef
from builtins import compile as builtin_compile
from contextlib import contextmanager
from typing import Dict, Union
from . import consts, future, misc, pyassem, symbols
from .consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_VARARGS,
CO_VARKEYWORDS,
PyCF_MASK_OBSOLETE,
PyCF_ONLY_AST,
PyCF_SOURCE_IS_UTF8,
SC_CELL,
SC_FREE,
SC_GLOBAL_EXPLICIT,
SC_GLOBAL_IMPLICIT,
SC_LOCAL,
)
from .optimizer import AstOptimizer
from .pyassem import Block, PyFlowGraph
from .symbols import Scope, SymbolVisitor
from .unparse import to_expr
from .visitor import ASTVisitor, walk
_DEFAULT_MODNAME = sys.intern("<module>")
def make_header(mtime, size):
    """Build the .pyc-style header: _ZERO prefix followed by the source
    mtime and size, each as 4 little-endian bytes.

    NOTE(review): _ZERO is defined elsewhere in this module —
    presumably the 4-byte flags field of the pyc header; confirm.
    """
    return _ZERO + mtime.to_bytes(4, "little") + size.to_bytes(4, "little")
def compile(
    source,
    filename,
    mode,
    flags=0,
    dont_inherit=None,
    optimize=-1,
    compiler=None,
    modname=_DEFAULT_MODNAME,
):
    """Replacement for builtin compile() function

    Does not yet support ast.PyCF_ALLOW_TOP_LEVEL_AWAIT flag.
    """
    if dont_inherit is not None:
        raise RuntimeError("not implemented yet")
    result = make_compiler(source, filename, mode, flags, optimize, compiler, modname)
    # PyCF_ONLY_AST requests the parsed tree instead of a code object.
    return result if flags & PyCF_ONLY_AST else result.getCode()
def compileFile(filename, display=0, compiler=None, modname=_DEFAULT_MODNAME):
    """Compile a source file and write the code object to filename + "c".

    Fix: the "U" (universal newlines) open() mode was removed in
    Python 3.11; plain text mode has been universal-newline since
    Python 3, so the flag is dropped.
    """
    # compile.c uses marshal to write a long directly, with
    # calling the interface that would also generate a 1-byte code
    # to indicate the type of the value. simplest way to get the
    # same effect is to call marshal and then skip the code.
    fileinfo = os.stat(filename)
    with open(filename) as f:
        buf = f.read()
    code = compile(buf, filename, "exec", compiler=compiler, modname=modname)
    with open(filename + "c", "wb") as f:
        hdr = make_header(int(fileinfo.st_mtime), fileinfo.st_size)
        f.write(importlib.util.MAGIC_NUMBER)
        f.write(hdr)
        marshal.dump(code, f)
185,677 | from __future__ import annotations
import ast
import importlib.util
import itertools
import marshal
import os
import sys
from ast import AST, ClassDef
from builtins import compile as builtin_compile
from contextlib import contextmanager
from typing import Dict, Union
from . import consts, future, misc, pyassem, symbols
from .consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_VARARGS,
CO_VARKEYWORDS,
PyCF_MASK_OBSOLETE,
PyCF_ONLY_AST,
PyCF_SOURCE_IS_UTF8,
SC_CELL,
SC_FREE,
SC_GLOBAL_EXPLICIT,
SC_GLOBAL_IMPLICIT,
SC_LOCAL,
)
from .optimizer import AstOptimizer
from .pyassem import Block, PyFlowGraph
from .symbols import Scope, SymbolVisitor
from .unparse import to_expr
from .visitor import ASTVisitor, walk
def is_const(node):
    """True when node is a literal constant AST node."""
    return isinstance(node, ast.Constant)
def all_items_const(seq, begin, end):
    """True when every element of seq[begin:end] is a constant node."""
    return all(is_const(item) for item in seq[begin:end])
185,678 | from __future__ import annotations
import ast
import importlib.util
import itertools
import marshal
import os
import sys
from ast import AST, ClassDef
from builtins import compile as builtin_compile
from contextlib import contextmanager
from typing import Dict, Union
from . import consts, future, misc, pyassem, symbols
from .consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_VARARGS,
CO_VARKEYWORDS,
PyCF_MASK_OBSOLETE,
PyCF_ONLY_AST,
PyCF_SOURCE_IS_UTF8,
SC_CELL,
SC_FREE,
SC_GLOBAL_EXPLICIT,
SC_GLOBAL_IMPLICIT,
SC_LOCAL,
)
from .optimizer import AstOptimizer
from .pyassem import Block, PyFlowGraph
from .symbols import Scope, SymbolVisitor
from .unparse import to_expr
from .visitor import ASTVisitor, walk
def get_docstring(
    node: ast.Module | ast.ClassDef | ast.FunctionDef | ast.AsyncFunctionDef,
) -> str | None:
    """Return the literal docstring of node's body, or None if absent."""
    body = node.body
    if not body:
        return None
    first = body[0]
    if not isinstance(first, ast.Expr):
        return None
    value = first.value
    if isinstance(value, ast.Str):
        return value.s
    return None
185,679 | from __future__ import annotations
import ast
import importlib.util
import itertools
import marshal
import os
import sys
from ast import AST, ClassDef
from builtins import compile as builtin_compile
from contextlib import contextmanager
from typing import Dict, Union
from . import consts, future, misc, pyassem, symbols
from .consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_VARARGS,
CO_VARKEYWORDS,
PyCF_MASK_OBSOLETE,
PyCF_ONLY_AST,
PyCF_SOURCE_IS_UTF8,
SC_CELL,
SC_FREE,
SC_GLOBAL_EXPLICIT,
SC_GLOBAL_IMPLICIT,
SC_LOCAL,
)
from .optimizer import AstOptimizer
from .pyassem import Block, PyFlowGraph
from .symbols import Scope, SymbolVisitor
from .unparse import to_expr
from .visitor import ASTVisitor, walk
class OpFinder:
    """Records the single assignment op used across a visited tree;
    raises if the tree mixes different ops."""

    def __init__(self):
        self.op = None

    def visitAssName(self, node):
        flags = node.flags
        if self.op is None:
            self.op = flags
        elif self.op != flags:
            raise ValueError("mixed ops in stmt")

    # Attribute and subscript targets carry the same op semantics.
    visitAssAttr = visitAssName
    visitSubscript = visitAssName
def walk(tree, visitor):
    """Dispatch the whole tree through the visitor's entry point."""
    return visitor.visit(tree)
The provided code snippet includes necessary dependencies for implementing the `findOp` function. Write a Python function `def findOp(node)` to solve the following problem:
Find the op (DELETE, LOAD, STORE) in an AssTuple tree
Here is the function:
def findOp(node):
    """Find the op (DELETE, LOAD, STORE) in an AssTuple tree"""
    v = OpFinder()
    # NOTE(review): VERBOSE is not read by OpFinder itself; presumably
    # the visitor-walk machinery consults it — confirm before removing.
    v.VERBOSE = 0
    walk(node, v)
    return v.op
185,680 | from __future__ import annotations
import linecache
from contextlib import contextmanager
from typing import TYPE_CHECKING
def Optional(self, parameters):
"""Optional type.
Optional[X] is equivalent to Union[X, None].
"""
arg = _type_check(parameters, f"{self} requires a single type.")
return Union[arg, type(None)]
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
Tuple.__doc__ = \
"""Tuple type; Tuple[X, Y] is the cross-product type of X and Y.
Example: Tuple[T1, T2] is a tuple of two elements corresponding
to type variables T1 and T2. Tuple[int, float, str] is a tuple
of an int, a float and a string.
To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].
"""
def error_location(filename: str, node: AST) -> Tuple[int, int, Optional[str]]:
    """Return (lineno, col_offset, source_line or None) for node in filename."""
    line_text = linecache.getline(filename, node.lineno)
    return (node.lineno, node.col_offset, line_text or None)
185,681 | import argparse
import builtins
import importlib.util
import marshal
import os
import re
import sys
from dis import dis
from . import pycodegen, static
coding_re = re.compile(rb"^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)")
group = argparser.add_mutually_exclusive_group()
group.add_argument(
"--static", action="store_true", help="compile using static compiler"
)
group.add_argument(
"--builtin", action="store_true", help="compile using built-in C compiler"
)
def open_with_coding(fname):
    """Open a source file as text honoring its PEP 263 coding cookie.

    The cookie may appear on either of the first two lines; utf-8 is
    the fallback when no cookie is found.
    """
    with open(fname, "rb") as f:
        line = f.readline()
        m = coding_re.match(line)
        if not m:
            # PEP 263 allows the cookie on the second line as well.
            line = f.readline()
            m = coding_re.match(line)
    encoding = "utf-8"
    if m:
        encoding = m.group(1).decode()
    return open(fname, encoding=encoding)
185,682 | from __future__ import print_function
import dis as _dis
import opcode
import re
import sys
from pprint import pformat
from types import CodeType
from typing import Dict, Generator, Iterable, List, Optional, Pattern, TextIO, Tuple
def _disassemble_bytes(
    code: bytes,
    lasti: int = -1,
    varnames: Optional[Tuple[str]] = None,
    names: Optional[Tuple[str]] = None,
    constants: Optional[Tuple[object]] = None,
    cells: Optional[Tuple[object]] = None,
    linestarts: Optional[Dict[int, int]] = None,
    *,
    file: Optional[TextIO] = None,
    line_offset: int = 0
) -> None:
    """Print a disassembly listing of raw bytecode to *file*.

    Mirrors dis._disassemble_bytes but routes instructions through
    _make_stable (defined elsewhere in this module) before printing;
    the instruction at *lasti* is marked as current.
    """
    # Omit the line number column entirely if we have no line number info
    show_lineno = linestarts is not None
    if show_lineno:
        # pyre-fixme [16]: `Optional` has no attribute `values`.
        maxlineno = max(linestarts.values()) + line_offset
        if maxlineno >= 1000:
            lineno_width = len(str(maxlineno))
        else:
            lineno_width = 3
    else:
        lineno_width = 0
    # Widen the offset column only when offsets reach 5 digits.
    maxoffset = len(code) - 2
    if maxoffset >= 10000:
        offset_width = len(str(maxoffset))
    else:
        offset_width = 4
    for instr in _make_stable(
        # pyre-fixme [16]: Module `dis` has no attribute `_get_instructions_bytes`
        _dis._get_instructions_bytes(
            code, varnames, names, constants, cells, linestarts, line_offset=line_offset
        )
    ):
        # Blank line between source lines, but not before the first one.
        new_source_line = (
            show_lineno and instr.starts_line is not None and instr.offset > 0
        )
        if new_source_line:
            print(file=file)
        is_current_instr = instr.offset == lasti
        print(
            # pyre-fixme [16]: `_dis.Instruction` has no attribute `_disassemble`
            instr._disassemble(lineno_width, is_current_instr, offset_width),
            file=file,
        )
CodeType = type(_f.__code__)
def Optional(self, parameters):
"""Optional type.
Optional[X] is equivalent to Union[X, None].
"""
arg = _type_check(parameters, f"{self} requires a single type.")
return Union[arg, type(None)]
class TextIO(IO[str]):
"""Typed version of the return of open() in text mode."""
__slots__ = ()
def buffer(self) -> BinaryIO:
pass
def encoding(self) -> str:
pass
def errors(self) -> Optional[str]:
pass
def line_buffering(self) -> bool:
pass
def newlines(self) -> Any:
pass
def __enter__(self) -> 'TextIO':
pass
def disassemble(
    co: CodeType,
    lasti: int = -1,
    *,
    file: Optional[TextIO] = None,
    skip_line_nos: bool = False
) -> None:
    """Disassemble a code object to *file*.

    When skip_line_nos is true the line-number column is suppressed
    entirely; lasti marks the current instruction (e.g. in a traceback).
    """
    cell_names = co.co_cellvars + co.co_freevars
    if skip_line_nos:
        linestarts = None
    else:
        linestarts = dict(_dis.findlinestarts(co))
    _disassemble_bytes(
        co.co_code,
        lasti,
        co.co_varnames,
        co.co_names,
        co.co_consts,
        cell_names,
        linestarts,
        file=file,
    )
185,683 | from __future__ import print_function
import dis as _dis
import opcode
import re
import sys
from pprint import pformat
from types import CodeType
from typing import Dict, Generator, Iterable, List, Optional, Pattern, TextIO, Tuple
coding_re: Pattern[bytes] = re.compile(
rb"^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)"
)
class TextIO(IO[str]):
"""Typed version of the return of open() in text mode."""
__slots__ = ()
def buffer(self) -> BinaryIO:
pass
def encoding(self) -> str:
pass
def errors(self) -> Optional[str]:
pass
def line_buffering(self) -> bool:
pass
def newlines(self) -> Any:
pass
def __enter__(self) -> 'TextIO':
pass
def open_with_coding(fname: str) -> TextIO:
    """Reopen fname as text using its PEP 263 coding cookie (utf-8 fallback)."""
    with open(fname, "rb") as src:
        line = src.readline()
        match = coding_re.match(line)
        if match is None:
            # PEP 263 allows the cookie on the second line as well.
            line = src.readline()
            match = coding_re.match(line)
    encoding = match.group(1).decode() if match else "utf-8"
    return open(fname, encoding=encoding)
MANGLE_LEN = 256  # magic constant from compile.c

def mangle(name, klass):
    """Apply private-name mangling: __name inside klass -> _klass__name.

    No mangling happens without a class, for non-private or dunder
    names, for dotted names, or when the result would exceed MANGLE_LEN.
    """
    if klass is None or not name.startswith("__"):
        return name
    if len(name) + 2 >= MANGLE_LEN:
        return name
    # TODO: Probably need to split and mangle recursively?
    if "." in name or name.endswith("__"):
        return name
    stripped = klass.lstrip("_")
    if not stripped:
        # Class name is empty or all underscores: nothing to prepend.
        return name
    total = len(stripped) + len(name)
    if total > MANGLE_LEN:
        stripped = stripped[: MANGLE_LEN - total]
    return f"_{stripped}{name}"
185,685 | from __future__ import annotations
import ast
import operator
import sys
from ast import Bytes, cmpop, Constant, copy_location, Ellipsis, NameConstant, Num, Str
from typing import (
Any,
Callable,
cast,
Dict,
Iterable,
Mapping,
Optional,
Tuple,
Type,
Union,
)
from .visitor import ASTRewriter
class DefaultLimits:
    # Bounds consulted by the safe_* arithmetic helpers when deciding
    # whether a constant-foldable result would be too large to compute.
    MAX_INT_SIZE = 128        # maximum integer result size, in bits
    MAX_COLLECTION_SIZE = 20  # maximum repeated tuple/frozenset length
    MAX_STR_SIZE = 20         # maximum repeated str/bytes length
    MAX_TOTAL_ITEMS = 1024    # cap on total nested items when repeating
LimitsType = type[PyLimits] | type[DefaultLimits]
def Any(self, parameters):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
- Any assumed to have all methods.
- All values assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
or class checks.
"""
raise TypeError(f"{self} is not subscriptable")
def safe_lshift(left: Any, right: Any, limits: LimitsType = DefaultLimits) -> object:
    """left << right, raising OverflowError when the integer result
    would exceed limits.MAX_INT_SIZE bits (or the shift is negative)."""
    if isinstance(left, int) and isinstance(right, int) and left and right:
        lbits = left.bit_length()
        too_big = (
            right < 0
            or right > limits.MAX_INT_SIZE
            or lbits > limits.MAX_INT_SIZE - right
        )
        if too_big:
            raise OverflowError()
    return left << right
185,686 | from __future__ import annotations
import ast
import operator
import sys
from ast import Bytes, cmpop, Constant, copy_location, Ellipsis, NameConstant, Num, Str
from typing import (
Any,
Callable,
cast,
Dict,
Iterable,
Mapping,
Optional,
Tuple,
Type,
Union,
)
from .visitor import ASTRewriter
class DefaultLimits:
MAX_INT_SIZE = 128
MAX_COLLECTION_SIZE = 20
MAX_STR_SIZE = 20
MAX_TOTAL_ITEMS = 1024
LimitsType = type[PyLimits] | type[DefaultLimits]
def check_complexity(obj: object, limit: int) -> int:
    """Recursively subtract the item count of nested tuples/frozensets
    from limit; a negative return means obj is too complex."""
    if isinstance(obj, (frozenset, tuple)):
        limit -= len(obj)
        for element in obj:
            element_budget = check_complexity(element, limit)
            limit = element_budget
            if limit < 0:
                break
    return limit
def Any(self, parameters):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
- Any assumed to have all methods.
- All values assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
or class checks.
"""
raise TypeError(f"{self} is not subscriptable")
def safe_multiply(left: Any, right: Any, limits: LimitsType = DefaultLimits) -> object:
    """left * right, raising OverflowError when the result would exceed
    the folding limits (integer bit size, collection/str length,
    total nested items)."""
    if isinstance(left, int) and isinstance(right, int) and left and right:
        # int * int: the product's bit length is at most lbits + rbits.
        lbits = left.bit_length()
        rbits = right.bit_length()
        if lbits + rbits > limits.MAX_INT_SIZE:
            raise OverflowError()
    elif isinstance(left, int) and isinstance(right, (tuple, frozenset)):
        # int * collection: bound the repeated length and, recursively,
        # the total number of nested items.
        rsize = len(right)
        if rsize:
            if left < 0 or left > limits.MAX_COLLECTION_SIZE / rsize:
                raise OverflowError()
            if left:
                if check_complexity(right, limits.MAX_TOTAL_ITEMS // left) < 0:
                    raise OverflowError()
    elif isinstance(left, int) and isinstance(right, (str, bytes)):
        # int * str/bytes: bound the repeated string length.
        rsize = len(right)
        if rsize:
            if left < 0 or left > limits.MAX_STR_SIZE / rsize:
                raise OverflowError()
    elif isinstance(right, int) and isinstance(left, (tuple, frozenset, str, bytes)):
        # collection * int: normalize to the int-first cases above.
        return safe_multiply(right, left, limits)
    return left * right
185,687 | from __future__ import annotations
import ast
import operator
import sys
from ast import Bytes, cmpop, Constant, copy_location, Ellipsis, NameConstant, Num, Str
from typing import (
Any,
Callable,
cast,
Dict,
Iterable,
Mapping,
Optional,
Tuple,
Type,
Union,
)
from .visitor import ASTRewriter
class DefaultLimits:
MAX_INT_SIZE = 128
MAX_COLLECTION_SIZE = 20
MAX_STR_SIZE = 20
MAX_TOTAL_ITEMS = 1024
LimitsType = type[PyLimits] | type[DefaultLimits]
def Any(self, parameters):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
- Any assumed to have all methods.
- All values assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
or class checks.
"""
raise TypeError(f"{self} is not subscriptable")
def safe_power(left: Any, right: Any, limits: LimitsType = DefaultLimits) -> object:
    """left ** right with an overflow guard on large integer results."""
    if isinstance(left, int) and isinstance(right, int) and left and right > 0:
        # The result has roughly lbits * right bits; refuse when that
        # would exceed the configured limit.
        if left.bit_length() > limits.MAX_INT_SIZE / right:
            raise OverflowError()
    return left**right
185,688 | from __future__ import annotations
import ast
import operator
import sys
from ast import Bytes, cmpop, Constant, copy_location, Ellipsis, NameConstant, Num, Str
from typing import (
Any,
Callable,
cast,
Dict,
Iterable,
Mapping,
Optional,
Tuple,
Type,
Union,
)
from .visitor import ASTRewriter
class DefaultLimits:
MAX_INT_SIZE = 128
MAX_COLLECTION_SIZE = 20
MAX_STR_SIZE = 20
MAX_TOTAL_ITEMS = 1024
LimitsType = type[PyLimits] | type[DefaultLimits]
def Any(self, parameters):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
- Any assumed to have all methods.
- All values assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
or class checks.
"""
raise TypeError(f"{self} is not subscriptable")
def safe_mod(left: Any, right: Any, limits: LimitsType = DefaultLimits) -> object:
    """left % right, refusing %-formatting of str/bytes templates.

    String formatting can expand arbitrarily, so it is treated as an
    overflow rather than being folded.
    """
    if isinstance(left, (str, bytes)):
        raise OverflowError()
    return left % right
185,689 | from __future__ import annotations
import sys
from contextlib import contextmanager
from types import CodeType
from typing import ClassVar, Generator, List, Optional
from . import opcode_cinder, opcodes
from .consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NEWLOCALS,
CO_OPTIMIZED,
CO_SUPPRESS_JIT,
)
from .flow_graph_optimizer import FlowGraphOptimizer
from .opcodebase import Opcode
def sign(a):
    """Return 1.0 or -1.0 matching the sign bit of float a (NaN -> 1.0)."""
    if not isinstance(a, float):
        raise TypeError(f"Must be a real number, not {type(a)}")
    if a != a:
        # NaN compares unequal to itself; treat it as positive.
        return 1.0
    # str() exposes the sign bit, so -0.0 correctly yields -1.0.
    return -1.0 if str(a).startswith("-") else 1.0
185,690 | from __future__ import annotations
import sys
from contextlib import contextmanager
from types import CodeType
from typing import ClassVar, Generator, List, Optional
from . import opcode_cinder, opcodes
from .consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NEWLOCALS,
CO_OPTIMIZED,
CO_SUPPRESS_JIT,
)
from .flow_graph_optimizer import FlowGraphOptimizer
from .opcodebase import Opcode
def instrsize(oparg):
    """Number of instruction words (1-4) needed to encode oparg,
    accounting for EXTENDED_ARG prefixes."""
    for words, limit in ((1, 0xFF), (2, 0xFFFF), (3, 0xFFFFFF)):
        if oparg <= limit:
            return words
    return 4
185,691 | from __future__ import annotations
import sys
from contextlib import contextmanager
from types import CodeType
from typing import ClassVar, Generator, List, Optional
from . import opcode_cinder, opcodes
from .consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NEWLOCALS,
CO_OPTIMIZED,
CO_SUPPRESS_JIT,
)
from .flow_graph_optimizer import FlowGraphOptimizer
from .opcodebase import Opcode
def cast_signed_byte_to_unsigned(i):
    """Reinterpret a signed byte (-128..127) as unsigned (0..255)."""
    return i + 256 if i < 0 else i
185,692 | import ast
from ast import AST, copy_location
from typing import Any, Sequence, TypeVar, Union
def dumpNode(node):
    """Print a node's class and its public attributes (debug helper)."""
    print(node.__class__)
    for attr in dir(node):
        # Skip private and dunder attributes.
        if attr[0] != "_":
            print("\t", "%-10.10s" % attr, getattr(node, attr))
185,693 | from __future__ import print_function
import ast
from .visitor import ASTVisitor, walk
The provided code snippet includes necessary dependencies for implementing the `is_future` function. Write a Python function `def is_future(stmt)` to solve the following problem:
Return true if statement is a well-formed future statement
Here is the function:
def is_future(stmt):
    """Return True if statement is a well-formed future statement.

    A future statement is a ``from __future__ import ...`` node. Returns a
    bool; since bool is an int subclass this stays compatible with callers
    that relied on the old 0/1 return values.
    """
    return isinstance(stmt, ast.ImportFrom) and stmt.module == "__future__"
185,694 | from __future__ import print_function
import ast
from .visitor import ASTVisitor, walk
class FutureParser(ASTVisitor):
def __init__(self):
def visitModule(self, node):
def check_stmt(self, stmt):
def get_features(self):
class BadFutureParser(ASTVisitor):
def visitImportFrom(self, node):
def walk(tree, visitor):
def find_futures(node):
    """Collect the __future__ features used by *node* while also running the
    malformed-future checker over the tree."""
    feature_parser = FutureParser()
    bad_parser = BadFutureParser()
    walk(node, feature_parser)
    walk(node, bad_parser)
    return feature_parser.get_features()
185,695 | from __future__ import annotations
import ast
from ast import (
AnnAssign,
Assign,
AST,
AsyncFunctionDef,
Attribute,
ClassDef,
Delete,
ExceptHandler,
FunctionDef,
Global,
Import,
ImportFrom,
Module,
Name,
)
from symtable import SymbolTable
from typing import final, List, MutableMapping, Optional, Set
from ..consts import CO_FUTURE_ANNOTATIONS
from ..pycodegen import find_futures
from .common import (
get_symbol_map,
imported_name,
ScopeStack,
StrictModuleError,
SymbolMap,
SymbolScope,
)
from .rewriter.rewriter import SymbolVisitor
class ClassConflictChecker(SymbolVisitor[object, TransformerScope]):
    """Symbol-table visitor that detects names bound both as class members
    and as instance members of the same class.

    Conflicts (and use of ``global __annotations__`` inside a class body)
    are reported by raising StrictModuleError via error().
    """

    def __init__(
        self,
        symbols: SymbolTable,
        symbol_map: SymbolMap,
        filename: str,
        flags: int,
    ) -> None:
        super().__init__(
            ScopeStack(
                self.make_scope(symbols, None),
                symbol_map=symbol_map,
                scope_factory=self.make_scope,
            ),
        )
        self.filename = filename
        # Compiler future-flags; only CO_FUTURE_ANNOTATIONS is consulted here.
        self.flags = flags

    def skip_annotations(self) -> bool:
        """True when `from __future__ import annotations` is in effect, so
        annotation expressions need not be visited."""
        return bool(self.flags & CO_FUTURE_ANNOTATIONS)

    def error(self, names: List[str], lineno: int, col: int, filename: str) -> None:
        """Raise a StrictModuleError describing the conflicting *names*."""
        MSG: str = "Class member conflicts with instance member: {names}"
        raise StrictModuleError(MSG.format(names=names), filename, lineno, col)

    def make_scope(
        self,
        symtable: SymbolTable,
        node: Optional[AST],
        vars: Optional[MutableMapping[str, object]] = None,
    ) -> SymbolScope[object, TransformerScope]:
        # Pick the scope-data flavor matching the AST node that opened the scope.
        if isinstance(node, FunctionDef):
            data = FunctionScope(node, self.scopes.scopes[-1].scope_data)
        elif isinstance(node, ClassDef):
            data = ClassScope()
        else:
            data = TransformerScope()
        return SymbolScope(symtable, data)

    def visit_Name(self, node: Name) -> None:
        scope = self.scope_for(node.id).scope_data
        if isinstance(node.ctx, ast.Load):
            scope.loaded(node.id)
        else:
            scope.stored(node.id)

    def visit_ExceptHandler(self, node: ExceptHandler) -> None:
        self.generic_visit(node)
        # `except ... as name` binds `name` in the enclosing scope.
        name = node.name
        if name is not None:
            self.scope_for(name).scope_data.stored(name)

    def visit_Delete(self, node: Delete) -> None:
        # `del name` counts as a store for conflict-tracking purposes.
        for target in node.targets:
            if isinstance(target, ast.Name):
                self.scope_for(target.id).scope_data.stored(target.id)

    def visit_Global(self, node: Global) -> None:
        # `global __annotations__` inside a class body would alias the
        # module's annotations dict; reject it outright.
        if self.scopes.in_class_scope:
            for name in node.names:
                if name == "__annotations__":
                    self.error(
                        ["__annotations__"], node.lineno, node.col_offset, self.filename
                    )

    def visit_ClassDef(self, node: ClassDef) -> None:
        self.visit_Class_Outer(node)
        class_scope = self.visit_Class_Inner(node).scope_data
        assert isinstance(class_scope, ClassScope)
        # Any name both assigned in the class body and used as an instance
        # field is a conflict.
        overlap = class_scope.instance_fields.intersection(class_scope.class_fields)
        if overlap:
            self.error(list(overlap), node.lineno, node.col_offset, self.filename)
        self.scope_for(node.name).scope_data.stored(node.name)

    def visit_FunctionDef(self, node: FunctionDef) -> None:
        self.visit_Func_Outer(node)
        func_scope = self.visit_Func_Inner(node)
        self.scopes.current[node.name] = func_scope.scope_data
        self.scope_for(node.name).scope_data.stored(node.name)

    def visit_AsyncFunctionDef(self, node: AsyncFunctionDef) -> None:
        self.visit_Func_Outer(node)
        self.visit_Func_Inner(node)
        self.scope_for(node.name).scope_data.stored(node.name)

    def visit_Import(self, node: Import) -> None:
        for name in node.names:
            self.scope_for(imported_name(name)).scope_data.stored(imported_name(name))
        return self.generic_visit(node)

    def visit_ImportFrom(self, node: ImportFrom) -> None:
        if node.level == 0 and node.module is not None:
            for name in node.names:
                self.scope_for(name.asname or name.name).scope_data.stored(
                    name.asname or name.name
                )

    def visit_Assign(self, node: Assign) -> None:
        self.scopes.scopes[-1].scope_data.visit_Assign(node)
        self.generic_visit(node)

    def visit_AnnAssign(self, node: AnnAssign) -> None:
        self.scopes.scopes[-1].scope_data.visit_AnnAssign(node)
        value = node.value
        if value is not None:
            self.visit(node.target)
            self.visit(value)
        # BUGFIX: `skip_annotations` is a method; the original tested the
        # bound-method object (always truthy), so the annotation was never
        # visited regardless of the future-annotations flag.
        if not self.skip_annotations():
            self.visit(node.annotation)

    def visit_arg(self, node: ast.arg) -> None:
        # BUGFIX: call skip_annotations() instead of testing the bound method.
        if not self.skip_annotations():
            self.generic_visit(node)
class SymbolTable:
    """Read-only wrapper around a raw `_symtable` table entry.

    NOTE(review): this appears to mirror the stdlib `symtable.SymbolTable`;
    `_symtable`, `Symbol` and `_newSymbolTable` are not defined in this
    chunk — presumably supplied by the enclosing module. Confirm.
    """

    def __init__(self, raw_table, filename):
        # Raw table object produced by the C-level `_symtable` module.
        self._table = raw_table
        self._filename = filename
        # Lazily-populated cache of name -> Symbol (filled by lookup()).
        self._symbols = {}

    def __repr__(self):
        # Subclasses show their own class name as a prefix, e.g. "Function ".
        if self.__class__ == SymbolTable:
            kind = ""
        else:
            kind = "%s " % self.__class__.__name__

        if self._table.name == "top":
            return "<{0}SymbolTable for module {1}>".format(kind, self._filename)
        else:
            return "<{0}SymbolTable for {1} in {2}>".format(kind,
                                                            self._table.name,
                                                            self._filename)

    def get_type(self):
        """Return the type of the symbol table.

        The values returned are 'class', 'module' and
        'function'.
        """
        if self._table.type == _symtable.TYPE_MODULE:
            return "module"
        if self._table.type == _symtable.TYPE_FUNCTION:
            return "function"
        if self._table.type == _symtable.TYPE_CLASS:
            return "class"
        # NOTE(review): only reached for an unrecognized type value; the
        # assert presumably lists the numeric values of the three TYPE_*
        # constants checked above — confirm against `_symtable`. Falls
        # through returning None if the assert passes.
        assert self._table.type in (1, 2, 3), \
            "unexpected type: {0}".format(self._table.type)

    def get_id(self):
        """Return an identifier for the table.
        """
        return self._table.id

    def get_name(self):
        """Return the table's name.

        This corresponds to the name of the class, function
        or 'top' if the table is for a class, function or
        global respectively.
        """
        return self._table.name

    def get_lineno(self):
        """Return the number of the first line in the
        block for the table.
        """
        return self._table.lineno

    def is_optimized(self):
        """Return *True* if the locals in the table
        are optimizable.
        """
        # Only function scopes use optimized (fast-local) name resolution.
        return bool(self._table.type == _symtable.TYPE_FUNCTION)

    def is_nested(self):
        """Return *True* if the block is a nested class
        or function."""
        return bool(self._table.nested)

    def has_children(self):
        """Return *True* if the block has nested namespaces.
        """
        return bool(self._table.children)

    def get_identifiers(self):
        """Return a list of names of symbols in the table.
        """
        return self._table.symbols.keys()

    def lookup(self, name):
        """Lookup a *name* in the table.

        Returns a *Symbol* instance.
        """
        # Cache Symbol objects so repeated lookups return the same instance.
        sym = self._symbols.get(name)
        if sym is None:
            flags = self._table.symbols[name]
            namespaces = self.__check_children(name)
            module_scope = (self._table.name == "top")
            sym = self._symbols[name] = Symbol(name, flags, namespaces,
                                               module_scope=module_scope)
        return sym

    def get_symbols(self):
        """Return a list of *Symbol* instances for
        names in the table.
        """
        return [self.lookup(ident) for ident in self.get_identifiers()]

    def __check_children(self, name):
        # Child tables whose name matches `name` are the namespaces the
        # symbol binds (e.g. a nested function's own table).
        return [_newSymbolTable(st, self._filename)
                for st in self._table.children
                if st.name == name]

    def get_children(self):
        """Return a list of the nested symbol tables.
        """
        return [_newSymbolTable(st, self._filename)
                for st in self._table.children]
def find_futures(flags: int, node: ast.Module) -> int:
    """Return *flags* masked to the PyCF bits, with the compiler flags for
    *node*'s recognized ``__future__`` features OR'd in."""
    feature_bits = {
        "barry_as_FLUFL": consts.CO_FUTURE_BARRY_AS_BDFL,
        "annotations": consts.CO_FUTURE_ANNOTATIONS,
    }
    future_flags = flags & consts.PyCF_MASK
    for feature in future.find_futures(node):
        # Unrecognized features contribute nothing.
        future_flags |= feature_bits.get(feature, 0)
    return future_flags
def get_symbol_map(node: ast.AST, symtable: SymbolTable) -> SymbolMap:
    """Build and return the symbol map for *node* by running a
    SymbolMapBuilder over the tree."""
    builder = SymbolMapBuilder(symtable)
    builder.visit(node)
    return builder.mapping
def check_class_conflict(
    node: Module,
    filename: str,
    symbols: SymbolTable,
) -> None:
    """Run the class-member/instance-member conflict check over *node*,
    raising StrictModuleError on any conflict."""
    table_map = get_symbol_map(node, symbols)
    future_flags = find_futures(0, node)
    checker = ClassConflictChecker(
        symbols, table_map, filename=filename, flags=future_flags
    )
    checker.visit(node)
185,696 | from __future__ import annotations
import ast
import builtins
import logging
import os
import symtable
import sys
from contextlib import nullcontext
from symtable import SymbolTable as PythonSymbolTable, SymbolTableFactory
from types import CodeType
from typing import (
Callable,
ContextManager,
Dict,
final,
Iterable,
List,
Optional,
Set,
Tuple,
TYPE_CHECKING,
)
from cinderx.strictmodule import (
NONSTRICT_MODULE_KIND,
STATIC_MODULE_KIND,
StrictAnalysisResult,
StrictModuleLoader,
STUB_KIND_MASK_TYPING,
)
from ..errors import TypedSyntaxError
from ..pycodegen import compile as python_compile
from ..static import Compiler as StaticCompiler, ModuleTable, StaticCodeGenerator
from . import _static_module_ported, strict_compile
from .class_conflict_checker import check_class_conflict
from .common import StrictModuleError
from .flag_extractor import FlagExtractor, Flags
from .rewriter import remove_annotations, rewrite
The provided code snippet includes necessary dependencies for implementing the `getSymbolTable` function. Write a Python function `def getSymbolTable(mod: StrictAnalysisResult) -> PythonSymbolTable` to solve the following problem:
Construct a symtable object from analysis result
Here is the function:
def getSymbolTable(mod: StrictAnalysisResult) -> PythonSymbolTable:
    """Construct a symtable object from an analysis result."""
    factory = SymbolTableFactory()
    return factory(mod.symtable, mod.file_name)
185,697 | from __future__ import annotations
def _set_patch(module: StrictModule, name: str, value: object) -> None:
type(module).patch(module, name, value) | null |
185,698 | from __future__ import annotations
def _del_patch(module: StrictModule, name: str) -> None:
type(module).patch_delete(module, name) | null |
185,699 | from __future__ import annotations
# Four-byte prefix prepended before the regular pyc header: two little-endian
# bytes plus b"\r\n" (mirroring importlib's MAGIC_NUMBER convention).
# A value of STRICT_MAGIC_NUMBER with bit 15 set marks a strict-or-static
# pyc; an all-zero value marks a module that is neither.
# NOTE(review): STRICT_MAGIC_NUMBER is defined elsewhere in this module —
# presumably small enough that +2**15 fits in two bytes; confirm.
_MAGIC_STRICT_OR_STATIC: bytes = (STRICT_MAGIC_NUMBER + 2**15).to_bytes(
    2, "little"
) + b"\r\n"
_MAGIC_NEITHER_STRICT_NOR_STATIC: bytes = (0).to_bytes(2, "little") + b"\r\n"
# Length of either prefix (4 bytes); used to slice it off before standard
# pyc validation.
_MAGIC_LEN: int = len(_MAGIC_STRICT_OR_STATIC)
class StrictBytecodeError(ImportError):
    """Raised when a strict/static pyc carries a bad or unrecognized prefix."""
def _classify_pyc(data, name, exc_details):
    """Perform basic validity checking of a pyc header and return the flags field,
    which determines how the pyc should be further validated against the source.

    *data* is the contents of the pyc file. (Only the first 16 bytes are
    required, though.)

    *name* is the name of the module being imported. It is used for logging.

    *exc_details* is a dictionary passed to ImportError if it raised for
    improved debugging.

    ImportError is raised when the magic number is incorrect or when the flags
    field is invalid. EOFError is raised when the data is found to be truncated.
    """
    # Bytes 0-4: interpreter magic number. (The caller has already stripped
    # the 4-byte strict prefix before calling this.)
    magic = data[:4]
    if magic != MAGIC_NUMBER:
        message = f'bad magic number in {name!r}: {magic!r}'
        _bootstrap._verbose_message('{}', message)
        raise ImportError(message, **exc_details)
    if len(data) < 16:
        message = f'reached EOF while reading pyc header of {name!r}'
        _bootstrap._verbose_message('{}', message)
        raise EOFError(message)
    # Bytes 4-8: flags word (bit 0: hash-based pyc, bit 1: check-source).
    flags = _unpack_uint32(data[4:8])
    # Only the first two flags are defined.
    if flags & ~0b11:
        message = f'invalid flags {flags!r} in {name!r}'
        raise ImportError(message, **exc_details)
    return flags
def classify_strict_pyc(
    data: bytes, name: str, exc_details: dict[str, str]
) -> tuple[int, bool]:
    """Validate a strict pyc and classify it.

    Returns ``(flags, strict_or_static)``: *flags* comes from validating the
    standard pyc header that follows the 4-byte strict prefix, and
    *strict_or_static* reflects which prefix was found. Raises
    StrictBytecodeError for an unrecognized prefix.
    """
    # Validate the standard pyc header located after our 4-byte prefix.
    # pyre-ignore[16]: typeshed doesn't know about this
    flags = _classify_pyc(data[_MAGIC_LEN:], name, exc_details)
    prefix = data[:_MAGIC_LEN]
    if prefix == _MAGIC_STRICT_OR_STATIC:
        return (flags, True)
    if prefix == _MAGIC_NEITHER_STRICT_NOR_STATIC:
        return (flags, False)
    raise StrictBytecodeError(
        f"Bad magic number {prefix!r} in {exc_details['path']}"
    )
185,700 | from __future__ import annotations
def code_to_strict_timestamp_pyc(
    code: CodeType, strict_or_static: bool, mtime: int = 0, source_size: int = 0
) -> bytearray:
    """Produce the data for a strict timestamp-based pyc."""
    prefix = (
        _MAGIC_STRICT_OR_STATIC
        if strict_or_static
        else _MAGIC_NEITHER_STRICT_NOR_STATIC
    )
    data = bytearray(prefix)
    data.extend(MAGIC_NUMBER)
    # Timestamp pycs carry flags=0, then the source mtime and size words.
    # pyre-ignore[16]: typeshed doesn't know about this
    for header_word in (0, mtime, source_size):
        data.extend(_pack_uint32(header_word))
    data.extend(marshal.dumps(code))
    return data
def code_to_strict_hash_pyc(
    code: CodeType, strict_or_static: bool, source_hash: bytes, checked: bool = True
) -> bytearray:
    """Produce the data for a strict hash-based pyc."""
    prefix = (
        _MAGIC_STRICT_OR_STATIC
        if strict_or_static
        else _MAGIC_NEITHER_STRICT_NOR_STATIC
    )
    data = bytearray(prefix)
    data.extend(MAGIC_NUMBER)
    # Bit 0 marks a hash-based pyc; bit 1 marks it as source-checked.
    flags = 0b1 | checked << 1
    # pyre-ignore[16]: typeshed doesn't know about this
    data.extend(_pack_uint32(flags))
    assert len(source_hash) == 8
    data.extend(source_hash)
    data.extend(marshal.dumps(code))
    return data
class StrictSourceFileLoader(SourceFileLoader):
    """Source loader that understands strict and static modules.

    Modules opting in via ``import __strict__`` / ``import __static__`` are
    compiled through the strict-module Compiler and published to sys.modules
    as StrictModule objects; their bytecode is cached in pycs that carry an
    extra 4-byte strict prefix (see add_strict_tag / classify_strict_pyc).
    """

    # Process-wide Compiler shared by all loader instances; created lazily
    # by ensure_compiler().
    compiler: Optional[Compiler] = None
    module: Optional[ModuleType] = None

    def __init__(
        self,
        fullname: str,
        path: str,
        import_path: Optional[Iterable[str]] = None,
        stub_path: Optional[str] = None,
        allow_list_prefix: Optional[Iterable[str]] = None,
        allow_list_exact: Optional[Iterable[str]] = None,
        enable_patching: bool = False,
        log_source_load: Optional[Callable[[str, Optional[str], bool], None]] = None,
        init_cached_properties: Optional[
            Callable[
                [Mapping[str, str | tuple[str, bool]]],
                Callable[[Type[object]], Type[object]],
            ]
        ] = None,
        log_time_func: Optional[Callable[[], TIMING_LOGGER_TYPE]] = None,
        use_py_compiler: bool = False,
        # The regexes are parsed on the C++ side, so re.Pattern is not accepted.
        allow_list_regex: Optional[Iterable[str]] = None,
    ) -> None:
        self.name = fullname
        self.path = path
        self.import_path: Iterable[str] = import_path or list(sys.path)
        # -X strict-module-stubs-path wins over the environment variable.
        configured_stub_path = sys._xoptions.get("strict-module-stubs-path") or getenv(
            "PYTHONSTRICTMODULESTUBSPATH"
        )
        if stub_path is None:
            stub_path = configured_stub_path or DEFAULT_STUB_PATH
        if stub_path and not isdir(stub_path):
            raise ValueError(f"Strict module stubs path does not exist: {stub_path}")
        self.stub_path: str = stub_path
        self.allow_list_prefix: Iterable[str] = allow_list_prefix or []
        self.allow_list_exact: Iterable[str] = allow_list_exact or []
        self.allow_list_regex: Iterable[str] = allow_list_regex or []
        self.enable_patching = enable_patching
        self.log_source_load: Optional[
            Callable[[str, Optional[str], bool], None]
        ] = log_source_load
        self.bytecode_found = False
        self.bytecode_path: Optional[str] = None
        self.init_cached_properties = init_cached_properties
        self.log_time_func = log_time_func
        self.use_py_compiler = use_py_compiler
        self.strict_or_static: bool = False
        self.is_static: bool = False

    # BUGFIX: this method takes `cls` and caches onto `cls.compiler`, but was
    # missing the @classmethod decorator, so when called as
    # `self.ensure_compiler(...)` the cache landed on individual instances
    # instead of the class-level `compiler` attribute declared above.
    @classmethod
    def ensure_compiler(
        cls,
        path: Iterable[str],
        stub_path: str,
        allow_list_prefix: Iterable[str],
        allow_list_exact: Iterable[str],
        log_time_func: Optional[Callable[[], TIMING_LOGGER_TYPE]],
        enable_patching: bool = False,
        allow_list_regex: Optional[Iterable[str]] = None,
    ) -> Compiler:
        """Return the shared Compiler, creating it on first use."""
        if (comp := cls.compiler) is None:
            comp = cls.compiler = Compiler(
                path,
                stub_path,
                allow_list_prefix,
                allow_list_exact,
                raise_on_error=True,
                log_time_func=log_time_func,
                enable_patching=enable_patching,
                allow_list_regex=allow_list_regex or [],
            )
        return comp

    def get_code(self, fullname: str) -> CodeType:
        """Return code for *fullname*, preferring a valid strict-tagged pyc
        and falling back to compiling source (writing back a pyc when
        permitted)."""
        source_path = self.get_filename(fullname)
        source_mtime = None
        source_bytes = None
        source_hash = None
        hash_based = False
        check_source = True
        try:
            bytecode_path = cache_from_source(source_path)
        except NotImplementedError:
            bytecode_path = None
        else:
            bytecode_path = self.bytecode_path = add_strict_tag(
                bytecode_path, self.enable_patching
            )
            try:
                st = self.path_stats(source_path)
            except OSError:
                pass
            else:
                source_mtime = int(st["mtime"])
                try:
                    data = self.get_data(bytecode_path)
                except OSError:
                    pass
                else:
                    self.bytecode_found = True
                    exc_details = {
                        "name": fullname,
                        "path": bytecode_path,
                    }
                    try:
                        flags, strict_or_static = classify_strict_pyc(
                            data, fullname, exc_details
                        )
                        self.strict_or_static = strict_or_static
                        # Skip 4-byte strict prefix + 16-byte pyc header.
                        bytes_data = memoryview(data)[20:]
                        hash_based = flags & 0b1 != 0
                        if hash_based:
                            check_source = flags & 0b10 != 0
                            if _imp.check_hash_based_pycs != "never" and (
                                check_source or _imp.check_hash_based_pycs == "always"
                            ):
                                source_bytes = self.get_data(source_path)
                                source_hash = importlib.util.source_hash(source_bytes)
                                # pyre-ignore[16]: typeshed doesn't know about this
                                _validate_hash_pyc(
                                    data[_MAGIC_LEN:],
                                    source_hash,
                                    fullname,
                                    exc_details,
                                )
                        else:
                            # pyre-ignore[16]: typeshed doesn't know about this
                            _validate_timestamp_pyc(
                                data[_MAGIC_LEN:],
                                source_mtime,
                                st["size"],
                                fullname,
                                exc_details,
                            )
                    except (ImportError, EOFError):
                        # Stale/corrupt pyc; fall through to recompiling.
                        pass
                    else:
                        # pyre-ignore[16]: typeshed doesn't know about this
                        _bootstrap._verbose_message(
                            "{} matches {}", bytecode_path, source_path
                        )
                        # pyre-ignore[16]: typeshed doesn't know about this
                        return _compile_bytecode(
                            bytes_data,
                            name=fullname,
                            bytecode_path=bytecode_path,
                            source_path=source_path,
                        )
        if source_bytes is None:
            source_bytes = self.get_data(source_path)
        code_object = self.source_to_code(source_bytes, source_path)
        # pyre-ignore[16]: typeshed doesn't know about this
        _bootstrap._verbose_message("code object from {}", source_path)
        if (
            not sys.dont_write_bytecode
            and bytecode_path is not None
            and source_mtime is not None
            # TODO(T88560840) don't write pycs for static modules for now, to
            # work around lack of proper invalidation
            and not self.is_static
        ):
            if hash_based:
                if source_hash is None:
                    source_hash = importlib.util.source_hash(source_bytes)
                data = code_to_strict_hash_pyc(
                    code_object,
                    self.strict_or_static,
                    # pyre-ignore[6]: bad typeshed stub for importlib.util.source_hash
                    # pyre-ignore[6]: For 3rd argument expected `bytes` but got `int`.
                    source_hash,
                    check_source,
                )
            else:
                data = code_to_strict_timestamp_pyc(
                    code_object, self.strict_or_static, source_mtime, len(source_bytes)
                )
            try:
                # pyre-ignore[16]: typeshed doesn't know about this
                self._cache_bytecode(source_path, bytecode_path, data)
            except NotImplementedError:
                pass
        return code_object

    def should_force_strict(self) -> bool:
        """Hook for subclasses to force strict compilation without an
        explicit `import __strict__`."""
        return False

    # pyre-ignore[40]: Non-static method `source_to_code` cannot override a static
    # method defined in `importlib.abc.InspectLoader`.
    # pyre-fixme[14]: `source_to_code` overrides method defined in `InspectLoader`
    # inconsistently.
    def source_to_code(
        self, data: bytes | str, path: str, *, _optimize: int = -1
    ) -> CodeType:
        """Compile *data*, routing strict/static modules through the strict
        Compiler and recording the classification on the loader."""
        log_source_load = self.log_source_load
        if log_source_load is not None:
            log_source_load(path, self.bytecode_path, self.bytecode_found)
        # pyre-ignore[28]: typeshed doesn't know about _optimize arg
        code = super().source_to_code(data, path, _optimize=_optimize)
        force = self.should_force_strict()
        if force or "__strict__" in code.co_names or "__static__" in code.co_names:
            # Since a namespace package will never call `source_to_code` (there
            # is no source!), there are only two possibilities here: non-package
            # (submodule_search_paths should be None) or regular package
            # (submodule_search_paths should have one entry, the directory
            # containing the "__init__.py").
            submodule_search_locations = None
            if path.endswith("__init__.py"):
                # BUGFIX: was `path[:12]` (the *first* 12 characters of the
                # path); we want the package directory, i.e. the path with
                # the trailing "/__init__.py" (12 chars) removed.
                submodule_search_locations = [path[:-12]]
            # Usually _optimize will be -1 (which means "default to the value
            # of sys.flags.optimize"). But this default happens very deep in
            # Python's compiler (in PyAST_CompileObject), so if we just pass
            # around -1 and rely on that, it means we can't make any of our own
            # decisions based on that flag. So instead we do the default right
            # here, so we have the correct optimize flag value throughout our
            # compiler.
            opt = sys.flags.optimize if _optimize == -1 else _optimize
            # Let the ast transform attempt to validate the strict module. This
            # will return an unmodified module if import __strict__ isn't
            # actually at the top-level
            code, is_valid_strict, is_static = self.ensure_compiler(
                self.import_path,
                self.stub_path,
                self.allow_list_prefix,
                self.allow_list_exact,
                self.log_time_func,
                self.enable_patching,
                self.allow_list_regex,
            ).load_compiled_module_from_source(
                data,
                path,
                self.name,
                opt,
                submodule_search_locations,
                override_flags=Flags(is_strict=force),
            )
            self.strict_or_static = is_valid_strict or is_static
            self.is_static = is_static
            assert code is not None
            return code

        self.strict_or_static = False
        return code

    def exec_module(self, module: ModuleType) -> None:
        """Execute *module*'s code, swapping in a StrictModule for
        strict/static modules."""
        # This ends up being slightly convoluted, because create_module
        # gets called, then source_to_code gets called, so we don't know if
        # we have a strict module until after we were requested to create it.
        # So we'll run the module code we get back in the module that was
        # initially published in sys.modules, check and see if it's a strict
        # module, and then run the strict module body after replacing the
        # entry in sys.modules with a StrictModule entry. This shouldn't
        # really be observable because no user code runs between publishing
        # the normal module in sys.modules and replacing it with the
        # StrictModule.
        code = self.get_code(module.__name__)
        if code is None:
            raise ImportError(
                f"Cannot import module {module.__name__}; get_code() returned None"
            )
        # fix up the pyc path
        cached = getattr(module, "__cached__", None)
        if cached:
            module.__cached__ = cached = add_strict_tag(cached, self.enable_patching)
        spec: Optional[ModuleSpec] = module.__spec__
        if cached and spec and spec.cached:
            spec.cached = cached

        if self.strict_or_static:
            if spec is None:
                raise ImportError(f"Missing module spec for {module.__name__}")

            new_dict = {
                "<fixed-modules>": cast(object, FIXED_MODULES),
                "<builtins>": builtins.__dict__,
                "<init-cached-properties>": self.init_cached_properties,
            }
            if code.co_flags & CO_STATICALLY_COMPILED:
                init_static_python()
                new_dict["<imported-from>"] = code.co_consts[-1]

            new_dict.update(module.__dict__)
            strict_mod = StrictModule(new_dict, self.enable_patching)

            sys.modules[module.__name__] = strict_mod

            exec(code, new_dict)
        else:
            exec(code, module.__dict__)
def add_strict_tag(path: str, enable_patching: bool) -> str:
    """Insert a ".strict" (and optionally ".patch") tag before *path*'s
    final extension."""
    stem, _, suffix = path.rpartition(".")
    marker = ".patch" if enable_patching else ""
    return f"{stem}.strict{marker}.{suffix}"
def makedirs(name, mode=0o777, exist_ok=False):
    """makedirs(name [, mode=0o777][, exist_ok=False])

    Super-mkdir; create a leaf directory and all intermediate ones.  Works like
    mkdir, except that any intermediate path segment (not just the rightmost)
    will be created if it does not exist. If the target directory already
    exists, raise an OSError if exist_ok is False. Otherwise no exception is
    raised.  This is recursive.

    NOTE(review): appears to be a copy of CPython's os.makedirs; `path`,
    `curdir` and `mkdir` are presumably `os.path`, `os.curdir` and `os.mkdir`.
    `mode` is deliberately not passed to the recursive call (intermediate
    directories get the default mode), matching CPython behavior.
    """
    head, tail = path.split(name)
    if not tail:
        # Path ended with a separator ("a/b/"): split again so `tail` holds
        # the final real component.
        head, tail = path.split(head)
    if head and tail and not path.exists(head):
        try:
            # Create all ancestor directories first.
            makedirs(head, exist_ok=exist_ok)
        except FileExistsError:
            # Defeats race condition when another thread created the path
            pass
        cdir = curdir
        if isinstance(tail, bytes):
            cdir = bytes(curdir, 'ASCII')
        if tail == cdir:           # xxx/newdir/. exists if xxx/newdir exists
            return
    try:
        mkdir(name, mode)
    except OSError:
        # Cannot rely on checking for EEXIST, since the operating system
        # could give priority to other errors like EACCES or EROFS
        if not exist_ok or not path.isdir(name):
            raise
class PyCompileError(Exception):
    """Exception raised when an error occurs while attempting to
    compile the file.

    To raise this exception, use

        raise PyCompileError(exc_type, exc_value, file[, msg])

    where

        exc_type:   exception type to be used in error message;
                    type name can be accessed as class variable
                    'exc_type_name'

        exc_value:  exception value to be used in error message;
                    can be accessed as class variable 'exc_value'

        file:       name of file being compiled to be used in error message;
                    can be accessed as class variable 'file'

        msg:        string message to be written as error message.
                    If no value is given, a default exception message will be
                    given, consistent with 'standard' py_compile output.
                    message (or default) can be accessed as class variable
                    'msg'
    """

    def __init__(self, exc_type, exc_value, file, msg=''):
        exc_type_name = exc_type.__name__
        if exc_type is SyntaxError:
            # Render the syntax error the way the interpreter would, but
            # substitute the real file name for "<string>".
            formatted = ''.join(
                traceback.format_exception_only(exc_type, exc_value)
            )
            errmsg = formatted.replace('File "<string>"', 'File "%s"' % file)
        else:
            errmsg = "Sorry: %s: %s" % (exc_type_name, exc_value)

        super().__init__(msg or errmsg, exc_type_name, exc_value, file)

        self.exc_type_name = exc_type_name
        self.exc_value = exc_value
        self.file = file
        self.msg = msg or errmsg

    def __str__(self):
        return self.msg
class PycInvalidationMode(enum.Enum):
    """How a cached pyc is validated against its source file."""

    TIMESTAMP = enum.auto()       # 1: compare source mtime/size
    CHECKED_HASH = enum.auto()    # 2: compare source hash on import
    UNCHECKED_HASH = enum.auto()  # 3: hash recorded but not verified
def _get_default_invalidation_mode():
    """Return the default pyc invalidation mode: checked-hash when
    SOURCE_DATE_EPOCH is set (commonly used for reproducible builds),
    timestamp-based otherwise."""
    wants_hash = os.environ.get('SOURCE_DATE_EPOCH')
    if wants_hash:
        return PycInvalidationMode.CHECKED_HASH
    return PycInvalidationMode.TIMESTAMP
# NOTE(review): mirrors typing's internal generic-alias plumbing
# (`typing.Dict = _alias(dict, 2, inst=False, name='Dict')`); `_alias` is
# not defined in this chunk — presumably typing's private helper. Confirm
# whether this line belongs here at all.
Dict = _alias(dict, 2, inst=False, name='Dict')
The provided code snippet includes necessary dependencies for implementing the `strict_compile` function. Write a Python function `def strict_compile( file: str, cfile: str, dfile: str | None = None, doraise: bool = False, optimize: int = -1, # Since typeshed doesn't yet know about PycInvalidationMode, no way to # convince Pyre it's a valid type here. T54150924 invalidation_mode: object = None, loader_override: object = None, loader_options: Dict[str, str | int | bool] | None = None, ) -> str | None` to solve the following problem:
Byte-compile one Python source file to Python bytecode, using strict loader. :param file: The source file name. :param cfile: The target byte compiled file name. :param dfile: Purported file name, i.e. the file name that shows up in error messages. Defaults to the source file name. :param doraise: Flag indicating whether or not an exception should be raised when a compile error is found. If an exception occurs and this flag is set to False, a string indicating the nature of the exception will be printed, and the function will return to the caller. If an exception occurs and this flag is set to True, a PyCompileError exception will be raised. :param optimize: The optimization level for the compiler. Valid values are -1, 0, 1 and 2. A value of -1 means to use the optimization level of the current interpreter, as given by -O command line options. :return: Path to the resulting byte compiled file. Copied and modified from https://github.com/python/cpython/blob/3.6/Lib/py_compile.py#L65 This version does not support cfile=None, since compileall never passes that.
Here is the function:
def strict_compile(
    file: str,
    cfile: str,
    dfile: str | None = None,
    doraise: bool = False,
    optimize: int = -1,
    # Since typeshed doesn't yet know about PycInvalidationMode, no way to
    # convince Pyre it's a valid type here. T54150924
    invalidation_mode: object = None,
    loader_override: object = None,
    loader_options: Dict[str, str | int | bool] | None = None,
) -> str | None:
    """Byte-compile one Python source file to Python bytecode, using strict loader.

    :param file: The source file name.
    :param cfile: The target byte compiled file name.
    :param dfile: Purported file name, i.e. the file name that shows up in
        error messages. Defaults to the source file name.
    :param doraise: Flag indicating whether or not an exception should be
        raised when a compile error is found. If an exception occurs and this
        flag is set to False, a string indicating the nature of the exception
        will be printed, and the function will return to the caller. If an
        exception occurs and this flag is set to True, a PyCompileError
        exception will be raised.
    :param optimize: The optimization level for the compiler. Valid values
        are -1, 0, 1 and 2. A value of -1 means to use the optimization
        level of the current interpreter, as given by -O command line options.

    :return: Path to the resulting byte compiled file.

    Copied and modified from https://github.com/python/cpython/blob/3.6/Lib/py_compile.py#L65
    This version does not support cfile=None, since compileall never passes that.
    """
    # Derive a dotted module name by stripping the longest matching sys.path
    # entry and the .py / __init__.py suffix.
    modname = file
    for path_entry in sys.path:
        if file.startswith(path_entry):
            modname = file[len(path_entry) :]
            break
    modname = modname.replace("/", ".")
    if modname.endswith("__init__.py"):
        modname = modname[: -len("__init__.py")]
    elif modname.endswith(".py"):
        modname = modname[: -len(".py")]
    modname = modname.strip(".")
    if loader_options is None:
        loader_options = {}
    # TODO we ignore loader_override
    loader = StrictSourceFileLoader(
        modname,
        file,
        import_path=sys.path,
        **loader_options,
    )
    cfile = add_strict_tag(cfile, enable_patching=loader.enable_patching)
    source_bytes = loader.get_data(file)
    try:
        code = loader.source_to_code(source_bytes, dfile or file, _optimize=optimize)
    except Exception as err:
        # BUGFIX: a stray bare `raise` at the top of this handler made the
        # documented `doraise` handling below unreachable, so compile errors
        # always propagated raw and doraise=False never printed-and-returned.
        py_exc = PyCompileError(err.__class__, err, dfile or file)
        if doraise:
            raise py_exc
        else:
            sys.stderr.write(py_exc.msg + "\n")
        return
    makedirs(dirname(cfile), exist_ok=True)
    if invalidation_mode is None:
        invalidation_mode = _get_default_invalidation_mode()
    if invalidation_mode == PycInvalidationMode.TIMESTAMP:
        source_stats = loader.path_stats(file)
        bytecode = code_to_strict_timestamp_pyc(
            code, loader.strict_or_static, source_stats["mtime"], source_stats["size"]
        )
    else:
        source_hash = importlib.util.source_hash(source_bytes)
        bytecode = code_to_strict_hash_pyc(
            code,
            loader.strict_or_static,
            # pyre-ignore[6]: bad typeshed stub for importlib.util.source_hash
            # pyre-ignore[6]: For 3rd argument expected `bytes` but got `int`.
            source_hash,
            (invalidation_mode == PycInvalidationMode.CHECKED_HASH),
        )
    # pyre-ignore[16]: typeshed doesn't know about this
    loader._cache_bytecode(file, cfile, bytecode)
    return cfile
185,701 | from __future__ import annotations
The provided code snippet includes necessary dependencies for implementing the `init_static_python` function. Write a Python function `def init_static_python() -> None` to solve the following problem:
Idempotent global initialization of Static Python. Should be called at least once if any Static modules/functions exist.
Here is the function:
def init_static_python() -> None:
    """Idempotent global initialization of Static Python.

    Should be called at least once if any Static modules/functions exist.
    """
    # Both helpers are defined elsewhere in this module; presumably each is
    # itself idempotent, which is what makes repeated calls safe — confirm.
    watch_sys_modules()
    install_sp_audit_hook()
185,702 | from __future__ import annotations
import ast
from ast import AST, Call, Constant, Name
from typing import List, Optional, Tuple, TypeVar
def is_mutable(node: AST) -> bool:
    """Return True if *node* is a Name node whose id is ``mutable``."""
    if not isinstance(node, Name):
        return False
    return node.id == "mutable"
185,703 | from __future__ import annotations
import ast
from ast import (
alias,
AnnAssign,
arg,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Call,
ClassDef,
Constant,
copy_location,
DictComp,
expr,
FunctionDef,
GeneratorExp,
Global,
Import,
ImportFrom,
Lambda,
ListComp,
Module,
Name,
NodeVisitor,
SetComp,
stmt,
Str,
Try,
)
from symtable import SymbolTable
from types import ModuleType
from typing import (
cast,
Dict,
final,
Generic,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
TypeVar,
Union,
)
from ..common import (
AstRewriter,
get_symbol_map,
imported_name,
lineinfo,
mangle_priv_name,
ScopeStack,
SymbolMap,
SymbolScope,
)
def lineinfo(node: TAst, target: Optional[AST] = None) -> TAst:
def make_arg(name: str) -> arg:
    # Build a bare (unannotated) ast.arg; lineinfo() stamps it with the
    # synthetic -1 source location used for generated (non-user) code.
    return lineinfo(ast.arg(name, None))
185,704 | from __future__ import annotations
import ast
from ast import (
alias,
AnnAssign,
arg,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Call,
ClassDef,
Constant,
copy_location,
DictComp,
expr,
FunctionDef,
GeneratorExp,
Global,
Import,
ImportFrom,
Lambda,
ListComp,
Module,
Name,
NodeVisitor,
SetComp,
stmt,
Str,
Try,
)
from symtable import SymbolTable
from types import ModuleType
from typing import (
cast,
Dict,
final,
Generic,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
TypeVar,
Union,
)
from ..common import (
AstRewriter,
get_symbol_map,
imported_name,
lineinfo,
mangle_priv_name,
ScopeStack,
SymbolMap,
SymbolScope,
)
TAst = TypeVar("TAst", bound=AST)


def copyline(from_node: AST, to_node: TAst) -> TAst:
    """Copy the source position (lineno/col_offset) of ``from_node`` onto
    ``to_node`` and return ``to_node``."""
    for attr in ("lineno", "col_offset"):
        setattr(to_node, attr, getattr(from_node, attr))
    return to_node
185,705 | from __future__ import annotations
import ast
from ast import (
alias,
AnnAssign,
arg,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Call,
ClassDef,
Constant,
copy_location,
DictComp,
expr,
FunctionDef,
GeneratorExp,
Global,
Import,
ImportFrom,
Lambda,
ListComp,
Module,
Name,
NodeVisitor,
SetComp,
stmt,
Str,
Try,
)
from symtable import SymbolTable
from types import ModuleType
from typing import (
cast,
Dict,
final,
Generic,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
TypeVar,
Union,
)
from ..common import (
AstRewriter,
get_symbol_map,
imported_name,
lineinfo,
mangle_priv_name,
ScopeStack,
SymbolMap,
SymbolScope,
)
List = _alias(list, 1, inst=False, name='List')
def lineinfo(node: TAst, target: Optional[AST] = None) -> TAst:
    """Attach source-location info to ``node``.

    Copies the location from ``target`` when given; otherwise sets every
    location field to -1 to mark the node as synthetic (non-user) code.
    """
    if target:
        copy_location(node, target)
    else:
        # set lineno to -1 to indicate non-user code
        node.lineno = node.end_lineno = -1
        node.col_offset = node.end_col_offset = -1
    return node
def make_function(name: str, pos_args: List[arg]) -> FunctionDef:
    """Create a synthetic FunctionDef named ``name`` taking only ``pos_args``.

    The result has no decorators, no return annotation, no ``*args`` or
    ``**kwargs``, and carries the -1 (non-user) location from lineinfo().
    """
    arguments = ast.arguments()
    arguments.posonlyargs = []
    arguments.args = pos_args
    arguments.vararg = None
    arguments.kwonlyargs = []
    arguments.kw_defaults = []
    arguments.kwarg = None
    arguments.defaults = []
    func = lineinfo(ast.FunctionDef())
    func.name = name
    func.args = arguments
    func.decorator_list = []
    func.returns = None
    func.type_comment = ""
    return func
185,706 | from __future__ import annotations
import ast
from ast import (
alias,
AnnAssign,
arg,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Call,
ClassDef,
Constant,
copy_location,
DictComp,
expr,
FunctionDef,
GeneratorExp,
Global,
Import,
ImportFrom,
Lambda,
ListComp,
Module,
Name,
NodeVisitor,
SetComp,
stmt,
Str,
Try,
)
from symtable import SymbolTable
from types import ModuleType
from typing import (
cast,
Dict,
final,
Generic,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
TypeVar,
Union,
)
from ..common import (
AstRewriter,
get_symbol_map,
imported_name,
lineinfo,
mangle_priv_name,
ScopeStack,
SymbolMap,
SymbolScope,
)
def make_assign(*a: object, **kw: object) -> Assign:
    """Construct an ``ast.Assign``, always clearing its ``type_comment``."""
    assign = Assign(*a, **kw)
    assign.type_comment = None
    return assign
List = _alias(list, 1, inst=False, name='List')
def lineinfo(node: TAst, target: Optional[AST] = None) -> TAst:
    """Attach source-location info to ``node``.

    Copies the location from ``target`` when provided; otherwise marks the
    node as synthetic by setting all four location fields to -1.
    """
    if not target:
        # set lineno to -1 to indicate non-user code
        node.lineno = -1
        node.col_offset = -1
        node.end_lineno = -1
        node.end_col_offset = -1
    else:
        copy_location(node, target)
    return node
def make_assign_empty_list(name: str) -> Assign:
    # Generates the synthetic statement ``<name> = []``, with -1 (non-user)
    # source locations applied to every node by lineinfo().
    return lineinfo(
        make_assign(
            [lineinfo(ast.Name(name, ast.Store()))],
            lineinfo(ast.List([], ast.Load())),
        )
    )
185,707 | from __future__ import annotations
import ast
from ast import (
alias,
AnnAssign,
arg,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Call,
ClassDef,
Constant,
copy_location,
DictComp,
expr,
FunctionDef,
GeneratorExp,
Global,
Import,
ImportFrom,
Lambda,
ListComp,
Module,
Name,
NodeVisitor,
SetComp,
stmt,
Str,
Try,
)
from symtable import SymbolTable
from types import ModuleType
from typing import (
cast,
Dict,
final,
Generic,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
TypeVar,
Union,
)
from ..common import (
AstRewriter,
get_symbol_map,
imported_name,
lineinfo,
mangle_priv_name,
ScopeStack,
SymbolMap,
SymbolScope,
)
def is_assigned(name: str) -> str:
    """Return the sentinel variable name tracking assignment of ``name``."""
    return "".join(("<assigned:", name, ">"))
185,708 | from __future__ import annotations
import ast
from ast import (
alias,
AnnAssign,
arg,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Call,
ClassDef,
Constant,
copy_location,
DictComp,
expr,
FunctionDef,
GeneratorExp,
Global,
Import,
ImportFrom,
Lambda,
ListComp,
Module,
Name,
NodeVisitor,
SetComp,
stmt,
Str,
Try,
)
from symtable import SymbolTable
from types import ModuleType
from typing import (
cast,
Dict,
final,
Generic,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
TypeVar,
Union,
)
from ..common import (
AstRewriter,
get_symbol_map,
imported_name,
lineinfo,
mangle_priv_name,
ScopeStack,
SymbolMap,
SymbolScope,
)
class StrictModuleRewriter:
    """rewrites a module body so that all global variables are transformed into
    local variables, and are closed over by the enclosing functions. This will
    ultimately remove all LOAD_GLOBAL/STORE_GLOBAL opcodes, and therefore will
    also have the side effect of making the module read-only as globals will
    not be exposed."""

    def __init__(
        self,
        root: Module,
        table: SymbolTable,
        filename: str,
        modname: str,
        mode: str,
        optimize: int,
        builtins: ModuleType | Mapping[str, object] = __builtins__,
        is_static: bool = False,
    ) -> None:
        # ``__builtins__`` may be either a module or a dict depending on how
        # the interpreter was entered; normalize to a dict here.
        if not isinstance(builtins, dict):
            builtins = builtins.__dict__
        self.root = root
        self.table = table
        self.filename = filename
        self.modname = modname
        self.mode = mode
        self.optimize = optimize
        self.builtins: Mapping[str, object] = builtins
        self.symbol_map: SymbolMap = get_symbol_map(root, table)
        scope: SymbolScope[None, None] = SymbolScope(table, None)
        # ImmutableVisitor collects the module's globals / global writes /
        # global deletes, consumed later by transform_body().
        self.visitor = ImmutableVisitor(ScopeStack(scope, symbol_map=self.symbol_map))
        # Top-level statements in the returned code object...
        self.code_stmts: List[stmt] = []
        self.is_static = is_static

    def transform(self) -> ast.Module:
        """Run the full rewrite and return the new Module AST."""
        original_first_node = self.root.body[0] if self.root.body else None
        self.visitor.visit(self.root)
        # Names like __name__/__doc__ always exist at module level, so treat
        # them as both present and assigned.
        self.visitor.global_sets.update(_IMPLICIT_GLOBALS)
        for argname in _IMPLICIT_GLOBALS:
            self.visitor.globals.add(argname)
        mod = ast.Module(
            [
                *self.get_future_imports(),
                *self.transform_body(),
            ]
        )
        if mod.body and original_first_node:
            # this isn't obvious but the new mod body is empty
            # if the original module body is empty. Therefore there
            # is always a location to copy
            copy_location(mod.body[0], original_first_node)
        mod.type_ignores = []
        return mod

    def get_future_imports(self) -> Iterable[stmt]:
        # __future__ imports must be the first statements of the module, so
        # they are re-emitted ahead of the transformed body.
        if self.visitor.future_imports:
            yield lineinfo(
                ImportFrom("__future__", list(self.visitor.future_imports), 0)
            )

    def del_global(self, name: str) -> stmt:
        # Builds ``del <name>`` with synthetic location info.
        return lineinfo(ast.Delete([lineinfo(ast.Name(name, ast.Del()))]))

    def store_global(self, name: str, value: expr) -> stmt:
        # Builds ``<name> = <value>`` with synthetic location info.
        return lineinfo(make_assign([lineinfo(ast.Name(name, ast.Store()))], value))

    def load_global(self, name: str) -> expr:
        # Builds a Load reference to ``<name>`` with synthetic location info.
        return lineinfo(ast.Name(name, ast.Load()))

    def create_annotations(self) -> stmt:
        # Builds ``__annotations__ = {}``.
        return self.store_global("__annotations__", lineinfo(ast.Dict([], [])))

    def make_transformer(
        self,
        scopes: ScopeStack[None, ScopeData],
    ) -> ImmutableTransformer:
        # Feeds everything the analysis pass learned into the transformer.
        return ImmutableTransformer(
            scopes,
            self.modname,
            self.builtins,
            self.visitor.globals,
            self.visitor.global_sets,
            self.visitor.global_dels,
            self.visitor.future_imports,
            self.is_static,
        )

    def transform_body(self) -> Iterable[stmt]:
        # Fresh scope stack for the transformation pass (the analysis pass
        # used its own stack in __init__).
        scopes = ScopeStack(
            SymbolScope(self.table, ScopeData()), symbol_map=self.symbol_map
        )
        transformer = self.make_transformer(scopes)
        body = transformer.visit(self.root).body
        return body
class SymbolTable:
    """Wrapper around a raw ``_symtable`` table.

    NOTE(review): this mirrors CPython's ``symtable.SymbolTable``; keep in
    sync with the stdlib implementation when upgrading Python versions.
    """

    def __init__(self, raw_table, filename):
        self._table = raw_table
        self._filename = filename
        # Lazily-populated cache of name -> Symbol, filled by lookup().
        self._symbols = {}

    def __repr__(self):
        if self.__class__ == SymbolTable:
            kind = ""
        else:
            # Subclasses (Function, Class) show their class name.
            kind = "%s " % self.__class__.__name__
        if self._table.name == "top":
            return "<{0}SymbolTable for module {1}>".format(kind, self._filename)
        else:
            return "<{0}SymbolTable for {1} in {2}>".format(kind,
                                                            self._table.name,
                                                            self._filename)

    def get_type(self):
        """Return the type of the symbol table.
        The values returned are 'class', 'module' and
        'function'.
        """
        if self._table.type == _symtable.TYPE_MODULE:
            return "module"
        if self._table.type == _symtable.TYPE_FUNCTION:
            return "function"
        if self._table.type == _symtable.TYPE_CLASS:
            return "class"
        # The assert covers every known type constant, so control never
        # reaches past it without raising.
        assert self._table.type in (1, 2, 3), \
            "unexpected type: {0}".format(self._table.type)

    def get_id(self):
        """Return an identifier for the table.
        """
        return self._table.id

    def get_name(self):
        """Return the table's name.
        This corresponds to the name of the class, function
        or 'top' if the table is for a class, function or
        global respectively.
        """
        return self._table.name

    def get_lineno(self):
        """Return the number of the first line in the
        block for the table.
        """
        return self._table.lineno

    def is_optimized(self):
        """Return *True* if the locals in the table
        are optimizable.
        """
        # Only function scopes use fast (optimized) locals.
        return bool(self._table.type == _symtable.TYPE_FUNCTION)

    def is_nested(self):
        """Return *True* if the block is a nested class
        or function."""
        return bool(self._table.nested)

    def has_children(self):
        """Return *True* if the block has nested namespaces.
        """
        return bool(self._table.children)

    def get_identifiers(self):
        """Return a list of names of symbols in the table.
        """
        return self._table.symbols.keys()

    def lookup(self, name):
        """Lookup a *name* in the table.
        Returns a *Symbol* instance.
        """
        sym = self._symbols.get(name)
        if sym is None:
            flags = self._table.symbols[name]
            namespaces = self.__check_children(name)
            module_scope = (self._table.name == "top")
            sym = self._symbols[name] = Symbol(name, flags, namespaces,
                                               module_scope=module_scope)
        return sym

    def get_symbols(self):
        """Return a list of *Symbol* instances for
        names in the table.
        """
        return [self.lookup(ident) for ident in self.get_identifiers()]

    def __check_children(self, name):
        # Collect the child tables that define ``name`` as a namespace.
        return [_newSymbolTable(st, self._filename)
                for st in self._table.children
                if st.name == name]

    def get_children(self):
        """Return a list of the nested symbol tables.
        """
        return [_newSymbolTable(st, self._filename)
                for st in self._table.children]
class ModuleType(Class):
    # Static-compiler type object describing ``types.ModuleType`` values.
    # Constructed with is_exact=True — presumably subclass instances are not
    # tracked under this type; confirm against the Class base semantics.
    def __init__(self, type_env: TypeEnvironment) -> None:
        super().__init__(TypeName("types", "ModuleType"), type_env, is_exact=True)
Mapping = _alias(collections.abc.Mapping, 2)
def rewrite(
    root: Module,
    table: SymbolTable,
    filename: str,
    modname: str,
    mode: str = "exec",
    optimize: int = -1,
    builtins: ModuleType | Mapping[str, object] = __builtins__,
    is_static: bool = False,
) -> Module:
    """Rewrite a module AST for strict-module execution.

    Thin convenience wrapper: constructs a StrictModuleRewriter with the
    given arguments and returns the transformed Module from transform().
    """
    return StrictModuleRewriter(
        root,
        table,
        filename,
        modname,
        mode,
        optimize,
        builtins,
        is_static=is_static,
    ).transform()
185,709 | from __future__ import annotations
import ast
from typing import final, TypeVar
class AnnotationRemover(ast.NodeTransformer):
    """AST transformer that strips all type annotations in place.

    Removes argument and return annotations from (async) function
    definitions and converts ``AnnAssign`` statements into plain
    ``Assign`` statements.
    """

    def visit_single_arg(self, arg: ast.arg) -> ast.arg:
        # Drop the annotation; the arg node itself is reused.
        arg.annotation = None
        return arg

    def visit_fn_arguments(self, node: ast.arguments) -> ast.arguments:
        # Strip annotations from every argument category in turn.
        if node.posonlyargs:
            node.posonlyargs = [self.visit_single_arg(a) for a in node.posonlyargs]
        if node.args:
            node.args = [self.visit_single_arg(a) for a in node.args]
        if node.kwonlyargs:
            node.kwonlyargs = [self.visit_single_arg(a) for a in node.kwonlyargs]
        vararg = node.vararg
        if vararg:
            node.vararg = self.visit_single_arg(vararg)
        kwarg = node.kwarg
        if kwarg:
            node.kwarg = self.visit_single_arg(kwarg)
        return node

    def visit_function(self, node: FunctionDefNode) -> FunctionDefNode:
        # NOTE(review): this assigns ``node.arguments`` rather than
        # ``node.args``; it still works because visit_fn_arguments mutates
        # the arguments node in place, but the attribute name looks like a
        # typo — confirm intent.
        node.arguments = self.visit_fn_arguments(node.args)
        node.returns = None
        node.decorator_list = [
            self.visit(decorator) for decorator in node.decorator_list
        ]
        return node

    def visit_FunctionDef(self, node: FunctionDefNode) -> FunctionDefNode:
        return self.visit_function(node)

    def visit_AsyncFunctionDef(self, node: FunctionDefNode) -> FunctionDefNode:
        return self.visit_function(node)

    def visit_AnnAssign(self, node: ast.AnnAssign) -> ast.Assign:
        # Here, we replace `x: A = a` with just `x = a`. In case there's no value,
        # we set the value to an `...`. E.g: `x: A` changes to `x = ...`.
        #
        # We need to be a little careful about ensuring that newly created nodes
        # get the line and column information copied to them. This helps us avoid
        # an extra pass over the AST with ast.fix_missing_locations()
        value = node.value
        if value is None:
            value = ast.Ellipsis()
            value.kind = None
            _copy_attrs(node, value)
        assign = ast.Assign(targets=[node.target], value=value, type_comment=None)
        _copy_attrs(node, assign)
        return assign
def remove_annotations(node: ast.AST) -> ast.Module:
    # Strip every annotation from the tree, then repair any location info
    # the rewrite left missing.
    return ast.fix_missing_locations(AnnotationRemover().visit(node))
185,710 | from __future__ import annotations
import ast
from typing import final, TypeVar
The provided code snippet includes necessary dependencies for implementing the `_copy_attrs` function. Write a Python function `def _copy_attrs(src: ast.AST, dest: ast.AST) -> None` to solve the following problem:
Copies line and column info from one node to another.
Here is the function:
def _copy_attrs(src: ast.AST, dest: ast.AST) -> None:
"""
Copies line and column info from one node to another.
"""
dest.lineno = src.lineno
dest.end_lineno = src.end_lineno
dest.col_offset = src.col_offset
dest.end_col_offset = src.end_col_offset | Copies line and column info from one node to another. |
185,711 | from __future__ import annotations
from typing import Type
TYPE_FREEZE_ENABLED = True
The provided code snippet includes necessary dependencies for implementing the `set_freeze_enabled` function. Write a Python function `def set_freeze_enabled(flag: bool) -> bool` to solve the following problem:
returns old value
Here is the function:
def set_freeze_enabled(flag: bool) -> bool:
    """Enable or disable type freezing; returns the previous setting."""
    global TYPE_FREEZE_ENABLED
    previous = TYPE_FREEZE_ENABLED
    TYPE_FREEZE_ENABLED = flag
    return previous
185,712 | from __future__ import annotations
from typing import Type
Type = _alias(type, 1, inst=False, name='Type')
Type.__doc__ = \
"""A special construct usable to annotate class objects.
For example, suppose we have the following classes::
class User: ... # Abstract base for User classes
class BasicUser(User): ...
class ProUser(User): ...
class TeamUser(User): ...
And a function that takes a class argument that's a subclass of
User and returns an instance of the corresponding class::
U = TypeVar('U', bound=User)
def new_user(user_class: Type[U]) -> U:
user = user_class()
# (Here we could write the user object to a database)
return user
joe = new_user(BasicUser)
At this point the type checker knows that joe has type BasicUser.
"""
The provided code snippet includes necessary dependencies for implementing the `loose_slots` function. Write a Python function `def loose_slots(obj: Type[object]) -> Type[object]` to solve the following problem:
Indicates that a type defined in a strict module should support assigning additional variables to __dict__ to support migration.
Here is the function:
def loose_slots(obj: Type[object]) -> Type[object]:
    """Indicates that a type defined in a strict module should support assigning
    additional variables to __dict__ to support migration."""
    # warn_on_inst_dict is an optional runtime hook (None when unavailable);
    # when present it flags instance-dict creation for this type.
    if warn_on_inst_dict is not None:
        warn_on_inst_dict(obj)
    return obj
185,713 | from __future__ import annotations
from typing import Type
Type = _alias(type, 1, inst=False, name='Type')
Type.__doc__ = \
"""A special construct usable to annotate class objects.
For example, suppose we have the following classes::
class User: ... # Abstract base for User classes
class BasicUser(User): ...
class ProUser(User): ...
class TeamUser(User): ...
And a function that takes a class argument that's a subclass of
User and returns an instance of the corresponding class::
U = TypeVar('U', bound=User)
def new_user(user_class: Type[U]) -> U:
user = user_class()
# (Here we could write the user object to a database)
return user
joe = new_user(BasicUser)
At this point the type checker knows that joe has type BasicUser.
"""
The provided code snippet includes necessary dependencies for implementing the `strict_slots` function. Write a Python function `def strict_slots(obj: Type[object]) -> Type[object]` to solve the following problem:
Marks a type defined in a strict module to get slots automatically and no __dict__ is created
Here is the function:
def strict_slots(obj: Type[object]) -> Type[object]:
    """Marks a type defined in a strict module to get slots automatically
    and no __dict__ is created"""
    # Pure marker decorator — identity at runtime; the strict-module
    # machinery recognizes it by name during analysis.
    return obj
185,714 | from __future__ import annotations
from typing import Type
Type = _alias(type, 1, inst=False, name='Type')
Type.__doc__ = \
"""A special construct usable to annotate class objects.
For example, suppose we have the following classes::
class User: ... # Abstract base for User classes
class BasicUser(User): ...
class ProUser(User): ...
class TeamUser(User): ...
And a function that takes a class argument that's a subclass of
User and returns an instance of the corresponding class::
U = TypeVar('U', bound=User)
def new_user(user_class: Type[U]) -> U:
user = user_class()
# (Here we could write the user object to a database)
return user
joe = new_user(BasicUser)
At this point the type checker knows that joe has type BasicUser.
"""
The provided code snippet includes necessary dependencies for implementing the `extra_slot` function. Write a Python function `def extra_slot(obj: Type[object], _name: str) -> Type[object]` to solve the following problem:
mark `name` to be part of __slots__ in obj
Here is the function:
def extra_slot(obj: Type[object], _name: str) -> Type[object]:
    """mark `name` to be part of __slots__ in obj"""
    # Identity at runtime; the slot registration happens in the
    # strict-module analysis, not here.
    return obj
185,715 | from __future__ import annotations
from typing import Type
def _mark_cached_property(obj: object, is_async: bool, original_dec: object) -> object:
return obj | null |
185,716 | from __future__ import annotations
import ast
import os.path
import symtable
import typing
from ast import (
alias,
AST,
AsyncFunctionDef,
ClassDef,
comprehension,
copy_location,
DictComp,
FunctionDef,
GeneratorExp,
iter_fields,
Lambda,
ListComp,
NodeVisitor,
SetComp,
Try,
)
from collections import deque
from symtable import Class, SymbolTable
from typing import (
Callable,
Dict,
final,
Generic,
List,
Mapping,
MutableMapping,
Optional,
Type,
TypeVar,
)
from .runtime import (
_mark_cached_property,
freeze_type,
loose_slots,
mutable,
strict_slots,
)
Mapping = _alias(collections.abc.Mapping, 2)
def freeze_type(obj: Type[object]) -> Type[object]:
    # When the cinder freeze hook is available: freeze the type if freezing
    # is enabled, otherwise fall back to warning on instance-dict creation
    # (when that hook exists). Always returns the type unchanged.
    if cinder_freeze is not None:
        if TYPE_FREEZE_ENABLED:
            cinder_freeze(obj)
        elif warn_on_inst_dict is not None:
            warn_on_inst_dict(obj)
    return obj
def mutable(obj: Type[object]) -> Type[object]:
    """Marks a type defined in a strict module as supporting mutability"""
    # Identity at runtime — the marker is consumed by static analysis.
    return obj
def make_fixed_modules() -> Mapping[str, Mapping[str, object]]:
    """Build the table of modules whose contents are fixed (known a priori).

    Returns a mapping of module name -> member dict covering ``typing``
    plus the strict-modules helper decorators (exposed under both the
    ``strict_modules`` and ``__strict__`` names).
    """
    typing_members = {}
    for name in typing.__all__:
        typing_members[name] = getattr(typing, name)
    strict_mod_members = {
        "freeze_type": freeze_type,
        "mutable": mutable,
    }
    return {
        "typing": typing_members,
        # "strict_modules" gets its own copy so mutating one entry cannot
        # alias "__strict__", which keeps the original dict.
        "strict_modules": dict(strict_mod_members),
        "__strict__": strict_mod_members,
    }
185,717 | from __future__ import annotations
import ast
import os.path
import symtable
import typing
from ast import (
alias,
AST,
AsyncFunctionDef,
ClassDef,
comprehension,
copy_location,
DictComp,
FunctionDef,
GeneratorExp,
iter_fields,
Lambda,
ListComp,
NodeVisitor,
SetComp,
Try,
)
from collections import deque
from symtable import Class, SymbolTable
from typing import (
Callable,
Dict,
final,
Generic,
List,
Mapping,
MutableMapping,
Optional,
Type,
TypeVar,
)
from .runtime import (
_mark_cached_property,
freeze_type,
loose_slots,
mutable,
strict_slots,
)
TVar = TypeVar("TScope")
TScopeData = TypeVar("TData", covariant=True)


class SymbolScope(Generic[TVar, TScopeData]):
    """One lexical scope: its symbol table, arbitrary per-scope data, and a
    lazily-created mapping of per-name values of type TVar."""

    def __init__(
        self,
        symbols: SymbolTable,
        scope_data: TScopeData,
        vars: Optional[MutableMapping[str, TVar]] = None,
        invisible: bool = False,
    ) -> None:
        self.symbols = symbols
        self.vars = vars
        self.scope_data = scope_data
        self.invisible = invisible

    def __getitem__(self, name: str) -> TVar:
        mapping = self.vars
        if mapping is None:
            # No store has happened yet, so nothing can be found.
            raise KeyError(name)
        return mapping[name]

    def __setitem__(self, name: str, value: TVar) -> None:
        mapping = self.vars
        if mapping is None:
            # Create the mapping lazily on first store.
            mapping = self.vars = {}
        mapping[name] = value

    def __delitem__(self, name: str) -> None:
        mapping = self.vars
        if mapping is None:
            raise KeyError(name)
        del mapping[name]

    def __contains__(self, name: str) -> bool:
        mapping = self.vars
        return mapping is not None and name in mapping
class Class(SymbolTable):
    """Symbol table for a class body.

    NOTE(review): mirrors CPython's ``symtable.Class``; keep in sync with
    the stdlib version when upgrading.
    """

    # Cached result of get_methods(); computed on first access.
    __methods = None

    def get_methods(self):
        """Return a tuple of methods declared in the class.
        """
        if self.__methods is None:
            # A dict is used to deduplicate child names while preserving
            # their declaration order.
            d = {}
            for st in self._table.children:
                d[st.name] = 1
            self.__methods = tuple(d)
        return self.__methods
List = _alias(list, 1, inst=False, name='List')
def mangle_priv_name(name: str, scopes: List[SymbolScope[TVar, TScopeData]]) -> str:
    """Apply Python's private-name mangling (``__name`` -> ``_Class__name``).

    Dunder names (``__x__``) are left untouched; the nearest enclosing
    visible class scope supplies the mangling prefix.
    """
    if name.startswith("__") and not name.endswith("__"):
        # symtable has name mangled private names. Walk the scope list
        # backwards and apply the mangled class name
        for scope in reversed(scopes):
            if isinstance(scope.symbols, symtable.Class) and not scope.invisible:
                return "_" + scope.symbols.get_name().lstrip("_") + name
    return name
185,718 | from __future__ import annotations
import ast
import os.path
import symtable
import typing
from ast import (
alias,
AST,
AsyncFunctionDef,
ClassDef,
comprehension,
copy_location,
DictComp,
FunctionDef,
GeneratorExp,
iter_fields,
Lambda,
ListComp,
NodeVisitor,
SetComp,
Try,
)
from collections import deque
from symtable import Class, SymbolTable
from typing import (
Callable,
Dict,
final,
Generic,
List,
Mapping,
MutableMapping,
Optional,
Type,
TypeVar,
)
from .runtime import (
_mark_cached_property,
freeze_type,
loose_slots,
mutable,
strict_slots,
)
def imported_name(name: alias) -> str:
    """Return the local name bound by an import alias.

    ``import a.b`` binds ``a``; ``import a.b as c`` binds ``c``.
    """
    if name.asname:
        return name.asname
    return name.name.partition(".")[0]
185,719 | import _imp
import importlib
import sys
import zipimport
from importlib.machinery import (
BYTECODE_SUFFIXES,
ExtensionFileLoader,
FileFinder,
SOURCE_SUFFIXES,
SourceFileLoader,
SourcelessFileLoader,
)
from . import compile as python_compile
class PySourceFileLoader(SourceFileLoader):
def source_to_code(self, data, path, *, _optimize=-1):
def _install_source_loader_helper(source_loader_type):
def _install_py_loader():
_install_source_loader_helper(PySourceFileLoader) | null |
185,720 | import _imp
import importlib
import sys
import zipimport
from importlib.machinery import (
BYTECODE_SUFFIXES,
ExtensionFileLoader,
FileFinder,
SOURCE_SUFFIXES,
SourceFileLoader,
SourcelessFileLoader,
)
from . import compile as python_compile
def install() -> None:
    """Installs a loader which is capable of loading and validating strict modules"""
    supported_loaders = _get_supported_file_loaders()
    # Insert our FileFinder hook just before the first path hook that is an
    # instance rather than a class; if every hook is a class, prepend it.
    for index, hook in enumerate(sys.path_hooks):
        if not isinstance(hook, type):
            sys.path_hooks.insert(index, FileFinder.path_hook(*supported_loaders))
            break
    else:
        sys.path_hooks.insert(0, FileFinder.path_hook(*supported_loaders))
    # We need to clear the path_importer_cache so that our new FileFinder will
    # start being used for existing directories we've loaded modules from.
    sys.path_importer_cache.clear()
def _install_strict_loader():
    # Import locally so the strict-loader machinery is only pulled in when
    # installation is actually requested.
    from .strict.loader import install
    install()
185,721 | from __future__ import annotations
import ast
import sys
from typing import Callable, cast, Mapping, Tuple, Type
def make_qualname(parent_qualname: str | None, name: str) -> str:
    """Join ``name`` onto ``parent_qualname`` with a dot.

    A ``None`` parent means top level, so ``name`` is returned as-is.
    """
    return name if parent_qualname is None else f"{parent_qualname}.{name}"
185,722 | from __future__ import annotations
import ast
import sys
from typing import Callable, cast, Mapping, Tuple, Type
# Map from ast comparison-operator node types to the equivalent runtime
# comparison, used to constant-fold ``sys.hexversion`` version checks.
# (In/Is/IsNot entries exist but are filtered out by sys_hexversion_check.)
COMPARE_OPS: Mapping[Type[ast.cmpop], Callable[[object, object], bool]] = {
    ast.Gt: lambda a, b: a > b,
    ast.GtE: lambda a, b: a >= b,
    ast.Lt: lambda a, b: a < b,
    ast.LtE: lambda a, b: a <= b,
    ast.Eq: lambda a, b: a == b,
    ast.NotEq: lambda a, b: a != b,
    ast.In: lambda a, b: a in b,
    ast.Is: lambda a, b: a is b,
    ast.IsNot: lambda a, b: a is not b,
}
def _is_sys_hexversion_attr_load(node: ast.expr) -> bool:
if isinstance(node, ast.Attribute):
container = node.value
if (
isinstance(container, ast.Name)
and container.id == "sys"
and isinstance(node.ctx, ast.Load)
and node.attr == "hexversion"
):
return True
return False
def _get_const_int(node: ast.expr) -> int | None:
if isinstance(node, ast.Constant):
value = node.value
return value if isinstance(value, int) else None
The provided code snippet includes necessary dependencies for implementing the `sys_hexversion_check` function. Write a Python function `def sys_hexversion_check( node: ast.If, ) -> bool | None` to solve the following problem:
A helper function, the result of this is used to determine whether we need to skip visiting dead code gated by sys.hexversion checks.
Here is the function:
def sys_hexversion_check(
    node: ast.If,
) -> bool | None:
    """
    A helper function, the result of this is used to determine whether
    we need to skip visiting dead code gated by sys.hexversion checks.

    Returns True/False when the test is a statically-evaluable comparison
    involving ``sys.hexversion`` and an integer constant, otherwise None
    (including for non-Compare tests, which fall through implicitly).
    """
    test_node = node.test
    if isinstance(test_node, ast.Compare):
        # Only simple binary comparisons (one operator, one comparator).
        if len(test_node.comparators) != 1:
            return None
        assert len(test_node.ops) == 1
        left = test_node.left
        right = test_node.comparators[0]
        op = test_node.ops[0]
        # Membership/identity operators don't make sense for version ints.
        if type(op) in (ast.In, ast.Is, ast.IsNot):
            return None
        # sys.hexversion may appear on either side of the comparison.
        if _is_sys_hexversion_attr_load(left):
            left_value = sys.hexversion
            right_value = _get_const_int(right)
        elif _is_sys_hexversion_attr_load(right):
            left_value = _get_const_int(left)
            right_value = sys.hexversion
        else:
            return None
        if left_value is None or right_value is None:
            return None
        return COMPARE_OPS[type(op)](left_value, right_value)
185,723 | from __future__ import annotations
import ast
from ast import (
AST,
Attribute,
BinOp,
Call,
ClassDef,
Constant,
Expression,
Name,
Subscript,
)
from contextlib import nullcontext
from enum import Enum
from typing import (
cast,
ContextManager,
Dict,
List,
Optional,
overload,
Set,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
from ..errors import TypedSyntaxError
from ..symbols import ModuleScope, Scope
from .types import (
Callable,
Class,
ClassVar,
CType,
DataclassDecorator,
DynamicClass,
ExactClass,
FinalClass,
Function,
FunctionGroup,
InitVar,
KnownBoolean,
MethodType,
ModuleInstance,
NativeDecorator,
TType,
TypeDescr,
UnionType,
UnknownDecoratedMethod,
Value,
)
from .visitor import GenericVisitor
The provided code snippet includes necessary dependencies for implementing the `find_transitive_deps` function. Write a Python function `def find_transitive_deps( modname: str, all_deps: dict[str, dict[str, set[tuple[str, str]]]] ) -> set[str]` to solve the following problem:
Find all transitive dependency modules of `modname`. Given an `alldeps` dictionary of {modname: {name: {(module, name)}}}, return the transitive closure of module names depended on by `modname` (not including `modname` itself).
Here is the function:
def find_transitive_deps(
    modname: str, all_deps: dict[str, dict[str, set[tuple[str, str]]]]
) -> set[str]:
    """Find all transitive dependency modules of `modname`.

    Given an `alldeps` dictionary of {modname: {name: {(module, name)}}}, return
    the transitive closure of module names depended on by `modname` (not
    including `modname` itself).
    """
    # Seed the worklist with every (module, name) edge leaving modname.
    pending = {
        dep for name_deps in all_deps.get(modname, {}).values() for dep in name_deps
    }
    visited = set()
    modules = set()
    while pending:
        entry = pending.pop()
        visited.add(entry)
        mod, name = entry
        modules.add(mod)
        # Follow only the edges for this specific (module, name) pair,
        # skipping pairs that were already expanded.
        pending.update(all_deps.get(mod, {}).get(name, set()) - visited)
    modules.discard(modname)
    return modules
185,724 | from __future__ import annotations
from __static__ import chkdict, chklist
import ast
import dataclasses
import sys
from ast import (
AnnAssign,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Bytes,
Call,
ClassDef,
cmpop,
Constant,
copy_location,
expr,
FunctionDef,
NameConstant,
Num,
Return,
Starred,
Str,
)
from copy import copy
from enum import Enum, IntEnum
from functools import cached_property
from types import (
BuiltinFunctionType,
GetSetDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
)
from typing import (
Callable as typingCallable,
cast,
ClassVar as typingClassVar,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from cinderx.static import ( # noqa: F401
FAST_LEN_ARRAY,
FAST_LEN_DICT,
FAST_LEN_INEXACT,
FAST_LEN_LIST,
FAST_LEN_SET,
FAST_LEN_STR,
FAST_LEN_TUPLE,
PRIM_OP_ADD_DBL,
PRIM_OP_ADD_INT,
PRIM_OP_AND_INT,
PRIM_OP_DIV_DBL,
PRIM_OP_DIV_INT,
PRIM_OP_DIV_UN_INT,
PRIM_OP_EQ_DBL,
PRIM_OP_EQ_INT,
PRIM_OP_GE_DBL,
PRIM_OP_GE_INT,
PRIM_OP_GE_UN_INT,
PRIM_OP_GT_DBL,
PRIM_OP_GT_INT,
PRIM_OP_GT_UN_INT,
PRIM_OP_INV_INT,
PRIM_OP_LE_DBL,
PRIM_OP_LE_INT,
PRIM_OP_LE_UN_INT,
PRIM_OP_LSHIFT_INT,
PRIM_OP_LT_DBL,
PRIM_OP_LT_INT,
PRIM_OP_LT_UN_INT,
PRIM_OP_MOD_DBL,
PRIM_OP_MOD_INT,
PRIM_OP_MOD_UN_INT,
PRIM_OP_MUL_DBL,
PRIM_OP_MUL_INT,
PRIM_OP_NE_DBL,
PRIM_OP_NE_INT,
PRIM_OP_NEG_DBL,
PRIM_OP_NEG_INT,
PRIM_OP_NOT_INT,
PRIM_OP_OR_INT,
PRIM_OP_POW_DBL,
PRIM_OP_POW_INT,
PRIM_OP_POW_UN_INT,
PRIM_OP_RSHIFT_INT,
PRIM_OP_RSHIFT_UN_INT,
PRIM_OP_SUB_DBL,
PRIM_OP_SUB_INT,
PRIM_OP_XOR_INT,
SEQ_ARRAY_INT64,
SEQ_CHECKED_LIST,
SEQ_LIST,
SEQ_LIST_INEXACT,
SEQ_REPEAT_INEXACT_NUM,
SEQ_REPEAT_INEXACT_SEQ,
SEQ_REPEAT_PRIMITIVE_NUM,
SEQ_REPEAT_REVERSED,
SEQ_SUBSCR_UNCHECKED,
SEQ_TUPLE,
TYPED_BOOL,
TYPED_DOUBLE,
TYPED_INT16,
TYPED_INT32,
TYPED_INT64,
TYPED_INT8,
TYPED_INT_64BIT,
TYPED_OBJECT,
TYPED_UINT16,
TYPED_UINT32,
TYPED_UINT64,
TYPED_UINT8,
)
from ..consts import CO_STATICALLY_COMPILED
from ..errors import TypedSyntaxError
from ..optimizer import AstOptimizer
from ..pyassem import Block, FVC_REPR
from ..pycodegen import CodeGenerator, FOR_LOOP
from ..symbols import FunctionScope
from ..unparse import to_expr
from ..visitor import ASTRewriter, TAst
from .effects import NarrowingEffect, NO_EFFECT, TypeState
from .visitor import GenericVisitor
class Value:
    """base class for all values tracked at compile time.

    A Value is either a type (see Class) or an instance of a type (see
    Object).  The bind_* hooks are invoked by the type binder to assign
    types to AST nodes; the emit_* hooks are invoked by the code generator
    to produce bytecode.  The defaults here reject the operation with a
    syntax error and/or fall back to fully-dynamic behavior; subclasses
    override the hooks they support.
    """

    def __init__(self, klass: Class) -> None:
        """klass: the Class of this object"""
        self.klass = klass

    # NOTE(review): `name` and `name_with_exact` are read as plain
    # attributes (`self.name`, no call) in the f-strings below, which
    # implies they are properties in the original source; a decorator may
    # have been lost in transcription — confirm before relying on this.
    def name(self) -> str:
        return type(self).__name__

    def name_with_exact(self) -> str:
        return self.name

    def exact(self) -> Value:
        # Base values carry no exact/inexact distinction; subclasses override.
        return self

    def inexact(self) -> Value:
        return self

    def nonliteral(self) -> Value:
        # Widen away any literal refinement (e.g. a Literal[42]-style value
        # back to its plain instance type) via the type environment.
        return self.klass.type_env.get_nonliteral_type(self)

    def finish_bind(self, module: ModuleTable, klass: Class | None) -> Optional[Value]:
        # Post-declaration hook; returning None removes the member entirely
        # (see Class.finish_bind, which deletes members on a None result).
        return self

    def make_generic_type(self, index: GenericTypeIndex) -> Optional[Class]:
        # Only generic type definitions can be parameterized; default is None.
        pass

    def get_iter_type(self, node: ast.expr, visitor: TypeBinder) -> Value:
        """returns the type that is produced when iterating over this value"""
        visitor.syntax_error(f"cannot iterate over {self.name}", node)
        return visitor.type_env.DYNAMIC

    def as_oparg(self) -> int:
        # Encoding used for primitive-typed opcodes; most values have none.
        raise TypeError(f"{self.name} not valid here")

    def can_override(self, override: Value, klass: Class, module: ModuleTable) -> bool:
        # Default rule: an inherited member may only be overridden by a
        # member of the exact same kind.
        return type(self) == type(override)

    # ------------------------------------------------------------------
    # Attribute access
    # ------------------------------------------------------------------

    def resolve_attr(
        self, node: ast.Attribute, visitor: GenericVisitor[object]
    ) -> Optional[Value]:
        # Reports an error and (implicitly) returns None; callers fall back
        # to DYNAMIC.
        visitor.syntax_error(f"cannot load attribute from {self.name}", node)

    def bind_attr(
        self, node: ast.Attribute, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> None:
        visitor.set_type(
            node,
            self.resolve_attr(node, visitor) or visitor.type_env.DYNAMIC,
        )

    def bind_await(
        self, node: ast.Await, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> None:
        visitor.set_type(node, visitor.type_env.DYNAMIC)

    def bind_call(
        self, node: ast.Call, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> NarrowingEffect:
        visitor.syntax_error(f"cannot call {self.name}", node)
        return NO_EFFECT

    # ------------------------------------------------------------------
    # Descriptor protocol (compile-time analogue of __get__)
    # ------------------------------------------------------------------

    def bind_descr_get(
        self,
        node: ast.Attribute,
        inst: Optional[Object[TClassInv]],
        ctx: TClassInv,
        visitor: TypeBinder,
        type_ctx: Optional[Class],
    ) -> None:
        visitor.syntax_error(f"cannot get descriptor {self.name}", node)

    def resolve_descr_get(
        self,
        node: ast.Attribute,
        inst: Optional[Object[TClassInv]],
        ctx: TClassInv,
        visitor: GenericVisitor[object],
    ) -> Optional[Value]:
        # Plain values are returned unchanged when accessed via an instance
        # or a class (non-descriptor behavior).
        return self

    def resolve_decorate_function(
        self, fn: Function | DecoratedMethod, decorator: expr
    ) -> Optional[Function | DecoratedMethod]:
        # None means "unknown decorator": the decorated function becomes dynamic.
        return None

    def resolve_decorate_class(
        self,
        klass: Class,
        decorator: expr,
        visitor: DeclarationVisitor,
    ) -> Class:
        # Unknown class decorators erase all static knowledge of the class.
        return self.klass.type_env.dynamic

    # ------------------------------------------------------------------
    # Subscripting
    # ------------------------------------------------------------------

    def bind_subscr(
        self,
        node: ast.Subscript,
        type: Value,
        visitor: TypeBinder,
        type_ctx: Optional[Class] = None,
    ) -> None:
        visitor.check_can_assign_from(visitor.type_env.dynamic, type.klass, node)
        visitor.set_type(
            node,
            self.resolve_subscr(node, type, visitor) or visitor.type_env.DYNAMIC,
        )

    def resolve_subscr(
        self,
        node: ast.Subscript,
        type: Value,
        visitor: GenericVisitor[object],
    ) -> Optional[Value]:
        visitor.syntax_error(f"cannot index {self.name}", node)

    def emit_subscr(
        self, node: ast.Subscript, code_gen: Static310CodeGenerator
    ) -> None:
        # Evaluate container then index, then dispatch on the expression
        # context (Load/Store/Del).
        code_gen.set_lineno(node)
        code_gen.visit(node.value)
        code_gen.visit(node.slice)
        if isinstance(node.ctx, ast.Load):
            return self.emit_load_subscr(node, code_gen)
        elif isinstance(node.ctx, ast.Store):
            return self.emit_store_subscr(node, code_gen)
        else:
            return self.emit_delete_subscr(node, code_gen)

    def emit_load_subscr(
        self, node: ast.Subscript, code_gen: Static310CodeGenerator
    ) -> None:
        code_gen.emit("BINARY_SUBSCR")

    def emit_store_subscr(
        self, node: ast.Subscript, code_gen: Static310CodeGenerator
    ) -> None:
        code_gen.emit("STORE_SUBSCR")

    def emit_delete_subscr(
        self, node: ast.Subscript, code_gen: Static310CodeGenerator
    ) -> None:
        code_gen.emit("DELETE_SUBSCR")

    def emit_call(self, node: ast.Call, code_gen: Static310CodeGenerator) -> None:
        code_gen.defaultVisit(node)

    def emit_decorator_call(
        self, class_def: ClassDef, code_gen: Static310CodeGenerator
    ) -> None:
        # Decorator and decorated class are already on the stack.
        code_gen.emit("CALL_FUNCTION", 1)

    # ------------------------------------------------------------------
    # Attribute load/store/delete code generation
    # ------------------------------------------------------------------

    def emit_delete_attr(
        self, node: ast.Attribute, code_gen: Static310CodeGenerator
    ) -> None:
        code_gen.emit("DELETE_ATTR", code_gen.mangle(node.attr))

    def emit_load_attr(
        self, node: ast.Attribute, code_gen: Static310CodeGenerator
    ) -> None:
        # Delegate to the member's own emitter so slots etc. can specialize.
        member = self.klass.members.get(node.attr, self.klass.type_env.DYNAMIC)
        member.emit_load_attr_from(node, code_gen, self.klass)

    def emit_load_attr_from(
        self, node: Attribute, code_gen: Static310CodeGenerator, klass: Class
    ) -> None:
        # Emit perf warnings when the attribute owner is (or inherits from)
        # a dynamic class, then fall back to a normal LOAD_ATTR.
        if klass is klass.type_env.dynamic:
            code_gen.perf_warning(
                "Define the object's class in a Static Python "
                "module for more efficient attribute load",
                node,
            )
        elif klass.type_env.dynamic in klass.bases:
            code_gen.perf_warning(
                f"Make the base class of {klass.instance_name} that defines "
                f"attribute {node.attr} static for more efficient attribute load",
                node,
            )
        code_gen.emit("LOAD_ATTR", code_gen.mangle(node.attr))

    def emit_store_attr(
        self, node: ast.Attribute, code_gen: Static310CodeGenerator
    ) -> None:
        member = self.klass.members.get(node.attr, self.klass.type_env.DYNAMIC)
        member.emit_store_attr_to(node, code_gen, self.klass)

    def emit_store_attr_to(
        self, node: Attribute, code_gen: Static310CodeGenerator, klass: Class
    ) -> None:
        if klass is klass.type_env.dynamic:
            code_gen.perf_warning(
                f"Define the object's class in a Static Python "
                "module for more efficient attribute store",
                node,
            )
        elif klass.type_env.dynamic in klass.bases:
            code_gen.perf_warning(
                f"Make the base class of {klass.instance_name} that defines "
                f"attribute {node.attr} static for more efficient attribute store",
                node,
            )
        code_gen.emit("STORE_ATTR", code_gen.mangle(node.attr))

    def emit_attr(self, node: ast.Attribute, code_gen: Static310CodeGenerator) -> None:
        # Evaluate the receiver, then dispatch on the expression context.
        code_gen.visit(node.value)
        if isinstance(node.ctx, ast.Store):
            self.emit_store_attr(node, code_gen)
        elif isinstance(node.ctx, ast.Del):
            self.emit_delete_attr(node, code_gen)
        else:
            self.emit_load_attr(node, code_gen)

    def bind_forloop_target(self, target: ast.expr, visitor: TypeBinder) -> None:
        visitor.visit(target)

    # ------------------------------------------------------------------
    # Comparison / arithmetic binding and code generation
    # ------------------------------------------------------------------
    # The bind_* variants return True when this value handled the
    # operation (and set the node type); False lets the other operand try.

    def bind_compare(
        self,
        node: ast.Compare,
        left: expr,
        op: cmpop,
        right: expr,
        visitor: TypeBinder,
        type_ctx: Optional[Class],
    ) -> bool:
        visitor.syntax_error(f"cannot compare with {self.name}", node)
        return False

    def bind_reverse_compare(
        self,
        node: ast.Compare,
        left: expr,
        op: cmpop,
        right: expr,
        visitor: TypeBinder,
        type_ctx: Optional[Class],
    ) -> bool:
        visitor.syntax_error(f"cannot reverse compare with {self.name}", node)
        return False

    def emit_compare(self, op: cmpop, code_gen: Static310CodeGenerator) -> None:
        code_gen.defaultEmitCompare(op)

    def bind_binop(
        self, node: ast.BinOp, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> bool:
        visitor.syntax_error(f"cannot bin op with {self.name}", node)
        return False

    def bind_reverse_binop(
        self, node: ast.BinOp, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> bool:
        visitor.syntax_error(f"cannot reverse bin op with {self.name}", node)
        return False

    def bind_unaryop(
        self, node: ast.UnaryOp, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> None:
        visitor.syntax_error(f"cannot reverse unary op with {self.name}", node)

    def emit_binop(self, node: ast.BinOp, code_gen: Static310CodeGenerator) -> None:
        code_gen.defaultVisit(node)

    def emit_continue(
        self, node: ast.Continue, code_gen: Static310CodeGenerator
    ) -> None:
        code_gen.defaultVisit(node)

    def emit_forloop(self, node: ast.For, code_gen: Static310CodeGenerator) -> None:
        # Standard (non-specialized) for-loop: GET_ITER / FOR_ITER protocol
        # with loop bookkeeping for break/continue and an optional else block.
        start = code_gen.newBlock("default_forloop_start")
        anchor = code_gen.newBlock("default_forloop_anchor")
        after = code_gen.newBlock("default_forloop_after")
        code_gen.set_lineno(node)
        code_gen.push_loop(FOR_LOOP, start, after)
        code_gen.visit(node.iter)
        code_gen.emit("GET_ITER")
        code_gen.nextBlock(start)
        code_gen.emit("FOR_ITER", anchor)
        code_gen.visit(node.target)
        code_gen.visit(node.body)
        code_gen.emit("JUMP_ABSOLUTE", start)
        code_gen.nextBlock(anchor)
        code_gen.pop_loop()
        if node.orelse:
            code_gen.visit(node.orelse)
        code_gen.nextBlock(after)

    def emit_unaryop(self, node: ast.UnaryOp, code_gen: Static310CodeGenerator) -> None:
        code_gen.defaultVisit(node)

    def emit_aug_rhs(
        self, node: ast.AugAssign, code_gen: Static310CodeGenerator
    ) -> None:
        code_gen.defaultCall(node, "emitAugRHS")

    def bind_constant(self, node: ast.Constant, visitor: TypeBinder) -> None:
        visitor.syntax_error(f"cannot constant with {self.name}", node)

    def emit_constant(
        self, node: ast.Constant, code_gen: Static310CodeGenerator
    ) -> None:
        return code_gen.defaultVisit(node)

    # ------------------------------------------------------------------
    # Name load/store/delete code generation
    # ------------------------------------------------------------------

    def emit_name(self, node: ast.Name, code_gen: Static310CodeGenerator) -> None:
        if isinstance(node.ctx, ast.Load):
            return self.emit_load_name(node, code_gen)
        elif isinstance(node.ctx, ast.Store):
            return self.emit_store_name(node, code_gen)
        else:
            return self.emit_delete_name(node, code_gen)

    def emit_load_name(self, node: ast.Name, code_gen: Static310CodeGenerator) -> None:
        code_gen.loadName(node.id)

    def emit_store_name(self, node: ast.Name, code_gen: Static310CodeGenerator) -> None:
        code_gen.storeName(node.id)

    def emit_delete_name(
        self, node: ast.Name, code_gen: Static310CodeGenerator
    ) -> None:
        code_gen.delName(node.id)

    # ------------------------------------------------------------------
    # Conditional-jump code generation
    # ------------------------------------------------------------------

    def emit_jumpif(
        self, test: AST, next: Block, is_if_true: bool, code_gen: Static310CodeGenerator
    ) -> None:
        code_gen.visit(test)
        self.emit_jumpif_only(next, is_if_true, code_gen)

    def emit_jumpif_only(
        self, next: Block, is_if_true: bool, code_gen: Static310CodeGenerator
    ) -> None:
        code_gen.emit("POP_JUMP_IF_TRUE" if is_if_true else "POP_JUMP_IF_FALSE", next)

    def emit_jumpif_pop(
        self, test: AST, next: Block, is_if_true: bool, code_gen: Static310CodeGenerator
    ) -> None:
        code_gen.visit(test)
        self.emit_jumpif_pop_only(next, is_if_true, code_gen)

    def emit_jumpif_pop_only(
        self, next: Block, is_if_true: bool, code_gen: Static310CodeGenerator
    ) -> None:
        # OR_POP variants leave the value on the stack when short-circuiting
        # (used for `and`/`or` expressions).
        code_gen.emit(
            "JUMP_IF_TRUE_OR_POP" if is_if_true else "JUMP_IF_FALSE_OR_POP", next
        )

    # ------------------------------------------------------------------
    # Primitive boxing / fast-len / generics
    # ------------------------------------------------------------------

    def emit_box(self, node: expr, code_gen: Static310CodeGenerator) -> None:
        raise RuntimeError(f"Unsupported box type: {code_gen.get_type(node)}")

    def emit_unbox(self, node: expr, code_gen: Static310CodeGenerator) -> None:
        raise RuntimeError("Unsupported unbox type")

    def get_fast_len_type(self) -> Optional[int]:
        # FAST_LEN_* oparg for containers with an O(1) length; None means
        # no fast path.
        return None

    def emit_len(
        self, node: ast.Call, code_gen: Static310CodeGenerator, boxed: bool
    ) -> None:
        if not boxed:
            raise RuntimeError("Unsupported type for clen()")
        return self.emit_call(node, code_gen)

    def make_generic(
        self, new_type: Class, name: GenericTypeName, type_env: TypeEnvironment
    ) -> Value:
        return self

    def make_literal(self, literal_value: object, type_env: TypeEnvironment) -> Value:
        raise NotImplementedError(f"Type {self.name} does not support literals")

    def emit_convert(self, from_type: Value, code_gen: Static310CodeGenerator) -> None:
        pass

    def is_truthy_literal(self) -> bool:
        return False
def resolve_instance_attr(
    node: ast.Attribute,
    inst: Object[Class],
    visitor: GenericVisitor[object],
) -> Optional[Value]:
    """Resolve ``node.attr`` on instance *inst* by walking its class's MRO.

    The first base whose member yields a non-None compile-time descriptor
    get wins; if nothing resolves, the attribute is treated as DYNAMIC.
    """
    klass = inst.klass
    for base in klass.mro:
        candidate = base.members.get(node.attr)
        if candidate is None:
            continue
        resolved = candidate.resolve_descr_get(node, inst, klass, visitor)
        if resolved is not None:
            return resolved
    return klass.type_env.DYNAMIC
class Object(Value, Generic[TClass]):
    """Represents an instance of a type at compile time.

    Instances are permissive: calls, comparisons and arithmetic generally
    fall back to DYNAMIC rather than erroring, and attribute access is
    resolved through the class's MRO (with slot specialization in codegen).
    """

    klass: TClass

    # NOTE(review): `name`/`name_with_exact` are read as plain attributes
    # elsewhere in this file, which implies they are properties in the
    # original source; confirm a decorator wasn't lost in transcription.
    def name(self) -> str:
        return self.klass.readable_name

    def name_with_exact(self) -> str:
        return self.klass.instance_name_with_exact

    def as_oparg(self) -> int:
        return TYPED_OBJECT

    def bind_dynamic_call(self, node: ast.Call, visitor: TypeBinder) -> NarrowingEffect:
        """Type a fully-dynamic call: result is DYNAMIC and every argument
        must be a boxed (non-primitive) value.

        Fix: ``self`` was missing from the parameter list even though
        ``bind_call`` invokes this as a bound method, which would have
        shifted every argument by one (``node`` receiving the instance).
        """
        visitor.set_type(node, visitor.type_env.DYNAMIC)
        for arg in node.args:
            visitor.visitExpectedType(
                arg, visitor.type_env.DYNAMIC, CALL_ARGUMENT_CANNOT_BE_PRIMITIVE
            )
        for arg in node.keywords:
            visitor.visitExpectedType(
                arg.value, visitor.type_env.DYNAMIC, CALL_ARGUMENT_CANNOT_BE_PRIMITIVE
            )
        return NO_EFFECT

    def bind_call(
        self, node: ast.Call, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> NarrowingEffect:
        # Calls on plain instances are never statically resolvable.
        return self.bind_dynamic_call(node, visitor)

    def resolve_attr(
        self, node: ast.Attribute, visitor: GenericVisitor[object]
    ) -> Optional[Value]:
        return resolve_instance_attr(node, self, visitor)

    def emit_delete_attr(
        self, node: ast.Attribute, code_gen: Static310CodeGenerator
    ) -> None:
        # Slot-backed attributes skip the mangled dynamic path; __dict__
        # itself is never slot-backed.
        if self.klass.find_slot(node) and node.attr != "__dict__":
            code_gen.emit("DELETE_ATTR", node.attr)
            return
        super().emit_delete_attr(node, code_gen)

    def emit_load_attr(
        self, node: ast.Attribute, code_gen: Static310CodeGenerator
    ) -> None:
        if (member := self.klass.find_slot(node)) and node.attr != "__dict__":
            member.emit_load_from_slot(code_gen)
            return
        super().emit_load_attr(node, code_gen)

    def emit_store_attr(
        self, node: ast.Attribute, code_gen: Static310CodeGenerator
    ) -> None:
        if (member := self.klass.find_slot(node)) and node.attr != "__dict__":
            member.emit_store_to_slot(code_gen)
            return
        super().emit_store_attr(node, code_gen)

    def bind_descr_get(
        self,
        node: ast.Attribute,
        inst: Optional[Object[TClassInv]],
        ctx: TClassInv,
        visitor: TypeBinder,
        type_ctx: Optional[Class],
    ) -> None:
        visitor.set_type(
            node,
            self.resolve_descr_get(node, inst, ctx, visitor)
            or visitor.type_env.DYNAMIC,
        )

    def resolve_subscr(
        self,
        node: ast.Subscript,
        type: Value,
        visitor: GenericVisitor[object],
    ) -> Optional[Value]:
        # Unknown result; caller substitutes DYNAMIC.
        return None

    def bind_compare(
        self,
        node: ast.Compare,
        left: expr,
        op: cmpop,
        right: expr,
        visitor: TypeBinder,
        type_ctx: Optional[Class],
    ) -> bool:
        # Defer to the right operand's bind_reverse_compare.
        return False

    def bind_reverse_compare(
        self,
        node: ast.Compare,
        left: expr,
        op: cmpop,
        right: expr,
        visitor: TypeBinder,
        type_ctx: Optional[Class],
    ) -> bool:
        visitor.set_type(op, visitor.type_env.DYNAMIC)
        # Identity and containment tests always produce a bool; anything
        # else may be overloaded and stays DYNAMIC.
        if isinstance(op, (ast.Is, ast.IsNot, ast.In, ast.NotIn)):
            visitor.set_type(node, visitor.type_env.bool.instance)
            return True
        visitor.set_type(node, visitor.type_env.DYNAMIC)
        return False

    def bind_binop(
        self, node: ast.BinOp, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> bool:
        return False

    def bind_reverse_binop(
        self, node: ast.BinOp, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> bool:
        # we'll set the type in case we're the only one called
        visitor.set_type(node, visitor.type_env.DYNAMIC)
        return False

    def bind_unaryop(
        self, node: ast.UnaryOp, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> None:
        # `not x` is always a bool; other unary ops may be overloaded.
        if isinstance(node.op, ast.Not):
            visitor.set_type(node, visitor.type_env.bool.instance)
        else:
            visitor.set_type(node, visitor.type_env.DYNAMIC)

    def bind_constant(self, node: ast.Constant, visitor: TypeBinder) -> None:
        # int/bool constants get literal-refined types; bool is checked
        # separately since it is a subclass of int.
        if type(node.value) is int:
            node_type = visitor.type_env.get_literal_type(
                visitor.type_env.int.instance, node.value
            )
        elif type(node.value) is bool:
            node_type = visitor.type_env.get_literal_type(
                visitor.type_env.bool.instance, node.value
            )
        else:
            node_type = visitor.type_env.constant_types[type(node.value)]
        visitor.set_type(node, node_type)

    def get_iter_type(self, node: ast.expr, visitor: TypeBinder) -> Value:
        """returns the type that is produced when iterating over this value"""
        return visitor.type_env.DYNAMIC

    def __repr__(self) -> str:
        return f"<{self.name}>"
class Class(Object["Class"]):
    """Represents a type object at compile time"""

    def __init__(
        self,
        type_name: TypeName,
        type_env: TypeEnvironment,
        bases: Optional[List[Class]] = None,
        instance: Optional[Value] = None,
        klass: Optional[Class] = None,
        members: Optional[Dict[str, Value]] = None,
        is_exact: bool = False,
        pytype: Optional[Type[object]] = None,
        is_final: bool = False,
        has_init_subclass: bool = False,
    ) -> None:
        # A Class's own klass defaults to builtins.type.
        super().__init__(klass or type_env.type)
        assert isinstance(bases, (type(None), list))
        self.type_name = type_name
        self.type_env = type_env
        # The Value produced by instantiating this type.
        self.instance: Value = instance or Object(self)
        self.bases: List[Class] = self._get_bases(bases)
        self._mro: Optional[List[Class]] = None
        # members are attributes or methods
        self.members: Dict[str, Value] = members or {}
        self.is_exact = is_exact
        self.is_final = is_final
        self.allow_weakrefs = False
        self.donotcompile = False
        # This will cause all built-in method calls on the type to be done dynamically
        self.dynamic_builtinmethod_dispatch = False
        self.pytype = pytype
        if self.pytype is not None:
            # Reflect members from the backing runtime type.
            self.make_type_dict()
        # True if this class overrides __init_subclass__
        self.has_init_subclass = has_init_subclass
        # track AST node of each member until finish_bind, for error reporting
        self._member_nodes: Dict[str, AST] = {}

    def _get_bases(self, bases: Optional[List[Class]]) -> List[Class]:
        # Default base is object; truncate the list at the first dynamic base.
        if bases is None:
            return [self.klass.type_env.object]
        ret = []
        for b in bases:
            ret.append(b)
            # Can't check for dynamic because that'd be a cyclic dependency
            if isinstance(b, DynamicClass):
                # If any of the defined bases is dynamic,
                # stop processing, because it doesn't matter
                # what the rest of them are.
                break
        return ret

    def make_type_dict(self) -> None:
        """Reflect method/function/getset descriptors from self.pytype into members."""
        pytype = self.pytype
        if pytype is None:
            return
        result: Dict[str, Value] = {}
        for k in pytype.__dict__.keys():
            # Constructors might set custom members, make sure to respect those.
            if k in self.members:
                continue
            try:
                obj = pytype.__dict__[k]
            except AttributeError:
                continue
            if isinstance(obj, (MethodDescriptorType, WrapperDescriptorType)):
                result[k] = reflect_method_desc(obj, self, self.type_env)
            elif isinstance(obj, BuiltinFunctionType):
                result[k] = reflect_builtin_function(obj, self, self.type_env)
            elif isinstance(obj, GetSetDescriptorType):
                result[k] = GetSetDescriptor(self.type_env.getset_descriptor)
        self.members.update(result)

    def make_subclass(self, name: TypeName, bases: List[Class]) -> Class:
        return Class(name, self.type_env, bases)

    # NOTE(review): name/instance_name/qualname/type_descr/mro are read as
    # plain attributes throughout this file, so these accessors are almost
    # certainly properties (mro likely a cached_property) in the original
    # source; decorators appear lost in transcription — confirm.
    def name(self) -> str:
        return f"Type[{self.instance_name}]"

    def name_with_exact(self) -> str:
        return f"Type[{self.instance_name_with_exact}]"

    def instance_name(self) -> str:
        # We need to break the loop for `builtins.type`, as `builtins.type`'s instance is a Class.
        if type(self.instance) == Class:
            return "type"
        return self.instance.name

    def instance_name_with_exact(self) -> str:
        name = self.instance.name
        if self.is_exact:
            return f"Exact[{name}]"
        return name

    def qualname(self) -> str:
        return self.type_name.qualname

    # ------------------------------------------------------------------
    # Declaration-phase hooks
    # ------------------------------------------------------------------

    def declare_class(self, node: ClassDef, klass: Class) -> None:
        self._member_nodes[node.name] = node
        self.members[node.name] = klass

    def declare_variable(self, node: AnnAssign, module: ModuleTable) -> None:
        # class C:
        #    x: foo
        target = node.target
        if isinstance(target, ast.Name):
            self.define_slot(
                target.id,
                target,
                TypeRef(module, self.qualname, node.annotation),
                # Note down whether the slot has been assigned a value.
                assignment=node if node.value else None,
                declared_on_class=True,
            )

    def declare_variables(self, node: Assign, module: ModuleTable) -> None:
        # Un-annotated class-level assignments are not tracked.
        pass

    def reflected_method_types(self, type_env: TypeEnvironment) -> Dict[str, Class]:
        # Subclasses map reflected method names to known return types.
        return {}

    def patch_reflected_method_types(self, type_env: TypeEnvironment) -> None:
        for name, return_type in self.reflected_method_types(type_env).items():
            member = self.members[name]
            assert isinstance(member, BuiltinMethodDescriptor)
            member.return_type = ResolvedTypeRef(return_type)

    def resolve_name(self, name: str) -> Optional[Value]:
        return self.members.get(name)

    def readable_name(self) -> str:
        return self.type_name.readable_name

    # ------------------------------------------------------------------
    # Generics
    # ------------------------------------------------------------------

    def is_generic_parameter(self) -> bool:
        """Returns True if this Class represents a generic parameter"""
        return False

    def contains_generic_parameters(self) -> bool:
        """Returns True if this class contains any generic parameters"""
        return False

    def is_generic_type(self) -> bool:
        """Returns True if this class is a generic type"""
        return False

    def is_generic_type_definition(self) -> bool:
        """Returns True if this class is a generic type definition.

        It'll be a generic type which still has unbound generic type
        parameters"""
        return False

    def generic_type_def(self) -> Optional[Class]:
        """Gets the generic type definition that defined this class"""
        return None

    def make_generic_type(
        self,
        index: Tuple[Class, ...],
    ) -> Optional[Class]:
        """Binds the generic type parameters to a generic type definition"""
        return None

    def resolve_attr(
        self, node: ast.Attribute, visitor: GenericVisitor[object]
    ) -> Optional[Value]:
        # Attribute access on the class itself: walk the MRO with a None
        # instance so descriptors see a class-level get.
        for base in self.mro:
            member = base.members.get(node.attr)
            if member is not None:
                res = member.resolve_descr_get(node, None, self, visitor)
                if res is not None:
                    return res
        return super().resolve_attr(node, visitor)

    def bind_binop(
        self, node: ast.BinOp, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> bool:
        # `A | B` at type level builds a Union type (PEP 604 style).
        if isinstance(node.op, ast.BitOr):
            rtype = visitor.get_type(node.right)
            if rtype is visitor.type_env.none.instance:
                rtype = visitor.type_env.none
            if rtype is visitor.type_env.DYNAMIC:
                rtype = visitor.type_env.dynamic
            if not isinstance(rtype, Class):
                visitor.syntax_error(
                    f"unsupported operand type(s) for |: {self.name} and {rtype.name}",
                    node,
                )
                return False
            union = visitor.type_env.get_union((self, rtype))
            visitor.set_type(node, union)
            return True
        return super().bind_binop(node, visitor, type_ctx)

    def can_be_narrowed(self) -> bool:
        return True

    def type_descr(self) -> TypeDescr:
        # Exact types are marked with a trailing "!" in their descriptor.
        if self.is_exact:
            return self.type_name.type_descr + ("!",)
        return self.type_name.type_descr

    def _resolve_dunder(self, name: str) -> Tuple[Class, Optional[Value]]:
        # Find the first class in the MRO defining `name`; a dynamic class
        # in the MRO means we know nothing, and falling off the end means
        # only object's default exists.
        klass = self.type_env.object
        for klass in self.mro:
            if klass is self.type_env.dynamic:
                return self.type_env.dynamic, None
            if val := klass.members.get(name):
                return klass, val
        assert klass.inexact_type() is self.type_env.object
        return self.type_env.object, None

    def bind_call(
        self, node: ast.Call, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> NarrowingEffect:
        """Type a constructor call, deciding whether `Cls(...)` can be
        emitted statically (TP_ALLOC + direct __init__/__new__) or must be
        a dynamic call.  The decision is stored as ClassCallInfo node data
        for emit_call."""
        self_type = self.instance
        new_mapping: Optional[ArgMapping] = None
        init_mapping: Optional[ArgMapping] = None

        dynamic_call = True
        klass, new = self._resolve_dunder("__new__")
        dynamic_new = klass is self.type_env.dynamic
        object_new = klass.inexact_type() is self.type_env.object
        if not object_new and isinstance(new, Callable):
            # __new__ receives the class itself as its first argument.
            new_mapping, self_type = new.map_call(
                node,
                visitor,
                None,
                [node.func] + node.args,
            )
            if new_mapping.can_call_statically():
                dynamic_call = False
            else:
                dynamic_new = True

        object_init = False
        # if __new__ returns something that isn't a subclass of
        # our type then __init__ isn't invoked
        if not dynamic_new and self_type.klass.can_assign_from(self.instance.klass):
            klass, init = self._resolve_dunder("__init__")
            dynamic_call = dynamic_call or klass is self.type_env.dynamic
            object_init = klass.inexact_type() is self.type_env.object
            if not object_init and isinstance(init, Callable):
                init_mapping = ArgMapping(init, node, visitor, None)
                init_mapping.bind_args(visitor, True)
                if init_mapping.can_call_statically():
                    dynamic_call = False

        if object_new and object_init:
            # Neither dunder is overridden: object() semantics, no args allowed.
            if node.args or node.keywords:
                visitor.syntax_error(f"{self.instance_name}() takes no arguments", node)
            else:
                dynamic_call = False

        if new_mapping is not None and init_mapping is not None:
            # If we have both a __new__ and __init__ function we can't currently
            # invoke it statically, as the arguments could have side effects.
            # In the future we could potentially do better by shuffling into
            # temporaries, but this is pretty rare.
            dynamic_call = True

        if not self.is_exact and not self.is_final:
            # A subclass could redefine the dunders, so stay dynamic.
            dynamic_call = True

        visitor.set_type(node, self_type)
        visitor.set_node_data(
            node, ClassCallInfo, ClassCallInfo(new_mapping, init_mapping, dynamic_call)
        )

        if dynamic_call:
            for arg in node.args:
                visitor.visitExpectedType(
                    arg, visitor.type_env.DYNAMIC, CALL_ARGUMENT_CANNOT_BE_PRIMITIVE
                )
            for arg in node.keywords:
                visitor.visitExpectedType(
                    arg.value,
                    visitor.type_env.DYNAMIC,
                    CALL_ARGUMENT_CANNOT_BE_PRIMITIVE,
                )

        return NO_EFFECT

    def emit_call(self, node: ast.Call, code_gen: Static310CodeGenerator) -> None:
        # Consume the decision recorded by bind_call.
        call_info = code_gen.get_node_data(node, ClassCallInfo)

        if call_info.dynamic_call:
            return super().emit_call(node, code_gen)

        new = call_info.new
        if new:
            new.emit(code_gen)
        else:
            code_gen.emit("TP_ALLOC", self.type_descr)

        init = call_info.init
        if init is not None:
            code_gen.emit("DUP_TOP")
            init.emit(code_gen)
            code_gen.emit("POP_TOP")  # pop None

    def can_assign_from(self, src: Class) -> bool:
        """checks to see if the src value can be assigned to this value. Currently
        you can assign a derived type to a base type.  You cannot assign a primitive
        type to an object type.

        At some point we may also support some form of interfaces via protocols if we
        implement a more efficient form of interface dispatch than doing the dictionary
        lookup for the member."""
        return src is self or (
            (not self.is_exact or src.instance.nonliteral() is self.instance)
            and not isinstance(src, CType)
            and src.instance.nonliteral().klass.is_subclass_of(self)
        )

    def __repr__(self) -> str:
        return f"<{self.name} class>"

    # ------------------------------------------------------------------
    # Exact/inexact variants
    # ------------------------------------------------------------------

    def exact(self) -> Class:
        return self

    def inexact(self) -> Class:
        return self

    def exact_type(self) -> Class:
        return self.type_env.get_exact_type(self)

    def inexact_type(self) -> Class:
        return self.type_env.get_inexact_type(self)

    def _create_exact_type(self) -> Class:
        # Clone this class with is_exact=True, sharing members/bases.
        instance = copy(self.instance)
        klass = type(self)(
            type_name=self.type_name,
            type_env=self.type_env,
            bases=self.bases,
            klass=self.klass,
            members=self.members,
            instance=instance,
            is_exact=True,
            pytype=self.pytype,
            is_final=self.is_final,
            has_init_subclass=self.has_init_subclass,
        )
        # We need to point the instance's klass to the new class we just created.
        instance.klass = klass
        # `donotcompile` and `allow_weakrefs` are set via decorators after construction, and we
        # need to persist these for consistency.
        klass.donotcompile = self.donotcompile
        klass.allow_weakrefs = self.allow_weakrefs
        return klass

    def isinstance(self, src: Value) -> bool:
        return src.klass.is_subclass_of(self)

    def is_subclass_of(self, src: Class) -> bool:
        if isinstance(src, UnionType):
            # This is an important subtlety - we want the subtyping relation to satisfy
            # self < A | B if either self < A or self < B. Requiring both wouldn't be correct,
            # as we want to allow assignments of A into A | B.
            return any(self.is_subclass_of(t) for t in src.type_args)
        return src.exact_type() in self.mro

    # ------------------------------------------------------------------
    # Override checking / finish_bind
    # ------------------------------------------------------------------

    def _check_compatible_property_override(
        self, override: Value, inherited: Value
    ) -> bool:
        # Properties can be overridden by cached properties, and vice-versa.
        valid_sync_override = isinstance(
            override, (CachedPropertyMethod, PropertyMethod)
        ) and isinstance(inherited, (CachedPropertyMethod, PropertyMethod))

        valid_async_override = isinstance(
            override, (AsyncCachedPropertyMethod, PropertyMethod)
        ) and isinstance(inherited, (AsyncCachedPropertyMethod, PropertyMethod))

        return valid_sync_override or valid_async_override

    def check_incompatible_override(
        self, override: Value, inherited: Value, module: ModuleTable
    ) -> None:
        # TODO: There's more checking we should be doing to ensure
        # this is a compatible override
        if isinstance(override, TransparentDecoratedMethod):
            override = override.function

        if not inherited.can_override(override, self, module):
            raise TypedSyntaxError(f"class cannot hide inherited member: {inherited!r}")

    def finish_bind(self, module: ModuleTable, klass: Class | None) -> Optional[Value]:
        # Run finish_bind on each member; members whose finish_bind returns
        # None are removed.  Re-scan because a member's finish_bind may add
        # new members.
        todo = set(self.members.keys())
        finished = set()

        while todo:
            name = todo.pop()
            my_value = self.members[name]
            new_value = self._finish_bind_one(name, my_value, module)

            if new_value is None:
                del self.members[name]
            else:
                self.members[name] = new_value

            finished.add(name)
            # account for the possibility that finish_bind of one member added new members
            todo.update(self.members.keys())
            todo.difference_update(finished)

        # These were just for error reporting here, don't need them anymore
        self._member_nodes = {}
        return self

    def _finish_bind_one(
        self, name: str, my_value: Value, module: ModuleTable
    ) -> Value | None:
        node = self.inexact_type()._member_nodes.get(name, None)
        with module.error_context(node):
            new_value = my_value.finish_bind(module, self)
            if new_value is None:
                return None
            my_value = new_value

            # The first MRO entry is this class itself; check overrides
            # against the remaining bases.
            for base in self.mro[1:]:
                value = base.members.get(name)
                if value is not None:
                    self.check_incompatible_override(my_value, value, module)
                if isinstance(value, Slot) and isinstance(my_value, Slot):
                    # use the base class slot
                    if value.is_final or not value.assigned_on_class:
                        return None
                    # For class values we are introducing a new slot which
                    # can be accessed from the derived type. We end up
                    # creating a slot with a default value so the value can
                    # be stored on the instance.
                    my_value.override = value
                    my_value.type_ref = value.type_ref

        return my_value

    def define_slot(
        self,
        name: str,
        node: AST,
        type_ref: Optional[TypeRef] = None,
        assignment: Optional[AST] = None,
        declared_on_class: bool = False,
    ) -> None:
        """Declare (or refine) a slot member; re-declaring with a second
        annotation or conflicting member kind is an error."""
        existing = self.members.get(name)
        if existing is None:
            self._member_nodes[name] = node
            self.members[name] = Slot(
                type_ref,
                name,
                self,
                assignment,
                declared_on_class=declared_on_class,
            )
        elif isinstance(existing, Slot):
            if not existing.type_ref:
                existing.type_ref = type_ref
                self._member_nodes[name] = node
            elif type_ref:
                raise TypedSyntaxError(
                    f"Cannot re-declare member '{name}' in '{self.instance.name}'"
                )
            existing.update(assignment, declared_on_class)
        else:
            raise TypedSyntaxError(
                f"slot conflicts with other member {name} in {self.name}"
            )

    def declare_function(self, func: Function) -> None:
        # Multiple defs of the same name (e.g. overloads) collapse into a
        # FunctionGroup.
        existing = self.members.get(func.func_name)
        new_member = func
        if existing is not None:
            if isinstance(existing, Function):
                new_member = FunctionGroup([existing, new_member], func.klass.type_env)
            elif isinstance(existing, FunctionGroup):
                existing.functions.append(new_member)
                new_member = existing
            else:
                raise TypedSyntaxError(
                    f"function conflicts with other member {func.func_name} in {self.name}"
                )

        func.set_container_type(self)
        self._member_nodes[func.func_name] = func.node
        self.members[func.func_name] = new_member

        if (
            func.func_name == "__init__"
            and isinstance(func, Function)
            and func.node.args.args
        ):
            node = func.node
            if isinstance(node, FunctionDef):
                # Discover instance attributes assigned via `self.x = ...`.
                InitVisitor(func.module, self, node).visit(node.body)

    def mro(self) -> Sequence[Class]:
        # Lazily computed; an unknown (falsy) base yields an empty MRO since
        # we can't compile against it.
        mro = self._mro
        if mro is None:
            if not all(self.bases):
                # TODO: We can't compile w/ unknown bases
                mro = []
            else:
                mro = _mro(self)
            self._mro = mro

        return mro

    def bind_generics(
        self,
        name: GenericTypeName,
        type_env: TypeEnvironment,
    ) -> Class:
        return self

    # ------------------------------------------------------------------
    # Member lookup helpers
    # ------------------------------------------------------------------

    def find_slot(self, node: ast.Attribute) -> Optional[Slot[Class]]:
        for base in self.mro:
            member = base.members.get(node.attr)
            if (
                member is not None
                and isinstance(member, Slot)
                and not member.is_classvar
            ):
                return member
        return None

    def get_own_member(self, name: str) -> Optional[Value]:
        return self.members.get(name)

    def get_parent_member(self, name: str) -> Optional[Value]:
        # the first entry of mro is the class itself
        for b in self.mro[1:]:
            slot = b.members.get(name, None)
            if slot:
                return slot

    def get_member(self, name: str) -> Optional[Value]:
        member = self.get_own_member(name)
        if member:
            return member
        return self.get_parent_member(name)

    def get_own_final_method_names(self) -> Sequence[str]:
        final_methods = []
        for name, value in self.members.items():
            if isinstance(value, DecoratedMethod) and value.is_final:
                final_methods.append(name)
            elif isinstance(value, Function) and value.is_final:
                final_methods.append(name)
        return final_methods

    def unwrap(self) -> Class:
        return self

    def emit_type_check(self, src: Class, code_gen: Static310CodeGenerator) -> None:
        # Only a dynamic source needs a runtime CAST; otherwise assignability
        # must already hold statically.
        if src is self.type_env.dynamic:
            code_gen.emit("CAST", self.type_descr)
        else:
            assert self.can_assign_from(src)

    def emit_extra_members(
        self, node: ClassDef, code_gen: Static310CodeGenerator
    ) -> None:
        pass
class GenericVisitor(ASTVisitor, Generic[TVisitRet]):
    """Base AST visitor bound to a ModuleTable: carries the compiler, error
    sink and type environment, routes errors through error_context, and
    tracks the "requester" qualname for dependency recording."""

    def __init__(self, module: ModuleTable) -> None:
        super().__init__()
        self.module = module
        self.module_name: str = module.name
        self.filename: str = module.filename
        self.compiler: Compiler = module.compiler
        self.error_sink: ErrorSink = module.compiler.error_sink
        self.type_env: TypeEnvironment = module.compiler.type_env
        # the qualname that should be the "requester" of types used (for dep tracking)
        self._context_qualname: str = ""
        # if true, all deps tracked in visiting should be considered decl deps
        self.force_decl_deps: bool = False

    # NOTE(review): read as `self.context_qualname` (no call) in
    # record_dependency below, so this is presumably a property in the
    # original source — confirm a decorator wasn't lost in transcription.
    def context_qualname(self) -> str:
        return self._context_qualname

    # NOTE(review): generator with try/finally restore semantics — almost
    # certainly decorated with @contextlib.contextmanager in the original
    # source (decorator apparently lost in transcription); verify.
    def temporary_context_qualname(
        self, qualname: str | None, force_decl: bool = False
    ) -> Generator[None, None, None]:
        # Temporarily switch the dep-tracking requester and decl-dep flag,
        # restoring both on exit even if the body raises.
        old_qualname = self._context_qualname
        self._context_qualname = qualname or ""
        old_decl = self.force_decl_deps
        self.force_decl_deps = force_decl
        try:
            yield
        finally:
            self._context_qualname = old_qualname
            self.force_decl_deps = old_decl

    def record_dependency(self, source: tuple[str, str]) -> None:
        self.module.record_dependency(
            self.context_qualname, source, force_decl=self.force_decl_deps
        )

    def visit(self, node: Union[AST, Sequence[AST]], *args: object) -> TVisitRet:
        # if we have a sequence of nodes, don't catch TypedSyntaxError here;
        # walk_list will call us back with each individual node in turn and we
        # can catch errors and add node info then.
        ctx = self.error_context(node) if isinstance(node, AST) else nullcontext()
        with ctx:
            return super().visit(node, *args)

    def syntax_error(self, msg: str, node: AST) -> None:
        return self.error_sink.syntax_error(msg, self.filename, node)

    def perf_warning(self, msg: str, node: AST) -> None:
        return self.error_sink.perf_warning(msg, self.filename, node)

    def error_context(self, node: Optional[AST]) -> ContextManager[None]:
        if node is None:
            return nullcontext()
        return self.error_sink.error_context(self.filename, node)

    # NOTE(review): same @contextmanager suspicion as
    # temporary_context_qualname above.
    def temporary_error_sink(self, sink: ErrorSink) -> Generator[None, None, None]:
        # Swap in a different error sink for the duration of the block.
        orig_sink = self.error_sink
        self.error_sink = sink
        try:
            yield
        finally:
            self.error_sink = orig_sink
def resolve_instance_attr_by_name(
    base: ast.expr,
    attr: str,
    inst: Object[Class],
    visitor: GenericVisitor[object],
) -> Optional[Value]:
    """Resolve attribute *attr* on *inst* by synthesizing an Attribute node.

    Builds an ``ast.Attribute`` in Load context over *base* and delegates
    to ``resolve_instance_attr``.
    """
    synthetic = ast.Attribute(base, attr, ast.Load())
    return resolve_instance_attr(synthetic, inst, visitor)
# --- segment 185,725 (dataset residue marker) ---
from __static__ import chkdict, chklist
import ast
import dataclasses
import sys
from ast import (
AnnAssign,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Bytes,
Call,
ClassDef,
cmpop,
Constant,
copy_location,
expr,
FunctionDef,
NameConstant,
Num,
Return,
Starred,
Str,
)
from copy import copy
from enum import Enum, IntEnum
from functools import cached_property
from types import (
BuiltinFunctionType,
GetSetDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
)
from typing import (
Callable as typingCallable,
cast,
ClassVar as typingClassVar,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from cinderx.static import ( # noqa: F401
FAST_LEN_ARRAY,
FAST_LEN_DICT,
FAST_LEN_INEXACT,
FAST_LEN_LIST,
FAST_LEN_SET,
FAST_LEN_STR,
FAST_LEN_TUPLE,
PRIM_OP_ADD_DBL,
PRIM_OP_ADD_INT,
PRIM_OP_AND_INT,
PRIM_OP_DIV_DBL,
PRIM_OP_DIV_INT,
PRIM_OP_DIV_UN_INT,
PRIM_OP_EQ_DBL,
PRIM_OP_EQ_INT,
PRIM_OP_GE_DBL,
PRIM_OP_GE_INT,
PRIM_OP_GE_UN_INT,
PRIM_OP_GT_DBL,
PRIM_OP_GT_INT,
PRIM_OP_GT_UN_INT,
PRIM_OP_INV_INT,
PRIM_OP_LE_DBL,
PRIM_OP_LE_INT,
PRIM_OP_LE_UN_INT,
PRIM_OP_LSHIFT_INT,
PRIM_OP_LT_DBL,
PRIM_OP_LT_INT,
PRIM_OP_LT_UN_INT,
PRIM_OP_MOD_DBL,
PRIM_OP_MOD_INT,
PRIM_OP_MOD_UN_INT,
PRIM_OP_MUL_DBL,
PRIM_OP_MUL_INT,
PRIM_OP_NE_DBL,
PRIM_OP_NE_INT,
PRIM_OP_NEG_DBL,
PRIM_OP_NEG_INT,
PRIM_OP_NOT_INT,
PRIM_OP_OR_INT,
PRIM_OP_POW_DBL,
PRIM_OP_POW_INT,
PRIM_OP_POW_UN_INT,
PRIM_OP_RSHIFT_INT,
PRIM_OP_RSHIFT_UN_INT,
PRIM_OP_SUB_DBL,
PRIM_OP_SUB_INT,
PRIM_OP_XOR_INT,
SEQ_ARRAY_INT64,
SEQ_CHECKED_LIST,
SEQ_LIST,
SEQ_LIST_INEXACT,
SEQ_REPEAT_INEXACT_NUM,
SEQ_REPEAT_INEXACT_SEQ,
SEQ_REPEAT_PRIMITIVE_NUM,
SEQ_REPEAT_REVERSED,
SEQ_SUBSCR_UNCHECKED,
SEQ_TUPLE,
TYPED_BOOL,
TYPED_DOUBLE,
TYPED_INT16,
TYPED_INT32,
TYPED_INT64,
TYPED_INT8,
TYPED_INT_64BIT,
TYPED_OBJECT,
TYPED_UINT16,
TYPED_UINT32,
TYPED_UINT64,
TYPED_UINT8,
)
from ..consts import CO_STATICALLY_COMPILED
from ..errors import TypedSyntaxError
from ..optimizer import AstOptimizer
from ..pyassem import Block, FVC_REPR
from ..pycodegen import CodeGenerator, FOR_LOOP
from ..symbols import FunctionScope
from ..unparse import to_expr
from ..visitor import ASTRewriter, TAst
from .effects import NarrowingEffect, NO_EFFECT, TypeState
from .visitor import GenericVisitor
class Class(Object["Class"]):
    """Represents a type object at compile time"""

    def __init__(
        self,
        type_name: TypeName,
        type_env: TypeEnvironment,
        bases: Optional[List[Class]] = None,
        instance: Optional[Value] = None,
        klass: Optional[Class] = None,
        members: Optional[Dict[str, Value]] = None,
        is_exact: bool = False,
        pytype: Optional[Type[object]] = None,
        is_final: bool = False,
        has_init_subclass: bool = False,
    ) -> None:
        super().__init__(klass or type_env.type)
        assert isinstance(bases, (type(None), list))
        self.type_name = type_name
        self.type_env = type_env
        # The Value representing instances of this class.
        self.instance: Value = instance or Object(self)
        self.bases: List[Class] = self._get_bases(bases)
        # Lazily computed by mro(); None until first request.
        self._mro: Optional[List[Class]] = None
        # members are attributes or methods
        self.members: Dict[str, Value] = members or {}
        self.is_exact = is_exact
        self.is_final = is_final
        self.allow_weakrefs = False
        self.donotcompile = False
        # This will cause all built-in method calls on the type to be done dynamically
        self.dynamic_builtinmethod_dispatch = False
        self.pytype = pytype
        if self.pytype is not None:
            self.make_type_dict()
        # True if this class overrides __init_subclass__
        self.has_init_subclass = has_init_subclass
        # track AST node of each member until finish_bind, for error reporting
        self._member_nodes: Dict[str, AST] = {}

    def _get_bases(self, bases: Optional[List[Class]]) -> List[Class]:
        """Normalize declared bases: default to object; stop after a dynamic base."""
        if bases is None:
            return [self.klass.type_env.object]
        ret = []
        for b in bases:
            ret.append(b)
            # Can't check for dynamic because that'd be a cyclic dependency
            if isinstance(b, DynamicClass):
                # If any of the defined bases is dynamic,
                # stop processing, because it doesn't matter
                # what the rest of them are.
                break
        return ret

    def make_type_dict(self) -> None:
        """Reflect members of the backing Python type (self.pytype) into self.members."""
        pytype = self.pytype
        if pytype is None:
            return
        result: Dict[str, Value] = {}
        for k in pytype.__dict__.keys():
            # Constructors might set custom members, make sure to respect those.
            if k in self.members:
                continue
            try:
                obj = pytype.__dict__[k]
            except AttributeError:
                continue
            if isinstance(obj, (MethodDescriptorType, WrapperDescriptorType)):
                result[k] = reflect_method_desc(obj, self, self.type_env)
            elif isinstance(obj, BuiltinFunctionType):
                result[k] = reflect_builtin_function(obj, self, self.type_env)
            elif isinstance(obj, GetSetDescriptorType):
                result[k] = GetSetDescriptor(self.type_env.getset_descriptor)
        self.members.update(result)

    def make_subclass(self, name: TypeName, bases: List[Class]) -> Class:
        """Create a new (plain) Class deriving from *bases*."""
        return Class(name, self.type_env, bases)

    # NOTE(review): the following accessors read like @property getters
    # (callers use e.g. `self.name`); decorators may have been lost in
    # extraction — confirm against upstream.
    def name(self) -> str:
        return f"Type[{self.instance_name}]"

    def name_with_exact(self) -> str:
        return f"Type[{self.instance_name_with_exact}]"

    def instance_name(self) -> str:
        # We need to break the loop for `builtins.type`, as `builtins.type`'s instance is a Class.
        if type(self.instance) == Class:
            return "type"
        return self.instance.name

    def instance_name_with_exact(self) -> str:
        name = self.instance.name
        if self.is_exact:
            return f"Exact[{name}]"
        return name

    def qualname(self) -> str:
        return self.type_name.qualname

    def declare_class(self, node: ClassDef, klass: Class) -> None:
        """Record a nested class declaration as a member."""
        self._member_nodes[node.name] = node
        self.members[node.name] = klass

    def declare_variable(self, node: AnnAssign, module: ModuleTable) -> None:
        """Declare an annotated class-body variable as a slot."""
        # class C:
        #    x: foo
        target = node.target
        if isinstance(target, ast.Name):
            self.define_slot(
                target.id,
                target,
                TypeRef(module, self.qualname, node.annotation),
                # Note down whether the slot has been assigned a value.
                assignment=node if node.value else None,
                declared_on_class=True,
            )

    def declare_variables(self, node: Assign, module: ModuleTable) -> None:
        # Un-annotated assignments declare nothing at the class level.
        pass

    def reflected_method_types(self, type_env: TypeEnvironment) -> Dict[str, Class]:
        """Map of reflected method name -> return type; subclasses override."""
        return {}

    def patch_reflected_method_types(self, type_env: TypeEnvironment) -> None:
        """Fix up return types of reflected builtin method descriptors."""
        for name, return_type in self.reflected_method_types(type_env).items():
            member = self.members[name]
            assert isinstance(member, BuiltinMethodDescriptor)
            member.return_type = ResolvedTypeRef(return_type)

    def resolve_name(self, name: str) -> Optional[Value]:
        return self.members.get(name)

    def readable_name(self) -> str:
        return self.type_name.readable_name

    def is_generic_parameter(self) -> bool:
        """Returns True if this Class represents a generic parameter"""
        return False

    def contains_generic_parameters(self) -> bool:
        """Returns True if this class contains any generic parameters"""
        return False

    def is_generic_type(self) -> bool:
        """Returns True if this class is a generic type"""
        return False

    def is_generic_type_definition(self) -> bool:
        """Returns True if this class is a generic type definition.

        It'll be a generic type which still has unbound generic type
        parameters"""
        return False

    def generic_type_def(self) -> Optional[Class]:
        """Gets the generic type definition that defined this class"""
        return None

    def make_generic_type(
        self,
        index: Tuple[Class, ...],
    ) -> Optional[Class]:
        """Binds the generic type parameters to a generic type definition"""
        return None

    def resolve_attr(
        self, node: ast.Attribute, visitor: GenericVisitor[object]
    ) -> Optional[Value]:
        """Resolve an attribute on the class object via MRO descriptor lookup."""
        for base in self.mro:
            member = base.members.get(node.attr)
            if member is not None:
                res = member.resolve_descr_get(node, None, self, visitor)
                if res is not None:
                    return res
        return super().resolve_attr(node, visitor)

    def bind_binop(
        self, node: ast.BinOp, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> bool:
        """Bind `X | Y` on class objects as a Union type expression."""
        if isinstance(node.op, ast.BitOr):
            rtype = visitor.get_type(node.right)
            if rtype is visitor.type_env.none.instance:
                rtype = visitor.type_env.none
            if rtype is visitor.type_env.DYNAMIC:
                rtype = visitor.type_env.dynamic
            if not isinstance(rtype, Class):
                visitor.syntax_error(
                    f"unsupported operand type(s) for |: {self.name} and {rtype.name}",
                    node,
                )
                return False
            union = visitor.type_env.get_union((self, rtype))
            visitor.set_type(node, union)
            return True
        return super().bind_binop(node, visitor, type_ctx)

    def can_be_narrowed(self) -> bool:
        return True

    def type_descr(self) -> TypeDescr:
        """Serialized type descriptor; exact types get a trailing "!"."""
        if self.is_exact:
            return self.type_name.type_descr + ("!",)
        return self.type_name.type_descr

    def _resolve_dunder(self, name: str) -> Tuple[Class, Optional[Value]]:
        """Find the MRO entry defining dunder *name*; (dynamic, None) if any base is dynamic."""
        klass = self.type_env.object
        for klass in self.mro:
            if klass is self.type_env.dynamic:
                return self.type_env.dynamic, None
            if val := klass.members.get(name):
                return klass, val
        assert klass.inexact_type() is self.type_env.object
        return self.type_env.object, None

    def bind_call(
        self, node: ast.Call, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> NarrowingEffect:
        """Type-check a constructor call, deciding static vs dynamic dispatch.

        Inspects __new__/__init__ resolved through the MRO; records the
        decision in a ClassCallInfo node-data entry for codegen.
        """
        self_type = self.instance
        new_mapping: Optional[ArgMapping] = None
        init_mapping: Optional[ArgMapping] = None
        dynamic_call = True
        klass, new = self._resolve_dunder("__new__")
        dynamic_new = klass is self.type_env.dynamic
        object_new = klass.inexact_type() is self.type_env.object
        if not object_new and isinstance(new, Callable):
            new_mapping, self_type = new.map_call(
                node,
                visitor,
                None,
                [node.func] + node.args,
            )
            if new_mapping.can_call_statically():
                dynamic_call = False
            else:
                dynamic_new = True
        object_init = False
        # if __new__ returns something that isn't a subclass of
        # our type then __init__ isn't invoked
        if not dynamic_new and self_type.klass.can_assign_from(self.instance.klass):
            klass, init = self._resolve_dunder("__init__")
            dynamic_call = dynamic_call or klass is self.type_env.dynamic
            object_init = klass.inexact_type() is self.type_env.object
            if not object_init and isinstance(init, Callable):
                init_mapping = ArgMapping(init, node, visitor, None)
                init_mapping.bind_args(visitor, True)
                if init_mapping.can_call_statically():
                    dynamic_call = False
        if object_new and object_init:
            if node.args or node.keywords:
                visitor.syntax_error(f"{self.instance_name}() takes no arguments", node)
            else:
                dynamic_call = False
        if new_mapping is not None and init_mapping is not None:
            # If we have both a __new__ and __init__ function we can't currently
            # invoke it statically, as the arguments could have side effects.
            # In the future we could potentially do better by shuffling into
            # temporaries, but this is pretty rare.
            dynamic_call = True
        if not self.is_exact and not self.is_final:
            # A subclass could be substituted at runtime, so we can't bind statically.
            dynamic_call = True
        visitor.set_type(node, self_type)
        visitor.set_node_data(
            node, ClassCallInfo, ClassCallInfo(new_mapping, init_mapping, dynamic_call)
        )
        if dynamic_call:
            for arg in node.args:
                visitor.visitExpectedType(
                    arg, visitor.type_env.DYNAMIC, CALL_ARGUMENT_CANNOT_BE_PRIMITIVE
                )
            for arg in node.keywords:
                visitor.visitExpectedType(
                    arg.value,
                    visitor.type_env.DYNAMIC,
                    CALL_ARGUMENT_CANNOT_BE_PRIMITIVE,
                )
        return NO_EFFECT

    def emit_call(self, node: ast.Call, code_gen: Static310CodeGenerator) -> None:
        """Emit bytecode for a constructor call using the bind_call decision."""
        call_info = code_gen.get_node_data(node, ClassCallInfo)
        if call_info.dynamic_call:
            return super().emit_call(node, code_gen)
        new = call_info.new
        if new:
            new.emit(code_gen)
        else:
            code_gen.emit("TP_ALLOC", self.type_descr)
        init = call_info.init
        if init is not None:
            code_gen.emit("DUP_TOP")
            init.emit(code_gen)
            code_gen.emit("POP_TOP")  # pop None

    def can_assign_from(self, src: Class) -> bool:
        """checks to see if the src value can be assigned to this value.  Currently
        you can assign a derived type to a base type.  You cannot assign a primitive
        type to an object type.

        At some point we may also support some form of interfaces via protocols if we
        implement a more efficient form of interface dispatch than doing the dictionary
        lookup for the member."""
        return src is self or (
            (not self.is_exact or src.instance.nonliteral() is self.instance)
            and not isinstance(src, CType)
            and src.instance.nonliteral().klass.is_subclass_of(self)
        )

    def __repr__(self) -> str:
        return f"<{self.name} class>"

    def exact(self) -> Class:
        return self

    def inexact(self) -> Class:
        return self

    def exact_type(self) -> Class:
        return self.type_env.get_exact_type(self)

    def inexact_type(self) -> Class:
        return self.type_env.get_inexact_type(self)

    def _create_exact_type(self) -> Class:
        """Clone this class (and its instance) into its exact variant."""
        instance = copy(self.instance)
        klass = type(self)(
            type_name=self.type_name,
            type_env=self.type_env,
            bases=self.bases,
            klass=self.klass,
            members=self.members,
            instance=instance,
            is_exact=True,
            pytype=self.pytype,
            is_final=self.is_final,
            has_init_subclass=self.has_init_subclass,
        )
        # We need to point the instance's klass to the new class we just created.
        instance.klass = klass
        # `donotcompile` and `allow_weakrefs` are set via decorators after construction, and we
        # need to persist these for consistency.
        klass.donotcompile = self.donotcompile
        klass.allow_weakrefs = self.allow_weakrefs
        return klass

    def isinstance(self, src: Value) -> bool:
        return src.klass.is_subclass_of(self)

    def is_subclass_of(self, src: Class) -> bool:
        if isinstance(src, UnionType):
            # This is an important subtlety - we want the subtyping relation to satisfy
            # self < A | B if either self < A or self < B. Requiring both wouldn't be correct,
            # as we want to allow assignments of A into A | B.
            return any(self.is_subclass_of(t) for t in src.type_args)
        return src.exact_type() in self.mro

    def _check_compatible_property_override(
        self, override: Value, inherited: Value
    ) -> bool:
        """True if *override* is a legal (a)sync property/cached-property override."""
        # Properties can be overridden by cached properties, and vice-versa.
        valid_sync_override = isinstance(
            override, (CachedPropertyMethod, PropertyMethod)
        ) and isinstance(inherited, (CachedPropertyMethod, PropertyMethod))
        valid_async_override = isinstance(
            override, (AsyncCachedPropertyMethod, PropertyMethod)
        ) and isinstance(inherited, (AsyncCachedPropertyMethod, PropertyMethod))
        return valid_sync_override or valid_async_override

    def check_incompatible_override(
        self, override: Value, inherited: Value, module: ModuleTable
    ) -> None:
        """Raise TypedSyntaxError if *override* cannot replace *inherited*."""
        # TODO: There's more checking we should be doing to ensure
        # this is a compatible override
        if isinstance(override, TransparentDecoratedMethod):
            override = override.function
        if not inherited.can_override(override, self, module):
            raise TypedSyntaxError(f"class cannot hide inherited member: {inherited!r}")

    def finish_bind(self, module: ModuleTable, klass: Class | None) -> Optional[Value]:
        """Finish binding every member, iterating until the member set is stable."""
        todo = set(self.members.keys())
        finished = set()
        while todo:
            name = todo.pop()
            my_value = self.members[name]
            new_value = self._finish_bind_one(name, my_value, module)
            if new_value is None:
                del self.members[name]
            else:
                self.members[name] = new_value
            finished.add(name)
            # account for the possibility that finish_bind of one member added new members
            todo.update(self.members.keys())
            todo.difference_update(finished)
        # These were just for error reporting here, don't need them anymore
        self._member_nodes = {}
        return self

    def _finish_bind_one(
        self, name: str, my_value: Value, module: ModuleTable
    ) -> Value | None:
        """Finish binding one member; returns the bound value or None to drop it."""
        node = self.inexact_type()._member_nodes.get(name, None)
        with module.error_context(node):
            new_value = my_value.finish_bind(module, self)
            if new_value is None:
                return None
            my_value = new_value

            for base in self.mro[1:]:
                value = base.members.get(name)
                if value is not None:
                    self.check_incompatible_override(my_value, value, module)
                if isinstance(value, Slot) and isinstance(my_value, Slot):
                    # use the base class slot
                    if value.is_final or not value.assigned_on_class:
                        return None
                    # For class values we are introducing a new slot which
                    # can be accessed from the derived type.  We end up
                    # creating a slot with a default value so the value can
                    # be stored on the instance.
                    my_value.override = value
                    my_value.type_ref = value.type_ref
        return my_value

    def define_slot(
        self,
        name: str,
        node: AST,
        type_ref: Optional[TypeRef] = None,
        assignment: Optional[AST] = None,
        declared_on_class: bool = False,
    ) -> None:
        """Declare (or refine) a slot member; rejects conflicting declarations."""
        existing = self.members.get(name)
        if existing is None:
            self._member_nodes[name] = node
            self.members[name] = Slot(
                type_ref,
                name,
                self,
                assignment,
                declared_on_class=declared_on_class,
            )
        elif isinstance(existing, Slot):
            if not existing.type_ref:
                existing.type_ref = type_ref
                self._member_nodes[name] = node
            elif type_ref:
                raise TypedSyntaxError(
                    f"Cannot re-declare member '{name}' in '{self.instance.name}'"
                )
            existing.update(assignment, declared_on_class)
        else:
            raise TypedSyntaxError(
                f"slot conflicts with other member {name} in {self.name}"
            )

    def declare_function(self, func: Function) -> None:
        """Declare a method; overloads merge into a FunctionGroup."""
        existing = self.members.get(func.func_name)
        new_member = func
        if existing is not None:
            if isinstance(existing, Function):
                new_member = FunctionGroup([existing, new_member], func.klass.type_env)
            elif isinstance(existing, FunctionGroup):
                existing.functions.append(new_member)
                new_member = existing
            else:
                raise TypedSyntaxError(
                    f"function conflicts with other member {func.func_name} in {self.name}"
                )
        func.set_container_type(self)
        self._member_nodes[func.func_name] = func.node
        self.members[func.func_name] = new_member
        # __init__ bodies may implicitly declare instance slots via self.x = ...
        if (
            func.func_name == "__init__"
            and isinstance(func, Function)
            and func.node.args.args
        ):
            node = func.node
            if isinstance(node, FunctionDef):
                InitVisitor(func.module, self, node).visit(node.body)

    def mro(self) -> Sequence[Class]:
        """Lazily computed C3 method resolution order (cached in self._mro)."""
        mro = self._mro
        if mro is None:
            if not all(self.bases):
                # TODO: We can't compile w/ unknown bases
                mro = []
            else:
                mro = _mro(self)
            self._mro = mro
        return mro

    def bind_generics(
        self,
        name: GenericTypeName,
        type_env: TypeEnvironment,
    ) -> Class:
        # Non-generic classes are unaffected by generic substitution.
        return self

    def find_slot(self, node: ast.Attribute) -> Optional[Slot[Class]]:
        """Find a non-ClassVar slot for the attribute, searching the MRO."""
        for base in self.mro:
            member = base.members.get(node.attr)
            if (
                member is not None
                and isinstance(member, Slot)
                and not member.is_classvar
            ):
                return member
        return None

    def get_own_member(self, name: str) -> Optional[Value]:
        return self.members.get(name)

    def get_parent_member(self, name: str) -> Optional[Value]:
        # the first entry of mro is the class itself
        for b in self.mro[1:]:
            slot = b.members.get(name, None)
            if slot:
                return slot

    def get_member(self, name: str) -> Optional[Value]:
        """Own member if present, otherwise first matching base member."""
        member = self.get_own_member(name)
        if member:
            return member
        return self.get_parent_member(name)

    def get_own_final_method_names(self) -> Sequence[str]:
        """Names of methods declared final directly on this class."""
        final_methods = []
        for name, value in self.members.items():
            if isinstance(value, DecoratedMethod) and value.is_final:
                final_methods.append(name)
            elif isinstance(value, Function) and value.is_final:
                final_methods.append(name)
        return final_methods

    def unwrap(self) -> Class:
        return self

    def emit_type_check(self, src: Class, code_gen: Static310CodeGenerator) -> None:
        """Emit a CAST when narrowing from dynamic; otherwise must be statically safe."""
        if src is self.type_env.dynamic:
            code_gen.emit("CAST", self.type_descr)
        else:
            assert self.can_assign_from(src)

    def emit_extra_members(
        self, node: ClassDef, code_gen: Static310CodeGenerator
    ) -> None:
        # Hook for subclasses; base classes emit nothing extra.
        pass
def _merge(seqs: Iterable[List[Class]]) -> List[Class]:
res = []
i = 0
while True:
nonemptyseqs = [seq for seq in seqs if seq]
if not nonemptyseqs:
return res
i += 1
cand = None
for seq in nonemptyseqs: # find merge candidates among seq heads
cand = seq[0]
nothead = [s for s in nonemptyseqs if cand in s[1:]]
if nothead:
cand = None # reject candidate
else:
break
if not cand:
types = {seq[0]: None for seq in nonemptyseqs}
raise SyntaxError(
"Cannot create a consistent method resolution order (MRO) for bases: "
+ ", ".join(t.name for t in types)
)
res.append(cand)
for seq in nonemptyseqs: # remove cand
if seq[0] == cand:
del seq[0]
# The helper below computes the class precedence list (MRO) for a
# compile-time `Class` according to the C3 linearization algorithm,
# delegating the merge step to `_merge` above.
def _mro(C: Class) -> List[Class]:
    """Compute the class precedence list (mro) according to C3"""
    exact_bases = [base.exact_type() for base in C.bases]
    linearizations: List[List[Class]] = [[C.exact_type()]]
    linearizations.extend(_mro(base) for base in exact_bases)
    linearizations.append(exact_bases)
    return _merge(linearizations)
# --- segment 185,726 (dataset residue marker) ---
from __static__ import chkdict, chklist
import ast
import dataclasses
import sys
from ast import (
AnnAssign,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Bytes,
Call,
ClassDef,
cmpop,
Constant,
copy_location,
expr,
FunctionDef,
NameConstant,
Num,
Return,
Starred,
Str,
)
from copy import copy
from enum import Enum, IntEnum
from functools import cached_property
from types import (
BuiltinFunctionType,
GetSetDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
)
from typing import (
Callable as typingCallable,
cast,
ClassVar as typingClassVar,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from cinderx.static import ( # noqa: F401
FAST_LEN_ARRAY,
FAST_LEN_DICT,
FAST_LEN_INEXACT,
FAST_LEN_LIST,
FAST_LEN_SET,
FAST_LEN_STR,
FAST_LEN_TUPLE,
PRIM_OP_ADD_DBL,
PRIM_OP_ADD_INT,
PRIM_OP_AND_INT,
PRIM_OP_DIV_DBL,
PRIM_OP_DIV_INT,
PRIM_OP_DIV_UN_INT,
PRIM_OP_EQ_DBL,
PRIM_OP_EQ_INT,
PRIM_OP_GE_DBL,
PRIM_OP_GE_INT,
PRIM_OP_GE_UN_INT,
PRIM_OP_GT_DBL,
PRIM_OP_GT_INT,
PRIM_OP_GT_UN_INT,
PRIM_OP_INV_INT,
PRIM_OP_LE_DBL,
PRIM_OP_LE_INT,
PRIM_OP_LE_UN_INT,
PRIM_OP_LSHIFT_INT,
PRIM_OP_LT_DBL,
PRIM_OP_LT_INT,
PRIM_OP_LT_UN_INT,
PRIM_OP_MOD_DBL,
PRIM_OP_MOD_INT,
PRIM_OP_MOD_UN_INT,
PRIM_OP_MUL_DBL,
PRIM_OP_MUL_INT,
PRIM_OP_NE_DBL,
PRIM_OP_NE_INT,
PRIM_OP_NEG_DBL,
PRIM_OP_NEG_INT,
PRIM_OP_NOT_INT,
PRIM_OP_OR_INT,
PRIM_OP_POW_DBL,
PRIM_OP_POW_INT,
PRIM_OP_POW_UN_INT,
PRIM_OP_RSHIFT_INT,
PRIM_OP_RSHIFT_UN_INT,
PRIM_OP_SUB_DBL,
PRIM_OP_SUB_INT,
PRIM_OP_XOR_INT,
SEQ_ARRAY_INT64,
SEQ_CHECKED_LIST,
SEQ_LIST,
SEQ_LIST_INEXACT,
SEQ_REPEAT_INEXACT_NUM,
SEQ_REPEAT_INEXACT_SEQ,
SEQ_REPEAT_PRIMITIVE_NUM,
SEQ_REPEAT_REVERSED,
SEQ_SUBSCR_UNCHECKED,
SEQ_TUPLE,
TYPED_BOOL,
TYPED_DOUBLE,
TYPED_INT16,
TYPED_INT32,
TYPED_INT64,
TYPED_INT8,
TYPED_INT_64BIT,
TYPED_OBJECT,
TYPED_UINT16,
TYPED_UINT32,
TYPED_UINT64,
TYPED_UINT8,
)
from ..consts import CO_STATICALLY_COMPILED
from ..errors import TypedSyntaxError
from ..optimizer import AstOptimizer
from ..pyassem import Block, FVC_REPR
from ..pycodegen import CodeGenerator, FOR_LOOP
from ..symbols import FunctionScope
from ..unparse import to_expr
from ..visitor import ASTRewriter, TAst
from .effects import NarrowingEffect, NO_EFFECT, TypeState
from .visitor import GenericVisitor
def is_subsequence(a: Iterable[object], b: Iterable[object]) -> bool:
    """Return True if the elements of *b* occur, in order, within *a*.

    Uses a single shared iterator over *a*: each ``in`` test consumes the
    iterator up to (and including) the match, so ordering is enforced in
    one pass.
    https://ericlippert.com/2020/03/27/new-grad-vs-senior-dev/
    """
    remaining = iter(a)
    return all(item in remaining for item in b)
# --- segment 185,727 (dataset residue marker) ---
from __static__ import chkdict, chklist
import ast
import dataclasses
import sys
from ast import (
AnnAssign,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Bytes,
Call,
ClassDef,
cmpop,
Constant,
copy_location,
expr,
FunctionDef,
NameConstant,
Num,
Return,
Starred,
Str,
)
from copy import copy
from enum import Enum, IntEnum
from functools import cached_property
from types import (
BuiltinFunctionType,
GetSetDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
)
from typing import (
Callable as typingCallable,
cast,
ClassVar as typingClassVar,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from cinderx.static import ( # noqa: F401
FAST_LEN_ARRAY,
FAST_LEN_DICT,
FAST_LEN_INEXACT,
FAST_LEN_LIST,
FAST_LEN_SET,
FAST_LEN_STR,
FAST_LEN_TUPLE,
PRIM_OP_ADD_DBL,
PRIM_OP_ADD_INT,
PRIM_OP_AND_INT,
PRIM_OP_DIV_DBL,
PRIM_OP_DIV_INT,
PRIM_OP_DIV_UN_INT,
PRIM_OP_EQ_DBL,
PRIM_OP_EQ_INT,
PRIM_OP_GE_DBL,
PRIM_OP_GE_INT,
PRIM_OP_GE_UN_INT,
PRIM_OP_GT_DBL,
PRIM_OP_GT_INT,
PRIM_OP_GT_UN_INT,
PRIM_OP_INV_INT,
PRIM_OP_LE_DBL,
PRIM_OP_LE_INT,
PRIM_OP_LE_UN_INT,
PRIM_OP_LSHIFT_INT,
PRIM_OP_LT_DBL,
PRIM_OP_LT_INT,
PRIM_OP_LT_UN_INT,
PRIM_OP_MOD_DBL,
PRIM_OP_MOD_INT,
PRIM_OP_MOD_UN_INT,
PRIM_OP_MUL_DBL,
PRIM_OP_MUL_INT,
PRIM_OP_NE_DBL,
PRIM_OP_NE_INT,
PRIM_OP_NEG_DBL,
PRIM_OP_NEG_INT,
PRIM_OP_NOT_INT,
PRIM_OP_OR_INT,
PRIM_OP_POW_DBL,
PRIM_OP_POW_INT,
PRIM_OP_POW_UN_INT,
PRIM_OP_RSHIFT_INT,
PRIM_OP_RSHIFT_UN_INT,
PRIM_OP_SUB_DBL,
PRIM_OP_SUB_INT,
PRIM_OP_XOR_INT,
SEQ_ARRAY_INT64,
SEQ_CHECKED_LIST,
SEQ_LIST,
SEQ_LIST_INEXACT,
SEQ_REPEAT_INEXACT_NUM,
SEQ_REPEAT_INEXACT_SEQ,
SEQ_REPEAT_PRIMITIVE_NUM,
SEQ_REPEAT_REVERSED,
SEQ_SUBSCR_UNCHECKED,
SEQ_TUPLE,
TYPED_BOOL,
TYPED_DOUBLE,
TYPED_INT16,
TYPED_INT32,
TYPED_INT64,
TYPED_INT8,
TYPED_INT_64BIT,
TYPED_OBJECT,
TYPED_UINT16,
TYPED_UINT32,
TYPED_UINT64,
TYPED_UINT8,
)
from ..consts import CO_STATICALLY_COMPILED
from ..errors import TypedSyntaxError
from ..optimizer import AstOptimizer
from ..pyassem import Block, FVC_REPR
from ..pycodegen import CodeGenerator, FOR_LOOP
from ..symbols import FunctionScope
from ..unparse import to_expr
from ..visitor import ASTRewriter, TAst
from .effects import NarrowingEffect, NO_EFFECT, TypeState
from .visitor import GenericVisitor
class Class(Object["Class"]):
"""Represents a type object at compile time"""
def __init__(
self,
type_name: TypeName,
type_env: TypeEnvironment,
bases: Optional[List[Class]] = None,
instance: Optional[Value] = None,
klass: Optional[Class] = None,
members: Optional[Dict[str, Value]] = None,
is_exact: bool = False,
pytype: Optional[Type[object]] = None,
is_final: bool = False,
has_init_subclass: bool = False,
) -> None:
super().__init__(klass or type_env.type)
assert isinstance(bases, (type(None), list))
self.type_name = type_name
self.type_env = type_env
self.instance: Value = instance or Object(self)
self.bases: List[Class] = self._get_bases(bases)
self._mro: Optional[List[Class]] = None
# members are attributes or methods
self.members: Dict[str, Value] = members or {}
self.is_exact = is_exact
self.is_final = is_final
self.allow_weakrefs = False
self.donotcompile = False
# This will cause all built-in method calls on the type to be done dynamically
self.dynamic_builtinmethod_dispatch = False
self.pytype = pytype
if self.pytype is not None:
self.make_type_dict()
# True if this class overrides __init_subclass__
self.has_init_subclass = has_init_subclass
# track AST node of each member until finish_bind, for error reporting
self._member_nodes: Dict[str, AST] = {}
def _get_bases(self, bases: Optional[List[Class]]) -> List[Class]:
if bases is None:
return [self.klass.type_env.object]
ret = []
for b in bases:
ret.append(b)
# Can't check for dynamic because that'd be a cyclic dependency
if isinstance(b, DynamicClass):
# If any of the defined bases is dynamic,
# stop processing, because it doesn't matter
# what the rest of them are.
break
return ret
def make_type_dict(self) -> None:
pytype = self.pytype
if pytype is None:
return
result: Dict[str, Value] = {}
for k in pytype.__dict__.keys():
# Constructors might set custom members, make sure to respect those.
if k in self.members:
continue
try:
obj = pytype.__dict__[k]
except AttributeError:
continue
if isinstance(obj, (MethodDescriptorType, WrapperDescriptorType)):
result[k] = reflect_method_desc(obj, self, self.type_env)
elif isinstance(obj, BuiltinFunctionType):
result[k] = reflect_builtin_function(obj, self, self.type_env)
elif isinstance(obj, GetSetDescriptorType):
result[k] = GetSetDescriptor(self.type_env.getset_descriptor)
self.members.update(result)
def make_subclass(self, name: TypeName, bases: List[Class]) -> Class:
return Class(name, self.type_env, bases)
def name(self) -> str:
return f"Type[{self.instance_name}]"
def name_with_exact(self) -> str:
return f"Type[{self.instance_name_with_exact}]"
def instance_name(self) -> str:
# We need to break the loop for `builtins.type`, as `builtins.type`'s instance is a Class.
if type(self.instance) == Class:
return "type"
return self.instance.name
def instance_name_with_exact(self) -> str:
name = self.instance.name
if self.is_exact:
return f"Exact[{name}]"
return name
def qualname(self) -> str:
return self.type_name.qualname
def declare_class(self, node: ClassDef, klass: Class) -> None:
self._member_nodes[node.name] = node
self.members[node.name] = klass
def declare_variable(self, node: AnnAssign, module: ModuleTable) -> None:
# class C:
# x: foo
target = node.target
if isinstance(target, ast.Name):
self.define_slot(
target.id,
target,
TypeRef(module, self.qualname, node.annotation),
# Note down whether the slot has been assigned a value.
assignment=node if node.value else None,
declared_on_class=True,
)
def declare_variables(self, node: Assign, module: ModuleTable) -> None:
pass
def reflected_method_types(self, type_env: TypeEnvironment) -> Dict[str, Class]:
return {}
def patch_reflected_method_types(self, type_env: TypeEnvironment) -> None:
for name, return_type in self.reflected_method_types(type_env).items():
member = self.members[name]
assert isinstance(member, BuiltinMethodDescriptor)
member.return_type = ResolvedTypeRef(return_type)
def resolve_name(self, name: str) -> Optional[Value]:
return self.members.get(name)
def readable_name(self) -> str:
return self.type_name.readable_name
def is_generic_parameter(self) -> bool:
"""Returns True if this Class represents a generic parameter"""
return False
def contains_generic_parameters(self) -> bool:
"""Returns True if this class contains any generic parameters"""
return False
def is_generic_type(self) -> bool:
"""Returns True if this class is a generic type"""
return False
def is_generic_type_definition(self) -> bool:
"""Returns True if this class is a generic type definition.
It'll be a generic type which still has unbound generic type
parameters"""
return False
def generic_type_def(self) -> Optional[Class]:
"""Gets the generic type definition that defined this class"""
return None
def make_generic_type(
self,
index: Tuple[Class, ...],
) -> Optional[Class]:
"""Binds the generic type parameters to a generic type definition"""
return None
def resolve_attr(
self, node: ast.Attribute, visitor: GenericVisitor[object]
) -> Optional[Value]:
for base in self.mro:
member = base.members.get(node.attr)
if member is not None:
res = member.resolve_descr_get(node, None, self, visitor)
if res is not None:
return res
return super().resolve_attr(node, visitor)
def bind_binop(
self, node: ast.BinOp, visitor: TypeBinder, type_ctx: Optional[Class]
) -> bool:
if isinstance(node.op, ast.BitOr):
rtype = visitor.get_type(node.right)
if rtype is visitor.type_env.none.instance:
rtype = visitor.type_env.none
if rtype is visitor.type_env.DYNAMIC:
rtype = visitor.type_env.dynamic
if not isinstance(rtype, Class):
visitor.syntax_error(
f"unsupported operand type(s) for |: {self.name} and {rtype.name}",
node,
)
return False
union = visitor.type_env.get_union((self, rtype))
visitor.set_type(node, union)
return True
return super().bind_binop(node, visitor, type_ctx)
def can_be_narrowed(self) -> bool:
return True
def type_descr(self) -> TypeDescr:
if self.is_exact:
return self.type_name.type_descr + ("!",)
return self.type_name.type_descr
def _resolve_dunder(self, name: str) -> Tuple[Class, Optional[Value]]:
klass = self.type_env.object
for klass in self.mro:
if klass is self.type_env.dynamic:
return self.type_env.dynamic, None
if val := klass.members.get(name):
return klass, val
assert klass.inexact_type() is self.type_env.object
return self.type_env.object, None
def bind_call(
    self, node: ast.Call, visitor: TypeBinder, type_ctx: Optional[Class]
) -> NarrowingEffect:
    """Bind a call to this class, i.e. an instantiation.

    Resolves __new__/__init__ along the MRO and decides whether the
    construction can be emitted statically or must fall back to a dynamic
    call. The decision (plus the bound argument mappings) is recorded as
    ClassCallInfo node data for emit_call to consume.
    """
    self_type = self.instance
    new_mapping: Optional[ArgMapping] = None
    init_mapping: Optional[ArgMapping] = None
    dynamic_call = True
    klass, new = self._resolve_dunder("__new__")
    dynamic_new = klass is self.type_env.dynamic
    object_new = klass.inexact_type() is self.type_env.object
    if not object_new and isinstance(new, Callable):
        # __new__ takes the class itself as its first argument, hence
        # prepending node.func to the argument list.
        new_mapping, self_type = new.map_call(
            node,
            visitor,
            None,
            [node.func] + node.args,
        )
        if new_mapping.can_call_statically():
            dynamic_call = False
        else:
            dynamic_new = True
    object_init = False
    # if __new__ returns something that isn't a subclass of
    # our type then __init__ isn't invoked
    if not dynamic_new and self_type.klass.can_assign_from(self.instance.klass):
        klass, init = self._resolve_dunder("__init__")
        dynamic_call = dynamic_call or klass is self.type_env.dynamic
        object_init = klass.inexact_type() is self.type_env.object
        if not object_init and isinstance(init, Callable):
            init_mapping = ArgMapping(init, node, visitor, None)
            init_mapping.bind_args(visitor, True)
            if init_mapping.can_call_statically():
                dynamic_call = False
    if object_new and object_init:
        # Both inherited straight from object: no arguments accepted.
        if node.args or node.keywords:
            visitor.syntax_error(f"{self.instance_name}() takes no arguments", node)
        else:
            dynamic_call = False
    if new_mapping is not None and init_mapping is not None:
        # If we have both a __new__ and __init__ function we can't currently
        # invoke it statically, as the arguments could have side effects.
        # In the future we could potentially do better by shuffling into
        # temporaries, but this is pretty rare.
        dynamic_call = True
    if not self.is_exact and not self.is_final:
        # A subclass could override __new__/__init__, so only exact or
        # final classes may be constructed statically.
        dynamic_call = True
    visitor.set_type(node, self_type)
    visitor.set_node_data(
        node, ClassCallInfo, ClassCallInfo(new_mapping, init_mapping, dynamic_call)
    )
    if dynamic_call:
        # Dynamic calls box everything; primitive arguments are illegal.
        for arg in node.args:
            visitor.visitExpectedType(
                arg, visitor.type_env.DYNAMIC, CALL_ARGUMENT_CANNOT_BE_PRIMITIVE
            )
        for arg in node.keywords:
            visitor.visitExpectedType(
                arg.value,
                visitor.type_env.DYNAMIC,
                CALL_ARGUMENT_CANNOT_BE_PRIMITIVE,
            )
    return NO_EFFECT
def emit_call(self, node: ast.Call, code_gen: Static310CodeGenerator) -> None:
    """Emit instantiation bytecode using the plan recorded by bind_call."""
    call_info = code_gen.get_node_data(node, ClassCallInfo)
    if call_info.dynamic_call:
        return super().emit_call(node, code_gen)
    new = call_info.new
    if new:
        new.emit(code_gen)
    else:
        # No custom __new__: allocate the instance directly.
        code_gen.emit("TP_ALLOC", self.type_descr)
    init = call_info.init
    if init is not None:
        # Duplicate so the instance stays on the stack across __init__.
        code_gen.emit("DUP_TOP")
        init.emit(code_gen)
        code_gen.emit("POP_TOP")  # pop None
def can_assign_from(self, src: Class) -> bool:
    """checks to see if the src value can be assigned to this value. Currently
    you can assign a derived type to a base type. You cannot assign a primitive
    type to an object type.
    At some point we may also support some form of interfaces via protocols if we
    implement a more efficient form of interface dispatch than doing the dictionary
    lookup for the member."""
    return src is self or (
        # An exact target only accepts (the nonliteral form of) itself...
        (not self.is_exact or src.instance.nonliteral() is self.instance)
        # ...primitives never flow into object types...
        and not isinstance(src, CType)
        # ...otherwise ordinary subclassing applies.
        and src.instance.nonliteral().klass.is_subclass_of(self)
    )
def __repr__(self) -> str:
    # e.g. "<builtins.str class>"
    return f"<{self.name} class>"
def exact(self) -> Class:
    """Value-level exactness conversion; an identity on Class."""
    return self
def inexact(self) -> Class:
    """Value-level inexactness conversion; an identity on Class."""
    return self
def exact_type(self) -> Class:
    """Exact variant of this class, from the shared type environment."""
    env = self.type_env
    return env.get_exact_type(self)
def inexact_type(self) -> Class:
    """Inexact variant of this class, from the shared type environment."""
    env = self.type_env
    return env.get_inexact_type(self)
def _create_exact_type(self) -> Class:
    """Clone this class as its exact variant (is_exact=True).

    The instance is shallow-copied and re-pointed at the clone; members
    and bases are shared with the inexact original.
    """
    instance = copy(self.instance)
    klass = type(self)(
        type_name=self.type_name,
        type_env=self.type_env,
        bases=self.bases,
        klass=self.klass,
        members=self.members,
        instance=instance,
        is_exact=True,
        pytype=self.pytype,
        is_final=self.is_final,
        has_init_subclass=self.has_init_subclass,
    )
    # We need to point the instance's klass to the new class we just created.
    instance.klass = klass
    # `donotcompile` and `allow_weakrefs` are set via decorators after construction, and we
    # need to persist these for consistency.
    klass.donotcompile = self.donotcompile
    klass.allow_weakrefs = self.allow_weakrefs
    return klass
def isinstance(self, src: Value) -> bool:
    """True if `src`'s class is this class or one of its subclasses."""
    src_klass = src.klass
    return src_klass.is_subclass_of(self)
def is_subclass_of(self, src: Class) -> bool:
    """True if this class is a subtype of `src` (MRO-based, union-aware)."""
    if isinstance(src, UnionType):
        # This is an important subtlety - we want the subtyping relation to satisfy
        # self < A | B if either self < A or self < B. Requiring both wouldn't be correct,
        # as we want to allow assignments of A into A | B.
        return any(self.is_subclass_of(t) for t in src.type_args)
    # MROs store exact types, so compare against src's exact form.
    return src.exact_type() in self.mro
def _check_compatible_property_override(
    self, override: Value, inherited: Value
) -> bool:
    """True when `override` may legally replace an inherited property.

    Sync and async property flavors are checked separately; a sync
    property may not override an async one (or vice versa).
    """
    # Properties can be overridden by cached properties, and vice-versa.
    valid_sync_override = isinstance(
        override, (CachedPropertyMethod, PropertyMethod)
    ) and isinstance(inherited, (CachedPropertyMethod, PropertyMethod))
    valid_async_override = isinstance(
        override, (AsyncCachedPropertyMethod, PropertyMethod)
    ) and isinstance(inherited, (AsyncCachedPropertyMethod, PropertyMethod))
    return valid_sync_override or valid_async_override
def check_incompatible_override(
    self, override: Value, inherited: Value, module: ModuleTable
) -> None:
    """Raise TypedSyntaxError when `override` illegally hides `inherited`."""
    # TODO: There's more checking we should be doing to ensure
    # this is a compatible override
    if isinstance(override, TransparentDecoratedMethod):
        # Compare the undecorated function, not the decorator wrapper.
        override = override.function
    if not inherited.can_override(override, self, module):
        raise TypedSyntaxError(f"class cannot hide inherited member: {inherited!r}")
def finish_bind(self, module: ModuleTable, klass: Class | None) -> Optional[Value]:
    """Finish binding every member, removing members that resolve away.

    Runs to a fixed point because finish-binding one member may declare
    new members on this class.
    """
    todo = set(self.members.keys())
    finished = set()
    while todo:
        name = todo.pop()
        my_value = self.members[name]
        new_value = self._finish_bind_one(name, my_value, module)
        if new_value is None:
            del self.members[name]
        else:
            self.members[name] = new_value
        finished.add(name)
        # account for the possibility that finish_bind of one member added new members
        todo.update(self.members.keys())
        todo.difference_update(finished)
    # These were just for error reporting here, don't need them anymore
    self._member_nodes = {}
    return self
def _finish_bind_one(
    self, name: str, my_value: Value, module: ModuleTable
) -> Value | None:
    """Finish binding a single member and validate it against base classes.

    Returns the (possibly replaced) member, or None when the member
    should be dropped (e.g. a slot that defers to the base class's slot).
    """
    node = self.inexact_type()._member_nodes.get(name, None)
    # error_context attributes any raised error to the declaring node.
    with module.error_context(node):
        new_value = my_value.finish_bind(module, self)
        if new_value is None:
            return None
        my_value = new_value
        for base in self.mro[1:]:
            value = base.members.get(name)
            if value is not None:
                self.check_incompatible_override(my_value, value, module)
            if isinstance(value, Slot) and isinstance(my_value, Slot):
                # use the base class slot
                if value.is_final or not value.assigned_on_class:
                    return None
                # For class values we are introducing a new slot which
                # can be accessed from the derived type. We end up
                # creating a slot with a default value so the value can
                # be stored on the instance.
                my_value.override = value
                my_value.type_ref = value.type_ref
    return my_value
def define_slot(
    self,
    name: str,
    node: AST,
    type_ref: Optional[TypeRef] = None,
    assignment: Optional[AST] = None,
    declared_on_class: bool = False,
) -> None:
    """Declare (or refine) an instance slot named `name`.

    A slot may be introduced by an annotation (type_ref set) and/or an
    assignment. A later declaration may only add missing information;
    re-declaring a slot's type, or colliding with a non-slot member, is
    a TypedSyntaxError.
    """
    existing = self.members.get(name)
    if existing is None:
        self._member_nodes[name] = node
        self.members[name] = Slot(
            type_ref,
            name,
            self,
            assignment,
            declared_on_class=declared_on_class,
        )
    elif isinstance(existing, Slot):
        if not existing.type_ref:
            # First annotation for a previously assignment-only slot.
            existing.type_ref = type_ref
            self._member_nodes[name] = node
        elif type_ref:
            raise TypedSyntaxError(
                f"Cannot re-declare member '{name}' in '{self.instance.name}'"
            )
        existing.update(assignment, declared_on_class)
    else:
        raise TypedSyntaxError(
            f"slot conflicts with other member {name} in {self.name}"
        )
def declare_function(self, func: Function) -> None:
    """Register a declared function/method on this class.

    Repeated declarations of the same name are collected into a
    FunctionGroup; a clash with a non-function member is an error.
    """
    existing = self.members.get(func.func_name)
    new_member = func
    if existing is not None:
        if isinstance(existing, Function):
            new_member = FunctionGroup([existing, new_member], func.klass.type_env)
        elif isinstance(existing, FunctionGroup):
            existing.functions.append(new_member)
            new_member = existing
        else:
            raise TypedSyntaxError(
                f"function conflicts with other member {func.func_name} in {self.name}"
            )
    func.set_container_type(self)
    self._member_nodes[func.func_name] = func.node
    self.members[func.func_name] = new_member
    if (
        func.func_name == "__init__"
        and isinstance(func, Function)
        and func.node.args.args
    ):
        node = func.node
        if isinstance(node, FunctionDef):
            # NOTE(review): InitVisitor is defined elsewhere; presumably it
            # scans __init__'s body for self.<attr> assignments — confirm.
            InitVisitor(func.module, self, node).visit(node.body)
def mro(self) -> Sequence[Class]:
    # Lazily computed and cached in self._mro. Call sites in this file
    # use `self.mro` without calling it, so this must be wrapped in a
    # property-style decorator (not visible in this chunk).
    mro = self._mro
    if mro is None:
        if not all(self.bases):
            # TODO: We can't compile w/ unknown bases
            mro = []
        else:
            mro = _mro(self)  # linearization helper defined elsewhere
        self._mro = mro
    return mro
def bind_generics(
    self,
    name: GenericTypeName,
    type_env: TypeEnvironment,
) -> Class:
    """Non-generic classes are unaffected by generic binding."""
    return self
def find_slot(self, node: ast.Attribute) -> Optional[Slot[Class]]:
    """Find the instance Slot backing `node.attr`, if one exists.

    Walks the MRO; ClassVar slots and non-slot members are skipped.
    """
    attr_name = node.attr
    for base in self.mro:
        candidate = base.members.get(attr_name)
        if candidate is None:
            continue
        if isinstance(candidate, Slot) and not candidate.is_classvar:
            return candidate
    return None
def get_own_member(self, name: str) -> Optional[Value]:
    """Member defined directly on this class (no MRO search)."""
    members = self.members
    return members.get(name)
def get_parent_member(self, name: str) -> Optional[Value]:
    """Search base classes for a member named `name`.

    The first entry of the MRO is this class itself, so it is skipped.
    Returns the first truthy member found, or None.
    """
    for base in self.mro[1:]:
        # Any kind of member counts here, not just slots.
        member = base.members.get(name, None)
        if member:
            return member
    # Explicit None instead of the previous implicit fall-through, for
    # consistency with the annotated Optional return type.
    return None
def get_member(self, name: str) -> Optional[Value]:
    """Member lookup: this class's own members first, then inherited ones."""
    return self.get_own_member(name) or self.get_parent_member(name)
def get_own_final_method_names(self) -> Sequence[str]:
    """Names of members declared on this class that are final methods."""
    return [
        name
        for name, value in self.members.items()
        if isinstance(value, (DecoratedMethod, Function)) and value.is_final
    ]
def unwrap(self) -> Class:
    """Plain classes wrap nothing; unwrapping is the identity."""
    return self
def emit_type_check(self, src: Class, code_gen: Static310CodeGenerator) -> None:
    """Emit a runtime CAST when the source type is `dynamic`.

    Static-to-static conversions need no check: the binder must already
    have proven assignability, which the assert re-verifies.
    """
    if src is not self.type_env.dynamic:
        assert self.can_assign_from(src)
        return
    code_gen.emit("CAST", self.type_descr)
def emit_extra_members(
    self, node: ClassDef, code_gen: Static310CodeGenerator
) -> None:
    """Hook for subclasses; plain classes emit nothing extra."""
    return None
def resolve_assign_error_msg(
    dest: Class, src: Class, reason: str = "type mismatch: {} cannot be assigned to {}"
) -> str:
    """Format an assignment-error message for assigning `src` into `dest`.

    When only exactness makes the assignment invalid (the inexact form of
    `dest` would accept `src`), the exactness-qualified names are used so
    the message points at the actual difference.
    """
    if dest.inexact_type().can_assign_from(src):
        names = (src.instance.name_with_exact, dest.instance.name_with_exact)
    else:
        names = (src.instance.name, dest.instance.name)
    return reason.format(*names)
from __future__ import annotations
from __static__ import chkdict, chklist
import ast
import dataclasses
import sys
from ast import (
AnnAssign,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Bytes,
Call,
ClassDef,
cmpop,
Constant,
copy_location,
expr,
FunctionDef,
NameConstant,
Num,
Return,
Starred,
Str,
)
from copy import copy
from enum import Enum, IntEnum
from functools import cached_property
from types import (
BuiltinFunctionType,
GetSetDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
)
from typing import (
Callable as typingCallable,
cast,
ClassVar as typingClassVar,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from cinderx.static import ( # noqa: F401
FAST_LEN_ARRAY,
FAST_LEN_DICT,
FAST_LEN_INEXACT,
FAST_LEN_LIST,
FAST_LEN_SET,
FAST_LEN_STR,
FAST_LEN_TUPLE,
PRIM_OP_ADD_DBL,
PRIM_OP_ADD_INT,
PRIM_OP_AND_INT,
PRIM_OP_DIV_DBL,
PRIM_OP_DIV_INT,
PRIM_OP_DIV_UN_INT,
PRIM_OP_EQ_DBL,
PRIM_OP_EQ_INT,
PRIM_OP_GE_DBL,
PRIM_OP_GE_INT,
PRIM_OP_GE_UN_INT,
PRIM_OP_GT_DBL,
PRIM_OP_GT_INT,
PRIM_OP_GT_UN_INT,
PRIM_OP_INV_INT,
PRIM_OP_LE_DBL,
PRIM_OP_LE_INT,
PRIM_OP_LE_UN_INT,
PRIM_OP_LSHIFT_INT,
PRIM_OP_LT_DBL,
PRIM_OP_LT_INT,
PRIM_OP_LT_UN_INT,
PRIM_OP_MOD_DBL,
PRIM_OP_MOD_INT,
PRIM_OP_MOD_UN_INT,
PRIM_OP_MUL_DBL,
PRIM_OP_MUL_INT,
PRIM_OP_NE_DBL,
PRIM_OP_NE_INT,
PRIM_OP_NEG_DBL,
PRIM_OP_NEG_INT,
PRIM_OP_NOT_INT,
PRIM_OP_OR_INT,
PRIM_OP_POW_DBL,
PRIM_OP_POW_INT,
PRIM_OP_POW_UN_INT,
PRIM_OP_RSHIFT_INT,
PRIM_OP_RSHIFT_UN_INT,
PRIM_OP_SUB_DBL,
PRIM_OP_SUB_INT,
PRIM_OP_XOR_INT,
SEQ_ARRAY_INT64,
SEQ_CHECKED_LIST,
SEQ_LIST,
SEQ_LIST_INEXACT,
SEQ_REPEAT_INEXACT_NUM,
SEQ_REPEAT_INEXACT_SEQ,
SEQ_REPEAT_PRIMITIVE_NUM,
SEQ_REPEAT_REVERSED,
SEQ_SUBSCR_UNCHECKED,
SEQ_TUPLE,
TYPED_BOOL,
TYPED_DOUBLE,
TYPED_INT16,
TYPED_INT32,
TYPED_INT64,
TYPED_INT8,
TYPED_INT_64BIT,
TYPED_OBJECT,
TYPED_UINT16,
TYPED_UINT32,
TYPED_UINT64,
TYPED_UINT8,
)
from ..consts import CO_STATICALLY_COMPILED
from ..errors import TypedSyntaxError
from ..optimizer import AstOptimizer
from ..pyassem import Block, FVC_REPR
from ..pycodegen import CodeGenerator, FOR_LOOP
from ..symbols import FunctionScope
from ..unparse import to_expr
from ..visitor import ASTRewriter, TAst
from .effects import NarrowingEffect, NO_EFFECT, TypeState
from .visitor import GenericVisitor
# Back-compat shim matching CPython's deprecated ast.Num node.
# NOTE(review): `_ABC`/`_new` are not defined in this chunk — this mirrors
# CPython's Lib/ast.py deprecation shims; confirm their origin.
class Num(Constant, metaclass=_ABC):
    _fields = ('n',)
    __new__ = _new
# Back-compat shim matching CPython's deprecated ast.Str node
# (see the `_ABC`/`_new` note on Num above applies identically here).
class Str(Constant, metaclass=_ABC):
    _fields = ('s',)
    __new__ = _new
# Back-compat shim matching CPython's deprecated ast.Bytes node.
class Bytes(Constant, metaclass=_ABC):
    _fields = ('s',)
    __new__ = _new
# Back-compat shim matching CPython's deprecated ast.NameConstant node
# (True/False/None constants); no extra fields beyond Constant's.
class NameConstant(Constant, metaclass=_ABC):
    __new__ = _new
class AstOptimizer(ASTRewriter):
    """Constant-folding AST rewriter.

    Folds constant unary/binary/subscript expressions, rewrites constant
    list/set displays in iteration position into tuples/frozensets, and
    substitutes the value of `__debug__`. All folding is best-effort:
    operations that raise are left for runtime.
    """

    def __init__(self, optimize: bool = False, string_anns: bool = False) -> None:
        super().__init__()
        # optimize=True replaces `__debug__` with False (python -O semantics).
        self.optimize = optimize
        # string_anns=True skips annotation fields entirely.
        self.string_anns = string_anns

    def skip_field(self, node: ast.AST, field: str) -> bool:
        """Don't descend into annotation fields when string_anns is set."""
        if self.string_anns:
            if (
                isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef))
                and field == "returns"
            ):
                return True
            if isinstance(node, ast.arg) and field == "annotation":
                return True
            if isinstance(node, ast.AnnAssign) and field == "annotation":
                return True
        return False

    def visitUnaryOp(self, node: ast.UnaryOp) -> ast.expr:
        """Fold constant operands; rewrite `not (a <cmp> b)` to the inverse cmp."""
        op = self.visit(node.operand)
        if isinstance(op, Constant):
            # UNARY_OPS / INVERSE_OPS / BIN_OPS below are lookup tables
            # keyed by AST operator type, defined elsewhere in this module.
            conv = UNARY_OPS[type(node.op)]
            try:
                return copy_location(Constant(conv(op.value)), node)
            except Exception:
                # Folding failed (e.g. bad operand type): keep the node.
                pass
        elif (
            isinstance(node.op, ast.Not)
            and isinstance(op, ast.Compare)
            and len(op.ops) == 1
        ):
            cmp_op = op.ops[0]
            new_op = INVERSE_OPS.get(type(cmp_op))
            if new_op is not None:
                return self.update_node(op, ops=[new_op()])
        return self.update_node(node, operand=op)

    def visitBinOp(self, node: ast.BinOp) -> ast.expr:
        """Fold binary operations over two constant operands."""
        left = self.visit(node.left)
        right = self.visit(node.right)
        if isinstance(left, Constant) and isinstance(right, Constant):
            handler = BIN_OPS.get(type(node.op))
            if handler is not None:
                try:
                    return copy_location(
                        Constant(handler(left.value, right.value)), node
                    )
                except Exception:
                    # e.g. division by zero: leave for runtime to raise.
                    pass
        return self.update_node(node, left=left, right=right)

    def makeConstTuple(self, elts: Iterable[ast.expr]) -> Optional[Constant]:
        """Collapse all-constant elements into a single tuple Constant."""
        if all(isinstance(elt, Constant) for elt in elts):
            # pyre-ignore[16]: each elt is a constant at this point.
            return Constant(tuple(elt.value for elt in elts))
        return None

    def visitTuple(self, node: ast.Tuple) -> ast.expr:
        """Fold an all-constant tuple display (Load context only)."""
        elts = self.walk_list(node.elts)
        if isinstance(node.ctx, ast.Load):
            res = self.makeConstTuple(elts)
            if res is not None:
                return copy_location(res, node)
        return self.update_node(node, elts=elts)

    def visitSubscript(self, node: ast.Subscript) -> ast.expr:
        """Fold constant[constant] subscripts (Load context only)."""
        value = self.visit(node.value)
        slice = self.visit(node.slice)
        if (
            isinstance(node.ctx, ast.Load)
            and isinstance(value, Constant)
            and isinstance(slice, Constant)
        ):
            try:
                return copy_location(
                    Constant(value.value[slice.value]),
                    node,
                )
            except Exception:
                # Out-of-range / unhashable etc.: leave for runtime.
                pass
        return self.update_node(node, value=value, slice=slice)

    def _visitIter(self, node: ast.expr) -> ast.expr:
        """Optimize an expression that is only ever iterated.

        Lists become tuples (or a tuple Constant); sets become frozenset
        Constants — cheaper to build and safe because the value cannot be
        mutated in iteration position.
        """
        if isinstance(node, ast.List):
            elts = self.walk_list(node.elts)
            res = self.makeConstTuple(elts)
            if res is not None:
                return copy_location(res, node)
            if not any(isinstance(e, ast.Starred) for e in elts):
                return copy_location(ast.Tuple(elts=elts, ctx=node.ctx), node)
            return self.update_node(node, elts=elts)
        elif isinstance(node, ast.Set):
            elts = self.walk_list(node.elts)
            res = self.makeConstTuple(elts)
            if res is not None:
                return copy_location(Constant(frozenset(res.value)), node)
            return self.update_node(node, elts=elts)
        return self.generic_visit(node)

    def visitcomprehension(self, node: ast.comprehension) -> ast.comprehension:
        """Apply the iteration optimization to a comprehension's iterable."""
        target = self.visit(node.target)
        iter = self.visit(node.iter)
        ifs = self.walk_list(node.ifs)
        iter = self._visitIter(iter)
        return self.update_node(node, target=target, iter=iter, ifs=ifs)

    def visitFor(self, node: ast.For) -> ast.For:
        """Apply the iteration optimization to a for-loop's iterable."""
        target = self.visit(node.target)
        iter = self.visit(node.iter)
        body = self.walk_list(node.body)
        orelse = self.walk_list(node.orelse)
        iter = self._visitIter(iter)
        return self.update_node(
            node, target=target, iter=iter, body=body, orelse=orelse
        )

    def visitCompare(self, node: ast.Compare) -> ast.expr:
        """Optimize the right-hand side of a final `in`/`not in` comparison."""
        left = self.visit(node.left)
        comparators = self.walk_list(node.comparators)
        if isinstance(node.ops[-1], (ast.In, ast.NotIn)):
            new_iter = self._visitIter(comparators[-1])
            if new_iter is not None and new_iter is not comparators[-1]:
                comparators = list(comparators)
                comparators[-1] = new_iter
        return self.update_node(node, left=left, comparators=comparators)

    def visitName(self, node: ast.Name) -> ast.Name | ast.Constant:
        """Substitute `__debug__` with its compile-time value."""
        if node.id == "__debug__":
            return copy_location(Constant(not self.optimize), node)
        return self.generic_visit(node)

    def visitNamedExpr(self, node: ast.NamedExpr) -> ast.NamedExpr:
        # Walrus targets must not be folded; only descend into children.
        return self.generic_visit(node)
def get_default_value(default: expr) -> object:
    """Extract the concrete Python value of a default-argument AST node.

    Non-constant defaults are first run through the optimizer in the hope
    of folding them to a constant; anything still non-constant is returned
    as the AST node itself. The legacy Str/Num/Bytes node classes are
    handled alongside Constant.
    """
    constant_nodes = (Constant, Str, Num, Bytes, NameConstant, ast.Ellipsis)
    if not isinstance(default, constant_nodes):
        default = AstOptimizer().visit(default)

    if isinstance(default, Str):
        return default.s
    if isinstance(default, Num):
        return default.n
    if isinstance(default, Bytes):
        return default.s
    if isinstance(default, ast.Ellipsis):
        return ...
    if isinstance(default, (ast.Constant, ast.NameConstant)):
        return default.value
    return default
from __future__ import annotations
from __static__ import chkdict, chklist
import ast
import dataclasses
import sys
from ast import (
AnnAssign,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Bytes,
Call,
ClassDef,
cmpop,
Constant,
copy_location,
expr,
FunctionDef,
NameConstant,
Num,
Return,
Starred,
Str,
)
from copy import copy
from enum import Enum, IntEnum
from functools import cached_property
from types import (
BuiltinFunctionType,
GetSetDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
)
from typing import (
Callable as typingCallable,
cast,
ClassVar as typingClassVar,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from cinderx.static import ( # noqa: F401
FAST_LEN_ARRAY,
FAST_LEN_DICT,
FAST_LEN_INEXACT,
FAST_LEN_LIST,
FAST_LEN_SET,
FAST_LEN_STR,
FAST_LEN_TUPLE,
PRIM_OP_ADD_DBL,
PRIM_OP_ADD_INT,
PRIM_OP_AND_INT,
PRIM_OP_DIV_DBL,
PRIM_OP_DIV_INT,
PRIM_OP_DIV_UN_INT,
PRIM_OP_EQ_DBL,
PRIM_OP_EQ_INT,
PRIM_OP_GE_DBL,
PRIM_OP_GE_INT,
PRIM_OP_GE_UN_INT,
PRIM_OP_GT_DBL,
PRIM_OP_GT_INT,
PRIM_OP_GT_UN_INT,
PRIM_OP_INV_INT,
PRIM_OP_LE_DBL,
PRIM_OP_LE_INT,
PRIM_OP_LE_UN_INT,
PRIM_OP_LSHIFT_INT,
PRIM_OP_LT_DBL,
PRIM_OP_LT_INT,
PRIM_OP_LT_UN_INT,
PRIM_OP_MOD_DBL,
PRIM_OP_MOD_INT,
PRIM_OP_MOD_UN_INT,
PRIM_OP_MUL_DBL,
PRIM_OP_MUL_INT,
PRIM_OP_NE_DBL,
PRIM_OP_NE_INT,
PRIM_OP_NEG_DBL,
PRIM_OP_NEG_INT,
PRIM_OP_NOT_INT,
PRIM_OP_OR_INT,
PRIM_OP_POW_DBL,
PRIM_OP_POW_INT,
PRIM_OP_POW_UN_INT,
PRIM_OP_RSHIFT_INT,
PRIM_OP_RSHIFT_UN_INT,
PRIM_OP_SUB_DBL,
PRIM_OP_SUB_INT,
PRIM_OP_XOR_INT,
SEQ_ARRAY_INT64,
SEQ_CHECKED_LIST,
SEQ_LIST,
SEQ_LIST_INEXACT,
SEQ_REPEAT_INEXACT_NUM,
SEQ_REPEAT_INEXACT_SEQ,
SEQ_REPEAT_PRIMITIVE_NUM,
SEQ_REPEAT_REVERSED,
SEQ_SUBSCR_UNCHECKED,
SEQ_TUPLE,
TYPED_BOOL,
TYPED_DOUBLE,
TYPED_INT16,
TYPED_INT32,
TYPED_INT64,
TYPED_INT8,
TYPED_INT_64BIT,
TYPED_OBJECT,
TYPED_UINT16,
TYPED_UINT32,
TYPED_UINT64,
TYPED_UINT8,
)
from ..consts import CO_STATICALLY_COMPILED
from ..errors import TypedSyntaxError
from ..optimizer import AstOptimizer
from ..pyassem import Block, FVC_REPR
from ..pycodegen import CodeGenerator, FOR_LOOP
from ..symbols import FunctionScope
from ..unparse import to_expr
from ..visitor import ASTRewriter, TAst
from .effects import NarrowingEffect, NO_EFFECT, TypeState
from .visitor import GenericVisitor
class TypeEnvironment:
def __init__(self) -> None:
    """Construct every built-in Class/Value known to the static compiler.

    Construction order is load-bearing: `type` and `object` are
    hand-bootstrapped first because each depends on the other, and most
    later types reference earlier ones (e.g. exception bases).
    """
    # Caches for derived types: generic instantiations, literal types,
    # and the exact<->inexact pairings.
    self._generic_types: GenericTypesDict = {}
    self._literal_types: Dict[Tuple[Value, object], Value] = {}
    self._nonliteral_types: Dict[Value, Value] = {}
    self._exact_types: Dict[Class, Class] = {}
    self._inexact_types: Dict[Class, Class] = {}
    # Bringing up the type system is a little special as we have dependencies
    # amongst type and object
    self.type: Class = Class.__new__(Class)
    self.type.type_name = TypeName("builtins", "type")
    self.type.type_env = self
    self.type.klass = self.type
    self.type.instance = self.type
    self.type.members = {}
    self.type.is_exact = False
    self.type.is_final = False
    self.type.allow_weakrefs = False
    self.type.donotcompile = False
    self.type._mro = None
    self.type.pytype = type
    self.type._member_nodes = {}
    self.type.dynamic_builtinmethod_dispatch = False
    self.type.has_init_subclass = False
    self.object: Class = BuiltinObject(
        TypeName("builtins", "object"),
        self,
        bases=[],
    )
    self.type.bases = [self.object]
    self.dynamic = DynamicClass(self)
    self.builtin_method_desc = Class(
        TypeName("types", "MethodDescriptorType"),
        self,
        is_exact=True,
    )
    self.builtin_method = Class(
        TypeName("types", "BuiltinMethodType"), self, is_exact=True
    )
    self.getset_descriptor: Class = Class(
        TypeName("builtins", "getset_descriptor"), self, [self.object]
    )
    # We special case make_type_dict() on object for bootstrapping purposes.
    self.object.pytype = object
    self.object.members["__class__"] = ClassGetSetDescriptor(self.getset_descriptor)
    self.object.make_type_dict()
    self.type.make_type_dict()
    self.type.members["__name__"] = TypeDunderNameGetSetDescriptor(
        self.getset_descriptor, self
    )
    self.getset_descriptor.pytype = GetSetDescriptorType
    self.getset_descriptor.make_type_dict()
    # Core builtin value types.
    self.str = StrClass(self)
    self.int = NumClass(TypeName("builtins", "int"), self, pytype=int)
    self.float = NumClass(TypeName("builtins", "float"), self, pytype=float)
    self.complex = NumClass(TypeName("builtins", "complex"), self, pytype=complex)
    self.bytes = Class(
        TypeName("builtins", "bytes"), self, [self.object], pytype=bytes
    )
    self.bool: Class = BoolClass(self)
    self.cbool: CIntType = CIntType(TYPED_BOOL, self, name_override="cbool")
    self.range: Class = Class(
        TypeName("builtins", "range"), self, [self.object], pytype=range
    )
    # Primitive (unboxed) integer types.
    self.int8: CIntType = CIntType(TYPED_INT8, self)
    self.int16: CIntType = CIntType(TYPED_INT16, self)
    self.int32: CIntType = CIntType(TYPED_INT32, self)
    self.int64: CIntType = CIntType(TYPED_INT64, self)
    self.uint8: CIntType = CIntType(TYPED_UINT8, self)
    self.uint16: CIntType = CIntType(TYPED_UINT16, self)
    self.uint32: CIntType = CIntType(TYPED_UINT32, self)
    self.uint64: CIntType = CIntType(TYPED_UINT64, self)
    # TODO uses of these to check if something is a CInt wrongly exclude literals
    self.signed_cint_types: Sequence[CIntType] = [
        self.int8,
        self.int16,
        self.int32,
        self.int64,
    ]
    self.unsigned_cint_types: Sequence[CIntType] = [
        self.uint8,
        self.uint16,
        self.uint32,
        self.uint64,
    ]
    self.all_cint_types: Sequence[CIntType] = (
        self.signed_cint_types + self.unsigned_cint_types
    )
    self.none = NoneType(self)
    self.optional = OptionalType(self)
    # Names resolvable from source annotations.
    self.name_to_type: Mapping[str, Class] = {
        "NoneType": self.none,
        "object": self.object,
        "str": self.str,
        "__static__.int8": self.int8,
        "__static__.int16": self.int16,
        "__static__.int32": self.int32,
        "__static__.int64": self.int64,
        "__static__.uint8": self.uint8,
        "__static__.uint16": self.uint16,
        "__static__.uint32": self.uint32,
        "__static__.uint64": self.uint64,
    }
    # NOTE(review): spamobj/XXGeneric come from the test-only
    # xxclassloader module (import not visible in this chunk) — confirm.
    if spamobj is not None:
        self.spam_obj: Optional[GenericClass] = GenericClass(
            GenericTypeName(
                "xxclassloader", "spamobj", (GenericParameter("T", 0, self),)
            ),
            self,
            [self.object],
            pytype=spamobj,
        )
    else:
        self.spam_obj = None
    checked_dict_type_name = GenericTypeName(
        "__static__",
        "chkdict",
        (GenericParameter("K", 0, self), GenericParameter("V", 1, self)),
    )
    checked_list_type_name = GenericTypeName(
        "__static__", "chklist", (GenericParameter("T", 0, self),)
    )
    self.checked_dict = CheckedDict(
        checked_dict_type_name,
        self,
        [self.object],
        pytype=chkdict,
        is_exact=True,
    )
    self.checked_list = CheckedList(
        checked_list_type_name,
        self,
        [self.object],
        pytype=chklist,
        is_exact=True,
    )
    self.ellipsis = Class(
        TypeName("builtins", "ellipsis"),
        self,
        [self.object],
        pytype=type(...),
        is_exact=True,
    )
    # Builtin container and callable types.
    self.dict = DictClass(self, is_exact=False)
    self.list = ListClass(self)
    self.set = SetClass(self, is_exact=False)
    self.frozenset = FrozenSetClass(self, is_exact=False)
    self.tuple = TupleClass(self)
    self.function = Class(TypeName("types", "FunctionType"), self, is_exact=True)
    self.method = Class(TypeName("types", "MethodType"), self, is_exact=True)
    self.member = Class(
        TypeName("types", "MemberDescriptorType"), self, is_exact=True
    )
    self.slice = Class(TypeName("builtins", "slice"), self, is_exact=True)
    self.super = SuperClass(self, is_exact=True)
    self.char = CIntType(TYPED_INT8, self, name_override="char")
    self.module = ModuleType(self)
    self.double = CDoubleType(self)
    self.array = ArrayClass(
        GenericTypeName("__static__", "Array", (GenericParameter("T", 0, self),)),
        self,
        is_exact=True,
    )
    # We have found no strong reason (yet) to support arrays of other types of
    # primitives
    self.allowed_array_types: List[Class] = [
        self.int64,
    ]
    # Decorators understood by the static compiler.
    self.static_method = StaticMethodDecorator(
        TypeName("builtins", "staticmethod"),
        self,
        pytype=staticmethod,
    )
    self.class_method = ClassMethodDecorator(
        TypeName("builtins", "classmethod"),
        self,
        pytype=classmethod,
    )
    self.final_method = TypingFinalDecorator(TypeName("typing", "final"), self)
    self.awaitable = AwaitableType(self)
    self.union = UnionType(self)
    self.final = FinalClass(
        GenericTypeName("typing", "Final", (GenericParameter("T", 0, self),)), self
    )
    self.classvar = ClassVar(
        GenericTypeName("typing", "ClassVar", (GenericParameter("T", 0, self),)),
        self,
    )
    self.initvar = InitVar(
        GenericTypeName(
            "dataclasses", "InitVar", (GenericParameter("T", 0, self),)
        ),
        self,
    )
    self.exact = ExactClass(
        GenericTypeName("typing", "Exact", (GenericParameter("T", 0, self),)), self
    )
    self.named_tuple = Class(TypeName("typing", "NamedTuple"), self)
    self.protocol = Class(TypeName("typing", "Protocol"), self)
    self.typed_dict = Class(TypeName("typing", "TypedDict"), self)
    self.literal = LiteralType(TypeName("typing", "Literal"), self)
    self.annotated = AnnotatedType(TypeName("typing", "Annotated"), self)
    self.not_implemented = Class(
        TypeName("builtins", "NotImplementedType"),
        self,
        bases=[self.object],
        pytype=type(NotImplemented),
    )
    # Exception hierarchy (mirrors the builtin exception class tree).
    self.base_exception = Class(
        TypeName("builtins", "BaseException"), self, pytype=BaseException
    )
    self.exception = Class(
        TypeName("builtins", "Exception"),
        self,
        bases=[self.base_exception],
        pytype=Exception,
    )
    self.value_error: Class = self._builtin_exception_class(ValueError)
    self.os_error: Class = self._builtin_exception_class(OSError)
    self.runtime_error: Class = self._builtin_exception_class(RuntimeError)
    self.syntax_error: Class = self._builtin_exception_class(SyntaxError)
    self.arithmetic_error: Class = self._builtin_exception_class(ArithmeticError)
    self.assertion_error: Class = self._builtin_exception_class(AssertionError)
    self.attribute_error: Class = self._builtin_exception_class(AttributeError)
    self.blocking_io_error: Class = self._builtin_exception_class(
        BlockingIOError, base=self.os_error
    )
    self.buffer_error: Class = self._builtin_exception_class(BufferError)
    self.child_process_error: Class = self._builtin_exception_class(
        ChildProcessError, base=self.os_error
    )
    self.connection_error: Class = self._builtin_exception_class(
        ConnectionError, base=self.os_error
    )
    self.broken_pipe_error: Class = self._builtin_exception_class(
        BrokenPipeError, self.connection_error
    )
    self.connection_aborted_error: Class = self._builtin_exception_class(
        ConnectionAbortedError, base=self.connection_error
    )
    self.connection_refused_error: Class = self._builtin_exception_class(
        ConnectionRefusedError, base=self.connection_error
    )
    self.connection_reset_error: Class = self._builtin_exception_class(
        ConnectionResetError, base=self.connection_error
    )
    self.environment_error: Class = self._builtin_exception_class(EnvironmentError)
    self.eof_error: Class = self._builtin_exception_class(EOFError)
    self.file_exists_error: Class = self._builtin_exception_class(
        FileExistsError, base=self.os_error
    )
    self.file_not_found_error: Class = self._builtin_exception_class(
        FileNotFoundError, base=self.os_error
    )
    self.floating_point_error: Class = self._builtin_exception_class(
        FloatingPointError, base=self.arithmetic_error
    )
    self.generator_exit: Class = self._builtin_exception_class(
        GeneratorExit, base=self.base_exception
    )
    self.import_error: Class = self._builtin_exception_class(ImportError)
    self.indentation_error: Class = self._builtin_exception_class(
        IndentationError, base=self.syntax_error
    )
    self.index_error: Class = self._builtin_exception_class(IndexError)
    self.interrupted_error: Class = self._builtin_exception_class(
        InterruptedError, base=self.os_error
    )
    self.io_error: Class = self._builtin_exception_class(IOError)
    self.is_a_directory_error: Class = self._builtin_exception_class(
        IsADirectoryError, base=self.os_error
    )
    self.key_error: Class = self._builtin_exception_class(KeyError)
    self.keyboard_interrupt: Class = self._builtin_exception_class(
        KeyboardInterrupt, base=self.base_exception
    )
    self.lookup_error: Class = self._builtin_exception_class(LookupError)
    self.memory_error: Class = self._builtin_exception_class(MemoryError)
    self.module_not_found_error: Class = self._builtin_exception_class(
        ModuleNotFoundError, base=self.import_error
    )
    self.name_error: Class = self._builtin_exception_class(NameError)
    self.not_a_directory_error: Class = self._builtin_exception_class(
        NotADirectoryError, base=self.os_error
    )
    self.not_implemented_error: Class = self._builtin_exception_class(
        NotImplementedError, base=self.runtime_error
    )
    self.overflow_error: Class = self._builtin_exception_class(
        OverflowError, base=self.arithmetic_error
    )
    self.permission_error: Class = self._builtin_exception_class(
        PermissionError, base=self.os_error
    )
    self.process_lookup_error: Class = self._builtin_exception_class(
        ProcessLookupError, base=self.os_error
    )
    self.recursion_error: Class = self._builtin_exception_class(
        RecursionError, base=self.runtime_error
    )
    self.reference_error: Class = self._builtin_exception_class(ReferenceError)
    self.stop_async_iteration: Class = self._builtin_exception_class(
        StopAsyncIteration
    )
    self.stop_iteration: Class = self._builtin_exception_class(StopIteration)
    self.system_error: Class = self._builtin_exception_class(SystemError)
    self.system_exit: Class = self._builtin_exception_class(
        SystemExit, base=self.base_exception
    )
    self.tab_error: Class = self._builtin_exception_class(
        TabError, base=self.indentation_error
    )
    self.timeout_error: Class = self._builtin_exception_class(
        TimeoutError, base=self.os_error
    )
    self.type_error: Class = self._builtin_exception_class(TypeError)
    self.unicode_error: Class = self._builtin_exception_class(
        UnicodeError, base=self.value_error
    )
    self.unbound_local_error: Class = self._builtin_exception_class(
        UnboundLocalError, base=self.name_error
    )
    self.unicode_decode_error: Class = self._builtin_exception_class(
        UnicodeDecodeError, base=self.unicode_error
    )
    self.unicode_encode_error: Class = self._builtin_exception_class(
        UnicodeEncodeError, base=self.unicode_error
    )
    self.unicode_translate_error: Class = self._builtin_exception_class(
        UnicodeTranslateError, base=self.unicode_error
    )
    self.zero_division_error: Class = self._builtin_exception_class(
        ZeroDivisionError, base=self.arithmetic_error
    )
    # Warning hierarchy.
    self.warning: Class = self._builtin_exception_class(Warning)
    self.bytes_warning: Class = self._builtin_exception_class(
        BytesWarning, base=self.warning
    )
    self.deprecation_warning: Class = self._builtin_exception_class(
        DeprecationWarning, base=self.warning
    )
    self.future_warning: Class = self._builtin_exception_class(
        FutureWarning, base=self.warning
    )
    self.import_warning: Class = self._builtin_exception_class(
        ImportWarning, base=self.warning
    )
    self.pending_deprecation_warning: Class = self._builtin_exception_class(
        PendingDeprecationWarning, base=self.warning
    )
    self.resource_warning: Class = self._builtin_exception_class(
        ResourceWarning, base=self.warning
    )
    self.runtime_warning: Class = self._builtin_exception_class(
        RuntimeWarning, base=self.warning
    )
    self.syntax_warning: Class = self._builtin_exception_class(
        SyntaxWarning, base=self.warning
    )
    self.unicode_warning: Class = self._builtin_exception_class(
        UnicodeWarning, base=self.warning
    )
    self.user_warning: Class = self._builtin_exception_class(
        UserWarning, base=self.warning
    )
    # Compiler-behavior decorators.
    self.allow_weakrefs = AllowWeakrefsDecorator(
        TypeName("__static__", "allow_weakrefs"), self
    )
    self.dynamic_return = DynamicReturnDecorator(
        TypeName("__static__", "dynamic_return"), self
    )
    self.inline = InlineFunctionDecorator(TypeName("__static__", "inline"), self)
    self.donotcompile = DoNotCompileDecorator(
        TypeName("__static__", "_donotcompile"), self
    )
    self.property = PropertyDecorator(
        TypeName("builtins", "property"),
        self,
        pytype=property,
    )
    self.overload = OverloadDecorator(
        TypeName("typing", "overload"),
        self,
    )
    self.cached_property = CachedPropertyDecorator(
        TypeName("cinder", "cached_property"), self
    )
    self.async_cached_property = AsyncCachedPropertyDecorator(
        TypeName("cinder", "async_cached_property"), self
    )
    self.dataclass = DataclassDecorator(self)
    self.dataclass_field = DataclassFieldType(self)
    self.dataclass_field_function = DataclassFieldFunction(self)
    # Mapping from Python constant types to their static Value instances.
    self.constant_types: Mapping[Type[object], Value] = {
        str: self.str.exact_type().instance,
        int: self.int.exact_type().instance,
        float: self.float.exact_type().instance,
        complex: self.complex.exact_type().instance,
        bytes: self.bytes.instance,
        bool: self.bool.instance,
        type(None): self.none.instance,
        tuple: self.tuple.exact_type().instance,
        type(...): self.ellipsis.instance,
        frozenset: self.set.instance,
    }
    self.enum: EnumType = EnumType(self)
    self.int_enum: IntEnumType = IntEnumType(self)
    self.string_enum: StringEnumType = StringEnumType(self)
    self.exc_context_decorator = ContextDecoratorClass(
        self, TypeName("__static__", "ExcContextDecorator")
    )
    self.context_decorator = ContextDecoratorClass(
        self,
        TypeName("__static__", "ContextDecorator"),
        bases=[self.exc_context_decorator],
    )
    self.crange_iterator = CRangeIterator(self.type)
    self.str.exact_type().patch_reflected_method_types(self)
    self.native_decorator = NativeDecorator(self)
    if spamobj is not None:
        T = GenericParameter("T", 0, self)
        U = GenericParameter("U", 1, self)
        XXGENERIC_TYPE_NAME = GenericTypeName("xxclassloader", "XXGeneric", (T, U))
        self.xx_generic: XXGeneric = XXGeneric(
            XXGENERIC_TYPE_NAME, self, [self.object]
        )
def _builtin_exception_class(
self, exception_type: Type[object], base: Optional[Class] = None
) -> Class:
if base is None:
base = self.exception
return Class(
TypeName("builtins", exception_type.__name__),
self,
bases=[base],
pytype=exception_type,
)
def get_generic_type(
self, generic_type: GenericClass, index: GenericTypeIndex
) -> Class:
instantiations = self._generic_types.setdefault(generic_type, {})
instance = instantiations.get(index)
if instance is not None:
return instance
concrete = generic_type.make_generic_type(index)
instantiations[index] = concrete
concrete.members.update(
{
# pyre-ignore[6]: We trust that the type name is generic here.
k: v.make_generic(concrete, concrete.type_name, self)
for k, v in generic_type.members.items()
}
)
return concrete
def get_literal_type(self, base_type: Value, literal_value: object) -> Value:
# Literals are always exact
base_type = base_type.exact()
key = (base_type, literal_value)
if key not in self._literal_types:
self._literal_types[key] = literal_type = base_type.make_literal(
literal_value, self
)
self._nonliteral_types[literal_type] = base_type
return self._literal_types[key]
def get_nonliteral_type(self, literal_type: Value) -> Value:
return self._nonliteral_types.get(literal_type, literal_type)
def get_exact_type(self, klass: Class) -> Class:
if klass.is_exact:
return klass
if klass in self._exact_types:
return self._exact_types[klass]
exact_klass = klass._create_exact_type()
self._exact_types[klass] = exact_klass
self._inexact_types[exact_klass] = klass
return exact_klass
def get_inexact_type(self, klass: Class) -> Class:
if not klass.is_exact:
return klass
# Some types are always exact by default and have no inexact version. In that case,
# the exact type is the correct value to return.
if klass not in self._inexact_types:
return klass
return self._inexact_types[klass]
    def DYNAMIC(self) -> Value:
        # Shorthand for the instance of the dynamic ("unknown type") class.
        # NOTE(review): this is referenced without parentheses elsewhere in the
        # file (e.g. `visitor.type_env.DYNAMIC`), so a @property/@cached_property
        # decorator is probably missing from this view — confirm in full source.
        return self.dynamic.instance
    def OBJECT(self) -> Value:
        # Shorthand for the instance of builtins.object.
        # NOTE(review): sibling accessors of this shape are read as attributes
        # elsewhere in the file, so a property decorator may be missing from
        # this view — confirm against the full source.
        return self.object.instance
    def get_union(self, index: GenericTypeIndex) -> Class:
        # Unions are interned through the generic-type cache, keyed by the
        # tuple of member types.
        return self.get_generic_type(self.union, index)
class ResolvedTypeRef(TypeRef):
    """A TypeRef whose target Class is already known at construction time.

    Unlike a plain TypeRef, no name resolution happens later: ``resolved``
    simply hands back the class supplied to ``__init__``.
    """

    def __init__(self, type: Class) -> None:
        self._resolved = type

    def resolved(self, is_declaration: bool = False) -> Class:
        # The target was fixed up front, so `is_declaration` has no effect.
        return self._resolved

    def __repr__(self) -> str:
        return f"ResolvedTypeRef({self.resolved()})"
class Class(Object["Class"]):
    """Represents a type object at compile time.

    Tracks the type's name, bases/MRO, members, exactness/finality flags, and
    (optionally) the runtime Python type it mirrors. Also implements the
    binding/emission hooks used by the static compiler for constructor calls,
    attribute resolution, `|`-unions, and slot/function declaration.
    """
    def __init__(
        self,
        type_name: TypeName,
        type_env: TypeEnvironment,
        bases: Optional[List[Class]] = None,
        instance: Optional[Value] = None,
        klass: Optional[Class] = None,
        members: Optional[Dict[str, Value]] = None,
        is_exact: bool = False,
        pytype: Optional[Type[object]] = None,
        is_final: bool = False,
        has_init_subclass: bool = False,
    ) -> None:
        # `klass` is this type's metatype; defaults to the environment's
        # builtins.type.
        super().__init__(klass or type_env.type)
        assert isinstance(bases, (type(None), list))
        self.type_name = type_name
        self.type_env = type_env
        # The Value representing instances of this class.
        self.instance: Value = instance or Object(self)
        self.bases: List[Class] = self._get_bases(bases)
        # Lazily computed by `mro`; None means "not computed yet".
        self._mro: Optional[List[Class]] = None
        # members are attributes or methods
        self.members: Dict[str, Value] = members or {}
        self.is_exact = is_exact
        self.is_final = is_final
        self.allow_weakrefs = False
        self.donotcompile = False
        # This will cause all built-in method calls on the type to be done dynamically
        self.dynamic_builtinmethod_dispatch = False
        # The runtime Python type mirrored by this compile-time class, if any;
        # when present its __dict__ is reflected into `members`.
        self.pytype = pytype
        if self.pytype is not None:
            self.make_type_dict()
        # True if this class overrides __init_subclass__
        self.has_init_subclass = has_init_subclass
        # track AST node of each member until finish_bind, for error reporting
        self._member_nodes: Dict[str, AST] = {}
    def _get_bases(self, bases: Optional[List[Class]]) -> List[Class]:
        """Normalize declared bases; defaults to [object], and truncates the
        list after the first dynamic base (the rest can't matter)."""
        if bases is None:
            return [self.klass.type_env.object]
        ret = []
        for b in bases:
            ret.append(b)
            # Can't check for dynamic because that'd be a cyclic dependency
            if isinstance(b, DynamicClass):
                # If any of the defined bases is dynamic,
                # stop processing, because it doesn't matter
                # what the rest of them are.
                break
        return ret
    def make_type_dict(self) -> None:
        """Reflect the runtime pytype's __dict__ into `members`, wrapping
        C-implemented methods / builtin functions / getset descriptors.
        Existing members are never overwritten."""
        pytype = self.pytype
        if pytype is None:
            return
        result: Dict[str, Value] = {}
        for k in pytype.__dict__.keys():
            # Constructors might set custom members, make sure to respect those.
            if k in self.members:
                continue
            try:
                obj = pytype.__dict__[k]
            except AttributeError:
                continue
            if isinstance(obj, (MethodDescriptorType, WrapperDescriptorType)):
                result[k] = reflect_method_desc(obj, self, self.type_env)
            elif isinstance(obj, BuiltinFunctionType):
                result[k] = reflect_builtin_function(obj, self, self.type_env)
            elif isinstance(obj, GetSetDescriptorType):
                result[k] = GetSetDescriptor(self.type_env.getset_descriptor)
        self.members.update(result)
    def make_subclass(self, name: TypeName, bases: List[Class]) -> Class:
        """Create a plain subclass of this class with the given name/bases."""
        return Class(name, self.type_env, bases)
    def name(self) -> str:
        # NOTE(review): `name` / `instance_name` are read as attributes elsewhere
        # in this file (e.g. f"{self.name}" in can_assign_from), so these are
        # likely @property methods whose decorators are not visible in this view.
        return f"Type[{self.instance_name}]"
    def name_with_exact(self) -> str:
        return f"Type[{self.instance_name_with_exact}]"
    def instance_name(self) -> str:
        # We need to break the loop for `builtins.type`, as `builtins.type`'s instance is a Class.
        if type(self.instance) == Class:
            return "type"
        return self.instance.name
    def instance_name_with_exact(self) -> str:
        name = self.instance.name
        if self.is_exact:
            return f"Exact[{name}]"
        return name
    def qualname(self) -> str:
        return self.type_name.qualname
    def declare_class(self, node: ClassDef, klass: Class) -> None:
        """Record a nested class definition as a member (node kept for
        error reporting until finish_bind)."""
        self._member_nodes[node.name] = node
        self.members[node.name] = klass
    def declare_variable(self, node: AnnAssign, module: ModuleTable) -> None:
        """Declare an annotated class-level variable as a slot."""
        # class C:
        #    x: foo
        target = node.target
        if isinstance(target, ast.Name):
            self.define_slot(
                target.id,
                target,
                TypeRef(module, self.qualname, node.annotation),
                # Note down whether the slot has been assigned a value.
                assignment=node if node.value else None,
                declared_on_class=True,
            )
    def declare_variables(self, node: Assign, module: ModuleTable) -> None:
        # Unannotated class-level assignments declare nothing here.
        pass
    def reflected_method_types(self, type_env: TypeEnvironment) -> Dict[str, Class]:
        # Overridden by subclasses to fix up return types of reflected methods.
        return {}
    def patch_reflected_method_types(self, type_env: TypeEnvironment) -> None:
        """Apply `reflected_method_types` return-type overrides to the
        already-reflected builtin method descriptors."""
        for name, return_type in self.reflected_method_types(type_env).items():
            member = self.members[name]
            assert isinstance(member, BuiltinMethodDescriptor)
            member.return_type = ResolvedTypeRef(return_type)
    def resolve_name(self, name: str) -> Optional[Value]:
        return self.members.get(name)
    def readable_name(self) -> str:
        return self.type_name.readable_name
    def is_generic_parameter(self) -> bool:
        """Returns True if this Class represents a generic parameter"""
        return False
    def contains_generic_parameters(self) -> bool:
        """Returns True if this class contains any generic parameters"""
        return False
    def is_generic_type(self) -> bool:
        """Returns True if this class is a generic type"""
        return False
    def is_generic_type_definition(self) -> bool:
        """Returns True if this class is a generic type definition.
        It'll be a generic type which still has unbound generic type
        parameters"""
        return False
    def generic_type_def(self) -> Optional[Class]:
        """Gets the generic type definition that defined this class"""
        return None
    def make_generic_type(
        self,
        index: Tuple[Class, ...],
    ) -> Optional[Class]:
        """Binds the generic type parameters to a generic type definition"""
        return None
    def resolve_attr(
        self, node: ast.Attribute, visitor: GenericVisitor[object]
    ) -> Optional[Value]:
        """Resolve an attribute accessed on the class object itself by
        walking the MRO and applying descriptor-get (with inst=None)."""
        for base in self.mro:
            member = base.members.get(node.attr)
            if member is not None:
                res = member.resolve_descr_get(node, None, self, visitor)
                if res is not None:
                    return res
        return super().resolve_attr(node, visitor)
    def bind_binop(
        self, node: ast.BinOp, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> bool:
        """Bind `Class | other` as a union type; everything else defers to
        the base implementation. Returns True when handled."""
        if isinstance(node.op, ast.BitOr):
            rtype = visitor.get_type(node.right)
            # Normalize instances (None, dynamic) to their Class counterparts.
            if rtype is visitor.type_env.none.instance:
                rtype = visitor.type_env.none
            if rtype is visitor.type_env.DYNAMIC:
                rtype = visitor.type_env.dynamic
            if not isinstance(rtype, Class):
                visitor.syntax_error(
                    f"unsupported operand type(s) for |: {self.name} and {rtype.name}",
                    node,
                )
                return False
            union = visitor.type_env.get_union((self, rtype))
            visitor.set_type(node, union)
            return True
        return super().bind_binop(node, visitor, type_ctx)
    def can_be_narrowed(self) -> bool:
        return True
    def type_descr(self) -> TypeDescr:
        # Exact types carry a trailing "!" marker in their descriptor.
        if self.is_exact:
            return self.type_name.type_descr + ("!",)
        return self.type_name.type_descr
    def _resolve_dunder(self, name: str) -> Tuple[Class, Optional[Value]]:
        """Find `name` along the MRO; returns (defining class, value).
        Stops with (dynamic, None) as soon as a dynamic base is hit, and
        ends at (object, None) when nothing defines it."""
        klass = self.type_env.object
        for klass in self.mro:
            if klass is self.type_env.dynamic:
                return self.type_env.dynamic, None
            if val := klass.members.get(name):
                return klass, val
        assert klass.inexact_type() is self.type_env.object
        return self.type_env.object, None
    def bind_call(
        self, node: ast.Call, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> NarrowingEffect:
        """Type-check a constructor call on this class.

        Resolves __new__ and __init__ along the MRO, decides whether the
        call can be emitted statically, and records a ClassCallInfo on the
        node for emit_call.
        """
        self_type = self.instance
        new_mapping: Optional[ArgMapping] = None
        init_mapping: Optional[ArgMapping] = None
        dynamic_call = True
        klass, new = self._resolve_dunder("__new__")
        dynamic_new = klass is self.type_env.dynamic
        object_new = klass.inexact_type() is self.type_env.object
        if not object_new and isinstance(new, Callable):
            new_mapping, self_type = new.map_call(
                node,
                visitor,
                None,
                [node.func] + node.args,
            )
            if new_mapping.can_call_statically():
                dynamic_call = False
            else:
                dynamic_new = True
        object_init = False
        # if __new__ returns something that isn't a subclass of
        # our type then __init__ isn't invoked
        if not dynamic_new and self_type.klass.can_assign_from(self.instance.klass):
            klass, init = self._resolve_dunder("__init__")
            dynamic_call = dynamic_call or klass is self.type_env.dynamic
            object_init = klass.inexact_type() is self.type_env.object
            if not object_init and isinstance(init, Callable):
                init_mapping = ArgMapping(init, node, visitor, None)
                init_mapping.bind_args(visitor, True)
                if init_mapping.can_call_statically():
                    dynamic_call = False
        if object_new and object_init:
            # Neither dunder is user-defined: object() accepts no arguments.
            if node.args or node.keywords:
                visitor.syntax_error(f"{self.instance_name}() takes no arguments", node)
            else:
                dynamic_call = False
        if new_mapping is not None and init_mapping is not None:
            # If we have both a __new__ and __init__ function we can't currently
            # invoke it statically, as the arguments could have side effects.
            # In the future we could potentially do better by shuffling into
            # temporaries, but this is pretty rare.
            dynamic_call = True
        # A subclass could override either dunder, so only exact/final types
        # can be called statically.
        if not self.is_exact and not self.is_final:
            dynamic_call = True
        visitor.set_type(node, self_type)
        visitor.set_node_data(
            node, ClassCallInfo, ClassCallInfo(new_mapping, init_mapping, dynamic_call)
        )
        if dynamic_call:
            # Dynamic calls can't receive primitives; force boxing of args.
            for arg in node.args:
                visitor.visitExpectedType(
                    arg, visitor.type_env.DYNAMIC, CALL_ARGUMENT_CANNOT_BE_PRIMITIVE
                )
            for arg in node.keywords:
                visitor.visitExpectedType(
                    arg.value,
                    visitor.type_env.DYNAMIC,
                    CALL_ARGUMENT_CANNOT_BE_PRIMITIVE,
                )
        return NO_EFFECT
    def emit_call(self, node: ast.Call, code_gen: Static310CodeGenerator) -> None:
        """Emit bytecode for a constructor call, using the ClassCallInfo
        recorded by bind_call; falls back to a fully dynamic call."""
        call_info = code_gen.get_node_data(node, ClassCallInfo)
        if call_info.dynamic_call:
            return super().emit_call(node, code_gen)
        new = call_info.new
        if new:
            new.emit(code_gen)
        else:
            # No custom __new__: allocate the instance directly.
            code_gen.emit("TP_ALLOC", self.type_descr)
        init = call_info.init
        if init is not None:
            code_gen.emit("DUP_TOP")
            init.emit(code_gen)
            code_gen.emit("POP_TOP")  # pop None
    def can_assign_from(self, src: Class) -> bool:
        """checks to see if the src value can be assigned to this value. Currently
        you can assign a derived type to a base type. You cannot assign a primitive
        type to an object type.
        At some point we may also support some form of interfaces via protocols if we
        implement a more efficient form of interface dispatch than doing the dictionary
        lookup for the member."""
        return src is self or (
            (not self.is_exact or src.instance.nonliteral() is self.instance)
            and not isinstance(src, CType)
            and src.instance.nonliteral().klass.is_subclass_of(self)
        )
    def __repr__(self) -> str:
        return f"<{self.name} class>"
    def exact(self) -> Class:
        return self
    def inexact(self) -> Class:
        return self
    def exact_type(self) -> Class:
        return self.type_env.get_exact_type(self)
    def inexact_type(self) -> Class:
        return self.type_env.get_inexact_type(self)
    def _create_exact_type(self) -> Class:
        """Clone this class as its exact variant (shared members/bases, a
        copied instance pointing back at the clone)."""
        instance = copy(self.instance)
        klass = type(self)(
            type_name=self.type_name,
            type_env=self.type_env,
            bases=self.bases,
            klass=self.klass,
            members=self.members,
            instance=instance,
            is_exact=True,
            pytype=self.pytype,
            is_final=self.is_final,
            has_init_subclass=self.has_init_subclass,
        )
        # We need to point the instance's klass to the new class we just created.
        instance.klass = klass
        # `donotcompile` and `allow_weakrefs` are set via decorators after construction, and we
        # need to persist these for consistency.
        klass.donotcompile = self.donotcompile
        klass.allow_weakrefs = self.allow_weakrefs
        return klass
    def isinstance(self, src: Value) -> bool:
        return src.klass.is_subclass_of(self)
    def is_subclass_of(self, src: Class) -> bool:
        if isinstance(src, UnionType):
            # This is an important subtlety - we want the subtyping relation to satisfy
            # self < A | B if either self < A or self < B. Requiring both wouldn't be correct,
            # as we want to allow assignments of A into A | B.
            return any(self.is_subclass_of(t) for t in src.type_args)
        return src.exact_type() in self.mro
    def _check_compatible_property_override(
        self, override: Value, inherited: Value
    ) -> bool:
        """True when a property override pairing is allowed (sync properties
        interchange with cached properties; async likewise)."""
        # Properties can be overridden by cached properties, and vice-versa.
        valid_sync_override = isinstance(
            override, (CachedPropertyMethod, PropertyMethod)
        ) and isinstance(inherited, (CachedPropertyMethod, PropertyMethod))
        valid_async_override = isinstance(
            override, (AsyncCachedPropertyMethod, PropertyMethod)
        ) and isinstance(inherited, (AsyncCachedPropertyMethod, PropertyMethod))
        return valid_sync_override or valid_async_override
    def check_incompatible_override(
        self, override: Value, inherited: Value, module: ModuleTable
    ) -> None:
        """Raise TypedSyntaxError when `override` is not a legal override of
        the inherited member."""
        # TODO: There's more checking we should be doing to ensure
        # this is a compatible override
        if isinstance(override, TransparentDecoratedMethod):
            override = override.function
        if not inherited.can_override(override, self, module):
            raise TypedSyntaxError(f"class cannot hide inherited member: {inherited!r}")
    def finish_bind(self, module: ModuleTable, klass: Class | None) -> Optional[Value]:
        """Finish binding all members (possibly adding members mid-walk),
        dropping those whose finish_bind returns None."""
        todo = set(self.members.keys())
        finished = set()
        while todo:
            name = todo.pop()
            my_value = self.members[name]
            new_value = self._finish_bind_one(name, my_value, module)
            if new_value is None:
                del self.members[name]
            else:
                self.members[name] = new_value
            finished.add(name)
            # account for the possibility that finish_bind of one member added new members
            todo.update(self.members.keys())
            todo.difference_update(finished)
        # These were just for error reporting here, don't need them anymore
        self._member_nodes = {}
        return self
    def _finish_bind_one(
        self, name: str, my_value: Value, module: ModuleTable
    ) -> Value | None:
        """Finish-bind a single member, validating it against same-named
        members on base classes; returns the (possibly replaced) member or
        None when it should be dropped."""
        node = self.inexact_type()._member_nodes.get(name, None)
        with module.error_context(node):
            new_value = my_value.finish_bind(module, self)
            if new_value is None:
                return None
            my_value = new_value
            for base in self.mro[1:]:
                value = base.members.get(name)
                if value is not None:
                    self.check_incompatible_override(my_value, value, module)
                if isinstance(value, Slot) and isinstance(my_value, Slot):
                    # use the base class slot
                    if value.is_final or not value.assigned_on_class:
                        return None
                    # For class values we are introducing a new slot which
                    # can be accessed from the derived type. We end up
                    # creating a slot with a default value so the value can
                    # be stored on the instance.
                    my_value.override = value
                    my_value.type_ref = value.type_ref
        return my_value
    def define_slot(
        self,
        name: str,
        node: AST,
        type_ref: Optional[TypeRef] = None,
        assignment: Optional[AST] = None,
        declared_on_class: bool = False,
    ) -> None:
        """Create or update a Slot member; re-declaring with a new type or
        conflicting with a non-slot member is an error."""
        existing = self.members.get(name)
        if existing is None:
            self._member_nodes[name] = node
            self.members[name] = Slot(
                type_ref,
                name,
                self,
                assignment,
                declared_on_class=declared_on_class,
            )
        elif isinstance(existing, Slot):
            if not existing.type_ref:
                existing.type_ref = type_ref
                self._member_nodes[name] = node
            elif type_ref:
                raise TypedSyntaxError(
                    f"Cannot re-declare member '{name}' in '{self.instance.name}'"
                )
            existing.update(assignment, declared_on_class)
        else:
            raise TypedSyntaxError(
                f"slot conflicts with other member {name} in {self.name}"
            )
    def declare_function(self, func: Function) -> None:
        """Register a function member; repeated names form a FunctionGroup
        (overloads). For __init__, scan the body for implicit slot defs."""
        existing = self.members.get(func.func_name)
        new_member = func
        if existing is not None:
            if isinstance(existing, Function):
                new_member = FunctionGroup([existing, new_member], func.klass.type_env)
            elif isinstance(existing, FunctionGroup):
                existing.functions.append(new_member)
                new_member = existing
            else:
                raise TypedSyntaxError(
                    f"function conflicts with other member {func.func_name} in {self.name}"
                )
        func.set_container_type(self)
        self._member_nodes[func.func_name] = func.node
        self.members[func.func_name] = new_member
        if (
            func.func_name == "__init__"
            and isinstance(func, Function)
            and func.node.args.args
        ):
            node = func.node
            if isinstance(node, FunctionDef):
                InitVisitor(func.module, self, node).visit(node.body)
    def mro(self) -> Sequence[Class]:
        """Compute and cache the method resolution order; empty when any
        base is unknown (falsy)."""
        mro = self._mro
        if mro is None:
            if not all(self.bases):
                # TODO: We can't compile w/ unknown bases
                mro = []
            else:
                mro = _mro(self)
            self._mro = mro
        return mro
    def bind_generics(
        self,
        name: GenericTypeName,
        type_env: TypeEnvironment,
    ) -> Class:
        # Non-generic classes are unaffected by generic substitution.
        return self
    def find_slot(self, node: ast.Attribute) -> Optional[Slot[Class]]:
        """Locate a non-ClassVar Slot for the attribute along the MRO."""
        for base in self.mro:
            member = base.members.get(node.attr)
            if (
                member is not None
                and isinstance(member, Slot)
                and not member.is_classvar
            ):
                return member
        return None
    def get_own_member(self, name: str) -> Optional[Value]:
        return self.members.get(name)
    def get_parent_member(self, name: str) -> Optional[Value]:
        # the first entry of mro is the class itself
        for b in self.mro[1:]:
            slot = b.members.get(name, None)
            if slot:
                return slot
        # implicitly returns None when no base defines the member
    def get_member(self, name: str) -> Optional[Value]:
        """Own member first, then inherited."""
        member = self.get_own_member(name)
        if member:
            return member
        return self.get_parent_member(name)
    def get_own_final_method_names(self) -> Sequence[str]:
        """Names of methods declared directly on this class that are final."""
        final_methods = []
        for name, value in self.members.items():
            if isinstance(value, DecoratedMethod) and value.is_final:
                final_methods.append(name)
            elif isinstance(value, Function) and value.is_final:
                final_methods.append(name)
        return final_methods
    def unwrap(self) -> Class:
        return self
    def emit_type_check(self, src: Class, code_gen: Static310CodeGenerator) -> None:
        """Emit a runtime CAST only when coming from dynamic; otherwise the
        assignment must already be statically valid."""
        if src is self.type_env.dynamic:
            code_gen.emit("CAST", self.type_descr)
        else:
            assert self.can_assign_from(src)
    def emit_extra_members(
        self, node: ClassDef, code_gen: Static310CodeGenerator
    ) -> None:
        # Hook for subclasses that need to emit additional class members.
        pass
class BuiltinMethodDescriptor(Callable[Class]):
    """A compile-time descriptor for a C-implemented method on a builtin type.

    Created by `reflect_method_desc` when reflecting a pytype's __dict__.
    When no arg signature is known, calls are bound dynamically.
    """
    def __init__(
        self,
        func_name: str,
        container_type: Class,
        args: Optional[List[Parameter]] = None,
        return_type: Optional[TypeRef] = None,
        dynamic_dispatch: bool = False,
        valid_on_subclasses: bool = False,
    ) -> None:
        assert isinstance(return_type, (TypeRef, type(None)))
        self.type_env: TypeEnvironment = container_type.type_env
        # Positional-only parameters are excluded from by-name lookup.
        args_by_name = (
            {}
            if args is None
            else {arg.name: arg for arg in args if arg.style is not ParamStyle.POSONLY}
        )
        super().__init__(
            self.type_env.builtin_method_desc,
            func_name,
            container_type.type_name.module,
            args,
            args_by_name,
            0,
            None,
            None,
            # Unknown return types default to dynamic.
            return_type or ResolvedTypeRef(container_type.type_env.dynamic),
        )
        # When `dynamic_dispatch` is True, we will not emit INVOKE_* on this
        # method.
        self.dynamic_dispatch = dynamic_dispatch
        self.set_container_type(container_type)
        self.valid_on_subclasses = valid_on_subclasses
    def can_override(self, override: Value, klass: Class, module: ModuleTable) -> bool:
        """Only another builtin method descriptor or a Function may override."""
        if not isinstance(override, (BuiltinMethodDescriptor, Function)):
            raise TypedSyntaxError(f"class cannot hide inherited member: {self!r}")
        return super().can_override(override, klass, module)
    def bind_call_self(
        self,
        node: ast.Call,
        visitor: TypeBinder,
        type_ctx: Optional[Class],
        self_expr: Optional[expr] = None,
    ) -> NarrowingEffect:
        """Bind a call through `self`; with no known signature the call is
        typed dynamic and primitives are disallowed as arguments."""
        if self.args is not None:
            return super().bind_call_self(node, visitor, type_ctx, self_expr)
        elif node.keywords:
            return super().bind_call(node, visitor, type_ctx)
        visitor.set_type(node, visitor.type_env.DYNAMIC)
        for arg in node.args:
            visitor.visitExpectedType(
                arg, visitor.type_env.DYNAMIC, CALL_ARGUMENT_CANNOT_BE_PRIMITIVE
            )
        return NO_EFFECT
    def resolve_descr_get(
        self,
        node: ast.Attribute,
        inst: Optional[Object[TClassInv]],
        ctx: TClassInv,
        visitor: GenericVisitor[object],
    ) -> Optional[Value]:
        """Descriptor-get: unbound access returns the descriptor itself;
        instance access returns a BuiltinMethod, resolving Self-typed
        returns and degrading to dynamic for inexact receivers."""
        if inst is None:
            return self
        else:
            if self.dynamic_dispatch:
                return visitor.type_env.DYNAMIC
            if isinstance(self.return_type, SelfTypeRef):
                ret_type = ResolvedTypeRef(inst.klass)
                bound = self.return_type.resolved()
                assert bound.can_assign_from(inst.klass)
            else:
                ret_type = self.return_type
                # Type must either match exactly or the method must be explicitly
                # annotated as being valid on arbitrary subclasses, too.
                if not (inst.klass.is_exact or self.valid_on_subclasses):
                    ret_type = ResolvedTypeRef(visitor.type_env.dynamic)
            return BuiltinMethod(self, node.value, ret_type)
    def make_generic(
        self, new_type: Class, name: GenericTypeName, type_env: TypeEnvironment
    ) -> Value:
        """Rebind args/return type against a concrete generic instantiation;
        with no signature, just retarget the container type."""
        cur_args = self.args
        cur_ret_type = self.return_type
        if cur_args is not None and cur_ret_type is not None:
            new_args = list(arg.bind_generics(name, type_env) for arg in cur_args)
            new_ret_type = cur_ret_type.resolved().bind_generics(name, type_env)
            return BuiltinMethodDescriptor(
                self.func_name,
                new_type,
                new_args,
                ResolvedTypeRef(new_ret_type),
            )
        else:
            return BuiltinMethodDescriptor(self.func_name, new_type)
def parse_typed_signature(
    sig: Dict[str, object],
    klass: Optional[Class],
    type_env: TypeEnvironment,
) -> Tuple[List[Parameter], Class]:
    """Convert a `__typed_signature__` dict into (parameters, return type).

    When `klass` is given, an implicit positional-only `self` of that class
    is prepended; declared args are numbered starting at position 1.
    """
    arg_descrs = sig["args"]
    assert isinstance(arg_descrs, list)
    params: List[Parameter] = []
    if klass is not None:
        params.append(
            Parameter(
                "self", 0, ResolvedTypeRef(klass), False, None, ParamStyle.POSONLY
            )
        )
    params.extend(
        parse_param(arg, pos + 1, type_env) for pos, arg in enumerate(arg_descrs)
    )
    ret_descr = sig["return"]
    assert isinstance(ret_descr, dict)
    return params, parse_type(ret_descr, type_env)
# Runtime descriptor types used to recognize C-implemented methods when
# reflecting a pytype's __dict__ (see Class.make_type_dict). Derived from
# representative builtins rather than imported names.
WrapperDescriptorType = type(object.__init__)
MethodDescriptorType = type(str.join)
def reflect_method_desc(
    obj: MethodDescriptorType | WrapperDescriptorType,
    klass: Class,
    type_env: TypeEnvironment,
) -> BuiltinMethodDescriptor:
    """Build a BuiltinMethodDescriptor for a C-implemented method.

    Uses the method's `__typed_signature__` metadata when present; otherwise
    the descriptor is created without a signature (dynamic binding).
    """
    dispatch = klass.dynamic_builtinmethod_dispatch
    sig = getattr(obj, "__typed_signature__", None)
    if sig is None:
        return BuiltinMethodDescriptor(obj.__name__, klass, dynamic_dispatch=dispatch)
    signature, return_type = parse_typed_signature(sig, klass, type_env)
    return BuiltinMethodDescriptor(
        obj.__name__,
        klass,
        signature,
        ResolvedTypeRef(return_type),
        dynamic_dispatch=dispatch,
    )
185,730 | from __future__ import annotations
from __static__ import chkdict, chklist
import ast
import dataclasses
import sys
from ast import (
AnnAssign,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Bytes,
Call,
ClassDef,
cmpop,
Constant,
copy_location,
expr,
FunctionDef,
NameConstant,
Num,
Return,
Starred,
Str,
)
from copy import copy
from enum import Enum, IntEnum
from functools import cached_property
from types import (
BuiltinFunctionType,
GetSetDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
)
from typing import (
Callable as typingCallable,
cast,
ClassVar as typingClassVar,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from cinderx.static import ( # noqa: F401
FAST_LEN_ARRAY,
FAST_LEN_DICT,
FAST_LEN_INEXACT,
FAST_LEN_LIST,
FAST_LEN_SET,
FAST_LEN_STR,
FAST_LEN_TUPLE,
PRIM_OP_ADD_DBL,
PRIM_OP_ADD_INT,
PRIM_OP_AND_INT,
PRIM_OP_DIV_DBL,
PRIM_OP_DIV_INT,
PRIM_OP_DIV_UN_INT,
PRIM_OP_EQ_DBL,
PRIM_OP_EQ_INT,
PRIM_OP_GE_DBL,
PRIM_OP_GE_INT,
PRIM_OP_GE_UN_INT,
PRIM_OP_GT_DBL,
PRIM_OP_GT_INT,
PRIM_OP_GT_UN_INT,
PRIM_OP_INV_INT,
PRIM_OP_LE_DBL,
PRIM_OP_LE_INT,
PRIM_OP_LE_UN_INT,
PRIM_OP_LSHIFT_INT,
PRIM_OP_LT_DBL,
PRIM_OP_LT_INT,
PRIM_OP_LT_UN_INT,
PRIM_OP_MOD_DBL,
PRIM_OP_MOD_INT,
PRIM_OP_MOD_UN_INT,
PRIM_OP_MUL_DBL,
PRIM_OP_MUL_INT,
PRIM_OP_NE_DBL,
PRIM_OP_NE_INT,
PRIM_OP_NEG_DBL,
PRIM_OP_NEG_INT,
PRIM_OP_NOT_INT,
PRIM_OP_OR_INT,
PRIM_OP_POW_DBL,
PRIM_OP_POW_INT,
PRIM_OP_POW_UN_INT,
PRIM_OP_RSHIFT_INT,
PRIM_OP_RSHIFT_UN_INT,
PRIM_OP_SUB_DBL,
PRIM_OP_SUB_INT,
PRIM_OP_XOR_INT,
SEQ_ARRAY_INT64,
SEQ_CHECKED_LIST,
SEQ_LIST,
SEQ_LIST_INEXACT,
SEQ_REPEAT_INEXACT_NUM,
SEQ_REPEAT_INEXACT_SEQ,
SEQ_REPEAT_PRIMITIVE_NUM,
SEQ_REPEAT_REVERSED,
SEQ_SUBSCR_UNCHECKED,
SEQ_TUPLE,
TYPED_BOOL,
TYPED_DOUBLE,
TYPED_INT16,
TYPED_INT32,
TYPED_INT64,
TYPED_INT8,
TYPED_INT_64BIT,
TYPED_OBJECT,
TYPED_UINT16,
TYPED_UINT32,
TYPED_UINT64,
TYPED_UINT8,
)
from ..consts import CO_STATICALLY_COMPILED
from ..errors import TypedSyntaxError
from ..optimizer import AstOptimizer
from ..pyassem import Block, FVC_REPR
from ..pycodegen import CodeGenerator, FOR_LOOP
from ..symbols import FunctionScope
from ..unparse import to_expr
from ..visitor import ASTRewriter, TAst
from .effects import NarrowingEffect, NO_EFFECT, TypeState
from .visitor import GenericVisitor
class TypeEnvironment:
def __init__(self) -> None:
self._generic_types: GenericTypesDict = {}
self._literal_types: Dict[Tuple[Value, object], Value] = {}
self._nonliteral_types: Dict[Value, Value] = {}
self._exact_types: Dict[Class, Class] = {}
self._inexact_types: Dict[Class, Class] = {}
# Bringing up the type system is a little special as we have dependencies
# amongst type and object
self.type: Class = Class.__new__(Class)
self.type.type_name = TypeName("builtins", "type")
self.type.type_env = self
self.type.klass = self.type
self.type.instance = self.type
self.type.members = {}
self.type.is_exact = False
self.type.is_final = False
self.type.allow_weakrefs = False
self.type.donotcompile = False
self.type._mro = None
self.type.pytype = type
self.type._member_nodes = {}
self.type.dynamic_builtinmethod_dispatch = False
self.type.has_init_subclass = False
self.object: Class = BuiltinObject(
TypeName("builtins", "object"),
self,
bases=[],
)
self.type.bases = [self.object]
self.dynamic = DynamicClass(self)
self.builtin_method_desc = Class(
TypeName("types", "MethodDescriptorType"),
self,
is_exact=True,
)
self.builtin_method = Class(
TypeName("types", "BuiltinMethodType"), self, is_exact=True
)
self.getset_descriptor: Class = Class(
TypeName("builtins", "getset_descriptor"), self, [self.object]
)
# We special case make_type_dict() on object for bootstrapping purposes.
self.object.pytype = object
self.object.members["__class__"] = ClassGetSetDescriptor(self.getset_descriptor)
self.object.make_type_dict()
self.type.make_type_dict()
self.type.members["__name__"] = TypeDunderNameGetSetDescriptor(
self.getset_descriptor, self
)
self.getset_descriptor.pytype = GetSetDescriptorType
self.getset_descriptor.make_type_dict()
self.str = StrClass(self)
self.int = NumClass(TypeName("builtins", "int"), self, pytype=int)
self.float = NumClass(TypeName("builtins", "float"), self, pytype=float)
self.complex = NumClass(TypeName("builtins", "complex"), self, pytype=complex)
self.bytes = Class(
TypeName("builtins", "bytes"), self, [self.object], pytype=bytes
)
self.bool: Class = BoolClass(self)
self.cbool: CIntType = CIntType(TYPED_BOOL, self, name_override="cbool")
self.range: Class = Class(
TypeName("builtins", "range"), self, [self.object], pytype=range
)
self.int8: CIntType = CIntType(TYPED_INT8, self)
self.int16: CIntType = CIntType(TYPED_INT16, self)
self.int32: CIntType = CIntType(TYPED_INT32, self)
self.int64: CIntType = CIntType(TYPED_INT64, self)
self.uint8: CIntType = CIntType(TYPED_UINT8, self)
self.uint16: CIntType = CIntType(TYPED_UINT16, self)
self.uint32: CIntType = CIntType(TYPED_UINT32, self)
self.uint64: CIntType = CIntType(TYPED_UINT64, self)
# TODO uses of these to check if something is a CInt wrongly exclude literals
self.signed_cint_types: Sequence[CIntType] = [
self.int8,
self.int16,
self.int32,
self.int64,
]
self.unsigned_cint_types: Sequence[CIntType] = [
self.uint8,
self.uint16,
self.uint32,
self.uint64,
]
self.all_cint_types: Sequence[CIntType] = (
self.signed_cint_types + self.unsigned_cint_types
)
self.none = NoneType(self)
self.optional = OptionalType(self)
self.name_to_type: Mapping[str, Class] = {
"NoneType": self.none,
"object": self.object,
"str": self.str,
"__static__.int8": self.int8,
"__static__.int16": self.int16,
"__static__.int32": self.int32,
"__static__.int64": self.int64,
"__static__.uint8": self.uint8,
"__static__.uint16": self.uint16,
"__static__.uint32": self.uint32,
"__static__.uint64": self.uint64,
}
if spamobj is not None:
self.spam_obj: Optional[GenericClass] = GenericClass(
GenericTypeName(
"xxclassloader", "spamobj", (GenericParameter("T", 0, self),)
),
self,
[self.object],
pytype=spamobj,
)
else:
self.spam_obj = None
checked_dict_type_name = GenericTypeName(
"__static__",
"chkdict",
(GenericParameter("K", 0, self), GenericParameter("V", 1, self)),
)
checked_list_type_name = GenericTypeName(
"__static__", "chklist", (GenericParameter("T", 0, self),)
)
self.checked_dict = CheckedDict(
checked_dict_type_name,
self,
[self.object],
pytype=chkdict,
is_exact=True,
)
self.checked_list = CheckedList(
checked_list_type_name,
self,
[self.object],
pytype=chklist,
is_exact=True,
)
self.ellipsis = Class(
TypeName("builtins", "ellipsis"),
self,
[self.object],
pytype=type(...),
is_exact=True,
)
self.dict = DictClass(self, is_exact=False)
self.list = ListClass(self)
self.set = SetClass(self, is_exact=False)
self.frozenset = FrozenSetClass(self, is_exact=False)
self.tuple = TupleClass(self)
self.function = Class(TypeName("types", "FunctionType"), self, is_exact=True)
self.method = Class(TypeName("types", "MethodType"), self, is_exact=True)
self.member = Class(
TypeName("types", "MemberDescriptorType"), self, is_exact=True
)
self.slice = Class(TypeName("builtins", "slice"), self, is_exact=True)
self.super = SuperClass(self, is_exact=True)
self.char = CIntType(TYPED_INT8, self, name_override="char")
self.module = ModuleType(self)
self.double = CDoubleType(self)
self.array = ArrayClass(
GenericTypeName("__static__", "Array", (GenericParameter("T", 0, self),)),
self,
is_exact=True,
)
# We have found no strong reason (yet) to support arrays of other types of
# primitives
self.allowed_array_types: List[Class] = [
self.int64,
]
self.static_method = StaticMethodDecorator(
TypeName("builtins", "staticmethod"),
self,
pytype=staticmethod,
)
self.class_method = ClassMethodDecorator(
TypeName("builtins", "classmethod"),
self,
pytype=classmethod,
)
self.final_method = TypingFinalDecorator(TypeName("typing", "final"), self)
self.awaitable = AwaitableType(self)
self.union = UnionType(self)
self.final = FinalClass(
GenericTypeName("typing", "Final", (GenericParameter("T", 0, self),)), self
)
self.classvar = ClassVar(
GenericTypeName("typing", "ClassVar", (GenericParameter("T", 0, self),)),
self,
)
self.initvar = InitVar(
GenericTypeName(
"dataclasses", "InitVar", (GenericParameter("T", 0, self),)
),
self,
)
self.exact = ExactClass(
GenericTypeName("typing", "Exact", (GenericParameter("T", 0, self),)), self
)
self.named_tuple = Class(TypeName("typing", "NamedTuple"), self)
self.protocol = Class(TypeName("typing", "Protocol"), self)
self.typed_dict = Class(TypeName("typing", "TypedDict"), self)
self.literal = LiteralType(TypeName("typing", "Literal"), self)
self.annotated = AnnotatedType(TypeName("typing", "Annotated"), self)
self.not_implemented = Class(
TypeName("builtins", "NotImplementedType"),
self,
bases=[self.object],
pytype=type(NotImplemented),
)
self.base_exception = Class(
TypeName("builtins", "BaseException"), self, pytype=BaseException
)
self.exception = Class(
TypeName("builtins", "Exception"),
self,
bases=[self.base_exception],
pytype=Exception,
)
self.value_error: Class = self._builtin_exception_class(ValueError)
self.os_error: Class = self._builtin_exception_class(OSError)
self.runtime_error: Class = self._builtin_exception_class(RuntimeError)
self.syntax_error: Class = self._builtin_exception_class(SyntaxError)
self.arithmetic_error: Class = self._builtin_exception_class(ArithmeticError)
self.assertion_error: Class = self._builtin_exception_class(AssertionError)
self.attribute_error: Class = self._builtin_exception_class(AttributeError)
self.blocking_io_error: Class = self._builtin_exception_class(
BlockingIOError, base=self.os_error
)
self.buffer_error: Class = self._builtin_exception_class(BufferError)
self.child_process_error: Class = self._builtin_exception_class(
ChildProcessError, base=self.os_error
)
self.connection_error: Class = self._builtin_exception_class(
ConnectionError, base=self.os_error
)
self.broken_pipe_error: Class = self._builtin_exception_class(
BrokenPipeError, self.connection_error
)
self.connection_aborted_error: Class = self._builtin_exception_class(
ConnectionAbortedError, base=self.connection_error
)
self.connection_refused_error: Class = self._builtin_exception_class(
ConnectionRefusedError, base=self.connection_error
)
self.connection_reset_error: Class = self._builtin_exception_class(
ConnectionResetError, base=self.connection_error
)
self.environment_error: Class = self._builtin_exception_class(EnvironmentError)
self.eof_error: Class = self._builtin_exception_class(EOFError)
self.file_exists_error: Class = self._builtin_exception_class(
FileExistsError, base=self.os_error
)
self.file_not_found_error: Class = self._builtin_exception_class(
FileNotFoundError, base=self.os_error
)
self.floating_point_error: Class = self._builtin_exception_class(
FloatingPointError, base=self.arithmetic_error
)
self.generator_exit: Class = self._builtin_exception_class(
GeneratorExit, base=self.base_exception
)
self.import_error: Class = self._builtin_exception_class(ImportError)
self.indentation_error: Class = self._builtin_exception_class(
IndentationError, base=self.syntax_error
)
self.index_error: Class = self._builtin_exception_class(IndexError)
self.interrupted_error: Class = self._builtin_exception_class(
InterruptedError, base=self.os_error
)
self.io_error: Class = self._builtin_exception_class(IOError)
self.is_a_directory_error: Class = self._builtin_exception_class(
IsADirectoryError, base=self.os_error
)
self.key_error: Class = self._builtin_exception_class(KeyError)
self.keyboard_interrupt: Class = self._builtin_exception_class(
KeyboardInterrupt, base=self.base_exception
)
self.lookup_error: Class = self._builtin_exception_class(LookupError)
self.memory_error: Class = self._builtin_exception_class(MemoryError)
self.module_not_found_error: Class = self._builtin_exception_class(
ModuleNotFoundError, base=self.import_error
)
self.name_error: Class = self._builtin_exception_class(NameError)
self.not_a_directory_error: Class = self._builtin_exception_class(
NotADirectoryError, base=self.os_error
)
self.not_implemented_error: Class = self._builtin_exception_class(
NotImplementedError, base=self.runtime_error
)
self.overflow_error: Class = self._builtin_exception_class(
OverflowError, base=self.arithmetic_error
)
self.permission_error: Class = self._builtin_exception_class(
PermissionError, base=self.os_error
)
self.process_lookup_error: Class = self._builtin_exception_class(
ProcessLookupError, base=self.os_error
)
self.recursion_error: Class = self._builtin_exception_class(
RecursionError, base=self.runtime_error
)
self.reference_error: Class = self._builtin_exception_class(ReferenceError)
self.stop_async_iteration: Class = self._builtin_exception_class(
StopAsyncIteration
)
self.stop_iteration: Class = self._builtin_exception_class(StopIteration)
self.system_error: Class = self._builtin_exception_class(SystemError)
self.system_exit: Class = self._builtin_exception_class(
SystemExit, base=self.base_exception
)
self.tab_error: Class = self._builtin_exception_class(
TabError, base=self.indentation_error
)
self.timeout_error: Class = self._builtin_exception_class(
TimeoutError, base=self.os_error
)
self.type_error: Class = self._builtin_exception_class(TypeError)
self.unicode_error: Class = self._builtin_exception_class(
UnicodeError, base=self.value_error
)
self.unbound_local_error: Class = self._builtin_exception_class(
UnboundLocalError, base=self.name_error
)
self.unicode_decode_error: Class = self._builtin_exception_class(
UnicodeDecodeError, base=self.unicode_error
)
self.unicode_encode_error: Class = self._builtin_exception_class(
UnicodeEncodeError, base=self.unicode_error
)
self.unicode_translate_error: Class = self._builtin_exception_class(
UnicodeTranslateError, base=self.unicode_error
)
self.zero_division_error: Class = self._builtin_exception_class(
ZeroDivisionError, base=self.arithmetic_error
)
self.warning: Class = self._builtin_exception_class(Warning)
self.bytes_warning: Class = self._builtin_exception_class(
BytesWarning, base=self.warning
)
self.deprecation_warning: Class = self._builtin_exception_class(
DeprecationWarning, base=self.warning
)
self.future_warning: Class = self._builtin_exception_class(
FutureWarning, base=self.warning
)
self.import_warning: Class = self._builtin_exception_class(
ImportWarning, base=self.warning
)
self.pending_deprecation_warning: Class = self._builtin_exception_class(
PendingDeprecationWarning, base=self.warning
)
self.resource_warning: Class = self._builtin_exception_class(
ResourceWarning, base=self.warning
)
self.runtime_warning: Class = self._builtin_exception_class(
RuntimeWarning, base=self.warning
)
self.syntax_warning: Class = self._builtin_exception_class(
SyntaxWarning, base=self.warning
)
self.unicode_warning: Class = self._builtin_exception_class(
UnicodeWarning, base=self.warning
)
self.user_warning: Class = self._builtin_exception_class(
UserWarning, base=self.warning
)
self.allow_weakrefs = AllowWeakrefsDecorator(
TypeName("__static__", "allow_weakrefs"), self
)
self.dynamic_return = DynamicReturnDecorator(
TypeName("__static__", "dynamic_return"), self
)
self.inline = InlineFunctionDecorator(TypeName("__static__", "inline"), self)
self.donotcompile = DoNotCompileDecorator(
TypeName("__static__", "_donotcompile"), self
)
self.property = PropertyDecorator(
TypeName("builtins", "property"),
self,
pytype=property,
)
self.overload = OverloadDecorator(
TypeName("typing", "overload"),
self,
)
self.cached_property = CachedPropertyDecorator(
TypeName("cinder", "cached_property"), self
)
self.async_cached_property = AsyncCachedPropertyDecorator(
TypeName("cinder", "async_cached_property"), self
)
self.dataclass = DataclassDecorator(self)
self.dataclass_field = DataclassFieldType(self)
self.dataclass_field_function = DataclassFieldFunction(self)
self.constant_types: Mapping[Type[object], Value] = {
str: self.str.exact_type().instance,
int: self.int.exact_type().instance,
float: self.float.exact_type().instance,
complex: self.complex.exact_type().instance,
bytes: self.bytes.instance,
bool: self.bool.instance,
type(None): self.none.instance,
tuple: self.tuple.exact_type().instance,
type(...): self.ellipsis.instance,
frozenset: self.set.instance,
}
self.enum: EnumType = EnumType(self)
self.int_enum: IntEnumType = IntEnumType(self)
self.string_enum: StringEnumType = StringEnumType(self)
self.exc_context_decorator = ContextDecoratorClass(
self, TypeName("__static__", "ExcContextDecorator")
)
self.context_decorator = ContextDecoratorClass(
self,
TypeName("__static__", "ContextDecorator"),
bases=[self.exc_context_decorator],
)
self.crange_iterator = CRangeIterator(self.type)
self.str.exact_type().patch_reflected_method_types(self)
self.native_decorator = NativeDecorator(self)
if spamobj is not None:
T = GenericParameter("T", 0, self)
U = GenericParameter("U", 1, self)
XXGENERIC_TYPE_NAME = GenericTypeName("xxclassloader", "XXGeneric", (T, U))
self.xx_generic: XXGeneric = XXGeneric(
XXGENERIC_TYPE_NAME, self, [self.object]
)
def _builtin_exception_class(
self, exception_type: Type[object], base: Optional[Class] = None
) -> Class:
if base is None:
base = self.exception
return Class(
TypeName("builtins", exception_type.__name__),
self,
bases=[base],
pytype=exception_type,
)
def get_generic_type(
self, generic_type: GenericClass, index: GenericTypeIndex
) -> Class:
instantiations = self._generic_types.setdefault(generic_type, {})
instance = instantiations.get(index)
if instance is not None:
return instance
concrete = generic_type.make_generic_type(index)
instantiations[index] = concrete
concrete.members.update(
{
# pyre-ignore[6]: We trust that the type name is generic here.
k: v.make_generic(concrete, concrete.type_name, self)
for k, v in generic_type.members.items()
}
)
return concrete
def get_literal_type(self, base_type: Value, literal_value: object) -> Value:
# Literals are always exact
base_type = base_type.exact()
key = (base_type, literal_value)
if key not in self._literal_types:
self._literal_types[key] = literal_type = base_type.make_literal(
literal_value, self
)
self._nonliteral_types[literal_type] = base_type
return self._literal_types[key]
def get_nonliteral_type(self, literal_type: Value) -> Value:
return self._nonliteral_types.get(literal_type, literal_type)
def get_exact_type(self, klass: Class) -> Class:
if klass.is_exact:
return klass
if klass in self._exact_types:
return self._exact_types[klass]
exact_klass = klass._create_exact_type()
self._exact_types[klass] = exact_klass
self._inexact_types[exact_klass] = klass
return exact_klass
def get_inexact_type(self, klass: Class) -> Class:
if not klass.is_exact:
return klass
# Some types are always exact by default and have no inexact version. In that case,
# the exact type is the correct value to return.
if klass not in self._inexact_types:
return klass
return self._inexact_types[klass]
def DYNAMIC(self) -> Value:
return self.dynamic.instance
    # NOTE(review): likely meant to be an ``@property`` (its sibling DYNAMIC is
    # consumed as an attribute elsewhere in this module) — decorator may have
    # been lost in extraction; confirm against upstream before relying on this
    # being callable.
    def OBJECT(self) -> Value:
        # The ``builtins.object`` instance value.
        return self.object.instance
def get_union(self, index: GenericTypeIndex) -> Class:
return self.get_generic_type(self.union, index)
class ResolvedTypeRef(TypeRef):
    """A TypeRef that is already resolved to a concrete Class."""

    def __init__(self, type: Class) -> None:
        # Keep the pre-resolved class; ``resolved`` simply hands it back.
        self._resolved = type

    def resolved(self, is_declaration: bool = False) -> Class:
        # ``is_declaration`` is irrelevant here - resolution already happened.
        return self._resolved

    def __repr__(self) -> str:
        return "ResolvedTypeRef({})".format(self.resolved())
class Class(Object["Class"]):
    """Represents a type object at compile time.

    A ``Class`` models a Python type as seen by the static compiler: its name,
    bases/MRO, declared members (slots, methods, nested classes) and exactness
    (an exact type excludes subclasses).  Runtime instances of this type are
    modeled by ``self.instance``.
    """
    # NOTE(review): several zero-argument methods below (``name``,
    # ``instance_name``, ``qualname``, ``type_descr``, ``mro``, ...) are read
    # as plain attributes elsewhere in this module (e.g. ``self.mro[1:]``,
    # ``self.type_descr``, ``self.qualname``, ``{self.name}`` in f-strings),
    # which suggests ``@property``/``@cached_property`` decorators were lost
    # in extraction — confirm against the original file.
    def __init__(
        self,
        type_name: TypeName,
        type_env: TypeEnvironment,
        bases: Optional[List[Class]] = None,
        instance: Optional[Value] = None,
        klass: Optional[Class] = None,
        members: Optional[Dict[str, Value]] = None,
        is_exact: bool = False,
        pytype: Optional[Type[object]] = None,
        is_final: bool = False,
        has_init_subclass: bool = False,
    ) -> None:
        super().__init__(klass or type_env.type)
        assert isinstance(bases, (type(None), list))
        self.type_name = type_name
        self.type_env = type_env
        # Runtime-instance representation of this type.
        self.instance: Value = instance or Object(self)
        self.bases: List[Class] = self._get_bases(bases)
        # Lazily computed by ``mro``.
        self._mro: Optional[List[Class]] = None
        # members are attributes or methods
        self.members: Dict[str, Value] = members or {}
        self.is_exact = is_exact
        self.is_final = is_final
        self.allow_weakrefs = False
        self.donotcompile = False
        # This will cause all built-in method calls on the type to be done dynamically
        self.dynamic_builtinmethod_dispatch = False
        # When backed by a real Python type, reflect its members (below).
        self.pytype = pytype
        if self.pytype is not None:
            self.make_type_dict()
        # True if this class overrides __init_subclass__
        self.has_init_subclass = has_init_subclass
        # track AST node of each member until finish_bind, for error reporting
        self._member_nodes: Dict[str, AST] = {}
    def _get_bases(self, bases: Optional[List[Class]]) -> List[Class]:
        # Normalize declared bases; no bases means inheriting from ``object``.
        if bases is None:
            return [self.klass.type_env.object]
        ret = []
        for b in bases:
            ret.append(b)
            # Can't check for dynamic because that'd be a cyclic dependency
            if isinstance(b, DynamicClass):
                # If any of the defined bases is dynamic,
                # stop processing, because it doesn't matter
                # what the rest of them are.
                break
        return ret
    def make_type_dict(self) -> None:
        # Reflect methods/descriptors of the backing Python type into members.
        pytype = self.pytype
        if pytype is None:
            return
        result: Dict[str, Value] = {}
        for k in pytype.__dict__.keys():
            # Constructors might set custom members, make sure to respect those.
            if k in self.members:
                continue
            try:
                obj = pytype.__dict__[k]
            except AttributeError:
                continue
            if isinstance(obj, (MethodDescriptorType, WrapperDescriptorType)):
                result[k] = reflect_method_desc(obj, self, self.type_env)
            elif isinstance(obj, BuiltinFunctionType):
                result[k] = reflect_builtin_function(obj, self, self.type_env)
            elif isinstance(obj, GetSetDescriptorType):
                result[k] = GetSetDescriptor(self.type_env.getset_descriptor)
        self.members.update(result)
    def make_subclass(self, name: TypeName, bases: List[Class]) -> Class:
        """Creates a new subclass of this class"""
        return Class(name, self.type_env, bases)
    def name(self) -> str:
        return f"Type[{self.instance_name}]"
    def name_with_exact(self) -> str:
        return f"Type[{self.instance_name_with_exact}]"
    def instance_name(self) -> str:
        # We need to break the loop for `builtins.type`, as `builtins.type`'s instance is a Class.
        if type(self.instance) == Class:
            return "type"
        return self.instance.name
    def instance_name_with_exact(self) -> str:
        name = self.instance.name
        if self.is_exact:
            return f"Exact[{name}]"
        return name
    def qualname(self) -> str:
        return self.type_name.qualname
    def declare_class(self, node: ClassDef, klass: Class) -> None:
        # Record a nested class declaration as a member.
        self._member_nodes[node.name] = node
        self.members[node.name] = klass
    def declare_variable(self, node: AnnAssign, module: ModuleTable) -> None:
        # class C:
        # x: foo
        target = node.target
        if isinstance(target, ast.Name):
            self.define_slot(
                target.id,
                target,
                TypeRef(module, self.qualname, node.annotation),
                # Note down whether the slot has been assigned a value.
                assignment=node if node.value else None,
                declared_on_class=True,
            )
    def declare_variables(self, node: Assign, module: ModuleTable) -> None:
        # Un-annotated assignments in a class body are not tracked.
        pass
    def reflected_method_types(self, type_env: TypeEnvironment) -> Dict[str, Class]:
        # Overridden by subclasses to fix up return types of reflected methods.
        return {}
    def patch_reflected_method_types(self, type_env: TypeEnvironment) -> None:
        # Apply the return-type corrections declared by reflected_method_types.
        for name, return_type in self.reflected_method_types(type_env).items():
            member = self.members[name]
            assert isinstance(member, BuiltinMethodDescriptor)
            member.return_type = ResolvedTypeRef(return_type)
    def resolve_name(self, name: str) -> Optional[Value]:
        return self.members.get(name)
    def readable_name(self) -> str:
        return self.type_name.readable_name
    def is_generic_parameter(self) -> bool:
        """Returns True if this Class represents a generic parameter"""
        return False
    def contains_generic_parameters(self) -> bool:
        """Returns True if this class contains any generic parameters"""
        return False
    def is_generic_type(self) -> bool:
        """Returns True if this class is a generic type"""
        return False
    def is_generic_type_definition(self) -> bool:
        """Returns True if this class is a generic type definition.
        It'll be a generic type which still has unbound generic type
        parameters"""
        return False
    def generic_type_def(self) -> Optional[Class]:
        """Gets the generic type definition that defined this class"""
        return None
    def make_generic_type(
        self,
        index: Tuple[Class, ...],
    ) -> Optional[Class]:
        """Binds the generic type parameters to a generic type definition"""
        return None
    def resolve_attr(
        self, node: ast.Attribute, visitor: GenericVisitor[object]
    ) -> Optional[Value]:
        # Walk the MRO looking for a member whose descriptor __get__ resolves.
        for base in self.mro:
            member = base.members.get(node.attr)
            if member is not None:
                res = member.resolve_descr_get(node, None, self, visitor)
                if res is not None:
                    return res
        return super().resolve_attr(node, visitor)
    def bind_binop(
        self, node: ast.BinOp, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> bool:
        # ``X | Y`` between classes builds a Union type (PEP 604 style).
        if isinstance(node.op, ast.BitOr):
            rtype = visitor.get_type(node.right)
            if rtype is visitor.type_env.none.instance:
                rtype = visitor.type_env.none
            if rtype is visitor.type_env.DYNAMIC:
                rtype = visitor.type_env.dynamic
            if not isinstance(rtype, Class):
                visitor.syntax_error(
                    f"unsupported operand type(s) for |: {self.name} and {rtype.name}",
                    node,
                )
                return False
            union = visitor.type_env.get_union((self, rtype))
            visitor.set_type(node, union)
            return True
        return super().bind_binop(node, visitor, type_ctx)
    def can_be_narrowed(self) -> bool:
        return True
    def type_descr(self) -> TypeDescr:
        # Exactness is encoded with a trailing "!" component.
        if self.is_exact:
            return self.type_name.type_descr + ("!",)
        return self.type_name.type_descr
    def _resolve_dunder(self, name: str) -> Tuple[Class, Optional[Value]]:
        # Find ``name`` along the MRO, returning (defining class, value).
        # A dynamic base makes the result unknowable; reaching the end means
        # the attribute is only defined on ``object``.
        klass = self.type_env.object
        for klass in self.mro:
            if klass is self.type_env.dynamic:
                return self.type_env.dynamic, None
            if val := klass.members.get(name):
                return klass, val
        assert klass.inexact_type() is self.type_env.object
        return self.type_env.object, None
    def bind_call(
        self, node: ast.Call, visitor: TypeBinder, type_ctx: Optional[Class]
    ) -> NarrowingEffect:
        # Type-check a constructor call and decide whether it can be invoked
        # statically (known __new__/__init__) or must go through dynamic call.
        self_type = self.instance
        new_mapping: Optional[ArgMapping] = None
        init_mapping: Optional[ArgMapping] = None
        dynamic_call = True
        klass, new = self._resolve_dunder("__new__")
        dynamic_new = klass is self.type_env.dynamic
        object_new = klass.inexact_type() is self.type_env.object
        if not object_new and isinstance(new, Callable):
            new_mapping, self_type = new.map_call(
                node,
                visitor,
                None,
                [node.func] + node.args,
            )
            if new_mapping.can_call_statically():
                dynamic_call = False
            else:
                dynamic_new = True
        object_init = False
        # if __new__ returns something that isn't a subclass of
        # our type then __init__ isn't invoked
        if not dynamic_new and self_type.klass.can_assign_from(self.instance.klass):
            klass, init = self._resolve_dunder("__init__")
            dynamic_call = dynamic_call or klass is self.type_env.dynamic
            object_init = klass.inexact_type() is self.type_env.object
            if not object_init and isinstance(init, Callable):
                init_mapping = ArgMapping(init, node, visitor, None)
                init_mapping.bind_args(visitor, True)
                if init_mapping.can_call_statically():
                    dynamic_call = False
        if object_new and object_init:
            if node.args or node.keywords:
                visitor.syntax_error(f"{self.instance_name}() takes no arguments", node)
            else:
                dynamic_call = False
        if new_mapping is not None and init_mapping is not None:
            # If we have both a __new__ and __init__ function we can't currently
            # invoke it statically, as the arguments could have side effects.
            # In the future we could potentially do better by shuffling into
            # temporaries, but this is pretty rare.
            dynamic_call = True
        if not self.is_exact and not self.is_final:
            # A subclass could override __new__/__init__, so we must be dynamic.
            dynamic_call = True
        visitor.set_type(node, self_type)
        visitor.set_node_data(
            node, ClassCallInfo, ClassCallInfo(new_mapping, init_mapping, dynamic_call)
        )
        if dynamic_call:
            for arg in node.args:
                visitor.visitExpectedType(
                    arg, visitor.type_env.DYNAMIC, CALL_ARGUMENT_CANNOT_BE_PRIMITIVE
                )
            for arg in node.keywords:
                visitor.visitExpectedType(
                    arg.value,
                    visitor.type_env.DYNAMIC,
                    CALL_ARGUMENT_CANNOT_BE_PRIMITIVE,
                )
        return NO_EFFECT
    def emit_call(self, node: ast.Call, code_gen: Static310CodeGenerator) -> None:
        # Emit the constructor call using the decision recorded by bind_call.
        call_info = code_gen.get_node_data(node, ClassCallInfo)
        if call_info.dynamic_call:
            return super().emit_call(node, code_gen)
        new = call_info.new
        if new:
            new.emit(code_gen)
        else:
            # No custom __new__: allocate the instance directly.
            code_gen.emit("TP_ALLOC", self.type_descr)
        init = call_info.init
        if init is not None:
            code_gen.emit("DUP_TOP")
            init.emit(code_gen)
            code_gen.emit("POP_TOP")  # pop None
    def can_assign_from(self, src: Class) -> bool:
        """checks to see if the src value can be assigned to this value. Currently
        you can assign a derived type to a base type. You cannot assign a primitive
        type to an object type.
        At some point we may also support some form of interfaces via protocols if we
        implement a more efficient form of interface dispatch than doing the dictionary
        lookup for the member."""
        return src is self or (
            (not self.is_exact or src.instance.nonliteral() is self.instance)
            and not isinstance(src, CType)
            and src.instance.nonliteral().klass.is_subclass_of(self)
        )
    def __repr__(self) -> str:
        return f"<{self.name} class>"
    def exact(self) -> Class:
        return self
    def inexact(self) -> Class:
        return self
    def exact_type(self) -> Class:
        return self.type_env.get_exact_type(self)
    def inexact_type(self) -> Class:
        return self.type_env.get_inexact_type(self)
    def _create_exact_type(self) -> Class:
        # Clone this class (and its instance) into an exact variant; cached by
        # TypeEnvironment.get_exact_type, so this runs at most once per class.
        instance = copy(self.instance)
        klass = type(self)(
            type_name=self.type_name,
            type_env=self.type_env,
            bases=self.bases,
            klass=self.klass,
            members=self.members,
            instance=instance,
            is_exact=True,
            pytype=self.pytype,
            is_final=self.is_final,
            has_init_subclass=self.has_init_subclass,
        )
        # We need to point the instance's klass to the new class we just created.
        instance.klass = klass
        # `donotcompile` and `allow_weakrefs` are set via decorators after construction, and we
        # need to persist these for consistency.
        klass.donotcompile = self.donotcompile
        klass.allow_weakrefs = self.allow_weakrefs
        return klass
    def isinstance(self, src: Value) -> bool:
        return src.klass.is_subclass_of(self)
    def is_subclass_of(self, src: Class) -> bool:
        if isinstance(src, UnionType):
            # This is an important subtlety - we want the subtyping relation to satisfy
            # self < A | B if either self < A or self < B. Requiring both wouldn't be correct,
            # as we want to allow assignments of A into A | B.
            return any(self.is_subclass_of(t) for t in src.type_args)
        return src.exact_type() in self.mro
    def _check_compatible_property_override(
        self, override: Value, inherited: Value
    ) -> bool:
        # Properties can be overridden by cached properties, and vice-versa.
        valid_sync_override = isinstance(
            override, (CachedPropertyMethod, PropertyMethod)
        ) and isinstance(inherited, (CachedPropertyMethod, PropertyMethod))
        valid_async_override = isinstance(
            override, (AsyncCachedPropertyMethod, PropertyMethod)
        ) and isinstance(inherited, (AsyncCachedPropertyMethod, PropertyMethod))
        return valid_sync_override or valid_async_override
    def check_incompatible_override(
        self, override: Value, inherited: Value, module: ModuleTable
    ) -> None:
        # TODO: There's more checking we should be doing to ensure
        # this is a compatible override
        if isinstance(override, TransparentDecoratedMethod):
            override = override.function
        if not inherited.can_override(override, self, module):
            raise TypedSyntaxError(f"class cannot hide inherited member: {inherited!r}")
    def finish_bind(self, module: ModuleTable, klass: Class | None) -> Optional[Value]:
        # Finalize every member, checking overrides against base classes.
        # Members may add further members during their own finish_bind, so we
        # iterate until the work set is exhausted.
        todo = set(self.members.keys())
        finished = set()
        while todo:
            name = todo.pop()
            my_value = self.members[name]
            new_value = self._finish_bind_one(name, my_value, module)
            if new_value is None:
                del self.members[name]
            else:
                self.members[name] = new_value
            finished.add(name)
            # account for the possibility that finish_bind of one member added new members
            todo.update(self.members.keys())
            todo.difference_update(finished)
        # These were just for error reporting here, don't need them anymore
        self._member_nodes = {}
        return self
    def _finish_bind_one(
        self, name: str, my_value: Value, module: ModuleTable
    ) -> Value | None:
        # Finalize a single member; returns the (possibly replaced) member, or
        # None if it should be removed (e.g. it reuses a base-class slot).
        node = self.inexact_type()._member_nodes.get(name, None)
        with module.error_context(node):
            new_value = my_value.finish_bind(module, self)
            if new_value is None:
                return None
            my_value = new_value
            for base in self.mro[1:]:
                value = base.members.get(name)
                if value is not None:
                    self.check_incompatible_override(my_value, value, module)
                if isinstance(value, Slot) and isinstance(my_value, Slot):
                    # use the base class slot
                    if value.is_final or not value.assigned_on_class:
                        return None
                    # For class values we are introducing a new slot which
                    # can be accessed from the derived type. We end up
                    # creating a slot with a default value so the value can
                    # be stored on the instance.
                    my_value.override = value
                    my_value.type_ref = value.type_ref
        return my_value
    def define_slot(
        self,
        name: str,
        node: AST,
        type_ref: Optional[TypeRef] = None,
        assignment: Optional[AST] = None,
        declared_on_class: bool = False,
    ) -> None:
        # Declare (or refine) an instance slot; re-declaring with a new type
        # annotation, or conflicting with a non-slot member, is an error.
        existing = self.members.get(name)
        if existing is None:
            self._member_nodes[name] = node
            self.members[name] = Slot(
                type_ref,
                name,
                self,
                assignment,
                declared_on_class=declared_on_class,
            )
        elif isinstance(existing, Slot):
            if not existing.type_ref:
                existing.type_ref = type_ref
                self._member_nodes[name] = node
            elif type_ref:
                raise TypedSyntaxError(
                    f"Cannot re-declare member '{name}' in '{self.instance.name}'"
                )
            existing.update(assignment, declared_on_class)
        else:
            raise TypedSyntaxError(
                f"slot conflicts with other member {name} in {self.name}"
            )
    def declare_function(self, func: Function) -> None:
        # Add a function member; multiple declarations under the same name
        # (e.g. @overload) are collected into a FunctionGroup.
        existing = self.members.get(func.func_name)
        new_member = func
        if existing is not None:
            if isinstance(existing, Function):
                new_member = FunctionGroup([existing, new_member], func.klass.type_env)
            elif isinstance(existing, FunctionGroup):
                existing.functions.append(new_member)
                new_member = existing
            else:
                raise TypedSyntaxError(
                    f"function conflicts with other member {func.func_name} in {self.name}"
                )
        func.set_container_type(self)
        self._member_nodes[func.func_name] = func.node
        self.members[func.func_name] = new_member
        if (
            func.func_name == "__init__"
            and isinstance(func, Function)
            and func.node.args.args
        ):
            # Scan __init__ for ``self.x = ...`` assignments to infer slots.
            node = func.node
            if isinstance(node, FunctionDef):
                InitVisitor(func.module, self, node).visit(node.body)
    def mro(self) -> Sequence[Class]:
        # Method resolution order, computed lazily and cached in self._mro.
        mro = self._mro
        if mro is None:
            if not all(self.bases):
                # TODO: We can't compile w/ unknown bases
                mro = []
            else:
                mro = _mro(self)
            self._mro = mro
        return mro
    def bind_generics(
        self,
        name: GenericTypeName,
        type_env: TypeEnvironment,
    ) -> Class:
        return self
    def find_slot(self, node: ast.Attribute) -> Optional[Slot[Class]]:
        # Locate a non-ClassVar slot for the attribute along the MRO.
        for base in self.mro:
            member = base.members.get(node.attr)
            if (
                member is not None
                and isinstance(member, Slot)
                and not member.is_classvar
            ):
                return member
        return None
    def get_own_member(self, name: str) -> Optional[Value]:
        return self.members.get(name)
    def get_parent_member(self, name: str) -> Optional[Value]:
        # the first entry of mro is the class itself
        for b in self.mro[1:]:
            slot = b.members.get(name, None)
            if slot:
                return slot
    def get_member(self, name: str) -> Optional[Value]:
        member = self.get_own_member(name)
        if member:
            return member
        return self.get_parent_member(name)
    def get_own_final_method_names(self) -> Sequence[str]:
        # Names of methods declared final directly on this class.
        final_methods = []
        for name, value in self.members.items():
            if isinstance(value, DecoratedMethod) and value.is_final:
                final_methods.append(name)
            elif isinstance(value, Function) and value.is_final:
                final_methods.append(name)
        return final_methods
    def unwrap(self) -> Class:
        return self
    def emit_type_check(self, src: Class, code_gen: Static310CodeGenerator) -> None:
        # Only emit a runtime CAST when the source type is unknown (dynamic);
        # otherwise the assignment must already be statically valid.
        if src is self.type_env.dynamic:
            code_gen.emit("CAST", self.type_descr)
        else:
            assert self.can_assign_from(src)
    def emit_extra_members(
        self, node: ClassDef, code_gen: Static310CodeGenerator
    ) -> None:
        # Hook for subclasses (e.g. dataclasses) to emit additional members.
        pass
class BuiltinFunction(Callable[Class]):
    """A compile-time wrapper for a C-implemented builtin function.

    When ``args``/``return_type`` are unknown the function is treated as
    returning dynamic and taking anything.
    """
    def __init__(
        self,
        func_name: str,
        module_name: str,
        klass: Optional[Class],
        type_env: TypeEnvironment,
        args: Optional[List[Parameter]] = None,
        return_type: Optional[TypeRef] = None,
    ) -> None:
        assert isinstance(return_type, (TypeRef, type(None)))
        # Positional-only parameters cannot be passed by keyword, so they are
        # excluded from the by-name mapping.
        args_by_name = (
            {}
            if args is None
            else {arg.name: arg for arg in args if arg.style is not ParamStyle.POSONLY}
        )
        # Positional args to Callable.__init__:
        # (klass, func_name, module_name, args, args_by_name,
        #  num_required_args, vararg, kwarg, return_type)
        super().__init__(
            type_env.builtin_method_desc,
            func_name,
            module_name,
            args,
            args_by_name,
            0,
            None,
            None,
            return_type or ResolvedTypeRef(type_env.dynamic),
        )
        self.set_container_type(klass)
    def can_override(self, override: Value, klass: Class, module: ModuleTable) -> bool:
        # A builtin can only be overridden by a (Python) Function.
        if not isinstance(override, Function):
            raise TypedSyntaxError(f"class cannot hide inherited member: {self!r}")
        return super().can_override(override, klass, module)
    def emit_call(self, node: ast.Call, code_gen: Static310CodeGenerator) -> None:
        # Keyword arguments force the generic (dynamic) call path.
        if node.keywords:
            return super().emit_call(node, code_gen)
        code_gen.set_lineno(node)
        self.emit_call_self(node, code_gen)
    def make_generic(
        self, new_type: Class, name: GenericTypeName, type_env: TypeEnvironment
    ) -> Value:
        # Rebuild this function with its parameter/return types specialized
        # for a concrete instantiation of a generic class.
        cur_args = self.args
        cur_ret_type = self.return_type
        if cur_args is not None and cur_ret_type is not None:
            new_args = list(arg.bind_generics(name, type_env) for arg in cur_args)
            new_ret_type = cur_ret_type.resolved().bind_generics(name, type_env)
            return BuiltinFunction(
                self.func_name,
                self.module_name,
                new_type,
                new_type.type_env,
                new_args,
                ResolvedTypeRef(new_ret_type),
            )
        else:
            # Untyped builtin: nothing to specialize beyond the container.
            return BuiltinFunction(
                self.func_name,
                self.module_name,
                new_type,
                new_type.type_env,
                None,
                self.return_type,
            )
class BuiltinNewFunction(BuiltinFunction):
    """A builtin ``__new__`` whose return type is inferred from its class argument."""

    def map_call(
        self,
        node: ast.Call,
        visitor: TypeBinder,
        self_expr: Optional[ast.expr] = None,
        args_override: Optional[List[ast.expr]] = None,
        descr_override: Optional[TypeDescr] = None,
    ) -> Tuple[ArgMapping, Value]:
        mapping = ArgMapping(
            self, node, visitor, self_expr, args_override, descr_override
        )
        mapping.bind_args(visitor)
        result_type = visitor.type_env.DYNAMIC
        if args_override:
            # The first argument to __new__ is the class being instantiated;
            # when it is statically known, the result is an instance of it.
            first_arg_type = visitor.get_type(args_override[0])
            if isinstance(first_arg_type, Class):
                result_type = first_arg_type.instance
                if result_type is self.klass.type_env.type:
                    # A bare "type" argument tells us nothing about what
                    # instance __new__ will actually produce.
                    result_type = visitor.type_env.DYNAMIC
        return mapping, result_type
def parse_typed_signature(
    sig: Dict[str, object],
    klass: Optional[Class],
    type_env: TypeEnvironment,
) -> Tuple[List[Parameter], Class]:
    """Parse a ``__typed_signature__`` dict into (parameters, return type).

    When ``klass`` is given, an implicit positional-only ``self`` parameter of
    that class is prepended at index 0; the declared args then start at 1.
    """
    raw_args = sig["args"]
    assert isinstance(raw_args, list)
    params: List[Parameter] = []
    if klass is not None:
        params.append(
            Parameter(
                "self", 0, ResolvedTypeRef(klass), False, None, ParamStyle.POSONLY
            )
        )
    params.extend(
        parse_param(arg, idx + 1, type_env) for idx, arg in enumerate(raw_args)
    )
    raw_return = sig["return"]
    assert isinstance(raw_return, dict)
    return params, parse_type(raw_return, type_env)
# Concrete runtime type of C-implemented builtin functions (e.g. ``len``);
# used by the reflection helpers below to recognize builtins.
BuiltinFunctionType = type(len)
def reflect_builtin_function(
    obj: BuiltinFunctionType,
    klass: Optional[Class],
    type_env: TypeEnvironment,
) -> BuiltinFunction:
    """Reflect a C builtin function into a compile-time BuiltinFunction.

    Builtins carrying a ``__typed_signature__`` get a fully typed wrapper;
    otherwise an untyped wrapper is produced, specialized for ``__new__`` on a
    class so its return type can later be inferred from the class argument.
    """
    typed_sig = getattr(obj, "__typed_signature__", None)
    if typed_sig is not None:
        # NOTE(review): ``None`` (not ``klass``) is passed here, so no implicit
        # ``self`` parameter gets prepended - presumably the typed signature
        # already spells out every parameter; confirm against callers.
        params, return_type = parse_typed_signature(typed_sig, None, type_env)
        return BuiltinFunction(
            obj.__name__,
            obj.__module__,
            klass,
            type_env,
            params,
            ResolvedTypeRef(return_type),
        )
    if obj.__name__ == "__new__" and klass is not None:
        return BuiltinNewFunction(obj.__name__, obj.__module__, klass, type_env)
    return BuiltinFunction(obj.__name__, obj.__module__, klass, type_env)
return method | null |
185,731 | from __future__ import annotations
from __static__ import chkdict, chklist
import ast
import dataclasses
import sys
from ast import (
AnnAssign,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Bytes,
Call,
ClassDef,
cmpop,
Constant,
copy_location,
expr,
FunctionDef,
NameConstant,
Num,
Return,
Starred,
Str,
)
from copy import copy
from enum import Enum, IntEnum
from functools import cached_property
from types import (
BuiltinFunctionType,
GetSetDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
)
from typing import (
Callable as typingCallable,
cast,
ClassVar as typingClassVar,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from cinderx.static import ( # noqa: F401
FAST_LEN_ARRAY,
FAST_LEN_DICT,
FAST_LEN_INEXACT,
FAST_LEN_LIST,
FAST_LEN_SET,
FAST_LEN_STR,
FAST_LEN_TUPLE,
PRIM_OP_ADD_DBL,
PRIM_OP_ADD_INT,
PRIM_OP_AND_INT,
PRIM_OP_DIV_DBL,
PRIM_OP_DIV_INT,
PRIM_OP_DIV_UN_INT,
PRIM_OP_EQ_DBL,
PRIM_OP_EQ_INT,
PRIM_OP_GE_DBL,
PRIM_OP_GE_INT,
PRIM_OP_GE_UN_INT,
PRIM_OP_GT_DBL,
PRIM_OP_GT_INT,
PRIM_OP_GT_UN_INT,
PRIM_OP_INV_INT,
PRIM_OP_LE_DBL,
PRIM_OP_LE_INT,
PRIM_OP_LE_UN_INT,
PRIM_OP_LSHIFT_INT,
PRIM_OP_LT_DBL,
PRIM_OP_LT_INT,
PRIM_OP_LT_UN_INT,
PRIM_OP_MOD_DBL,
PRIM_OP_MOD_INT,
PRIM_OP_MOD_UN_INT,
PRIM_OP_MUL_DBL,
PRIM_OP_MUL_INT,
PRIM_OP_NE_DBL,
PRIM_OP_NE_INT,
PRIM_OP_NEG_DBL,
PRIM_OP_NEG_INT,
PRIM_OP_NOT_INT,
PRIM_OP_OR_INT,
PRIM_OP_POW_DBL,
PRIM_OP_POW_INT,
PRIM_OP_POW_UN_INT,
PRIM_OP_RSHIFT_INT,
PRIM_OP_RSHIFT_UN_INT,
PRIM_OP_SUB_DBL,
PRIM_OP_SUB_INT,
PRIM_OP_XOR_INT,
SEQ_ARRAY_INT64,
SEQ_CHECKED_LIST,
SEQ_LIST,
SEQ_LIST_INEXACT,
SEQ_REPEAT_INEXACT_NUM,
SEQ_REPEAT_INEXACT_SEQ,
SEQ_REPEAT_PRIMITIVE_NUM,
SEQ_REPEAT_REVERSED,
SEQ_SUBSCR_UNCHECKED,
SEQ_TUPLE,
TYPED_BOOL,
TYPED_DOUBLE,
TYPED_INT16,
TYPED_INT32,
TYPED_INT64,
TYPED_INT8,
TYPED_INT_64BIT,
TYPED_OBJECT,
TYPED_UINT16,
TYPED_UINT32,
TYPED_UINT64,
TYPED_UINT8,
)
from ..consts import CO_STATICALLY_COMPILED
from ..errors import TypedSyntaxError
from ..optimizer import AstOptimizer
from ..pyassem import Block, FVC_REPR
from ..pycodegen import CodeGenerator, FOR_LOOP
from ..symbols import FunctionScope
from ..unparse import to_expr
from ..visitor import ASTRewriter, TAst
from .effects import NarrowingEffect, NO_EFFECT, TypeState
from .visitor import GenericVisitor
def common_sequence_emit_len(
    node: ast.Call, code_gen: Static310CodeGenerator, oparg: int, boxed: bool
) -> None:
    """Emit bytecode computing the length of a known sequence type.

    Args:
        node: the ``len(...)`` call being compiled; exactly one positional
            argument (the sequence) is expected.
        code_gen: the static-Python code generator to emit into.
        oparg: a ``FAST_LEN_*`` constant selecting the sequence kind.
        boxed: when True, box the primitive int64 length into a Python int.

    Raises a syntax error (via ``code_gen.syntax_error``) when ``len`` was
    called with more or fewer than one positional argument.
    """
    if len(node.args) != 1:
        # Fixed message, no placeholders: the original used an unnecessary
        # f-string prefix (F541).
        raise code_gen.syntax_error(
            "Can only pass a single argument when checking sequence length", node
        )
    code_gen.visit(node.args[0])
    code_gen.emit("FAST_LEN", oparg)
    if boxed:
        # FAST_LEN leaves a primitive int64 on the stack; box it when the
        # caller needs a real Python object.
        code_gen.emit("PRIMITIVE_BOX", TYPED_INT64)
185,732 | from __future__ import annotations
from __static__ import chkdict, chklist
import ast
import dataclasses
import sys
from ast import (
AnnAssign,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Bytes,
Call,
ClassDef,
cmpop,
Constant,
copy_location,
expr,
FunctionDef,
NameConstant,
Num,
Return,
Starred,
Str,
)
from copy import copy
from enum import Enum, IntEnum
from functools import cached_property
from types import (
BuiltinFunctionType,
GetSetDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
)
from typing import (
Callable as typingCallable,
cast,
ClassVar as typingClassVar,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from cinderx.static import ( # noqa: F401
FAST_LEN_ARRAY,
FAST_LEN_DICT,
FAST_LEN_INEXACT,
FAST_LEN_LIST,
FAST_LEN_SET,
FAST_LEN_STR,
FAST_LEN_TUPLE,
PRIM_OP_ADD_DBL,
PRIM_OP_ADD_INT,
PRIM_OP_AND_INT,
PRIM_OP_DIV_DBL,
PRIM_OP_DIV_INT,
PRIM_OP_DIV_UN_INT,
PRIM_OP_EQ_DBL,
PRIM_OP_EQ_INT,
PRIM_OP_GE_DBL,
PRIM_OP_GE_INT,
PRIM_OP_GE_UN_INT,
PRIM_OP_GT_DBL,
PRIM_OP_GT_INT,
PRIM_OP_GT_UN_INT,
PRIM_OP_INV_INT,
PRIM_OP_LE_DBL,
PRIM_OP_LE_INT,
PRIM_OP_LE_UN_INT,
PRIM_OP_LSHIFT_INT,
PRIM_OP_LT_DBL,
PRIM_OP_LT_INT,
PRIM_OP_LT_UN_INT,
PRIM_OP_MOD_DBL,
PRIM_OP_MOD_INT,
PRIM_OP_MOD_UN_INT,
PRIM_OP_MUL_DBL,
PRIM_OP_MUL_INT,
PRIM_OP_NE_DBL,
PRIM_OP_NE_INT,
PRIM_OP_NEG_DBL,
PRIM_OP_NEG_INT,
PRIM_OP_NOT_INT,
PRIM_OP_OR_INT,
PRIM_OP_POW_DBL,
PRIM_OP_POW_INT,
PRIM_OP_POW_UN_INT,
PRIM_OP_RSHIFT_INT,
PRIM_OP_RSHIFT_UN_INT,
PRIM_OP_SUB_DBL,
PRIM_OP_SUB_INT,
PRIM_OP_XOR_INT,
SEQ_ARRAY_INT64,
SEQ_CHECKED_LIST,
SEQ_LIST,
SEQ_LIST_INEXACT,
SEQ_REPEAT_INEXACT_NUM,
SEQ_REPEAT_INEXACT_SEQ,
SEQ_REPEAT_PRIMITIVE_NUM,
SEQ_REPEAT_REVERSED,
SEQ_SUBSCR_UNCHECKED,
SEQ_TUPLE,
TYPED_BOOL,
TYPED_DOUBLE,
TYPED_INT16,
TYPED_INT32,
TYPED_INT64,
TYPED_INT8,
TYPED_INT_64BIT,
TYPED_OBJECT,
TYPED_UINT16,
TYPED_UINT32,
TYPED_UINT64,
TYPED_UINT8,
)
from ..consts import CO_STATICALLY_COMPILED
from ..errors import TypedSyntaxError
from ..optimizer import AstOptimizer
from ..pyassem import Block, FVC_REPR
from ..pycodegen import CodeGenerator, FOR_LOOP
from ..symbols import FunctionScope
from ..unparse import to_expr
from ..visitor import ASTRewriter, TAst
from .effects import NarrowingEffect, NO_EFFECT, TypeState
from .visitor import GenericVisitor
class Block:
    """A basic block of instructions in the assembler flow graph.

    Blocks are linked two ways: explicit jump edges (``outEdges``) and
    textual fall-through order (``next``/``prev``).
    """

    # Monotonically increasing counter; each Block records its allocation
    # order in ``alloc_id``.
    allocated_block_count: ClassVar[int] = 0

    def __init__(self, label=""):
        self.insts: List[Instruction] = []  # instructions in program order
        self.outEdges = set()  # blocks reachable from here via jumps
        self.label: str = label
        self.bid: int | None = None  # block id, assigned during assembly
        self.next: Block | None = None  # fall-through successor
        self.prev: Block | None = None  # fall-through predecessor
        self.returns: bool = False  # set by emit() on RETURN_* instructions
        self.offset: int = 0
        self.seen: bool = False  # visited during stack depth calculation
        self.startdepth: int = -1  # stack depth on entry; -1 = not computed
        self.is_exit: bool = False
        self.no_fallthrough: bool = False
        self.num_predecessors: int = 0
        self.alloc_id: int = Block.allocated_block_count
        Block.allocated_block_count += 1

    def __repr__(self):
        """Short debugging form: id, start depth, fall-through target."""
        data = []
        data.append(f"id={self.bid}")
        data.append(f"startdepth={self.startdepth}")
        if self.next:
            data.append(f"next={self.next.bid}")
        extras = ", ".join(data)
        if self.label:
            return f"<block {self.label} {extras}>"
        else:
            return f"<block {extras}>"

    def __str__(self):
        """Verbose form including every instruction in the block."""
        insts = map(str, self.insts)
        insts = "\n".join(insts)
        return f"<block label={self.label} bid={self.bid} startdepth={self.startdepth}: {insts}>"

    def emit(self, instr: Instruction) -> None:
        """Append ``instr``, tracking whether the block ends in a return."""
        # TODO(T128853358): The RETURN_PRIMITIVE logic should live in the Static flow graph.
        if instr.opname in ("RETURN_VALUE", "RETURN_PRIMITIVE"):
            self.returns = True
        self.insts.append(instr)

    def getInstructions(self):
        """Return the block's instruction list (not a copy)."""
        return self.insts

    def addOutEdge(self, block):
        """Record a jump edge from this block to ``block``."""
        self.outEdges.add(block)

    def addNext(self, block):
        """Link ``block`` as the unique fall-through successor."""
        assert self.next is None, self.next
        self.next = block
        assert block.prev is None, block.prev
        block.prev = self

    def removeNext(self):
        """Unlink the fall-through successor (both directions)."""
        assert self.next is not None
        next = self.next
        next.prev = None
        self.next = None

    def has_return(self):
        """True when the block's final instruction is a return."""
        # TODO(T128853358): The RETURN_PRIMITIVE logic should live in the Static flow graph.
        return self.insts and self.insts[-1].opname in (
            "RETURN_VALUE",
            "RETURN_PRIMITIVE",
        )

    def get_children(self):
        """All successors: jump targets plus the fall-through block."""
        return list(self.outEdges) + ([self.next] if self.next is not None else [])

    def getContainedGraphs(self):
        """Return all graphs contained within this block.

        For example, a MAKE_FUNCTION block will contain a reference to
        the graph for the function body.
        """
        # NOTE(review): this indexes instructions like tuples (len(inst),
        # inst[1]); confirm Instruction supports sequence access -- this
        # looks like legacy code predating the Instruction class.
        contained = []
        for inst in self.insts:
            if len(inst) == 1:
                continue
            op = inst[1]
            if hasattr(op, "graph"):
                contained.append(op.graph)
        return contained

    def copy(self):
        """Duplicate the block (instructions deep-copied)."""
        # Cannot copy block if it has fallthrough, since a block can have only one
        # fallthrough predecessor
        assert self.no_fallthrough
        result = Block()
        result.insts = [instr.copy() for instr in self.insts]
        result.is_exit = self.is_exit
        result.no_fallthrough = True
        return result
def common_sequence_emit_jumpif(
    test: AST,
    next: Block,
    is_if_true: bool,
    code_gen: Static310CodeGenerator,
    oparg: int,
) -> None:
    """Emit a truthiness test on a known sequence type.

    The sequence's length (FAST_LEN with ``oparg``) stands in for its truth
    value; control jumps to ``next`` when the branch condition holds.
    """
    code_gen.visit(test)
    code_gen.emit("FAST_LEN", oparg)
    # Non-zero length == truthy; pick the jump direction from is_if_true.
    jump_opcode = "POP_JUMP_IF_NONZERO" if is_if_true else "POP_JUMP_IF_ZERO"
    code_gen.emit(jump_opcode, next)
185,733 | from __future__ import annotations
from __static__ import chkdict, chklist
import ast
import dataclasses
import sys
from ast import (
AnnAssign,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Bytes,
Call,
ClassDef,
cmpop,
Constant,
copy_location,
expr,
FunctionDef,
NameConstant,
Num,
Return,
Starred,
Str,
)
from copy import copy
from enum import Enum, IntEnum
from functools import cached_property
from types import (
BuiltinFunctionType,
GetSetDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
)
from typing import (
Callable as typingCallable,
cast,
ClassVar as typingClassVar,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from cinderx.static import ( # noqa: F401
FAST_LEN_ARRAY,
FAST_LEN_DICT,
FAST_LEN_INEXACT,
FAST_LEN_LIST,
FAST_LEN_SET,
FAST_LEN_STR,
FAST_LEN_TUPLE,
PRIM_OP_ADD_DBL,
PRIM_OP_ADD_INT,
PRIM_OP_AND_INT,
PRIM_OP_DIV_DBL,
PRIM_OP_DIV_INT,
PRIM_OP_DIV_UN_INT,
PRIM_OP_EQ_DBL,
PRIM_OP_EQ_INT,
PRIM_OP_GE_DBL,
PRIM_OP_GE_INT,
PRIM_OP_GE_UN_INT,
PRIM_OP_GT_DBL,
PRIM_OP_GT_INT,
PRIM_OP_GT_UN_INT,
PRIM_OP_INV_INT,
PRIM_OP_LE_DBL,
PRIM_OP_LE_INT,
PRIM_OP_LE_UN_INT,
PRIM_OP_LSHIFT_INT,
PRIM_OP_LT_DBL,
PRIM_OP_LT_INT,
PRIM_OP_LT_UN_INT,
PRIM_OP_MOD_DBL,
PRIM_OP_MOD_INT,
PRIM_OP_MOD_UN_INT,
PRIM_OP_MUL_DBL,
PRIM_OP_MUL_INT,
PRIM_OP_NE_DBL,
PRIM_OP_NE_INT,
PRIM_OP_NEG_DBL,
PRIM_OP_NEG_INT,
PRIM_OP_NOT_INT,
PRIM_OP_OR_INT,
PRIM_OP_POW_DBL,
PRIM_OP_POW_INT,
PRIM_OP_POW_UN_INT,
PRIM_OP_RSHIFT_INT,
PRIM_OP_RSHIFT_UN_INT,
PRIM_OP_SUB_DBL,
PRIM_OP_SUB_INT,
PRIM_OP_XOR_INT,
SEQ_ARRAY_INT64,
SEQ_CHECKED_LIST,
SEQ_LIST,
SEQ_LIST_INEXACT,
SEQ_REPEAT_INEXACT_NUM,
SEQ_REPEAT_INEXACT_SEQ,
SEQ_REPEAT_PRIMITIVE_NUM,
SEQ_REPEAT_REVERSED,
SEQ_SUBSCR_UNCHECKED,
SEQ_TUPLE,
TYPED_BOOL,
TYPED_DOUBLE,
TYPED_INT16,
TYPED_INT32,
TYPED_INT64,
TYPED_INT8,
TYPED_INT_64BIT,
TYPED_OBJECT,
TYPED_UINT16,
TYPED_UINT32,
TYPED_UINT64,
TYPED_UINT8,
)
from ..consts import CO_STATICALLY_COMPILED
from ..errors import TypedSyntaxError
from ..optimizer import AstOptimizer
from ..pyassem import Block, FVC_REPR
from ..pycodegen import CodeGenerator, FOR_LOOP
from ..symbols import FunctionScope
from ..unparse import to_expr
from ..visitor import ASTRewriter, TAst
from .effects import NarrowingEffect, NO_EFFECT, TypeState
from .visitor import GenericVisitor
# Loop-kind tag passed to push_loop()/pop_loop() below.
# NOTE(review): this rebinds the FOR_LOOP name imported from ..pycodegen
# above -- confirm the values agree.
FOR_LOOP = 2
def common_sequence_emit_forloop(
    node: ast.For, code_gen: Static310CodeGenerator, seq_type: int
) -> None:
    """Emit a specialized for-loop over a known sequence type.

    Instead of the generic iterator protocol, iterates by primitive int64
    index: each pass re-checks ``idx < FAST_LEN(seq)``, fetches the element,
    increments the index, then runs the loop body.  Handles ``orelse``.

    Args:
        node: the ``ast.For`` being compiled; ``node.iter`` must evaluate to
            a sequence matching ``seq_type``.
        code_gen: the static-Python code generator to emit into.
        seq_type: SEQ_TUPLE or a list-like SEQ_* constant.
    """
    if seq_type == SEQ_TUPLE:
        fast_len_oparg = FAST_LEN_TUPLE
    else:
        fast_len_oparg = FAST_LEN_LIST
    # Local-variable descriptor for the hidden int64 loop index.
    descr = ("__static__", "int64", "#")
    # Plain strings: the originals were f-strings with no placeholders (F541).
    start = code_gen.newBlock("seq_forloop_start")
    anchor = code_gen.newBlock("seq_forloop_anchor")
    after = code_gen.newBlock("seq_forloop_after")
    with code_gen.new_loopidx() as loop_idx:
        code_gen.set_lineno(node)
        code_gen.push_loop(FOR_LOOP, start, after)
        code_gen.visit(node.iter)
        # idx = 0
        code_gen.emit("PRIMITIVE_LOAD_CONST", (0, TYPED_INT64))
        code_gen.emit("STORE_LOCAL", (loop_idx, descr))
        code_gen.nextBlock(start)
        code_gen.emit("DUP_TOP")  # used for SEQUENCE_GET
        code_gen.emit("DUP_TOP")  # used for FAST_LEN
        # while len(seq) > idx:
        code_gen.emit("FAST_LEN", fast_len_oparg)
        code_gen.emit("LOAD_LOCAL", (loop_idx, descr))
        code_gen.emit("PRIMITIVE_COMPARE_OP", PRIM_OP_GT_INT)
        code_gen.emit("POP_JUMP_IF_ZERO", anchor)
        code_gen.emit("LOAD_LOCAL", (loop_idx, descr))
        if seq_type == SEQ_TUPLE:
            # todo - we need to implement TUPLE_GET which supports primitive index
            code_gen.emit("PRIMITIVE_BOX", TYPED_INT64)
            code_gen.emit("BINARY_SUBSCR", 2)
        else:
            # Bounds were just checked above, so skip the subscript check.
            code_gen.emit("SEQUENCE_GET", seq_type | SEQ_SUBSCR_UNCHECKED)
        # idx += 1
        code_gen.emit("LOAD_LOCAL", (loop_idx, descr))
        code_gen.emit("PRIMITIVE_LOAD_CONST", (1, TYPED_INT64))
        code_gen.emit("PRIMITIVE_BINARY_OP", PRIM_OP_ADD_INT)
        code_gen.emit("STORE_LOCAL", (loop_idx, descr))
        code_gen.visit(node.target)
        code_gen.visit(node.body)
        code_gen.emit("JUMP_ABSOLUTE", start)
        code_gen.nextBlock(anchor)
        code_gen.emit("POP_TOP")  # Pop loop index
        code_gen.emit("POP_TOP")  # Pop list
        code_gen.pop_loop()
        if node.orelse:
            code_gen.visit(node.orelse)
        code_gen.nextBlock(after)
185,734 | from __future__ import annotations
from __static__ import chkdict, chklist
import ast
import dataclasses
import sys
from ast import (
AnnAssign,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Bytes,
Call,
ClassDef,
cmpop,
Constant,
copy_location,
expr,
FunctionDef,
NameConstant,
Num,
Return,
Starred,
Str,
)
from copy import copy
from enum import Enum, IntEnum
from functools import cached_property
from types import (
BuiltinFunctionType,
GetSetDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
)
from typing import (
Callable as typingCallable,
cast,
ClassVar as typingClassVar,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from cinderx.static import ( # noqa: F401
FAST_LEN_ARRAY,
FAST_LEN_DICT,
FAST_LEN_INEXACT,
FAST_LEN_LIST,
FAST_LEN_SET,
FAST_LEN_STR,
FAST_LEN_TUPLE,
PRIM_OP_ADD_DBL,
PRIM_OP_ADD_INT,
PRIM_OP_AND_INT,
PRIM_OP_DIV_DBL,
PRIM_OP_DIV_INT,
PRIM_OP_DIV_UN_INT,
PRIM_OP_EQ_DBL,
PRIM_OP_EQ_INT,
PRIM_OP_GE_DBL,
PRIM_OP_GE_INT,
PRIM_OP_GE_UN_INT,
PRIM_OP_GT_DBL,
PRIM_OP_GT_INT,
PRIM_OP_GT_UN_INT,
PRIM_OP_INV_INT,
PRIM_OP_LE_DBL,
PRIM_OP_LE_INT,
PRIM_OP_LE_UN_INT,
PRIM_OP_LSHIFT_INT,
PRIM_OP_LT_DBL,
PRIM_OP_LT_INT,
PRIM_OP_LT_UN_INT,
PRIM_OP_MOD_DBL,
PRIM_OP_MOD_INT,
PRIM_OP_MOD_UN_INT,
PRIM_OP_MUL_DBL,
PRIM_OP_MUL_INT,
PRIM_OP_NE_DBL,
PRIM_OP_NE_INT,
PRIM_OP_NEG_DBL,
PRIM_OP_NEG_INT,
PRIM_OP_NOT_INT,
PRIM_OP_OR_INT,
PRIM_OP_POW_DBL,
PRIM_OP_POW_INT,
PRIM_OP_POW_UN_INT,
PRIM_OP_RSHIFT_INT,
PRIM_OP_RSHIFT_UN_INT,
PRIM_OP_SUB_DBL,
PRIM_OP_SUB_INT,
PRIM_OP_XOR_INT,
SEQ_ARRAY_INT64,
SEQ_CHECKED_LIST,
SEQ_LIST,
SEQ_LIST_INEXACT,
SEQ_REPEAT_INEXACT_NUM,
SEQ_REPEAT_INEXACT_SEQ,
SEQ_REPEAT_PRIMITIVE_NUM,
SEQ_REPEAT_REVERSED,
SEQ_SUBSCR_UNCHECKED,
SEQ_TUPLE,
TYPED_BOOL,
TYPED_DOUBLE,
TYPED_INT16,
TYPED_INT32,
TYPED_INT64,
TYPED_INT8,
TYPED_INT_64BIT,
TYPED_OBJECT,
TYPED_UINT16,
TYPED_UINT32,
TYPED_UINT64,
TYPED_UINT8,
)
from ..consts import CO_STATICALLY_COMPILED
from ..errors import TypedSyntaxError
from ..optimizer import AstOptimizer
from ..pyassem import Block, FVC_REPR
from ..pycodegen import CodeGenerator, FOR_LOOP
from ..symbols import FunctionScope
from ..unparse import to_expr
from ..visitor import ASTRewriter, TAst
from .effects import NarrowingEffect, NO_EFFECT, TypeState
from .visitor import GenericVisitor
def common_literal_emit_type_check(
    literal_value: object,
    comp_opname: str,
    comp_opcode: object,
    code_gen: Static310CodeGenerator,
) -> None:
    """Emit a runtime check that the value on top of stack matches
    ``literal_value``, raising TypeError otherwise.

    The TOS value is duplicated, compared against ``literal_value`` with the
    supplied comparison instruction, and left in place on success.  On
    mismatch a TypeError is raised with a message embedding the actual value.
    """
    code_gen.emit("DUP_TOP")
    code_gen.emit("LOAD_CONST", literal_value)
    code_gen.emit(comp_opname, comp_opcode)
    end = code_gen.newBlock()
    code_gen.emit("POP_JUMP_IF_TRUE", end)
    code_gen.nextBlock()
    # Failure path: build "expected <literal>, got <actual>" via
    # FORMAT_VALUE + BUILD_STRING, then raise TypeError(message).
    code_gen.emit("LOAD_GLOBAL", "TypeError")
    code_gen.emit("ROT_TWO")
    code_gen.emit("LOAD_CONST", f"expected {literal_value}, got ")
    code_gen.emit("ROT_TWO")
    code_gen.emit("FORMAT_VALUE")
    code_gen.emit("BUILD_STRING", 2)
    code_gen.emit("CALL_FUNCTION", 1)
    code_gen.emit("RAISE_VARARGS", 1)
    code_gen.nextBlock(end)
185,735 | from __future__ import annotations
from __static__ import chkdict, chklist
import ast
import dataclasses
import sys
from ast import (
AnnAssign,
Assign,
AST,
AsyncFunctionDef,
Attribute,
Bytes,
Call,
ClassDef,
cmpop,
Constant,
copy_location,
expr,
FunctionDef,
NameConstant,
Num,
Return,
Starred,
Str,
)
from copy import copy
from enum import Enum, IntEnum
from functools import cached_property
from types import (
BuiltinFunctionType,
GetSetDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
)
from typing import (
Callable as typingCallable,
cast,
ClassVar as typingClassVar,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from cinderx.static import ( # noqa: F401
FAST_LEN_ARRAY,
FAST_LEN_DICT,
FAST_LEN_INEXACT,
FAST_LEN_LIST,
FAST_LEN_SET,
FAST_LEN_STR,
FAST_LEN_TUPLE,
PRIM_OP_ADD_DBL,
PRIM_OP_ADD_INT,
PRIM_OP_AND_INT,
PRIM_OP_DIV_DBL,
PRIM_OP_DIV_INT,
PRIM_OP_DIV_UN_INT,
PRIM_OP_EQ_DBL,
PRIM_OP_EQ_INT,
PRIM_OP_GE_DBL,
PRIM_OP_GE_INT,
PRIM_OP_GE_UN_INT,
PRIM_OP_GT_DBL,
PRIM_OP_GT_INT,
PRIM_OP_GT_UN_INT,
PRIM_OP_INV_INT,
PRIM_OP_LE_DBL,
PRIM_OP_LE_INT,
PRIM_OP_LE_UN_INT,
PRIM_OP_LSHIFT_INT,
PRIM_OP_LT_DBL,
PRIM_OP_LT_INT,
PRIM_OP_LT_UN_INT,
PRIM_OP_MOD_DBL,
PRIM_OP_MOD_INT,
PRIM_OP_MOD_UN_INT,
PRIM_OP_MUL_DBL,
PRIM_OP_MUL_INT,
PRIM_OP_NE_DBL,
PRIM_OP_NE_INT,
PRIM_OP_NEG_DBL,
PRIM_OP_NEG_INT,
PRIM_OP_NOT_INT,
PRIM_OP_OR_INT,
PRIM_OP_POW_DBL,
PRIM_OP_POW_INT,
PRIM_OP_POW_UN_INT,
PRIM_OP_RSHIFT_INT,
PRIM_OP_RSHIFT_UN_INT,
PRIM_OP_SUB_DBL,
PRIM_OP_SUB_INT,
PRIM_OP_XOR_INT,
SEQ_ARRAY_INT64,
SEQ_CHECKED_LIST,
SEQ_LIST,
SEQ_LIST_INEXACT,
SEQ_REPEAT_INEXACT_NUM,
SEQ_REPEAT_INEXACT_SEQ,
SEQ_REPEAT_PRIMITIVE_NUM,
SEQ_REPEAT_REVERSED,
SEQ_SUBSCR_UNCHECKED,
SEQ_TUPLE,
TYPED_BOOL,
TYPED_DOUBLE,
TYPED_INT16,
TYPED_INT32,
TYPED_INT64,
TYPED_INT8,
TYPED_INT_64BIT,
TYPED_OBJECT,
TYPED_UINT16,
TYPED_UINT32,
TYPED_UINT64,
TYPED_UINT8,
)
from ..consts import CO_STATICALLY_COMPILED
from ..errors import TypedSyntaxError
from ..optimizer import AstOptimizer
from ..pyassem import Block, FVC_REPR
from ..pycodegen import CodeGenerator, FOR_LOOP
from ..symbols import FunctionScope
from ..unparse import to_expr
from ..visitor import ASTRewriter, TAst
from .effects import NarrowingEffect, NO_EFFECT, TypeState
from .visitor import GenericVisitor
def access_path(node: ast.AST) -> List[str]:
    """Return the dotted-name components of an attribute chain.

    ``a.b.c`` yields ``["a", "b", "c"]``; any node other than a pure
    Name/Attribute chain (e.g. a subscript or call) yields ``[]``.
    """
    parts: List[str] = []
    cur = node
    # Walk down the chain collecting attribute names until we hit the root.
    while not isinstance(cur, ast.Name):
        if not isinstance(cur, ast.Attribute):
            return []
        parts.append(cur.attr)
        cur = cur.value
    parts.append(cur.id)
    parts.reverse()
    return parts
185,736 | import os
import sys
try:
    from importlib.machinery import SourceFileLoader
except ImportError:
    import imp

    def find_module(modname):
        """Finds and returns a module in the local dist/checkout.
        """
        # Legacy path (no importlib.machinery): locate the module under the
        # checkout's Lib/ directory with the deprecated imp API.
        modpath = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), "Lib")
        return imp.load_module(modname, *imp.find_module(modname, [modpath]))
else:
    def find_module(modname):
        """Finds and returns a module in the local dist/checkout.
        """
        # Modern path: load Lib/<modname>.py directly via SourceFileLoader.
        modpath = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), "Lib", modname + ".py")
        return SourceFileLoader(modname, modpath).load_module()
The provided code snippet includes necessary dependencies for implementing the `write_contents` function. Write a Python function `def write_contents(f)` to solve the following problem:
Write C code contents to the target file object.
Here is the function:
def write_contents(f):
    """Write C code contents to the target file object.

    Emits the ``opcode_targets`` computed-goto dispatch table for the eval
    loop: one ``&&TARGET_<OPNAME>`` label per defined opcode, with
    ``&&_unknown_opcode`` filling unassigned slots.
    """
    opcode = find_module('opcode')
    # Invert opmap (name -> number) into a 256-slot number -> name table.
    targets = [None] * 256
    for opname, op in opcode.opmap.items():
        targets[op] = opname
    f.write("static void *opcode_targets[256] = {\n")
    for s in targets:
        if s is None:
            # Fixed literal: the original used an f-string with no
            # placeholders here (F541).
            f.write("    &&_unknown_opcode,\n")
            continue
        f.write(f"    &&TARGET_{s},\n")
    f.write("\n};\n")
185,751 | import argparse
import ast
import io
import re
import subprocess
import sys
import textwrap
from io import TextIOWrapper
from typing import List, Tuple
import unparse
def run_fuzzer(code_str: str, subprocesses: int, output_file: TextIOWrapper) -> None:
    """Fuzz ``code_str`` in ``subprocesses`` parallel child processes.

    Each child runs fuzzer.py under ``python -X jit`` with the code passed
    via ``--codestr``; child stdout/stderr goes to ``output_file``.  Blocks
    until every child has exited, then flushes the output file.
    """
    subprocess_arr = []
    for i in range(subprocesses):
        subprocess_arr.append(
            subprocess.Popen(
                [
                    sys.executable,
                    "-X",
                    "jit",
                    # fuzzer.py is expected to live alongside this executor
                    # module -- fragile if this file is ever renamed.
                    __file__.replace("executor.py", "fuzzer.py"),
                    "--codestr",
                    code_str,
                ],
                stdout=output_file,
                stderr=output_file,
            )
        )
    for i in subprocess_arr:
        i.wait()
    output_file.flush()
def extract_functions_from_file(file_location: str) -> List[str]:
    """Parse a Python file and return the source text of every function.

    Walks the module AST with an explicit stack so functions nested inside
    other functions or classes are found too; each ``ast.FunctionDef`` is
    unparsed back to source via the local ``unparse`` module.
    """
    funcs = []
    with open(file_location, "r") as code_file:
        text = code_file.read()
    node = ast.parse(text)
    stack = list(node.body)
    # extract all embedded function objects by recursing with a stack
    while stack:
        curr = stack.pop()
        if isinstance(curr, ast.FunctionDef):
            # Only allocate the buffer when we actually unparse (the
            # original built a StringIO for every node and also bound the
            # Unparser instance to an unused variable).
            str_io = io.StringIO()
            unparse.Unparser(curr, str_io)  # writes source into str_io
            funcs.append(str_io.getvalue())
        if hasattr(curr, "body"):
            stack += [
                child
                for child in curr.body
                if isinstance(child, (ast.FunctionDef, ast.ClassDef))
            ]
    return funcs
def run_fuzzer_on_test_file(
    file_location: str, subprocesses: int, output_file: TextIOWrapper
) -> None:
    """Run the fuzzer over every function found in ``file_location``.

    Each extracted function is fuzzed in ``subprocesses`` parallel child
    processes; all output is appended to ``output_file``.
    """
    # NOTE: the original also built a ``subprocess_arr`` list that was never
    # used -- run_fuzzer() already waits on its own children.
    for func_src in extract_functions_from_file(file_location):
        run_fuzzer(func_src, subprocesses, output_file)
185,752 | import ast
import io
import os
import sys
The provided code snippet includes necessary dependencies for implementing the `interleave` function. Write a Python function `def interleave(inter, f, seq)` to solve the following problem:
Call f on each item in seq, calling inter() in between.
Here is the function:
def interleave(inter, f, seq):
    """Call f on each item in seq, calling inter() in between."""
    first = True
    for item in seq:
        if first:
            first = False
        else:
            inter()
        f(item)
185,753 | import ast
import io
import os
import sys
def roundtrip(filename, output=sys.stdout):
def testdir(a):
    """Round-trip every ``*.py`` file under directory ``a``, recursively.

    Compile failures are reported to stdout but do not stop the walk;
    unreadable directories are reported to stderr and skipped.
    """
    try:
        names = [n for n in os.listdir(a) if n.endswith(".py")]
    except OSError:
        print("Directory not readable: %s" % a, file=sys.stderr)
    else:
        for n in names:
            fullname = os.path.join(a, n)
            if os.path.isfile(fullname):
                # Round-trip output is captured in memory; only the
                # progress line and any failure reach stdout.
                output = io.StringIO()
                print("Testing %s" % fullname)
                try:
                    roundtrip(fullname, output)
                except Exception as e:
                    # Broad catch is deliberate: keep scanning other files.
                    print("  Failed to compile, exception is %s" % repr(e))
            elif os.path.isdir(fullname):
                testdir(fullname)
185,754 | import argparse
import dis
import enum
import random
import string
import sys
import textwrap
import types
from cinderx.compiler import compile, opcode_cinder, pyassem, pycodegen, symbols
from cinderx.compiler.consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_VARARGS,
CO_VARKEYWORDS,
PyCF_MASK_OBSOLETE,
PyCF_ONLY_AST,
PyCF_SOURCE_IS_UTF8,
SC_CELL,
SC_FREE,
SC_GLOBAL_EXPLICIT,
SC_GLOBAL_IMPLICIT,
SC_LOCAL,
)
from verifier import VerificationError, Verifier
try:
import cinderjit
except ImportError:
cinderjit = None
class Fuzzer(pycodegen.CinderCodeGenerator):
    """Code generator that randomly perturbs the bytecode it emits.

    Overrides ``emit`` to (sometimes) replace opcodes and opargs with
    randomized-but-plausible alternatives, producing mutated code objects
    for stress-testing the JIT and the bytecode verifier.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Use the fuzzing flow graph instead of the stock one.
        self.flow_graph = PyFlowGraphFuzzer
        # Maps an original oparg to its randomized replacement so the same
        # name/const is mutated consistently within one code object.
        self.oparg_randomizations = {}

    # overriding to set definitions
    def _setupGraphDelegation(self):
        self.emitWithBlock = self.graph.emitWithBlock
        self.newBlock = self.graph.newBlock
        self.nextBlock = self.graph.nextBlock

    # Overriding emit call to fuzz certain opargs stored in names, varnames, consts
    # Will update to fuzz more types of opargs, and fuzz instructions as well
    def emit(self, opcode: str, oparg: object = 0) -> None:
        """Emit ``opcode``/``oparg``, possibly mutating one or both."""
        self.graph.maybeEmitSetLineno()
        # Branch instructions (Block targets) are never fuzzed: mutating
        # them would break control flow outright.
        if opcode != "SET_LINENO" and isinstance(oparg, pyassem.Block):
            if not self.graph.do_not_emit_bytecode:
                self.graph.current.addOutEdge(oparg)
                self.graph.current.emit(pyassem.Instruction(opcode, 0, 0, target=oparg))
            return
        ioparg = self.graph.convertArg(opcode, oparg)
        randomized_opcode = randomize_opcode(opcode)
        """
        # We can fuzz opcodes if 3 conditions are met
        # 1. randomized_opcode != opcode (certain opcodes are left unrandomized, such as branch instructions)
        # 2. we can safely replace the original oparg with a new one (for the new instruction)
        without the length of a tuple (i.e. co_names, co_varnames) hitting zero (or it will fail assertions)
        # 3. random chance based on INSTR_RANDOMIZATION_CHANCE
        """
        if (
            random.randint(1, 100) <= INSTR_RANDOMIZATION_CHANCE
            and randomized_opcode != opcode
            and can_replace_oparg(
                opcode,
                self.graph.consts,
                self.graph.names,
                self.graph.varnames,
                self.graph.closure,
            )
        ):
            # if we are fuzzing this opcode
            # create a new oparg corresponding to that opcode
            # and emit
            new_oparg = generate_oparg_for_randomized_opcode(
                opcode,
                randomized_opcode,
                oparg,
                self.graph.consts,
                self.graph.names,
                self.graph.varnames,
                self.graph.freevars,
                self.graph.cellvars,
            )
            # get new ioparg
            ioparg = self.graph.convertArg(randomized_opcode, new_oparg)
            self.graph.current.emit(
                pyassem.Instruction(randomized_opcode, new_oparg, ioparg)
            )
        else:
            # otherwise, just randomize the oparg and emit
            self.randomize_oparg(opcode, oparg, ioparg)
        if opcode == "SET_LINENO" and not self.graph.first_inst_lineno:
            self.graph.first_inst_lineno = ioparg

    # randomizes an existing oparg and emits an instruction with the randomized oparg and ioparg
    def randomize_oparg(self, opcode: str, oparg: object, ioparg: int) -> None:
        """Emit ``opcode`` with a (cached) randomized version of ``oparg``.

        The randomized value replaces the original in whichever code-object
        tuple (names / varnames / consts / closure) holds it, keeping the
        index (ioparg) consistent with the emitted instruction.
        """
        if not self.graph.do_not_emit_bytecode:
            # storing oparg to randomized version as a key value pair
            if oparg in self.oparg_randomizations:
                randomized_oparg = self.oparg_randomizations[oparg]
            else:
                randomized_oparg = randomize_variable(oparg)
                self.oparg_randomizations[oparg] = randomized_oparg
            if opcode in Fuzzer.INSTRS_WITH_OPARG_IN_NAMES:
                ioparg = replace_name_var(oparg, randomized_oparg, self.graph.names)
                self.graph.current.emit(
                    pyassem.Instruction(opcode, randomized_oparg, ioparg)
                )
            elif opcode in Fuzzer.INSTRS_WITH_OPARG_IN_VARNAMES:
                ioparg = replace_name_var(oparg, randomized_oparg, self.graph.varnames)
                self.graph.current.emit(
                    pyassem.Instruction(opcode, randomized_oparg, ioparg)
                )
            elif (
                opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CONSTS
                # LOAD_CONST often has embedded code objects or a code generator as its oparg
                # If I randomize the oparg to a LOAD_CONST the code object generation could fail
                # Therefore it is not being randomized at the moment
                and opcode != "LOAD_CONST"
            ):
                ioparg = replace_const_var(
                    self.graph.get_const_key(oparg),
                    self.graph.get_const_key(randomized_oparg),
                    self.graph.consts,
                )
                self.graph.current.emit(
                    pyassem.Instruction(opcode, randomized_oparg, ioparg)
                )
            elif opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CLOSURE:
                ioparg = replace_closure_var(
                    oparg,
                    randomized_oparg,
                    ioparg,
                    self.graph.freevars,
                    self.graph.cellvars,
                )
                self.graph.current.emit(
                    pyassem.Instruction(opcode, randomized_oparg, ioparg)
                )
            else:
                # Oparg lives nowhere we can rewrite; keep it and only
                # randomize the integer ioparg.
                ioparg = generate_random_ioparg(opcode, ioparg)
                self.graph.current.emit(pyassem.Instruction(opcode, oparg, ioparg))

    # Instructions whose oparg indexes into co_consts.
    INSTRS_WITH_OPARG_IN_CONSTS = {
        "LOAD_CONST",
        "LOAD_CLASS",
        "INVOKE_FUNCTION",
        "INVOKE_METHOD",
        "LOAD_FIELD",
        "STORE_FIELD",
        "CAST",
        "PRIMITIVE_BOX",
        "PRIMITIVE_UNBOX",
        "TP_ALLOC",
        "BUILD_CHECKED_MAP",
        "BUILD_CHECKED_LIST",
        "PRIMITIVE_LOAD_CONST",
        "LOAD_LOCAL",
        "STORE_LOCAL",
        "REFINE_TYPE",
        "LOAD_METHOD_SUPER",
        "LOAD_ATTR_SUPER",
    }
    # Instructions whose oparg indexes into co_varnames.
    INSTRS_WITH_OPARG_IN_VARNAMES = {
        "LOAD_FAST",
        "STORE_FAST",
        "DELETE_FAST",
    }
    # Instructions whose oparg indexes into co_names.
    INSTRS_WITH_OPARG_IN_NAMES = {
        "LOAD_NAME",
        "LOAD_GLOBAL",
        "STORE_GLOBAL",
        "DELETE_GLOBAL",
        "STORE_NAME",
        "DELETE_NAME",
        "IMPORT_NAME",
        "IMPORT_FROM",
        "STORE_ATTR",
        "LOAD_ATTR",
        "DELETE_ATTR",
        "LOAD_METHOD",
    }
    # Instructions whose oparg indexes into the closure (cellvars+freevars).
    INSTRS_WITH_OPARG_IN_CLOSURE = {
        "LOAD_DEREF",
        "STORE_DEREF",
        "DELETE_DEREF",
        "LOAD_CLASSDEREF",
        "LOAD_CLOSURE",
    }
    # Control-transfer instructions, excluded from opcode randomization.
    INSTRS_WITH_BRANCHES = {
        "FOR_ITER",
        "JUMP_ABSOLUTE",
        "JUMP_FORWARD",
        "JUMP_IF_FALSE_OR_POP",
        "JUMP_IF_NOT_EXC_MATCH",
        "JUMP_IF_TRUE_OR_POP",
        "POP_JUMP_IF_FALSE",
        "POP_JUMP_IF_TRUE",
        "RETURN_VALUE",
        "RAISE_VARARGS",
        "RERAISE",
        "JUMP_ABSOLUTE",
        "JUMP_FORWARD",
    }

    # Class-body helper: collect all opcodes with a fixed stack effect of
    # ``n``, minus an explicit exclusion list.
    def opcodes_with_stack_effect(n, without=()):
        all_opcodes = {
            op for op, eff in opcode_cinder.opcode.stack_effects.items() if eff == n
        }
        for opcode in without:
            assert opcode in all_opcodes, f"Opcode {opcode} not found in list"
        result = all_opcodes - set(without)
        assert (
            len(result) > 1
        ), "Not enough opcodes in list to prevent unbounded recursion"
        return result

    INSTRS_WITH_STACK_EFFECT_0 = opcodes_with_stack_effect(0)
    INSTRS_WITH_STACK_EFFECT_0_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_0)
    # WITH_EXCEPT_START expects 7 things on the stack as a precondition.
    INSTRS_WITH_STACK_EFFECT_1 = opcodes_with_stack_effect(1, {"WITH_EXCEPT_START"})
    INSTRS_WITH_STACK_EFFECT_1_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_1)
    INSTRS_WITH_STACK_EFFECT_2 = opcodes_with_stack_effect(2)
    INSTRS_WITH_STACK_EFFECT_2_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_2)
    INSTRS_WITH_STACK_EFFECT_NEG_1 = opcodes_with_stack_effect(-1)
    INSTRS_WITH_STACK_EFFECT_NEG_1_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_1)
    INSTRS_WITH_STACK_EFFECT_NEG_2 = opcodes_with_stack_effect(
        -2,
    )
    INSTRS_WITH_STACK_EFFECT_NEG_2_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_2)
    # RERAISE has some preconditions about the blockstack.
    INSTRS_WITH_STACK_EFFECT_NEG_3 = opcodes_with_stack_effect(-3, {"RERAISE"})
    INSTRS_WITH_STACK_EFFECT_NEG_3_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_3)
    # Opcodes whose stack effect depends on the oparg (callable effects),
    # minus ones the fuzzer cannot handle safely.
    INSTRS_WITH_OPARG_AFFECTING_STACK = {
        op
        for op, eff in opcode_cinder.opcode.stack_effects.items()
        if not isinstance(eff, int)
    } - {
        # TODO(emacs): Figure out why BUILD_SLICE is excluded.
        "BUILD_SLICE",
        # TODO(emacs): Figure out why FOR_ITER is excluded.
        "FOR_ITER",
        # TODO(emacs): Figure out why FORMAT_VALUE is excluded.
        "FORMAT_VALUE",
        # TODO(emacs): Figure out why INVOKE_METHOD' is excluded.
        "INVOKE_METHOD",
        # TODO(emacs): Figure out why JUMP_IF_X_OR_POP group is excluded.
        "JUMP_IF_FALSE_OR_POP",
        "JUMP_IF_TRUE_OR_POP",
        # Exclude instructions that modify the blockstack.
        "SETUP_ASYNC_WITH",
        "SETUP_FINALLY",
        "SETUP_WITH",
    }
    assert (
        len(INSTRS_WITH_OPARG_AFFECTING_STACK) > 1
    ), "Not enough opcodes in list to prevent unbounded recursion"
class FuzzerReturnTypes(enum.Enum):
    """Outcome codes reported by a fuzzing run."""

    SYNTAX_ERROR = 0
    # NOTE(review): value 1 is unused -- possibly a removed member; confirm
    # nothing serializes these by raw value.
    FUZZER_CODEGEN_ERROR = 2
    ERROR_CAUGHT_BY_JIT = 3
    VERIFICATION_ERROR = 4
    SUCCESS = 5
def code_object_to_string(code: types.CodeType) -> str:
    """Render a code object (and all nested code objects) as readable text.

    Walks ``code`` plus every code object found in its constants, dumping
    the raw bytecode, a per-instruction "OPNAME : arg" listing, and the
    const/name/varname/cellvar/freevar tuples for each.
    """
    # Accumulate fragments and join once: repeated ``str +=`` in a loop is
    # quadratic in the worst case.
    parts = ["CODE OBJECT:\n"]
    stack = [(code, 0)]
    while stack:
        code_obj, level = stack.pop()
        parts.append(f"Code object at level {level}\n")
        parts.append(f"Bytecode: {code_obj.co_code}\n")
        # Plain literal; the original used a placeholder-free f-string.
        parts.append("Formatted bytecode:\n")
        bytecode = code_obj.co_code
        # Bytecode is a sequence of (opcode, oparg) byte pairs.
        for op, arg in zip(bytecode[::2], bytecode[1::2]):
            parts.append(f"{dis.opname[op]} : {arg}, ")
        parts.append("\n")
        parts.append(f"Consts: {code_obj.co_consts}\n")
        parts.append(f"Names: {code_obj.co_names}\n")
        parts.append(f"Varnames: {code_obj.co_varnames}\n")
        parts.append(f"Cellvars: {code_obj.co_cellvars}\n")
        parts.append(f"Freevars: {code_obj.co_freevars}\n")
        for const in code_obj.co_consts:
            if isinstance(const, types.CodeType):
                stack.append((const, level + 1))
    return "".join(parts)
class VerificationError(Exception):
    """Raised when a code object fails bytecode verification.

    Carries a human-readable reason and, when available, the offending
    bytecode operation.
    """

    def __init__(self, reason: str, bytecode_op: BytecodeOp = None):
        super().__init__(reason, bytecode_op)
        self.reason = reason  # why verification failed
        self.bytecode_op = bytecode_op  # offending op, or None

    def __str__(self):
        # Include the op name and its byte offset when an op was supplied.
        if self.bytecode_op is not None:
            return f"{self.reason} for operation {self.bytecode_op.name} @ offset {self.bytecode_op.idx * CODEUNIT_SIZE}"
        else:
            return f"{self.reason}"
class Verifier:
def validate_code(source: types.CodeType) -> bool:
stack = [source]
while stack:
module = stack.pop()
Verifier.visit_code(module)
for i in module.co_consts:
if isinstance(i, types.CodeType):
stack.append(i)
return True
def visit_code(source: types.CodeType) -> None:
bytecode = source.co_code
Verifier.check_length(bytecode)
bytecode_list = Verifier.parse_bytecode(bytecode)
Verifier.check_opargs(source, bytecode_list)
block_map = Verifier.create_blocks(bytecode_list)
Verifier.add_successors(block_map)
Verifier.check_stack_depth(source, block_map.idx_to_block[0])
def check_length(bytecode: bytes) -> None:
# length cannot be zero or odd
if len(bytecode) <= 0:
raise VerificationError("Bytecode length cannot be zero or negative")
if len(bytecode) % CODEUNIT_SIZE != 0:
raise VerificationError("Bytecode length cannot be odd")
def parse_bytecode(bytecode: bytes) -> List[BytecodeOp]:
# Changing from bytecode into a more convenient data structure
num_instrs = len(bytecode) // CODEUNIT_SIZE
result = [None] * num_instrs
i, idx = 0, 0
while i < len(bytecode):
op = bytecode[i]
try:
name = dis.opname[op]
except IndexError:
raise VerificationError(f"Operation {op} at offset {i} out of bounds")
if name[0] == "<":
# if the opcode doesn't bind to a legitimate instruction, it will just be the number inside "<>"
raise VerificationError(f"Operation {op} at offset {i} does not exist")
result[idx] = BytecodeOp(op, bytecode[i + 1], idx, name)
if result[idx].is_branch():
if result[idx].jump_target_idx() >= num_instrs:
raise VerificationError(
f"Operation {name} can not jump out of bounds"
)
i += CODEUNIT_SIZE
idx += 1
return result
def create_blocks(instrs: List[BytecodeOp]) -> BlockMap:
# This function creates the CFG by determining an ordering for each block of bytecode
# Through analyzing the order in which they can be executed (via branches, returns, raises, and fall throughs)
# View https://bernsteinbear.com/blog/discovering-basic-blocks/ for code source and more information
# Note that the blog post uses 3.8 semantics while this code uses 3.10
block_starts = set([0])
num_instrs = len(instrs)
for instr in instrs:
if instr.is_branch():
block_starts.add(instr.next_instr_idx())
block_starts.add(instr.jump_target_idx())
elif instr.is_return() or instr.is_raise():
next_instr_idx = instr.next_instr_idx()
if next_instr_idx < num_instrs:
block_starts.add(next_instr_idx)
num_blocks = len(block_starts)
block_starts_ordered = sorted(block_starts)
block_map = BlockMap()
for i, start_idx in enumerate(block_starts_ordered):
end_idx = block_starts_ordered[i + 1] if i + 1 < num_blocks else num_instrs
block_instrs = instrs[start_idx:end_idx]
block_map.add_block(start_idx, Block(i, block_instrs))
return block_map
def add_successors(block_map: BlockMap):
# adding successors to each block prior to stack validation
for i in block_map:
last_instr = i.bytecode[-1]
if last_instr.is_branch():
i.jump_to = block_map.idx_to_block[last_instr.jump_target_idx()]
if (
last_instr.next_instr_idx() in block_map.idx_to_block
and not last_instr.is_uncond_transfer()
):
i.fall_through = block_map.idx_to_block[last_instr.next_instr_idx()]
def assert_depth_within_bounds(
depth: int, min_: int = 0, max_: int = inf, op: BytecodeOp = None
):
if not min_ <= depth:
raise VerificationError(
f"Stack depth {depth} dips below minimum of {min_}", op
)
if not max_ >= depth:
raise VerificationError(
f"Stack depth {depth} exceeds maximum of {max_}", op
)
def push_block(worklist: List[Block], block: Block, depth: int) -> bool:
# push_block ensures that we only ever re-analyze a block if we visit it when the stack depth has increased
# this way loops can be analyzed properly
if not (block.start_depth < 0 or block.start_depth >= depth):
return False
if block.start_depth < depth:
block.start_depth = depth
worklist.append(block)
return True
def check_stack_depth(source: types.CodeType, start: Block) -> None:
# makes sure the stack size never goes negative or above the limit
max_depth = source.co_stacksize
worklist = []
Verifier.push_block(worklist, start, 0)
while worklist:
block = worklist.pop()
depth = block.start_depth
for op in block.bytecode:
Verifier.assert_depth_within_bounds(depth, 0, max_depth, op)
new_depth = depth + Verifier.get_stack_effect(source, op, False)
if op.is_branch():
target_depth = depth + Verifier.get_stack_effect(source, op, True)
Verifier.assert_depth_within_bounds(target_depth, 0, max_depth, op)
Verifier.push_block(worklist, block.jump_to, target_depth)
depth = new_depth
Verifier.assert_depth_within_bounds(depth, 0, max_depth, op)
if block.fall_through:
Verifier.push_block(worklist, block.fall_through, depth)
def get_stack_effect(source: types.CodeType, op: BytecodeOp, jump: bool) -> int:
# returns the stack effect for a particular operation
effect = opcodes.opcode.stack_effects.get(op.name)
if isinstance(effect, int):
return effect
else:
# if real oparg is stored in one of the code object's tuples, use that instead
oparg_location = Verifier.resolve_oparg_location(source, op.op)
if oparg_location is not None:
return effect(oparg_location[op.arg], jump)
return effect(op.arg, jump)
def check_opargs(source: types.CodeType, ops: List[BytecodeOp]) -> None:
for op in ops:
oparg_location = Verifier.resolve_oparg_location(source, op.op)
if oparg_location is not None:
Verifier.check_oparg_location(oparg_location, op)
def resolve_oparg_location(source: types.CodeType, op: int) -> Union[List, Tuple]:
if op in Verifier.INSTRS_WITH_OPARG_IN_CONSTS:
return source.co_consts
elif op in Verifier.INSTRS_WITH_OPARG_IN_VARNAMES:
return source.co_varnames
elif op in Verifier.INSTRS_WITH_OPARG_IN_NAMES:
return source.co_names
elif op in Verifier.DEREF_INSTRS:
return [source.co_freevars, source.co_cellvars + source.co_freevars]
elif op == opcodes.opcode.LOAD_CLOSURE:
return source.co_cellvars + source.co_freevars
elif op == opcodes.opcode.COMPARE_OP:
return opcodes.opcode.CMP_OP
return None
def resolve_expected_oparg_type(op: int) -> type:
if op == opcodes.opcode.LOAD_CONST:
return object
elif op == opcodes.opcode.PRIMITIVE_LOAD_CONST:
return int
elif op in Verifier.INSTRS_WITH_OPARG_TYPE_STRING:
return str
elif op in Verifier.INSTRS_WITH_OPARG_TYPE_TUPLE:
return tuple
return None
def check_oparg_location(
oparg_location: Union[List, Tuple], op: BytecodeOp
) -> None:
if type(oparg_location) == tuple:
Verifier.check_oparg_index_and_type(oparg_location, op)
else: # deref case which has to check both freevars and closure (cellvars + freevars)
Verifier.check_oparg_index_and_type_deref_case(oparg_location, op)
def check_oparg_index_and_type(oparg_location: tuple, op: BytecodeOp) -> None:
expected_type = Verifier.resolve_expected_oparg_type(op.op)
if not 0 <= op.arg < len(oparg_location):
raise VerificationError(
f"Argument index {op.arg} out of bounds for size {len(oparg_location)}",
op,
)
if not isinstance(oparg_location[op.arg], expected_type):
raise VerificationError(
f"Incorrect oparg type of {type(oparg_location[op.arg]).__name__}, expected {expected_type.__name__}",
op,
)
def check_oparg_index_and_type_deref_case(
oparg_locations: list, op: BytecodeOp
) -> None:
expected_type = Verifier.resolve_expected_oparg_type(op.op)
freevars = oparg_locations[0]
closure = oparg_locations[1]
if not 0 <= op.arg < len(freevars) and not 0 <= op.arg < len(closure):
raise VerificationError(
f"Argument index {op.arg} out of bounds for size {len(closure)}", op
)
if not (
0 <= op.arg < len(freevars) and isinstance(freevars[op.arg], expected_type)
) and not (
0 <= op.arg < len(closure) and isinstance(closure[op.arg], expected_type)
):
raise VerificationError(
f"Incorrect oparg type, expected {expected_type.__name__}", op
)
INSTRS_WITH_OPARG_IN_CONSTS = {
opcodes.opcode.LOAD_CONST,
opcodes.opcode.LOAD_CLASS,
opcodes.opcode.INVOKE_FUNCTION,
opcodes.opcode.INVOKE_METHOD,
opcodes.opcode.LOAD_FIELD,
opcodes.opcode.STORE_FIELD,
opcodes.opcode.CAST,
opcodes.opcode.PRIMITIVE_BOX,
opcodes.opcode.PRIMITIVE_UNBOX,
opcodes.opcode.TP_ALLOC,
opcodes.opcode.BUILD_CHECKED_MAP,
opcodes.opcode.BUILD_CHECKED_LIST,
opcodes.opcode.PRIMITIVE_LOAD_CONST,
opcodes.opcode.LOAD_LOCAL,
opcodes.opcode.STORE_LOCAL,
opcodes.opcode.REFINE_TYPE,
opcodes.opcode.LOAD_METHOD_SUPER,
opcodes.opcode.LOAD_ATTR_SUPER,
}
INSTRS_WITH_OPARG_IN_VARNAMES = {
opcodes.opcode.LOAD_FAST,
opcodes.opcode.STORE_FAST,
opcodes.opcode.DELETE_FAST,
}
INSTRS_WITH_OPARG_IN_NAMES = {
opcodes.opcode.LOAD_NAME,
opcodes.opcode.LOAD_GLOBAL,
opcodes.opcode.STORE_GLOBAL,
opcodes.opcode.DELETE_GLOBAL,
opcodes.opcode.STORE_NAME,
opcodes.opcode.DELETE_NAME,
opcodes.opcode.IMPORT_NAME,
opcodes.opcode.IMPORT_FROM,
opcodes.opcode.STORE_ATTR,
opcodes.opcode.LOAD_ATTR,
opcodes.opcode.DELETE_ATTR,
opcodes.opcode.LOAD_METHOD,
}
DEREF_INSTRS = {
opcodes.opcode.LOAD_DEREF,
opcodes.opcode.STORE_DEREF,
opcodes.opcode.DELETE_DEREF,
opcodes.opcode.LOAD_CLASSDEREF,
}
INSTRS_WITH_OPARG_TYPE_STRING = {
opcodes.opcode.LOAD_FAST,
opcodes.opcode.STORE_FAST,
opcodes.opcode.DELETE_FAST,
opcodes.opcode.LOAD_NAME,
opcodes.opcode.LOAD_CLOSURE,
opcodes.opcode.COMPARE_OP,
opcodes.opcode.LOAD_GLOBAL,
opcodes.opcode.STORE_GLOBAL,
opcodes.opcode.DELETE_GLOBAL,
opcodes.opcode.STORE_NAME,
opcodes.opcode.DELETE_NAME,
opcodes.opcode.IMPORT_NAME,
opcodes.opcode.IMPORT_FROM,
opcodes.opcode.STORE_ATTR,
opcodes.opcode.LOAD_ATTR,
opcodes.opcode.DELETE_ATTR,
opcodes.opcode.LOAD_METHOD,
opcodes.opcode.LOAD_DEREF,
opcodes.opcode.STORE_DEREF,
opcodes.opcode.DELETE_DEREF,
opcodes.opcode.LOAD_CLASSDEREF,
}
INSTRS_WITH_OPARG_TYPE_TUPLE = {
opcodes.opcode.INVOKE_FUNCTION,
opcodes.opcode.INVOKE_METHOD,
opcodes.opcode.BUILD_CHECKED_MAP,
opcodes.opcode.BUILD_CHECKED_LIST,
opcodes.opcode.LOAD_LOCAL,
opcodes.opcode.STORE_LOCAL,
opcodes.opcode.LOAD_METHOD_SUPER,
opcodes.opcode.LOAD_ATTR_SUPER,
opcodes.opcode.TP_ALLOC,
opcodes.opcode.PRIMITIVE_BOX,
opcodes.opcode.PRIMITIVE_UNBOX,
opcodes.opcode.LOAD_CLASS,
opcodes.opcode.LOAD_FIELD,
opcodes.opcode.STORE_FIELD,
opcodes.opcode.CAST,
opcodes.opcode.REFINE_TYPE,
}
def fuzzer_compile(code_str: str) -> tuple:
# wrap code in a wrapper function for jit compilation
wrapped_code_str = "def wrapper_function():\n" + textwrap.indent(code_str, " ")
# compile code with the Fuzzer as its codegenerator
try:
code = compile(wrapped_code_str, "", "exec", compiler=Fuzzer)
except SyntaxError:
return (None, FuzzerReturnTypes.SYNTAX_ERROR)
except (AssertionError, AttributeError, IndexError, KeyError, ValueError):
# indicates an error during code generation
# meaning the fuzzer has modified the code in a way which
# does not allow the creation of a code object
# ideally these types of errors are minimal
return (None, FuzzerReturnTypes.FUZZER_CODEGEN_ERROR)
# print code object to stdout so it is present in the output file
print(code_object_to_string(code))
# Run through the verifier
try:
Verifier.validate_code(code)
except VerificationError:
return (code, FuzzerReturnTypes.VERIFICATION_ERROR)
# create a function object from the code object
func = types.FunctionType(code.co_consts[0], {})
# jit compile the function
try:
jit_compiled_function = cinderjit.force_compile(func)
except RuntimeError:
return (code, FuzzerReturnTypes.ERROR_CAUGHT_BY_JIT)
return (code, FuzzerReturnTypes.SUCCESS) | null |
185,755 | import argparse
import dis
import enum
import random
import string
import sys
import textwrap
import types
from cinderx.compiler import compile, opcode_cinder, pyassem, pycodegen, symbols
from cinderx.compiler.consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_VARARGS,
CO_VARKEYWORDS,
PyCF_MASK_OBSOLETE,
PyCF_ONLY_AST,
PyCF_SOURCE_IS_UTF8,
SC_CELL,
SC_FREE,
SC_GLOBAL_EXPLICIT,
SC_GLOBAL_IMPLICIT,
SC_LOCAL,
)
from verifier import VerificationError, Verifier
def replace_closure_var(
name: str,
randomized_name: str,
ioparg: int,
freevars: pyassem.IndexedSet,
cellvars: pyassem.IndexedSet,
) -> int:
if name in freevars:
del freevars.keys[name]
return freevars.get_index(randomized_name)
else:
del cellvars.keys[name]
return cellvars.get_index(randomized_name) | null |
185,756 | import argparse
import dis
import enum
import random
import string
import sys
import textwrap
import types
from cinderx.compiler import compile, opcode_cinder, pyassem, pycodegen, symbols
from cinderx.compiler.consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_VARARGS,
CO_VARKEYWORDS,
PyCF_MASK_OBSOLETE,
PyCF_ONLY_AST,
PyCF_SOURCE_IS_UTF8,
SC_CELL,
SC_FREE,
SC_GLOBAL_EXPLICIT,
SC_GLOBAL_IMPLICIT,
SC_LOCAL,
)
from verifier import VerificationError, Verifier
def replace_name_var(
name: str, randomized_name: str, location: pyassem.IndexedSet
) -> int:
if name in location:
del location.keys[name]
return location.get_index(randomized_name) | null |
185,757 | import argparse
import dis
import enum
import random
import string
import sys
import textwrap
import types
from cinderx.compiler import compile, opcode_cinder, pyassem, pycodegen, symbols
from cinderx.compiler.consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_VARARGS,
CO_VARKEYWORDS,
PyCF_MASK_OBSOLETE,
PyCF_ONLY_AST,
PyCF_SOURCE_IS_UTF8,
SC_CELL,
SC_FREE,
SC_GLOBAL_EXPLICIT,
SC_GLOBAL_IMPLICIT,
SC_LOCAL,
)
from verifier import VerificationError, Verifier
def replace_const_var(
old_key: tuple,
new_key: tuple,
consts: dict,
) -> int:
oparg_index = consts[old_key]
del consts[old_key]
consts[new_key] = oparg_index
return oparg_index | null |
185,758 | import argparse
import dis
import enum
import random
import string
import sys
import textwrap
import types
from cinderx.compiler import compile, opcode_cinder, pyassem, pycodegen, symbols
from cinderx.compiler.consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_VARARGS,
CO_VARKEYWORDS,
PyCF_MASK_OBSOLETE,
PyCF_ONLY_AST,
PyCF_SOURCE_IS_UTF8,
SC_CELL,
SC_FREE,
SC_GLOBAL_EXPLICIT,
SC_GLOBAL_IMPLICIT,
SC_LOCAL,
)
from verifier import VerificationError, Verifier
OPARG_LOWER_BOUND = 0
OPARG_UPPER_BOUND = 2**32 - 1
CMP_OP_LENGTH = len(opcode_cinder.opcode.CMP_OP) - 1
class Fuzzer(pycodegen.CinderCodeGenerator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.flow_graph = PyFlowGraphFuzzer
self.oparg_randomizations = {}
# overriding to set definitions
def _setupGraphDelegation(self):
self.emitWithBlock = self.graph.emitWithBlock
self.newBlock = self.graph.newBlock
self.nextBlock = self.graph.nextBlock
# Overriding emit call to fuzz certain opargs stored in names, varnames, consts
# Will update to fuzz more types of opargs, and fuzz instructions as well
def emit(self, opcode: str, oparg: object = 0) -> None:
self.graph.maybeEmitSetLineno()
if opcode != "SET_LINENO" and isinstance(oparg, pyassem.Block):
if not self.graph.do_not_emit_bytecode:
self.graph.current.addOutEdge(oparg)
self.graph.current.emit(pyassem.Instruction(opcode, 0, 0, target=oparg))
return
ioparg = self.graph.convertArg(opcode, oparg)
randomized_opcode = randomize_opcode(opcode)
"""
# We can fuzz opcodes if 3 conditions are met
# 1. randomized_opcode != opcode (certain opcodes are left unrandomized, such as branch instructions)
# 2. we can safely replace the original oparg with a new one (for the new instruction)
without the length of a tuple (i.e. co_names, co_varnames) hitting zero (or it will fail assertions)
# 3. random chance based on INSTR_RANDOMIZATION_CHANCE
"""
if (
random.randint(1, 100) <= INSTR_RANDOMIZATION_CHANCE
and randomized_opcode != opcode
and can_replace_oparg(
opcode,
self.graph.consts,
self.graph.names,
self.graph.varnames,
self.graph.closure,
)
):
# if we are fuzzing this opcode
# create a new oparg corresponding to that opcode
# and emit
new_oparg = generate_oparg_for_randomized_opcode(
opcode,
randomized_opcode,
oparg,
self.graph.consts,
self.graph.names,
self.graph.varnames,
self.graph.freevars,
self.graph.cellvars,
)
# get new ioparg
ioparg = self.graph.convertArg(randomized_opcode, new_oparg)
self.graph.current.emit(
pyassem.Instruction(randomized_opcode, new_oparg, ioparg)
)
else:
# otherwise, just randomize the oparg and emit
self.randomize_oparg(opcode, oparg, ioparg)
if opcode == "SET_LINENO" and not self.graph.first_inst_lineno:
self.graph.first_inst_lineno = ioparg
# randomizes an existing oparg and emits an instruction with the randomized oparg and ioparg
def randomize_oparg(self, opcode: str, oparg: object, ioparg: int) -> None:
if not self.graph.do_not_emit_bytecode:
# storing oparg to randomized version as a key value pair
if oparg in self.oparg_randomizations:
randomized_oparg = self.oparg_randomizations[oparg]
else:
randomized_oparg = randomize_variable(oparg)
self.oparg_randomizations[oparg] = randomized_oparg
if opcode in Fuzzer.INSTRS_WITH_OPARG_IN_NAMES:
ioparg = replace_name_var(oparg, randomized_oparg, self.graph.names)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
elif opcode in Fuzzer.INSTRS_WITH_OPARG_IN_VARNAMES:
ioparg = replace_name_var(oparg, randomized_oparg, self.graph.varnames)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
elif (
opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CONSTS
# LOAD_CONST often has embedded code objects or a code generator as its oparg
# If I randomize the oparg to a LOAD_CONST the code object generation could fail
# Therefore it is not being randomized at the moment
and opcode != "LOAD_CONST"
):
ioparg = replace_const_var(
self.graph.get_const_key(oparg),
self.graph.get_const_key(randomized_oparg),
self.graph.consts,
)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
elif opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CLOSURE:
ioparg = replace_closure_var(
oparg,
randomized_oparg,
ioparg,
self.graph.freevars,
self.graph.cellvars,
)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
else:
ioparg = generate_random_ioparg(opcode, ioparg)
self.graph.current.emit(pyassem.Instruction(opcode, oparg, ioparg))
INSTRS_WITH_OPARG_IN_CONSTS = {
"LOAD_CONST",
"LOAD_CLASS",
"INVOKE_FUNCTION",
"INVOKE_METHOD",
"LOAD_FIELD",
"STORE_FIELD",
"CAST",
"PRIMITIVE_BOX",
"PRIMITIVE_UNBOX",
"TP_ALLOC",
"BUILD_CHECKED_MAP",
"BUILD_CHECKED_LIST",
"PRIMITIVE_LOAD_CONST",
"LOAD_LOCAL",
"STORE_LOCAL",
"REFINE_TYPE",
"LOAD_METHOD_SUPER",
"LOAD_ATTR_SUPER",
}
INSTRS_WITH_OPARG_IN_VARNAMES = {
"LOAD_FAST",
"STORE_FAST",
"DELETE_FAST",
}
INSTRS_WITH_OPARG_IN_NAMES = {
"LOAD_NAME",
"LOAD_GLOBAL",
"STORE_GLOBAL",
"DELETE_GLOBAL",
"STORE_NAME",
"DELETE_NAME",
"IMPORT_NAME",
"IMPORT_FROM",
"STORE_ATTR",
"LOAD_ATTR",
"DELETE_ATTR",
"LOAD_METHOD",
}
INSTRS_WITH_OPARG_IN_CLOSURE = {
"LOAD_DEREF",
"STORE_DEREF",
"DELETE_DEREF",
"LOAD_CLASSDEREF",
"LOAD_CLOSURE",
}
INSTRS_WITH_BRANCHES = {
"FOR_ITER",
"JUMP_ABSOLUTE",
"JUMP_FORWARD",
"JUMP_IF_FALSE_OR_POP",
"JUMP_IF_NOT_EXC_MATCH",
"JUMP_IF_TRUE_OR_POP",
"POP_JUMP_IF_FALSE",
"POP_JUMP_IF_TRUE",
"RETURN_VALUE",
"RAISE_VARARGS",
"RERAISE",
"JUMP_ABSOLUTE",
"JUMP_FORWARD",
}
def opcodes_with_stack_effect(n, without=()):
all_opcodes = {
op for op, eff in opcode_cinder.opcode.stack_effects.items() if eff == n
}
for opcode in without:
assert opcode in all_opcodes, f"Opcode {opcode} not found in list"
result = all_opcodes - set(without)
assert (
len(result) > 1
), "Not enough opcodes in list to prevent unbounded recursion"
return result
INSTRS_WITH_STACK_EFFECT_0 = opcodes_with_stack_effect(0)
INSTRS_WITH_STACK_EFFECT_0_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_0)
# WITH_EXCEPT_START expects 7 things on the stack as a precondition.
INSTRS_WITH_STACK_EFFECT_1 = opcodes_with_stack_effect(1, {"WITH_EXCEPT_START"})
INSTRS_WITH_STACK_EFFECT_1_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_1)
INSTRS_WITH_STACK_EFFECT_2 = opcodes_with_stack_effect(2)
INSTRS_WITH_STACK_EFFECT_2_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_2)
INSTRS_WITH_STACK_EFFECT_NEG_1 = opcodes_with_stack_effect(-1)
INSTRS_WITH_STACK_EFFECT_NEG_1_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_1)
INSTRS_WITH_STACK_EFFECT_NEG_2 = opcodes_with_stack_effect(
-2,
)
INSTRS_WITH_STACK_EFFECT_NEG_2_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_2)
# RERAISE has some preconditions about the blockstack.
INSTRS_WITH_STACK_EFFECT_NEG_3 = opcodes_with_stack_effect(-3, {"RERAISE"})
INSTRS_WITH_STACK_EFFECT_NEG_3_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_3)
INSTRS_WITH_OPARG_AFFECTING_STACK = {
op
for op, eff in opcode_cinder.opcode.stack_effects.items()
if not isinstance(eff, int)
} - {
# TODO(emacs): Figure out why BUILD_SLICE is excluded.
"BUILD_SLICE",
# TODO(emacs): Figure out why FOR_ITER is excluded.
"FOR_ITER",
# TODO(emacs): Figure out why FORMAT_VALUE is excluded.
"FORMAT_VALUE",
# TODO(emacs): Figure out why INVOKE_METHOD' is excluded.
"INVOKE_METHOD",
# TODO(emacs): Figure out why JUMP_IF_X_OR_POP group is excluded.
"JUMP_IF_FALSE_OR_POP",
"JUMP_IF_TRUE_OR_POP",
# Exclude instructions that modify the blockstack.
"SETUP_ASYNC_WITH",
"SETUP_FINALLY",
"SETUP_WITH",
}
assert (
len(INSTRS_WITH_OPARG_AFFECTING_STACK) > 1
), "Not enough opcodes in list to prevent unbounded recursion"
def generate_random_integer(original: int, lower: int, upper: int) -> int:
random_int = original
while random_int == original:
random_int = random.randint(lower, upper)
return random_int
def generate_random_ioparg(opcode: str, ioparg: int):
if (
opcode in Fuzzer.INSTRS_WITH_BRANCHES
or opcode in Fuzzer.INSTRS_WITH_OPARG_AFFECTING_STACK
or opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CONSTS
):
return ioparg
elif opcode == "COMPARE_OP":
return generate_random_integer(ioparg, 0, CMP_OP_LENGTH)
return generate_random_integer(ioparg, OPARG_LOWER_BOUND, OPARG_UPPER_BOUND) | null |
185,759 | import argparse
import dis
import enum
import random
import string
import sys
import textwrap
import types
from cinderx.compiler import compile, opcode_cinder, pyassem, pycodegen, symbols
from cinderx.compiler.consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_VARARGS,
CO_VARKEYWORDS,
PyCF_MASK_OBSOLETE,
PyCF_ONLY_AST,
PyCF_SOURCE_IS_UTF8,
SC_CELL,
SC_FREE,
SC_GLOBAL_EXPLICIT,
SC_GLOBAL_IMPLICIT,
SC_LOCAL,
)
from verifier import VerificationError, Verifier
class Fuzzer(pycodegen.CinderCodeGenerator):
def __init__(self, *args, **kwargs):
def _setupGraphDelegation(self):
def emit(self, opcode: str, oparg: object = 0) -> None:
def randomize_oparg(self, opcode: str, oparg: object, ioparg: int) -> None:
def opcodes_with_stack_effect(n, without=()):
def generate_random_opcode(opcode: str, options: set) -> str:
def randomize_opcode(opcode: str) -> str:
if (
opcode in Fuzzer.INSTRS_WITH_BRANCHES
or opcode in Fuzzer.INSTRS_WITH_OPARG_AFFECTING_STACK
# LOAD_CONST often has embedded code objects or a code generator as its oparg
# If I replace LOAD_CONST instructions the code object generation can fail
# Therefore it is not being replaced at the moment
or opcode == "LOAD_CONST"
):
return opcode
stack_depth_sets = (
Fuzzer.INSTRS_WITH_STACK_EFFECT_0,
Fuzzer.INSTRS_WITH_STACK_EFFECT_1,
Fuzzer.INSTRS_WITH_STACK_EFFECT_2,
Fuzzer.INSTRS_WITH_STACK_EFFECT_NEG_1,
Fuzzer.INSTRS_WITH_STACK_EFFECT_NEG_2,
Fuzzer.INSTRS_WITH_STACK_EFFECT_NEG_3,
)
for stack_depth_set in stack_depth_sets:
if opcode in stack_depth_set:
return generate_random_opcode(opcode, stack_depth_set)
return opcode | null |
185,760 | import argparse
import dis
import enum
import random
import string
import sys
import textwrap
import types
from cinderx.compiler import compile, opcode_cinder, pyassem, pycodegen, symbols
from cinderx.compiler.consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_VARARGS,
CO_VARKEYWORDS,
PyCF_MASK_OBSOLETE,
PyCF_ONLY_AST,
PyCF_SOURCE_IS_UTF8,
SC_CELL,
SC_FREE,
SC_GLOBAL_EXPLICIT,
SC_GLOBAL_IMPLICIT,
SC_LOCAL,
)
from verifier import VerificationError, Verifier
class Fuzzer(pycodegen.CinderCodeGenerator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.flow_graph = PyFlowGraphFuzzer
self.oparg_randomizations = {}
# overriding to set definitions
def _setupGraphDelegation(self):
self.emitWithBlock = self.graph.emitWithBlock
self.newBlock = self.graph.newBlock
self.nextBlock = self.graph.nextBlock
# Overriding emit call to fuzz certain opargs stored in names, varnames, consts
# Will update to fuzz more types of opargs, and fuzz instructions as well
def emit(self, opcode: str, oparg: object = 0) -> None:
self.graph.maybeEmitSetLineno()
if opcode != "SET_LINENO" and isinstance(oparg, pyassem.Block):
if not self.graph.do_not_emit_bytecode:
self.graph.current.addOutEdge(oparg)
self.graph.current.emit(pyassem.Instruction(opcode, 0, 0, target=oparg))
return
ioparg = self.graph.convertArg(opcode, oparg)
randomized_opcode = randomize_opcode(opcode)
"""
# We can fuzz opcodes if 3 conditions are met
# 1. randomized_opcode != opcode (certain opcodes are left unrandomized, such as branch instructions)
# 2. we can safely replace the original oparg with a new one (for the new instruction)
without the length of a tuple (i.e. co_names, co_varnames) hitting zero (or it will fail assertions)
# 3. random chance based on INSTR_RANDOMIZATION_CHANCE
"""
if (
random.randint(1, 100) <= INSTR_RANDOMIZATION_CHANCE
and randomized_opcode != opcode
and can_replace_oparg(
opcode,
self.graph.consts,
self.graph.names,
self.graph.varnames,
self.graph.closure,
)
):
# if we are fuzzing this opcode
# create a new oparg corresponding to that opcode
# and emit
new_oparg = generate_oparg_for_randomized_opcode(
opcode,
randomized_opcode,
oparg,
self.graph.consts,
self.graph.names,
self.graph.varnames,
self.graph.freevars,
self.graph.cellvars,
)
# get new ioparg
ioparg = self.graph.convertArg(randomized_opcode, new_oparg)
self.graph.current.emit(
pyassem.Instruction(randomized_opcode, new_oparg, ioparg)
)
else:
# otherwise, just randomize the oparg and emit
self.randomize_oparg(opcode, oparg, ioparg)
if opcode == "SET_LINENO" and not self.graph.first_inst_lineno:
self.graph.first_inst_lineno = ioparg
# randomizes an existing oparg and emits an instruction with the randomized oparg and ioparg
def randomize_oparg(self, opcode: str, oparg: object, ioparg: int) -> None:
if not self.graph.do_not_emit_bytecode:
# storing oparg to randomized version as a key value pair
if oparg in self.oparg_randomizations:
randomized_oparg = self.oparg_randomizations[oparg]
else:
randomized_oparg = randomize_variable(oparg)
self.oparg_randomizations[oparg] = randomized_oparg
if opcode in Fuzzer.INSTRS_WITH_OPARG_IN_NAMES:
ioparg = replace_name_var(oparg, randomized_oparg, self.graph.names)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
elif opcode in Fuzzer.INSTRS_WITH_OPARG_IN_VARNAMES:
ioparg = replace_name_var(oparg, randomized_oparg, self.graph.varnames)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
elif (
opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CONSTS
# LOAD_CONST often has embedded code objects or a code generator as its oparg
# If I randomize the oparg to a LOAD_CONST the code object generation could fail
# Therefore it is not being randomized at the moment
and opcode != "LOAD_CONST"
):
ioparg = replace_const_var(
self.graph.get_const_key(oparg),
self.graph.get_const_key(randomized_oparg),
self.graph.consts,
)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
elif opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CLOSURE:
ioparg = replace_closure_var(
oparg,
randomized_oparg,
ioparg,
self.graph.freevars,
self.graph.cellvars,
)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
else:
ioparg = generate_random_ioparg(opcode, ioparg)
self.graph.current.emit(pyassem.Instruction(opcode, oparg, ioparg))
INSTRS_WITH_OPARG_IN_CONSTS = {
"LOAD_CONST",
"LOAD_CLASS",
"INVOKE_FUNCTION",
"INVOKE_METHOD",
"LOAD_FIELD",
"STORE_FIELD",
"CAST",
"PRIMITIVE_BOX",
"PRIMITIVE_UNBOX",
"TP_ALLOC",
"BUILD_CHECKED_MAP",
"BUILD_CHECKED_LIST",
"PRIMITIVE_LOAD_CONST",
"LOAD_LOCAL",
"STORE_LOCAL",
"REFINE_TYPE",
"LOAD_METHOD_SUPER",
"LOAD_ATTR_SUPER",
}
INSTRS_WITH_OPARG_IN_VARNAMES = {
"LOAD_FAST",
"STORE_FAST",
"DELETE_FAST",
}
INSTRS_WITH_OPARG_IN_NAMES = {
"LOAD_NAME",
"LOAD_GLOBAL",
"STORE_GLOBAL",
"DELETE_GLOBAL",
"STORE_NAME",
"DELETE_NAME",
"IMPORT_NAME",
"IMPORT_FROM",
"STORE_ATTR",
"LOAD_ATTR",
"DELETE_ATTR",
"LOAD_METHOD",
}
INSTRS_WITH_OPARG_IN_CLOSURE = {
"LOAD_DEREF",
"STORE_DEREF",
"DELETE_DEREF",
"LOAD_CLASSDEREF",
"LOAD_CLOSURE",
}
INSTRS_WITH_BRANCHES = {
"FOR_ITER",
"JUMP_ABSOLUTE",
"JUMP_FORWARD",
"JUMP_IF_FALSE_OR_POP",
"JUMP_IF_NOT_EXC_MATCH",
"JUMP_IF_TRUE_OR_POP",
"POP_JUMP_IF_FALSE",
"POP_JUMP_IF_TRUE",
"RETURN_VALUE",
"RAISE_VARARGS",
"RERAISE",
"JUMP_ABSOLUTE",
"JUMP_FORWARD",
}
def opcodes_with_stack_effect(n, without=()):
all_opcodes = {
op for op, eff in opcode_cinder.opcode.stack_effects.items() if eff == n
}
for opcode in without:
assert opcode in all_opcodes, f"Opcode {opcode} not found in list"
result = all_opcodes - set(without)
assert (
len(result) > 1
), "Not enough opcodes in list to prevent unbounded recursion"
return result
INSTRS_WITH_STACK_EFFECT_0 = opcodes_with_stack_effect(0)
INSTRS_WITH_STACK_EFFECT_0_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_0)
# WITH_EXCEPT_START expects 7 things on the stack as a precondition.
INSTRS_WITH_STACK_EFFECT_1 = opcodes_with_stack_effect(1, {"WITH_EXCEPT_START"})
INSTRS_WITH_STACK_EFFECT_1_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_1)
INSTRS_WITH_STACK_EFFECT_2 = opcodes_with_stack_effect(2)
INSTRS_WITH_STACK_EFFECT_2_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_2)
INSTRS_WITH_STACK_EFFECT_NEG_1 = opcodes_with_stack_effect(-1)
INSTRS_WITH_STACK_EFFECT_NEG_1_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_1)
INSTRS_WITH_STACK_EFFECT_NEG_2 = opcodes_with_stack_effect(
-2,
)
INSTRS_WITH_STACK_EFFECT_NEG_2_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_2)
# RERAISE has some preconditions about the blockstack.
INSTRS_WITH_STACK_EFFECT_NEG_3 = opcodes_with_stack_effect(-3, {"RERAISE"})
INSTRS_WITH_STACK_EFFECT_NEG_3_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_3)
INSTRS_WITH_OPARG_AFFECTING_STACK = {
op
for op, eff in opcode_cinder.opcode.stack_effects.items()
if not isinstance(eff, int)
} - {
# TODO(emacs): Figure out why BUILD_SLICE is excluded.
"BUILD_SLICE",
# TODO(emacs): Figure out why FOR_ITER is excluded.
"FOR_ITER",
# TODO(emacs): Figure out why FORMAT_VALUE is excluded.
"FORMAT_VALUE",
# TODO(emacs): Figure out why INVOKE_METHOD' is excluded.
"INVOKE_METHOD",
# TODO(emacs): Figure out why JUMP_IF_X_OR_POP group is excluded.
"JUMP_IF_FALSE_OR_POP",
"JUMP_IF_TRUE_OR_POP",
# Exclude instructions that modify the blockstack.
"SETUP_ASYNC_WITH",
"SETUP_FINALLY",
"SETUP_WITH",
}
assert (
len(INSTRS_WITH_OPARG_AFFECTING_STACK) > 1
), "Not enough opcodes in list to prevent unbounded recursion"
def can_replace_oparg(
opcode: str,
consts: dict,
names: pyassem.IndexedSet,
varnames: pyassem.IndexedSet,
closure: pyassem.IndexedSet,
):
if opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CONSTS:
return len(consts) > 1
if opcode in Fuzzer.INSTRS_WITH_OPARG_IN_NAMES:
return len(names) > 1
elif opcode in Fuzzer.INSTRS_WITH_OPARG_IN_VARNAMES:
return len(varnames) > 1
elif opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CLOSURE:
return len(closure) > 1
else:
return True | null |
185,761 | import argparse
import dis
import enum
import random
import string
import sys
import textwrap
import types
from cinderx.compiler import compile, opcode_cinder, pyassem, pycodegen, symbols
from cinderx.compiler.consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_VARARGS,
CO_VARKEYWORDS,
PyCF_MASK_OBSOLETE,
PyCF_ONLY_AST,
PyCF_SOURCE_IS_UTF8,
SC_CELL,
SC_FREE,
SC_GLOBAL_EXPLICIT,
SC_GLOBAL_IMPLICIT,
SC_LOCAL,
)
from verifier import VerificationError, Verifier
OPARG_LOWER_BOUND = 0
OPARG_UPPER_BOUND = 2**32 - 1
class Fuzzer(pycodegen.CinderCodeGenerator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.flow_graph = PyFlowGraphFuzzer
self.oparg_randomizations = {}
# overriding to set definitions
def _setupGraphDelegation(self):
self.emitWithBlock = self.graph.emitWithBlock
self.newBlock = self.graph.newBlock
self.nextBlock = self.graph.nextBlock
# Overriding emit call to fuzz certain opargs stored in names, varnames, consts
# Will update to fuzz more types of opargs, and fuzz instructions as well
def emit(self, opcode: str, oparg: object = 0) -> None:
self.graph.maybeEmitSetLineno()
if opcode != "SET_LINENO" and isinstance(oparg, pyassem.Block):
if not self.graph.do_not_emit_bytecode:
self.graph.current.addOutEdge(oparg)
self.graph.current.emit(pyassem.Instruction(opcode, 0, 0, target=oparg))
return
ioparg = self.graph.convertArg(opcode, oparg)
randomized_opcode = randomize_opcode(opcode)
"""
# We can fuzz opcodes if 3 conditions are met
# 1. randomized_opcode != opcode (certain opcodes are left unrandomized, such as branch instructions)
# 2. we can safely replace the original oparg with a new one (for the new instruction)
without the length of a tuple (i.e. co_names, co_varnames) hitting zero (or it will fail assertions)
# 3. random chance based on INSTR_RANDOMIZATION_CHANCE
"""
if (
random.randint(1, 100) <= INSTR_RANDOMIZATION_CHANCE
and randomized_opcode != opcode
and can_replace_oparg(
opcode,
self.graph.consts,
self.graph.names,
self.graph.varnames,
self.graph.closure,
)
):
# if we are fuzzing this opcode
# create a new oparg corresponding to that opcode
# and emit
new_oparg = generate_oparg_for_randomized_opcode(
opcode,
randomized_opcode,
oparg,
self.graph.consts,
self.graph.names,
self.graph.varnames,
self.graph.freevars,
self.graph.cellvars,
)
# get new ioparg
ioparg = self.graph.convertArg(randomized_opcode, new_oparg)
self.graph.current.emit(
pyassem.Instruction(randomized_opcode, new_oparg, ioparg)
)
else:
# otherwise, just randomize the oparg and emit
self.randomize_oparg(opcode, oparg, ioparg)
if opcode == "SET_LINENO" and not self.graph.first_inst_lineno:
self.graph.first_inst_lineno = ioparg
# randomizes an existing oparg and emits an instruction with the randomized oparg and ioparg
def randomize_oparg(self, opcode: str, oparg: object, ioparg: int) -> None:
if not self.graph.do_not_emit_bytecode:
# storing oparg to randomized version as a key value pair
if oparg in self.oparg_randomizations:
randomized_oparg = self.oparg_randomizations[oparg]
else:
randomized_oparg = randomize_variable(oparg)
self.oparg_randomizations[oparg] = randomized_oparg
if opcode in Fuzzer.INSTRS_WITH_OPARG_IN_NAMES:
ioparg = replace_name_var(oparg, randomized_oparg, self.graph.names)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
elif opcode in Fuzzer.INSTRS_WITH_OPARG_IN_VARNAMES:
ioparg = replace_name_var(oparg, randomized_oparg, self.graph.varnames)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
elif (
opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CONSTS
# LOAD_CONST often has embedded code objects or a code generator as its oparg
# If I randomize the oparg to a LOAD_CONST the code object generation could fail
# Therefore it is not being randomized at the moment
and opcode != "LOAD_CONST"
):
ioparg = replace_const_var(
self.graph.get_const_key(oparg),
self.graph.get_const_key(randomized_oparg),
self.graph.consts,
)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
elif opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CLOSURE:
ioparg = replace_closure_var(
oparg,
randomized_oparg,
ioparg,
self.graph.freevars,
self.graph.cellvars,
)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
else:
ioparg = generate_random_ioparg(opcode, ioparg)
self.graph.current.emit(pyassem.Instruction(opcode, oparg, ioparg))
INSTRS_WITH_OPARG_IN_CONSTS = {
"LOAD_CONST",
"LOAD_CLASS",
"INVOKE_FUNCTION",
"INVOKE_METHOD",
"LOAD_FIELD",
"STORE_FIELD",
"CAST",
"PRIMITIVE_BOX",
"PRIMITIVE_UNBOX",
"TP_ALLOC",
"BUILD_CHECKED_MAP",
"BUILD_CHECKED_LIST",
"PRIMITIVE_LOAD_CONST",
"LOAD_LOCAL",
"STORE_LOCAL",
"REFINE_TYPE",
"LOAD_METHOD_SUPER",
"LOAD_ATTR_SUPER",
}
INSTRS_WITH_OPARG_IN_VARNAMES = {
"LOAD_FAST",
"STORE_FAST",
"DELETE_FAST",
}
INSTRS_WITH_OPARG_IN_NAMES = {
"LOAD_NAME",
"LOAD_GLOBAL",
"STORE_GLOBAL",
"DELETE_GLOBAL",
"STORE_NAME",
"DELETE_NAME",
"IMPORT_NAME",
"IMPORT_FROM",
"STORE_ATTR",
"LOAD_ATTR",
"DELETE_ATTR",
"LOAD_METHOD",
}
INSTRS_WITH_OPARG_IN_CLOSURE = {
"LOAD_DEREF",
"STORE_DEREF",
"DELETE_DEREF",
"LOAD_CLASSDEREF",
"LOAD_CLOSURE",
}
INSTRS_WITH_BRANCHES = {
"FOR_ITER",
"JUMP_ABSOLUTE",
"JUMP_FORWARD",
"JUMP_IF_FALSE_OR_POP",
"JUMP_IF_NOT_EXC_MATCH",
"JUMP_IF_TRUE_OR_POP",
"POP_JUMP_IF_FALSE",
"POP_JUMP_IF_TRUE",
"RETURN_VALUE",
"RAISE_VARARGS",
"RERAISE",
"JUMP_ABSOLUTE",
"JUMP_FORWARD",
}
def opcodes_with_stack_effect(n, without=()):
all_opcodes = {
op for op, eff in opcode_cinder.opcode.stack_effects.items() if eff == n
}
for opcode in without:
assert opcode in all_opcodes, f"Opcode {opcode} not found in list"
result = all_opcodes - set(without)
assert (
len(result) > 1
), "Not enough opcodes in list to prevent unbounded recursion"
return result
INSTRS_WITH_STACK_EFFECT_0 = opcodes_with_stack_effect(0)
INSTRS_WITH_STACK_EFFECT_0_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_0)
# WITH_EXCEPT_START expects 7 things on the stack as a precondition.
INSTRS_WITH_STACK_EFFECT_1 = opcodes_with_stack_effect(1, {"WITH_EXCEPT_START"})
INSTRS_WITH_STACK_EFFECT_1_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_1)
INSTRS_WITH_STACK_EFFECT_2 = opcodes_with_stack_effect(2)
INSTRS_WITH_STACK_EFFECT_2_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_2)
INSTRS_WITH_STACK_EFFECT_NEG_1 = opcodes_with_stack_effect(-1)
INSTRS_WITH_STACK_EFFECT_NEG_1_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_1)
INSTRS_WITH_STACK_EFFECT_NEG_2 = opcodes_with_stack_effect(
-2,
)
INSTRS_WITH_STACK_EFFECT_NEG_2_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_2)
# RERAISE has some preconditions about the blockstack.
INSTRS_WITH_STACK_EFFECT_NEG_3 = opcodes_with_stack_effect(-3, {"RERAISE"})
INSTRS_WITH_STACK_EFFECT_NEG_3_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_3)
INSTRS_WITH_OPARG_AFFECTING_STACK = {
op
for op, eff in opcode_cinder.opcode.stack_effects.items()
if not isinstance(eff, int)
} - {
# TODO(emacs): Figure out why BUILD_SLICE is excluded.
"BUILD_SLICE",
# TODO(emacs): Figure out why FOR_ITER is excluded.
"FOR_ITER",
# TODO(emacs): Figure out why FORMAT_VALUE is excluded.
"FORMAT_VALUE",
# TODO(emacs): Figure out why INVOKE_METHOD' is excluded.
"INVOKE_METHOD",
# TODO(emacs): Figure out why JUMP_IF_X_OR_POP group is excluded.
"JUMP_IF_FALSE_OR_POP",
"JUMP_IF_TRUE_OR_POP",
# Exclude instructions that modify the blockstack.
"SETUP_ASYNC_WITH",
"SETUP_FINALLY",
"SETUP_WITH",
}
assert (
len(INSTRS_WITH_OPARG_AFFECTING_STACK) > 1
), "Not enough opcodes in list to prevent unbounded recursion"
def randomize_variable(var: object) -> object:
if isinstance(var, str):
return generate_random_string(var, STR_LEN_LOWER_BOUND, STR_LEN_UPPER_BOUND)
elif isinstance(var, int):
return generate_random_integer(var, INT_LOWER_BOUND, INT_UPPER_BOUND)
elif isinstance(var, tuple):
return tuple(randomize_variable(i) for i in var)
elif isinstance(var, frozenset):
return frozenset(randomize_variable(i) for i in var)
else:
return var
def generate_random_integer(original: int, lower: int, upper: int) -> int:
random_int = original
while random_int == original:
random_int = random.randint(lower, upper)
return random_int
def get_const_key(value: object):
if isinstance(value, float):
return type(value), value, pyassem.sign(value)
elif isinstance(value, complex):
return type(value), value, pyassem.sign(value.real), pyassem.sign(value.imag)
elif isinstance(value, (tuple, frozenset)):
return (
type(value),
value,
tuple(get_const_key(const) for const in value),
)
return type(value), value
def generate_oparg_for_randomized_opcode(
original_opcode: str,
randomized_opcode: str,
oparg: object,
consts: dict,
names: pyassem.IndexedSet,
varnames: pyassem.IndexedSet,
freevars: pyassem.IndexedSet,
cellvars: pyassem.IndexedSet,
) -> object:
# delete the original oparg
if original_opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CONSTS:
del consts[get_const_key(oparg)]
elif original_opcode in Fuzzer.INSTRS_WITH_OPARG_IN_NAMES:
del names.keys[oparg]
elif original_opcode in Fuzzer.INSTRS_WITH_OPARG_IN_VARNAMES:
del varnames.keys[oparg]
elif original_opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CLOSURE:
if oparg in freevars:
del freevars.keys[oparg]
else:
del cellvars.keys[oparg]
# replace with a new oparg that corresponds with the new instruction
if randomized_opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CONSTS:
new_oparg = randomize_variable(oparg)
consts[get_const_key(new_oparg)] = len(consts)
return new_oparg
elif randomized_opcode in Fuzzer.INSTRS_WITH_OPARG_IN_NAMES:
new_oparg = randomize_variable("") # random string
names.get_index(new_oparg)
return new_oparg
elif randomized_opcode in Fuzzer.INSTRS_WITH_OPARG_IN_VARNAMES:
new_oparg = randomize_variable("")
varnames.get_index(new_oparg)
return new_oparg
elif randomized_opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CLOSURE:
new_oparg = randomize_variable("")
freevars.get_index(new_oparg)
return new_oparg
elif randomized_opcode == "GEN_START":
# oparg must be < 3 according to an assert in ceval.c
return generate_random_integer(-1, 0, 3)
else:
# if it isn't in one of the tuples, just return a random integer within oparg bounds
return generate_random_integer(-1, OPARG_LOWER_BOUND, OPARG_UPPER_BOUND) | null |
185,762 | import argparse
import dis
import enum
import random
import string
import sys
import textwrap
import types
from cinderx.compiler import compile, opcode_cinder, pyassem, pycodegen, symbols
from cinderx.compiler.consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_VARARGS,
CO_VARKEYWORDS,
PyCF_MASK_OBSOLETE,
PyCF_ONLY_AST,
PyCF_SOURCE_IS_UTF8,
SC_CELL,
SC_FREE,
SC_GLOBAL_EXPLICIT,
SC_GLOBAL_IMPLICIT,
SC_LOCAL,
)
from verifier import VerificationError, Verifier
OPARG_LOWER_BOUND = 0
OPARG_UPPER_BOUND = 2**32 - 1
class Fuzzer(pycodegen.CinderCodeGenerator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.flow_graph = PyFlowGraphFuzzer
self.oparg_randomizations = {}
# overriding to set definitions
def _setupGraphDelegation(self):
self.emitWithBlock = self.graph.emitWithBlock
self.newBlock = self.graph.newBlock
self.nextBlock = self.graph.nextBlock
# Overriding emit call to fuzz certain opargs stored in names, varnames, consts
# Will update to fuzz more types of opargs, and fuzz instructions as well
def emit(self, opcode: str, oparg: object = 0) -> None:
self.graph.maybeEmitSetLineno()
if opcode != "SET_LINENO" and isinstance(oparg, pyassem.Block):
if not self.graph.do_not_emit_bytecode:
self.graph.current.addOutEdge(oparg)
self.graph.current.emit(pyassem.Instruction(opcode, 0, 0, target=oparg))
return
ioparg = self.graph.convertArg(opcode, oparg)
randomized_opcode = randomize_opcode(opcode)
"""
# We can fuzz opcodes if 3 conditions are met
# 1. randomized_opcode != opcode (certain opcodes are left unrandomized, such as branch instructions)
# 2. we can safely replace the original oparg with a new one (for the new instruction)
without the length of a tuple (i.e. co_names, co_varnames) hitting zero (or it will fail assertions)
# 3. random chance based on INSTR_RANDOMIZATION_CHANCE
"""
if (
random.randint(1, 100) <= INSTR_RANDOMIZATION_CHANCE
and randomized_opcode != opcode
and can_replace_oparg(
opcode,
self.graph.consts,
self.graph.names,
self.graph.varnames,
self.graph.closure,
)
):
# if we are fuzzing this opcode
# create a new oparg corresponding to that opcode
# and emit
new_oparg = generate_oparg_for_randomized_opcode(
opcode,
randomized_opcode,
oparg,
self.graph.consts,
self.graph.names,
self.graph.varnames,
self.graph.freevars,
self.graph.cellvars,
)
# get new ioparg
ioparg = self.graph.convertArg(randomized_opcode, new_oparg)
self.graph.current.emit(
pyassem.Instruction(randomized_opcode, new_oparg, ioparg)
)
else:
# otherwise, just randomize the oparg and emit
self.randomize_oparg(opcode, oparg, ioparg)
if opcode == "SET_LINENO" and not self.graph.first_inst_lineno:
self.graph.first_inst_lineno = ioparg
# randomizes an existing oparg and emits an instruction with the randomized oparg and ioparg
def randomize_oparg(self, opcode: str, oparg: object, ioparg: int) -> None:
if not self.graph.do_not_emit_bytecode:
# storing oparg to randomized version as a key value pair
if oparg in self.oparg_randomizations:
randomized_oparg = self.oparg_randomizations[oparg]
else:
randomized_oparg = randomize_variable(oparg)
self.oparg_randomizations[oparg] = randomized_oparg
if opcode in Fuzzer.INSTRS_WITH_OPARG_IN_NAMES:
ioparg = replace_name_var(oparg, randomized_oparg, self.graph.names)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
elif opcode in Fuzzer.INSTRS_WITH_OPARG_IN_VARNAMES:
ioparg = replace_name_var(oparg, randomized_oparg, self.graph.varnames)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
elif (
opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CONSTS
# LOAD_CONST often has embedded code objects or a code generator as its oparg
# If I randomize the oparg to a LOAD_CONST the code object generation could fail
# Therefore it is not being randomized at the moment
and opcode != "LOAD_CONST"
):
ioparg = replace_const_var(
self.graph.get_const_key(oparg),
self.graph.get_const_key(randomized_oparg),
self.graph.consts,
)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
elif opcode in Fuzzer.INSTRS_WITH_OPARG_IN_CLOSURE:
ioparg = replace_closure_var(
oparg,
randomized_oparg,
ioparg,
self.graph.freevars,
self.graph.cellvars,
)
self.graph.current.emit(
pyassem.Instruction(opcode, randomized_oparg, ioparg)
)
else:
ioparg = generate_random_ioparg(opcode, ioparg)
self.graph.current.emit(pyassem.Instruction(opcode, oparg, ioparg))
INSTRS_WITH_OPARG_IN_CONSTS = {
"LOAD_CONST",
"LOAD_CLASS",
"INVOKE_FUNCTION",
"INVOKE_METHOD",
"LOAD_FIELD",
"STORE_FIELD",
"CAST",
"PRIMITIVE_BOX",
"PRIMITIVE_UNBOX",
"TP_ALLOC",
"BUILD_CHECKED_MAP",
"BUILD_CHECKED_LIST",
"PRIMITIVE_LOAD_CONST",
"LOAD_LOCAL",
"STORE_LOCAL",
"REFINE_TYPE",
"LOAD_METHOD_SUPER",
"LOAD_ATTR_SUPER",
}
INSTRS_WITH_OPARG_IN_VARNAMES = {
"LOAD_FAST",
"STORE_FAST",
"DELETE_FAST",
}
INSTRS_WITH_OPARG_IN_NAMES = {
"LOAD_NAME",
"LOAD_GLOBAL",
"STORE_GLOBAL",
"DELETE_GLOBAL",
"STORE_NAME",
"DELETE_NAME",
"IMPORT_NAME",
"IMPORT_FROM",
"STORE_ATTR",
"LOAD_ATTR",
"DELETE_ATTR",
"LOAD_METHOD",
}
INSTRS_WITH_OPARG_IN_CLOSURE = {
"LOAD_DEREF",
"STORE_DEREF",
"DELETE_DEREF",
"LOAD_CLASSDEREF",
"LOAD_CLOSURE",
}
INSTRS_WITH_BRANCHES = {
"FOR_ITER",
"JUMP_ABSOLUTE",
"JUMP_FORWARD",
"JUMP_IF_FALSE_OR_POP",
"JUMP_IF_NOT_EXC_MATCH",
"JUMP_IF_TRUE_OR_POP",
"POP_JUMP_IF_FALSE",
"POP_JUMP_IF_TRUE",
"RETURN_VALUE",
"RAISE_VARARGS",
"RERAISE",
"JUMP_ABSOLUTE",
"JUMP_FORWARD",
}
def opcodes_with_stack_effect(n, without=()):
all_opcodes = {
op for op, eff in opcode_cinder.opcode.stack_effects.items() if eff == n
}
for opcode in without:
assert opcode in all_opcodes, f"Opcode {opcode} not found in list"
result = all_opcodes - set(without)
assert (
len(result) > 1
), "Not enough opcodes in list to prevent unbounded recursion"
return result
INSTRS_WITH_STACK_EFFECT_0 = opcodes_with_stack_effect(0)
INSTRS_WITH_STACK_EFFECT_0_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_0)
# WITH_EXCEPT_START expects 7 things on the stack as a precondition.
INSTRS_WITH_STACK_EFFECT_1 = opcodes_with_stack_effect(1, {"WITH_EXCEPT_START"})
INSTRS_WITH_STACK_EFFECT_1_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_1)
INSTRS_WITH_STACK_EFFECT_2 = opcodes_with_stack_effect(2)
INSTRS_WITH_STACK_EFFECT_2_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_2)
INSTRS_WITH_STACK_EFFECT_NEG_1 = opcodes_with_stack_effect(-1)
INSTRS_WITH_STACK_EFFECT_NEG_1_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_1)
INSTRS_WITH_STACK_EFFECT_NEG_2 = opcodes_with_stack_effect(
-2,
)
INSTRS_WITH_STACK_EFFECT_NEG_2_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_2)
# RERAISE has some preconditions about the blockstack.
INSTRS_WITH_STACK_EFFECT_NEG_3 = opcodes_with_stack_effect(-3, {"RERAISE"})
INSTRS_WITH_STACK_EFFECT_NEG_3_SEQ = tuple(INSTRS_WITH_STACK_EFFECT_NEG_3)
INSTRS_WITH_OPARG_AFFECTING_STACK = {
op
for op, eff in opcode_cinder.opcode.stack_effects.items()
if not isinstance(eff, int)
} - {
# TODO(emacs): Figure out why BUILD_SLICE is excluded.
"BUILD_SLICE",
# TODO(emacs): Figure out why FOR_ITER is excluded.
"FOR_ITER",
# TODO(emacs): Figure out why FORMAT_VALUE is excluded.
"FORMAT_VALUE",
# TODO(emacs): Figure out why INVOKE_METHOD' is excluded.
"INVOKE_METHOD",
# TODO(emacs): Figure out why JUMP_IF_X_OR_POP group is excluded.
"JUMP_IF_FALSE_OR_POP",
"JUMP_IF_TRUE_OR_POP",
# Exclude instructions that modify the blockstack.
"SETUP_ASYNC_WITH",
"SETUP_FINALLY",
"SETUP_WITH",
}
assert (
len(INSTRS_WITH_OPARG_AFFECTING_STACK) > 1
), "Not enough opcodes in list to prevent unbounded recursion"
def randomize_variable(var: object) -> object:
if isinstance(var, str):
return generate_random_string(var, STR_LEN_LOWER_BOUND, STR_LEN_UPPER_BOUND)
elif isinstance(var, int):
return generate_random_integer(var, INT_LOWER_BOUND, INT_UPPER_BOUND)
elif isinstance(var, tuple):
return tuple(randomize_variable(i) for i in var)
elif isinstance(var, frozenset):
return frozenset(randomize_variable(i) for i in var)
else:
return var
def generate_random_integer(original: int, lower: int, upper: int) -> int:
random_int = original
while random_int == original:
random_int = random.randint(lower, upper)
return random_int
def get_const_key(value: object):
if isinstance(value, float):
return type(value), value, pyassem.sign(value)
elif isinstance(value, complex):
return type(value), value, pyassem.sign(value.real), pyassem.sign(value.imag)
elif isinstance(value, (tuple, frozenset)):
return (
type(value),
value,
tuple(get_const_key(const) for const in value),
)
return type(value), value
def generate_stackdepth_combinations(possible_stack_depths):
result_list = []
_generate_stackdepth_combinations(
possible_stack_depths, current_idx=0, current_list=[], result_list=result_list
)
return result_list
def generate_random_block(
consts: dict,
names: pyassem.IndexedSet,
varnames: pyassem.IndexedSet,
freevars: pyassem.IndexedSet,
) -> pyassem.Block:
block = pyassem.Block("random")
# possible stack depths that are available, and mapping to correct set of instruction
stack_depth_to_instr_seq = {
0: Fuzzer.INSTRS_WITH_STACK_EFFECT_0_SEQ,
1: Fuzzer.INSTRS_WITH_STACK_EFFECT_1_SEQ,
2: Fuzzer.INSTRS_WITH_STACK_EFFECT_2_SEQ,
-1: Fuzzer.INSTRS_WITH_STACK_EFFECT_NEG_1_SEQ,
-2: Fuzzer.INSTRS_WITH_STACK_EFFECT_NEG_2_SEQ,
-3: Fuzzer.INSTRS_WITH_STACK_EFFECT_NEG_3_SEQ,
}
# generating all stack depth combinations of size BLOCK_SIZE that have net 0 stack effect
combinations = generate_stackdepth_combinations([0, 1, 2, -1, -2, -3])
# picking a random combination out of all that were generated
random_combination = combinations[random.randint(0, len(combinations) - 1)]
# sorting so that we don't pick a negative instruction first (preventing dip below 0 stack depth)
random_combination.sort(reverse=True)
# emit random instructions and corresponding opargs
for i in random_combination:
oparg, ioparg, instr = None, None, None
instr = random.choice(stack_depth_to_instr_seq[i])
if instr in Fuzzer.INSTRS_WITH_OPARG_IN_CONSTS:
oparg = randomize_variable(0)
ioparg = len(consts)
consts[get_const_key(oparg)] = ioparg
elif instr in Fuzzer.INSTRS_WITH_OPARG_IN_NAMES:
oparg = randomize_variable("")
ioparg = names.get_index(oparg)
elif instr in Fuzzer.INSTRS_WITH_OPARG_IN_VARNAMES:
oparg = randomize_variable("")
ioparg = varnames.get_index(oparg)
elif instr in Fuzzer.INSTRS_WITH_OPARG_IN_CLOSURE:
oparg = randomize_variable("")
ioparg = freevars.get_index(oparg)
else:
random_int_oparg = generate_random_integer(
0, OPARG_LOWER_BOUND, OPARG_UPPER_BOUND
)
oparg = random_int_oparg
ioparg = random_int_oparg
block.emit(pyassem.Instruction(instr, oparg, ioparg))
return block | null |
185,794 | from __future__ import division
from __future__ import print_function
import time
import os
import sys
import itertools
import threading
import subprocess
import socket
from optparse import OptionParser, SUPPRESS_HELP
import platform
LAT_END = "END"
def _sendto(sock, s, addr):
sock.sendto(s.encode('ascii'), addr)
def latency_client(addr, nb_pings, interval):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
_time = time.time
_sleep = time.sleep
def _ping():
_sendto(sock, "%r\n" % _time(), addr)
# The first ping signals the parent process that we are ready.
_ping()
# We give the parent a bit of time to notice.
_sleep(1.0)
for i in range(nb_pings):
_sleep(interval)
_ping()
_sendto(sock, LAT_END + "\n", addr)
finally:
sock.close() | null |
185,795 | from __future__ import division
from __future__ import print_function
import time
import os
import sys
import itertools
import threading
import subprocess
import socket
from optparse import OptionParser, SUPPRESS_HELP
import platform
latency_tasks = throughput_tasks
def run_latency_test(func, args, nthreads):
# Create a listening socket to receive the pings. We use UDP which should
# be painlessly cross-platform.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))
addr = sock.getsockname()
interval = LATENCY_PING_INTERVAL
duration = LATENCY_DURATION
nb_pings = int(duration / interval)
results = []
threads = []
end_event = []
start_cond = threading.Condition()
started = False
if nthreads > 0:
# Warm up
func(*args)
results = []
loop = TimedLoop(func, args)
ready = []
ready_cond = threading.Condition()
def run():
with ready_cond:
ready.append(None)
ready_cond.notify()
with start_cond:
while not started:
start_cond.wait()
loop(start_time, duration * 1.5, end_event, do_yield=False)
for i in range(nthreads):
threads.append(threading.Thread(target=run))
for t in threads:
t.daemon = True
t.start()
# Wait for threads to be ready
with ready_cond:
while len(ready) < nthreads:
ready_cond.wait()
# Run the client and wait for the first ping(s) to arrive before
# unblocking the background threads.
chunks = []
process = run_latency_client(addr=sock.getsockname(),
nb_pings=nb_pings, interval=interval)
s = _recv(sock, 4096)
_time = time.time
with start_cond:
start_time = _time()
started = True
start_cond.notify(nthreads)
while LAT_END not in s:
s = _recv(sock, 4096)
t = _time()
chunks.append((t, s))
# Tell the background threads to stop.
end_event.append(None)
for t in threads:
t.join()
process.wait()
sock.close()
for recv_time, chunk in chunks:
# NOTE: it is assumed that a line sent by a client wasn't received
# in two chunks because the lines are very small.
for line in chunk.splitlines():
line = line.strip()
if line and line != LAT_END:
send_time = eval(line)
assert isinstance(send_time, float)
results.append((send_time, recv_time))
return results
def run_latency_tests(max_threads):
for task in latency_tasks:
print("Background CPU task:", task.__doc__)
print()
func, args = task()
nthreads = 0
while nthreads <= max_threads:
results = run_latency_test(func, args, nthreads)
n = len(results)
# We print out milliseconds
lats = [1000 * (t2 - t1) for (t1, t2) in results]
#print(list(map(int, lats)))
avg = sum(lats) / n
dev = (sum((x - avg) ** 2 for x in lats) / n) ** 0.5
print("CPU threads=%d: %d ms. (std dev: %d ms.)" % (nthreads, avg, dev), end="")
print()
#print(" [... from %d samples]" % n)
nthreads += 1
print() | null |
185,796 | from __future__ import division
from __future__ import print_function
import time
import os
import sys
import itertools
import threading
import subprocess
import socket
from optparse import OptionParser, SUPPRESS_HELP
import platform
def _sendto(sock, s, addr):
sock.sendto(s.encode('ascii'), addr)
def _recv(sock, n):
return sock.recv(n).decode('ascii')
BW_END = "END"
def bandwidth_client(addr, packet_size, duration):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))
local_addr = sock.getsockname()
_time = time.time
_sleep = time.sleep
def _send_chunk(msg):
_sendto(sock, ("%r#%s\n" % (local_addr, msg)).rjust(packet_size), addr)
# We give the parent some time to be ready.
_sleep(1.0)
try:
start_time = _time()
end_time = start_time + duration * 2.0
i = 0
while _time() < end_time:
_send_chunk(str(i))
s = _recv(sock, packet_size)
assert len(s) == packet_size
i += 1
_send_chunk(BW_END)
finally:
sock.close() | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.