language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 97126,
"end": 98260
} | class ____(fixtures.TestBase):
def test_modules_are_loaded(self):
to_restore = []
for m in ("xml.dom", "wsgiref.simple_server"):
to_restore.append((m, sys.modules.pop(m, None)))
try:
mr = preloaded._ModuleRegistry()
ret = mr.preload_module(
"xml.dom", "wsgiref.simple_server", "sqlalchemy.sql.util"
)
o = object()
is_(ret(o), o)
is_false(hasattr(mr, "xml_dom"))
mr.import_prefix("xml")
is_true("xml.dom" in sys.modules)
is_(sys.modules["xml.dom"], mr.xml_dom)
is_true("wsgiref.simple_server" not in sys.modules)
mr.import_prefix("wsgiref")
is_true("wsgiref.simple_server" in sys.modules)
is_(sys.modules["wsgiref.simple_server"], mr.wsgiref_simple_server)
mr.import_prefix("sqlalchemy")
is_(sys.modules["sqlalchemy.sql.util"], mr.sql_util)
finally:
for name, mod in to_restore:
if mod is not None:
sys.modules[name] = mod
| TestModuleRegistry |
python | pytorch__pytorch | torch/_numpy/_dtypes.py | {
"start": 2625,
"end": 2740
} | class ____(complexfloating):
name = "complex128"
typecode = "D"
torch_dtype = torch.complex128
| complex128 |
python | plotly__plotly.py | plotly/graph_objs/scattercarpet/selected/_textfont.py | {
"start": 233,
"end": 2451
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattercarpet.selected"
_path_str = "scattercarpet.selected.textfont"
_valid_props = {"color"}
@property
def color(self):
"""
Sets the text font color of selected points.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the text font color of selected points.
"""
def __init__(self, arg=None, color=None, **kwargs):
"""
Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattercarpet.
selected.Textfont`
color
Sets the text font color of selected points.
Returns
-------
Textfont
"""
super().__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattercarpet.selected.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.selected.Textfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Textfont |
python | ray-project__ray | python/ray/tests/accelerators/mock_dpctl_2.py | {
"start": 0,
"end": 124
} | class ____:
def __init__(self, info):
pass
@property
def device_count(self):
return 4
| SyclContext |
python | ansible__ansible | lib/ansible/plugins/action/raw.py | {
"start": 792,
"end": 1762
} | class ____(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
if self._task.environment and any(self._task.environment):
self._display.warning('raw module does not support the environment keyword')
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if self._task.check_mode:
# in --check mode, always skip this module execution
result['skipped'] = True
return result
executable = self._task.args.get('executable', False)
result.update(self._low_level_execute_command(self._task.args.get('_raw_params'), executable=executable))
result['changed'] = True
if 'rc' in result and result['rc'] != 0:
result['failed'] = True
result['msg'] = 'non-zero return code'
return result
| ActionModule |
python | jina-ai__jina | jina/serve/runtimes/servers/composite.py | {
"start": 3437,
"end": 4195
} | class ____(CompositeBaseServer):
"""Composite Server implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.servers: List[BaseServer] = []
for server_kwargs in self._server_kwargs:
server_cls = _get_gateway_class(
server_kwargs['runtime_args']['protocol'],
works_as_load_balancer=self.works_as_load_balancer,
)
server = server_cls(**server_kwargs)
self.servers.append(server)
self.gateways = self.servers # for backwards compatibility
| CompositeServer |
python | dagster-io__dagster | python_modules/libraries/dagster-deltalake-pandas/dagster_deltalake_pandas/deltalake_pandas_type_handler.py | {
"start": 333,
"end": 813
} | class ____(DeltalakeBaseArrowTypeHandler[pd.DataFrame]):
def from_arrow(
self, obj: pa.RecordBatchReader, target_type: type[pd.DataFrame]
) -> pd.DataFrame:
return obj.read_pandas()
def to_arrow(self, obj: pd.DataFrame) -> tuple[pa.RecordBatchReader, dict[str, Any]]:
return pa.Table.from_pandas(obj).to_reader(), {}
@property
def supported_types(self) -> Sequence[type[object]]:
return [pd.DataFrame]
| DeltaLakePandasTypeHandler |
python | scipy__scipy | benchmarks/benchmarks/linalg_logm.py | {
"start": 164,
"end": 785
} | class ____(Benchmark):
params = [
['float64', 'complex128'],
[64, 256],
['gen', 'her', 'pos']
]
param_names = ['dtype', 'n', 'structure']
def setup(self, dtype, n, structure):
n = int(n)
dtype = np.dtype(dtype)
A = np.random.rand(n, n)
if dtype == np.complex128:
A = A + 1j*np.random.rand(n, n)
if structure == 'pos':
A = A @ A.T.conj()
elif structure == 'her':
A = A + A.T.conj()
self.A = A
def time_logm(self, dtype, n, structure):
scipy.linalg.logm(self.A, disp=False)
| Logm |
python | dagster-io__dagster | python_modules/libraries/dagster-mysql/dagster_mysql_tests/test_event_log.py | {
"start": 759,
"end": 5329
} | class ____(TestEventLogStorage):
__test__ = True
@pytest.fixture(name="instance", scope="function")
def instance(self, conn_string):
MySQLEventLogStorage.create_clean_storage(conn_string)
with instance_for_test(
overrides={"storage": {"mysql": {"mysql_url": conn_string}}}
) as instance:
yield instance
def can_wipe_asset_partitions(self) -> bool:
return False
@pytest.fixture(scope="function", name="storage")
def event_log_storage(self, instance):
event_log_storage = instance.event_log_storage
assert isinstance(event_log_storage, MySQLEventLogStorage)
yield event_log_storage
def test_event_log_storage_two_watchers(self, conn_string):
with _clean_storage(conn_string) as storage:
run_id = make_new_run_id()
watched_1 = []
watched_2 = []
def watch_one(event, _cursor):
watched_1.append(event)
def watch_two(event, _cursor):
watched_2.append(event)
assert len(storage.get_logs_for_run(run_id)) == 0
storage.store_event(create_test_event_log_record(str(1), run_id=run_id))
assert len(storage.get_logs_for_run(run_id)) == 1
assert len(watched_1) == 0
storage.watch(run_id, str(EventLogCursor.from_storage_id(1)), watch_one)
storage.store_event(create_test_event_log_record(str(2), run_id=run_id))
storage.store_event(create_test_event_log_record(str(3), run_id=run_id))
storage.watch(run_id, str(EventLogCursor.from_storage_id(3)), watch_two)
storage.store_event(create_test_event_log_record(str(4), run_id=run_id))
attempts = 10
while (len(watched_1) < 3 or len(watched_2) < 1) and attempts > 0:
time.sleep(0.1)
attempts -= 1
assert len(storage.get_logs_for_run(run_id)) == 4
assert len(watched_1) == 3
assert len(watched_2) == 1
storage.end_watch(run_id, watch_one)
time.sleep(0.3) # this value scientifically selected from a range of attractive values
storage.store_event(create_test_event_log_record(str(5), run_id=run_id))
attempts = 10
while len(watched_2) < 2 and attempts > 0:
time.sleep(0.1)
attempts -= 1
storage.end_watch(run_id, watch_two)
assert len(storage.get_logs_for_run(run_id)) == 5
assert len(watched_1) == 3
assert len(watched_2) == 2
storage.delete_events(run_id)
assert len(storage.get_logs_for_run(run_id)) == 0
assert len(watched_1) == 3
assert len(watched_2) == 2
assert [int(evt.message) for evt in watched_1] == [2, 3, 4]
assert [int(evt.message) for evt in watched_2] == [4, 5]
assert len(objgraph.by_type("SqlPollingEventWatcher")) == 1
# ensure we clean up poller on exit
gc.collect()
assert len(objgraph.by_type("SqlPollingEventWatcher")) == 0
def test_load_from_config(self, conn_string):
parse_result = urlparse(conn_string)
hostname = parse_result.hostname # can be custom set in the BK env
port = (
parse_result.port
) # can be different, based on the backcompat mysql version or latest mysql version
url_cfg = f"""
event_log_storage:
module: dagster_mysql.event_log
class: MySQLEventLogStorage
config:
mysql_url: mysql+mysqlconnector://test:test@{hostname}:{port}/test
"""
explicit_cfg = f"""
event_log_storage:
module: dagster_mysql.event_log
class: MySQLEventLogStorage
config:
mysql_db:
username: test
password: test
hostname: {hostname}
port: {port}
db_name: test
"""
with instance_for_test(overrides=yaml.safe_load(url_cfg)) as from_url_instance:
from_url = from_url_instance._event_storage # noqa: SLF001
with instance_for_test(overrides=yaml.safe_load(explicit_cfg)) as explicit_instance:
from_explicit = explicit_instance._event_storage # noqa: SLF001
assert from_url.mysql_url == from_explicit.mysql_url # pyright: ignore[reportAttributeAccessIssue]
| TestMySQLEventLogStorage |
python | kamyu104__LeetCode-Solutions | Python/sum-of-matrix-after-queries.py | {
"start": 46,
"end": 516
} | class ____(object):
def matrixSumQueries(self, n, queries):
"""
:type n: int
:type queries: List[List[int]]
:rtype: int
"""
lookup = [[False]*n for _ in xrange(2)]
cnt = [0]*2
result = 0
for t, i, v in reversed(queries):
if lookup[t][i]:
continue
lookup[t][i] = True
cnt[t] += 1
result += v*(n-cnt[t^1])
return result
| Solution |
python | python__mypy | mypy/semanal_enum.py | {
"start": 1283,
"end": 10197
} | class ____:
def __init__(self, options: Options, api: SemanticAnalyzerInterface) -> None:
self.options = options
self.api = api
def process_enum_call(self, s: AssignmentStmt, is_func_scope: bool) -> bool:
"""Check if s defines an Enum; if yes, store the definition in symbol table.
Return True if this looks like an Enum definition (but maybe with errors),
otherwise return False.
"""
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], (NameExpr, MemberExpr)):
return False
lvalue = s.lvalues[0]
name = lvalue.name
enum_call = self.check_enum_call(s.rvalue, name, is_func_scope)
if enum_call is None:
return False
if isinstance(lvalue, MemberExpr):
self.fail("Enum type as attribute is not supported", lvalue)
return False
# Yes, it's a valid Enum definition. Add it to the symbol table.
self.api.add_symbol(name, enum_call, s)
return True
def check_enum_call(
self, node: Expression, var_name: str, is_func_scope: bool
) -> TypeInfo | None:
"""Check if a call defines an Enum.
Example:
A = enum.Enum('A', 'foo bar')
is equivalent to:
class A(enum.Enum):
foo = 1
bar = 2
"""
if not isinstance(node, CallExpr):
return None
call = node
callee = call.callee
if not isinstance(callee, RefExpr):
return None
fullname = callee.fullname
if fullname not in ENUM_BASES:
return None
new_class_name, items, values, ok = self.parse_enum_call_args(
call, fullname.split(".")[-1]
)
if not ok:
# Error. Construct dummy return value.
name = var_name
if is_func_scope:
name += "@" + str(call.line)
info = self.build_enum_call_typeinfo(name, [], fullname, node.line)
else:
if new_class_name != var_name:
msg = f'String argument 1 "{new_class_name}" to {fullname}(...) does not match variable name "{var_name}"'
self.fail(msg, call)
name = cast(StrExpr, call.args[0]).value
if name != var_name or is_func_scope:
# Give it a unique name derived from the line number.
name += "@" + str(call.line)
info = self.build_enum_call_typeinfo(name, items, fullname, call.line)
# Store generated TypeInfo under both names, see semanal_namedtuple for more details.
if name != var_name or is_func_scope:
self.api.add_symbol_skip_local(name, info)
call.analyzed = EnumCallExpr(info, items, values)
call.analyzed.set_line(call)
info.line = node.line
return info
def build_enum_call_typeinfo(
self, name: str, items: list[str], fullname: str, line: int
) -> TypeInfo:
base = self.api.named_type_or_none(fullname)
assert base is not None
info = self.api.basic_new_typeinfo(name, base, line)
info.metaclass_type = info.calculate_metaclass_type()
info.is_enum = True
for item in items:
var = Var(item)
var.info = info
var.is_property = True
# When an enum is created by its functional form `Enum(name, values)`
# - if it is a string it is first split by commas/whitespace
# - if it is an iterable of single items each item is assigned a value starting at `start`
# - if it is an iterable of (name, value) then the given values will be used
# either way, each item should be treated as if it has an explicit value.
var.has_explicit_value = True
var._fullname = f"{info.fullname}.{item}"
info.names[item] = SymbolTableNode(MDEF, var)
return info
def parse_enum_call_args(
self, call: CallExpr, class_name: str
) -> tuple[str, list[str], list[Expression | None], bool]:
"""Parse arguments of an Enum call.
Return a tuple of fields, values, was there an error.
"""
args = call.args
if not all(arg_kind in [ARG_POS, ARG_NAMED] for arg_kind in call.arg_kinds):
return self.fail_enum_call_arg(f"Unexpected arguments to {class_name}()", call)
if len(args) < 2:
return self.fail_enum_call_arg(f"Too few arguments for {class_name}()", call)
if len(args) > 6:
return self.fail_enum_call_arg(f"Too many arguments for {class_name}()", call)
valid_name = [None, "value", "names", "module", "qualname", "type", "start"]
for arg_name in call.arg_names:
if arg_name not in valid_name:
self.fail_enum_call_arg(f'Unexpected keyword argument "{arg_name}"', call)
value, names = None, None
for arg_name, arg in zip(call.arg_names, args):
if arg_name == "value":
value = arg
if arg_name == "names":
names = arg
if value is None:
value = args[0]
if names is None:
names = args[1]
if not isinstance(value, StrExpr):
return self.fail_enum_call_arg(
f"{class_name}() expects a string literal as the first argument", call
)
new_class_name = value.value
items = []
values: list[Expression | None] = []
if isinstance(names, StrExpr):
fields = names.value
for field in fields.replace(",", " ").split():
items.append(field)
elif isinstance(names, (TupleExpr, ListExpr)):
seq_items = names.items
if is_StrExpr_list(seq_items):
items = [seq_item.value for seq_item in seq_items]
elif all(
isinstance(seq_item, (TupleExpr, ListExpr))
and len(seq_item.items) == 2
and isinstance(seq_item.items[0], StrExpr)
for seq_item in seq_items
):
for seq_item in seq_items:
assert isinstance(seq_item, (TupleExpr, ListExpr))
name, value = seq_item.items
assert isinstance(name, StrExpr)
items.append(name.value)
values.append(value)
else:
return self.fail_enum_call_arg(
"%s() with tuple or list expects strings or (name, value) pairs" % class_name,
call,
)
elif isinstance(names, DictExpr):
for key, value in names.items:
if not isinstance(key, StrExpr):
return self.fail_enum_call_arg(
f"{class_name}() with dict literal requires string literals", call
)
items.append(key.value)
values.append(value)
elif isinstance(args[1], RefExpr) and isinstance(args[1].node, Var):
proper_type = get_proper_type(args[1].node.type)
if (
proper_type is not None
and isinstance(proper_type, LiteralType)
and isinstance(proper_type.value, str)
):
fields = proper_type.value
for field in fields.replace(",", " ").split():
items.append(field)
elif args[1].node.is_final and isinstance(args[1].node.final_value, str):
fields = args[1].node.final_value
for field in fields.replace(",", " ").split():
items.append(field)
else:
return self.fail_enum_call_arg(
"Second argument of %s() must be string, tuple, list or dict literal for mypy to determine Enum members"
% class_name,
call,
)
else:
# TODO: Allow dict(x=1, y=2) as a substitute for {'x': 1, 'y': 2}?
return self.fail_enum_call_arg(
"Second argument of %s() must be string, tuple, list or dict literal for mypy to determine Enum members"
% class_name,
call,
)
if not items:
return self.fail_enum_call_arg(f"{class_name}() needs at least one item", call)
if not values:
values = [None] * len(items)
assert len(items) == len(values)
return new_class_name, items, values, True
def fail_enum_call_arg(
self, message: str, context: Context
) -> tuple[str, list[str], list[Expression | None], bool]:
self.fail(message, context)
return "", [], [], False
# Helpers
def fail(self, msg: str, ctx: Context) -> None:
self.api.fail(msg, ctx)
| EnumCallAnalyzer |
python | huggingface__transformers | tests/utils/test_versions_utils.py | {
"start": 889,
"end": 3487
} | class ____(TestCasePlus):
def test_core(self):
# lt + different version strings
require_version_core("numpy<1000.4.5")
require_version_core("numpy<1000.4")
require_version_core("numpy<1000")
# le
require_version_core("numpy<=1000.4.5")
require_version_core(f"numpy<={numpy_ver}")
# eq
require_version_core(f"numpy=={numpy_ver}")
# ne
require_version_core("numpy!=1000.4.5")
# ge
require_version_core("numpy>=1.0")
require_version_core("numpy>=1.0.0")
require_version_core(f"numpy>={numpy_ver}")
# gt
require_version_core("numpy>1.0.0")
# mix
require_version_core("numpy>1.0.0,<1000")
# requirement w/o version
require_version_core("numpy")
# unmet requirements due to version conflict
for req in ["numpy==1.0.0", "numpy>=1000.0.0", f"numpy<{numpy_ver}"]:
try:
require_version_core(req)
except ImportError as e:
self.assertIn(f"{req} is required", str(e))
self.assertIn("but found", str(e))
# unmet requirements due to missing module
for req in ["numpipypie>1", "numpipypie2"]:
try:
require_version_core(req)
except importlib.metadata.PackageNotFoundError as e:
self.assertIn(f"The '{req}' distribution was not found and is required by this application", str(e))
self.assertIn("Try: `pip install transformers -U`", str(e))
# bogus requirements formats:
# 1. whole thing
for req in ["numpy??1.0.0", "numpy1.0.0"]:
try:
require_version_core(req)
except ValueError as e:
self.assertIn("requirement needs to be in the pip package format", str(e))
# 2. only operators
for req in ["numpy=1.0.0", "numpy == 1.00", "numpy<>1.0.0", "numpy><1.00", "numpy>>1.0.0"]:
try:
require_version_core(req)
except ValueError as e:
self.assertIn("need one of ", str(e))
def test_python(self):
# matching requirement
require_version("python>=3.9.0")
# not matching requirements
for req in ["python>9.9.9", "python<3.0.0"]:
try:
require_version_core(req)
except ImportError as e:
self.assertIn(f"{req} is required", str(e))
self.assertIn(f"but found python=={python_ver}", str(e))
| DependencyVersionCheckTest |
python | huggingface__transformers | tests/models/deepseek_v2/test_modeling_deepseek_v2.py | {
"start": 1201,
"end": 1798
} | class ____(CausalLMModelTester):
if is_torch_available():
base_model_class = DeepseekV2Model
def __init__(
self,
parent,
n_routed_experts=8,
kv_lora_rank=32,
q_lora_rank=16,
qk_nope_head_dim=64,
qk_rope_head_dim=64,
):
super().__init__(parent=parent)
self.n_routed_experts = n_routed_experts
self.kv_lora_rank = kv_lora_rank
self.q_lora_rank = q_lora_rank
self.qk_nope_head_dim = qk_nope_head_dim
self.qk_rope_head_dim = qk_rope_head_dim
@require_torch
| DeepseekV2ModelTester |
python | gevent__gevent | src/gevent/tests/test__exc_info.py | {
"start": 449,
"end": 1377
} | class ____(greentest.TestCase):
def test1(self):
error = RawException('hello')
expected_error = ExpectedError('expected exception in hello')
try:
raise error
except RawException:
self.expect_one_error()
g = gevent.spawn(hello, expected_error)
g.join()
self.assert_error(ExpectedError, expected_error)
self.assertIsInstance(g.exception, ExpectedError)
try:
raise
except: # pylint:disable=bare-except
ex = sys.exc_info()[1]
self.assertIs(ex, error)
def test2(self):
timer = gevent.get_hub().loop.timer(0)
timer.start(hello2)
try:
gevent.sleep(0.1)
self.assertEqual(sys.exc_info(), (None, None, None))
finally:
timer.close()
if __name__ == '__main__':
greentest.main()
| Test |
python | geekcomputers__Python | singly_linked_list.py | {
"start": 94,
"end": 2620
} | class ____:
def __init__(self):
self.head = None
def length(self):
curr = self.head
count = 0
while curr.next != None:
count += 1
curr = curr.next
return count
def add_node(self, data):
new_node = Node(data)
if self.head is None:
self.head = new_node
else:
curr = self.head
while curr.next != None:
curr = curr.next
curr.next = new_node
def insert_at_head(self, data):
new_node = Node(data)
temp = self.head
self.head = new_node
new_node.next = temp
del temp
def insert(self, pos, data):
if pos < 0 or pos > self.length():
print("Enter valid index")
elif pos == 0:
self.insert_at_head(data)
return
elif pos == self.length() - 1:
self.add_node(data)
return
new_node = Node(data)
curr_pos = 0
prev = None
curr = self.head
while True:
if pos == curr_pos:
prev.next = new_node
new_node.next = curr
break
prev = curr
curr = curr.next
curr_pos += 1
def delete_head(self):
temp = self.head
self.head = temp.next
del temp
def delete_end(self):
curr = self.head
prev = None
while True:
if curr.next == None:
prev.next = None
del curr
break
prev = curr
curr = curr.next
def delete(self, pos):
if pos < 0 or pos > self.length():
print("Enter valid index")
return
elif pos == 0:
self.delete_head()
return
elif pos == self.length() - 1:
self.delete_end()
return
curr = self.head
curr_pos = 0
prev = None
while True:
if curr_pos == pos:
prev.next = curr.next
del curr
break
prev = curr
curr = curr.next
curr_pos += 1
def display(self):
if self.head is None:
print("List is empty")
rev = []
curr = self.head
while curr != None:
print(f"{curr.data} --> ", end="")
rev.append(curr.data)
curr = curr.next
print()
return rev[::-1]
| LinkedList |
python | FactoryBoy__factory_boy | tests/test_fuzzy.py | {
"start": 184,
"end": 470
} | class ____(unittest.TestCase):
def test_simple_call(self):
d = fuzzy.FuzzyAttribute(lambda: 10)
res = utils.evaluate_declaration(d)
self.assertEqual(10, res)
res = utils.evaluate_declaration(d)
self.assertEqual(10, res)
| FuzzyAttributeTestCase |
python | nryoung__algorithms | tests/test_searching.py | {
"start": 5123,
"end": 5484
} | class ____(unittest.TestCase):
"""
Tests KMP search on string "ABCDE FG ABCDEABCDEF"
"""
def test_kmpsearch(self):
self.string = "ABCDE FG ABCDEABCDEF"
rv1 = kmp_search.search(self.string, "ABCDEA")
rv2 = kmp_search.search(self.string, "ABCDER")
self.assertIs(rv1[0], 9)
self.assertFalse(rv2)
| TestKMPSearch |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/auto_fr.py | {
"start": 113,
"end": 808
} | class ____(App):
CSS = """
Screen {
align: center middle;
border: solid cyan;
}
#container {
width: 30;
height: auto;
border: solid green;
overflow-y: auto;
}
#child {
height: 1fr;
border: solid red;
}
#bottom {
margin: 1 2;
background: $primary;
}
"""
def compose(self) -> ComposeResult:
with Widget(id="container"):
yield Label("Hello one line", id="top")
yield Widget(id="child")
yield Label("Two\nLines with 1x2 margin", id="bottom")
if __name__ == "__main__":
app = FRApp()
app.run()
| FRApp |
python | eth-brownie__brownie | brownie/_config.py | {
"start": 4796,
"end": 4862
} | class ____(ConfigContainer, metaclass=_Singleton): ...
@final
| Config |
python | sympy__sympy | sympy/core/tests/test_expr.py | {
"start": 27347,
"end": 80071
} | class ____(Mul):
pass
def test_as_independent():
assert S.Zero.as_independent(x, as_Add=True) == (0, 0)
assert S.Zero.as_independent(x, as_Add=False) == (0, 0)
assert (2*x*sin(x) + y + x).as_independent(x) == (y, x + 2*x*sin(x))
assert (2*x*sin(x) + y + x).as_independent(y) == (x + 2*x*sin(x), y)
assert (2*x*sin(x) + y + x).as_independent(x, y) == (0, y + x + 2*x*sin(x))
assert (x*sin(x)*cos(y)).as_independent(x) == (cos(y), x*sin(x))
assert (x*sin(x)*cos(y)).as_independent(y) == (x*sin(x), cos(y))
assert (x*sin(x)*cos(y)).as_independent(x, y) == (1, x*sin(x)*cos(y))
assert (sin(x)).as_independent(x) == (1, sin(x))
assert (sin(x)).as_independent(y) == (sin(x), 1)
assert (2*sin(x)).as_independent(x) == (2, sin(x))
assert (2*sin(x)).as_independent(y) == (2*sin(x), 1)
# issue 4903 = 1766b
n1, n2, n3 = symbols('n1 n2 n3', commutative=False)
assert (n1 + n1*n2).as_independent(n2) == (n1, n1*n2)
assert (n2*n1 + n1*n2).as_independent(n2) == (0, n1*n2 + n2*n1)
assert (n1*n2*n1).as_independent(n2) == (n1, n2*n1)
assert (n1*n2*n1).as_independent(n1) == (1, n1*n2*n1)
assert (3*x).as_independent(x, as_Add=True) == (0, 3*x)
assert (3*x).as_independent(x, as_Add=False) == (3, x)
assert (3 + x).as_independent(x, as_Add=True) == (3, x)
assert (3 + x).as_independent(x, as_Add=False) == (1, 3 + x)
# issue 5479
assert (3*x).as_independent(Symbol) == (3, x)
# issue 5648
assert (n1*x*y).as_independent(x) == (n1*y, x)
assert ((x + n1)*(x - y)).as_independent(x) == (1, (x + n1)*(x - y))
assert ((x + n1)*(x - y)).as_independent(y) == (x + n1, x - y)
assert (DiracDelta(x - n1)*DiracDelta(x - y)).as_independent(x) \
== (1, DiracDelta(x - n1)*DiracDelta(x - y))
assert (x*y*n1*n2*n3).as_independent(n2) == (x*y*n1, n2*n3)
assert (x*y*n1*n2*n3).as_independent(n1) == (x*y, n1*n2*n3)
assert (x*y*n1*n2*n3).as_independent(n3) == (x*y*n1*n2, n3)
assert (DiracDelta(x - n1)*DiracDelta(y - n1)*DiracDelta(x - n2)).as_independent(y) == \
(DiracDelta(x - n1)*DiracDelta(x - n2), DiracDelta(y - n1))
# issue 5784
assert (x + Integral(x, (x, 1, 2))).as_independent(x, strict=True) == \
(Integral(x, (x, 1, 2)), x)
eq = Add(x, -x, 2, -3, evaluate=False)
assert eq.as_independent(x) == (-1, Add(x, -x, evaluate=False))
eq = Mul(x, 1/x, 2, -3, evaluate=False)
assert eq.as_independent(x) == (-6, Mul(x, 1/x, evaluate=False))
assert (x*y).as_independent(z, as_Add=True) == (x*y, 0)
# subclassing Add and Mul
eq = CustomAdd(y, CustomMul(x, y), z)
ind, dep = eq.as_independent(x)
assert ind - (y + z) == 0
assert isinstance(ind, CustomAdd)
assert dep/(x*y) == 1
assert isinstance(dep, CustomMul)
eq = CustomMul(y, CustomAdd(x, y), z)
ind, dep = eq.as_independent(x)
assert ind/(y*z) == 1
assert isinstance(ind, CustomMul)
assert dep - (x + y) == 0
assert isinstance(dep, CustomAdd)
@XFAIL
def test_call_2():
# TODO UndefinedFunction does not subclass Expr
assert (2*f)(x) == 2*f(x)
def test_replace():
e = log(sin(x)) + tan(sin(x**2))
assert e.replace(sin, cos) == log(cos(x)) + tan(cos(x**2))
assert e.replace(
sin, lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
a = Wild('a')
b = Wild('b')
assert e.replace(sin(a), cos(a)) == log(cos(x)) + tan(cos(x**2))
assert e.replace(
sin(a), lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
# test exact
assert (2*x).replace(a*x + b, b - a, exact=True) == 2*x
assert (2*x).replace(a*x + b, b - a) == 2*x
assert (2*x).replace(a*x + b, b - a, exact=False) == 2/x
assert (2*x).replace(a*x + b, lambda a, b: b - a, exact=True) == 2*x
assert (2*x).replace(a*x + b, lambda a, b: b - a) == 2*x
assert (2*x).replace(a*x + b, lambda a, b: b - a, exact=False) == 2/x
g = 2*sin(x**3)
assert g.replace(
lambda expr: expr.is_Number, lambda expr: expr**2) == 4*sin(x**9)
assert cos(x).replace(cos, sin, map=True) == (sin(x), {cos(x): sin(x)})
assert sin(x).replace(cos, sin) == sin(x)
cond, func = lambda x: x.is_Mul, lambda x: 2*x
assert (x*y).replace(cond, func, map=True) == (2*x*y, {x*y: 2*x*y})
assert (x*(1 + x*y)).replace(cond, func, map=True) == \
(2*x*(2*x*y + 1), {x*(2*x*y + 1): 2*x*(2*x*y + 1), x*y: 2*x*y})
assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y, map=True) == \
(sin(x), {sin(x): sin(x)/y})
# if not simultaneous then y*sin(x) -> y*sin(x)/y = sin(x) -> sin(x)/y
assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y,
simultaneous=False) == sin(x)/y
assert (x**2 + O(x**3)).replace(Pow, lambda b, e: b**e/e
) == x**2/2 + O(x**3)
assert (x**2 + O(x**3)).replace(Pow, lambda b, e: b**e/e,
simultaneous=False) == x**2/2 + O(x**3)
assert (x*(x*y + 3)).replace(lambda x: x.is_Mul, lambda x: 2 + x) == \
x*(x*y + 5) + 2
e = (x*y + 1)*(2*x*y + 1) + 1
assert e.replace(cond, func, map=True) == (
2*((2*x*y + 1)*(4*x*y + 1)) + 1,
{2*x*y: 4*x*y, x*y: 2*x*y, (2*x*y + 1)*(4*x*y + 1):
2*((2*x*y + 1)*(4*x*y + 1))})
assert x.replace(x, y) == y
assert (x + 1).replace(1, 2) == x + 2
# https://groups.google.com/forum/#!topic/sympy/8wCgeC95tz0
n1, n2, n3 = symbols('n1:4', commutative=False)
assert (n1*f(n2)).replace(f, lambda x: x) == n1*n2
assert (n3*f(n2)).replace(f, lambda x: x) == n3*n2
# issue 16725
assert S.Zero.replace(Wild('x'), 1) == 1
# let the user override the default decision of False
assert S.Zero.replace(Wild('x'), 1, exact=True) == 0
def test_replace_integral():
# https://github.com/sympy/sympy/issues/27142
q, p, s, t = symbols('q p s t', cls=Wild)
a, b, c, d = symbols('a b c d')
i = Integral(a + b, (b, c, d))
pattern = Integral(q, (p, s, t))
assert i.replace(pattern, q) == a + b
def test_find():
expr = (x + y + 2 + sin(3*x))
assert expr.find(lambda u: u.is_Integer) == {S(2), S(3)}
assert expr.find(lambda u: u.is_Symbol) == {x, y}
assert expr.find(lambda u: u.is_Integer, group=True) == {S(2): 1, S(3): 1}
assert expr.find(lambda u: u.is_Symbol, group=True) == {x: 2, y: 1}
assert expr.find(Integer) == {S(2), S(3)}
assert expr.find(Symbol) == {x, y}
assert expr.find(Integer, group=True) == {S(2): 1, S(3): 1}
assert expr.find(Symbol, group=True) == {x: 2, y: 1}
a = Wild('a')
expr = sin(sin(x)) + sin(x) + cos(x) + x
assert expr.find(lambda u: type(u) is sin) == {sin(x), sin(sin(x))}
assert expr.find(
lambda u: type(u) is sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
assert expr.find(sin(a)) == {sin(x), sin(sin(x))}
assert expr.find(sin(a), group=True) == {sin(x): 2, sin(sin(x)): 1}
assert expr.find(sin) == {sin(x), sin(sin(x))}
assert expr.find(sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
def test_count():
expr = (x + y + 2 + sin(3*x))
assert expr.count(lambda u: u.is_Integer) == 2
assert expr.count(lambda u: u.is_Symbol) == 3
assert expr.count(Integer) == 2
assert expr.count(Symbol) == 3
assert expr.count(2) == 1
a = Wild('a')
assert expr.count(sin) == 1
assert expr.count(sin(a)) == 1
assert expr.count(lambda u: type(u) is sin) == 1
assert f(x).count(f(x)) == 1
assert f(x).diff(x).count(f(x)) == 1
assert f(x).diff(x).count(x) == 2
def test_has_basics():
p = Wild('p')
assert sin(x).has(x)
assert sin(x).has(sin)
assert not sin(x).has(y)
assert not sin(x).has(cos)
assert f(x).has(x)
assert f(x).has(f)
assert not f(x).has(y)
assert not f(x).has(g)
assert f(x).diff(x).has(x)
assert f(x).diff(x).has(f)
assert f(x).diff(x).has(Derivative)
assert not f(x).diff(x).has(y)
assert not f(x).diff(x).has(g)
assert not f(x).diff(x).has(sin)
assert (x**2).has(Symbol)
assert not (x**2).has(Wild)
assert (2*p).has(Wild)
assert not x.has()
# see issue at https://github.com/sympy/sympy/issues/5190
assert not S(1).has(Wild)
assert not x.has(Wild)
def test_has_multiple():
f = x**2*y + sin(2**t + log(z))
assert f.has(x)
assert f.has(y)
assert f.has(z)
assert f.has(t)
assert not f.has(u)
assert f.has(x, y, z, t)
assert f.has(x, y, z, t, u)
i = Integer(4400)
assert not i.has(x)
assert (i*x**i).has(x)
assert not (i*y**i).has(x)
assert (i*y**i).has(x, y)
assert not (i*y**i).has(x, z)
def test_has_piecewise():
f = (x*y + 3/y)**(3 + 2)
p = Piecewise((g(x), x < -1), (1, x <= 1), (f, True))
assert p.has(x)
assert p.has(y)
assert not p.has(z)
assert p.has(1)
assert p.has(3)
assert not p.has(4)
assert p.has(f)
assert p.has(g)
assert not p.has(h)
def test_has_iterative():
    """Test Basic.has for sub-products, including noncommutative ordering."""
    A, B, C = symbols('A,B,C', commutative=False)
    f = x*gamma(x)*sin(x)*exp(x*y)*A*B*C*cos(x*A*B)
    assert f.has(x)
    assert f.has(x*y)
    assert f.has(x*sin(x))
    assert not f.has(x*sin(y))
    assert f.has(x*A)
    assert f.has(x*A*B)
    # noncommutative factors must appear contiguously and in order
    assert not f.has(x*A*C)
    assert f.has(x*A*B*C)
    assert not f.has(x*A*C*B)
    assert f.has(x*sin(x)*A*B*C)
    assert not f.has(x*sin(x)*A*C*B)
    assert not f.has(x*sin(y)*A*B*C)
    assert f.has(x*gamma(x))
    assert not f.has(x + sin(x))
    # has() also works on boolean (And) expressions
    assert (x & y & z).has(x & z)
def test_has_integrals():
    """Test Basic.has against subexpressions of an Integral's limits."""
    f = Integral(x**2 + sin(x*y*z), (x, 0, x + y + z))
    # sub-sums/sub-products of the upper limit are found ...
    assert f.has(x + y)
    assert f.has(x + z)
    assert f.has(y + z)
    assert f.has(x*y)
    assert f.has(x*z)
    assert f.has(y*z)
    # ... but expressions that are not structural subexpressions are not
    assert not f.has(2*x + y)
    assert not f.has(2*x*y)
def test_has_tuple():
    """Test Basic.has on Tuple containers, including boolean singletons."""
    assert Tuple(x, y).has(x)
    assert not Tuple(x, y).has(z)
    assert Tuple(f(x), g(x)).has(x)
    assert not Tuple(f(x), g(x)).has(y)
    assert Tuple(f(x), g(x)).has(f)
    assert Tuple(f(x), g(x)).has(f(x))
    # XXX to be deprecated
    #assert not Tuple(f, g).has(x)
    #assert Tuple(f, g).has(f)
    #assert not Tuple(f, g).has(h)
    # Python True and S.true compare equal, but the integer 1 does not match
    assert Tuple(True).has(True)
    assert Tuple(True).has(S.true)
    assert not Tuple(True).has(1)
def test_has_units():
    """Test Basic.has on expressions containing physical units."""
    from sympy.physics.units import m, s
    assert (x*m/s).has(x)
    assert (x*m/s).has(y, z) is False
def test_has_polys():
    """Test Basic.has on Poly objects (generators included)."""
    poly = Poly(x**2 + x*y*sin(z), x, y, t)
    assert poly.has(x)
    assert poly.has(x, y, z)
    assert poly.has(x, y, z, t)
def test_has_physics():
    """Test Basic.has on a physics FockState container."""
    assert FockState((x, y)).has(x)
def test_as_poly_as_expr():
    """Test round-tripping Expr.as_poly()/Poly.as_expr() and failure modes."""
    f = x**2 + 2*x*y
    assert f.as_poly().as_expr() == f
    assert f.as_poly(x, y).as_expr() == f
    # non-polynomial expressions yield None rather than raising
    assert (f + sin(x)).as_poly(x, y) is None
    p = Poly(f, x, y)
    assert p.as_poly() == p
    # https://github.com/sympy/sympy/issues/20610
    assert S(2).as_poly() is None
    assert sqrt(2).as_poly(extension=True) is None
    assert pi.as_poly(x, domain='QQ') is None
    # non-Expr Basic objects have no as_poly attribute
    raises(AttributeError, lambda: Tuple(x, x).as_poly(x))
    raises(AttributeError, lambda: Tuple(x ** 2, x, y).as_poly(x))
def test_nonzero():
    """Test truthiness (bool()) of SymPy expressions: only Zero is falsy."""
    assert bool(S.Zero) is False
    assert bool(S.One) is True
    assert bool(x) is True
    assert bool(x + y) is True
    assert bool(x - x) is False
    assert bool(x*y) is True
    assert bool(x*1) is True
    assert bool(x*0) is False
def test_is_number():
    """Test the Expr.is_number property on numbers, symbols and containers."""
    assert Float(3.14).is_number is True
    assert Integer(737).is_number is True
    assert Rational(3, 2).is_number is True
    assert Rational(8).is_number is True
    assert x.is_number is False
    assert (2*x).is_number is False
    assert (x + y).is_number is False
    assert log(2).is_number is True
    assert log(x).is_number is False
    assert (2 + log(2)).is_number is True
    assert (8 + log(2)).is_number is True
    assert (2 + log(x)).is_number is False
    assert (8 + log(2) + x).is_number is False
    # the free symbol cancels structurally, so the result is a number
    assert (1 + x**2/x - x).is_number is True
    assert Tuple(Integer(1)).is_number is False
    assert Add(2, x).is_number is False
    assert Mul(3, 4).is_number is True
    assert Pow(log(2), 2).is_number is True
    assert oo.is_number is True
    g = WildFunction('g')
    assert g.is_number is False
    assert (2*g).is_number is False
    assert (x**2).subs(x, 3).is_number is True
    # test extensibility of .is_number
    # on subinstances of Basic
    class A(Basic):
        pass
    a = A()
    assert a.is_number is False
def test_as_coeff_add():
    """Test Expr.as_coeff_add: split off the rational additive constant."""
    assert S(2).as_coeff_add() == (2, ())
    # Floats are not treated as the rational coefficient
    assert S(3.0).as_coeff_add() == (0, (S(3.0),))
    assert S(-3.0).as_coeff_add() == (0, (S(-3.0),))
    assert x.as_coeff_add() == (0, (x,))
    assert (x - 1).as_coeff_add() == (-1, (x,))
    assert (x + 1).as_coeff_add() == (1, (x,))
    assert (x + 2).as_coeff_add() == (2, (x,))
    assert (x + y).as_coeff_add(y) == (x, (y,))
    assert (3*x).as_coeff_add(y) == (3*x, ())
    # don't do expansion
    e = (x + y)**2
    assert e.as_coeff_add(y) == (0, (e,))
def test_as_coeff_mul():
    """Test Expr.as_coeff_mul: split off the rational multiplicative coeff."""
    assert S(2).as_coeff_mul() == (2, ())
    # with rational=True (default) a Float stays among the factors; only its
    # sign is extracted
    assert S(3.0).as_coeff_mul() == (1, (S(3.0),))
    assert S(-3.0).as_coeff_mul() == (-1, (S(3.0),))
    assert S(-3.0).as_coeff_mul(rational=False) == (-S(3.0), ())
    assert x.as_coeff_mul() == (1, (x,))
    assert (-x).as_coeff_mul() == (-1, (x,))
    assert (2*x).as_coeff_mul() == (2, (x,))
    assert (x*y).as_coeff_mul(y) == (x, (y,))
    assert (3 + x).as_coeff_mul() == (1, (3 + x,))
    assert (3 + x).as_coeff_mul(y) == (3 + x, ())
    # don't do expansion
    e = exp(x + y)
    assert e.as_coeff_mul(y) == (1, (e,))
    e = 2**(x + y)
    assert e.as_coeff_mul(y) == (1, (e,))
    assert (1.1*x).as_coeff_mul(rational=False) == (1.1, (x,))
    assert (1.1*x).as_coeff_mul() == (1, (1.1, x))
    assert (-oo*x).as_coeff_mul(rational=True) == (-1, (oo, x))
def test_as_coeff_exponent():
    """Test Expr.as_coeff_exponent: write expr as c*x**e and return (c, e)."""
    assert (3*x**4).as_coeff_exponent(x) == (3, 4)
    assert (2*x**3).as_coeff_exponent(x) == (2, 3)
    assert (4*x**2).as_coeff_exponent(x) == (4, 2)
    assert (6*x**1).as_coeff_exponent(x) == (6, 1)
    assert (3*x**0).as_coeff_exponent(x) == (3, 0)
    assert (2*x**0).as_coeff_exponent(x) == (2, 0)
    assert (1*x**0).as_coeff_exponent(x) == (1, 0)
    assert (0*x**0).as_coeff_exponent(x) == (0, 0)
    assert (-1*x**0).as_coeff_exponent(x) == (-1, 0)
    assert (-2*x**0).as_coeff_exponent(x) == (-2, 0)
    assert (2*x**3 + pi*x**3).as_coeff_exponent(x) == (2 + pi, 3)
    assert (x*log(2)/(2*x + pi*x)).as_coeff_exponent(x) == \
        (log(2)/(2 + pi), 0)
    # issue 4784
    D = Derivative
    fx = D(f(x), x)
    assert fx.as_coeff_exponent(f(x)) == (fx, 0)
def test_extractions():
    """Test extract_multiplicatively, extract_additively and
    could_extract_minus_sign.

    The extract_* methods return the cofactor/remainder on success and
    None when an exact extraction is impossible.
    """
    for base in (2, S.Exp1):
        assert Pow(base**x, 3, evaluate=False
            ).extract_multiplicatively(base**x) == base**(2*x)
        assert (base**(5*x)).extract_multiplicatively(
            base**(3*x)) == base**(2*x)
    assert ((x*y)**3).extract_multiplicatively(x**2 * y) == x*y**2
    assert ((x*y)**3).extract_multiplicatively(x**4 * y) is None
    assert (2*x).extract_multiplicatively(2) == x
    assert (2*x).extract_multiplicatively(3) is None
    assert (2*x).extract_multiplicatively(-1) is None
    assert (S.Half*x).extract_multiplicatively(3) == x/6
    assert (sqrt(x)).extract_multiplicatively(x) is None
    assert (sqrt(x)).extract_multiplicatively(1/x) is None
    assert x.extract_multiplicatively(-x) is None
    assert (-2 - 4*I).extract_multiplicatively(-2) == 1 + 2*I
    assert (-2 - 4*I).extract_multiplicatively(3) is None
    assert (-2*x - 4*y - 8).extract_multiplicatively(-2) == x + 2*y + 4
    assert (-2*x*y - 4*x**2*y).extract_multiplicatively(-2*y) == 2*x**2 + x
    assert (2*x*y + 4*x**2*y).extract_multiplicatively(2*y) == 2*x**2 + x
    assert (-4*y**2*x).extract_multiplicatively(-3*y) is None
    assert (2*x).extract_multiplicatively(1) == 2*x
    assert (-oo).extract_multiplicatively(5) is -oo
    assert (oo).extract_multiplicatively(5) is oo

    assert ((x*y)**3).extract_additively(1) is None
    assert (x + 1).extract_additively(x) == 1
    assert (x + 1).extract_additively(2*x) is None
    assert (x + 1).extract_additively(-x) is None
    assert (-x + 1).extract_additively(2*x) is None
    assert (2*x + 3).extract_additively(x) == x + 3
    assert (2*x + 3).extract_additively(2) == 2*x + 1
    assert (2*x + 3).extract_additively(3) == 2*x
    assert (2*x + 3).extract_additively(-2) is None
    assert (2*x + 3).extract_additively(3*x) is None
    assert (2*x + 3).extract_additively(2*x) == 3
    assert x.extract_additively(0) == x
    assert S(2).extract_additively(x) is None
    assert S(2.).extract_additively(2.) is S.Zero
    assert S(2.).extract_additively(2) is S.Zero
    assert S(2*x + 3).extract_additively(x + 1) == x + 2
    assert S(2*x + 3).extract_additively(y + 1) is None
    assert S(2*x - 3).extract_additively(x + 1) is None
    assert S(2*x - 3).extract_additively(y + z) is None
    assert ((a + 1)*x*4 + y).extract_additively(x).expand() == \
        4*a*x + 3*x + y
    assert ((a + 1)*x*4 + 3*y).extract_additively(x + 2*y).expand() == \
        4*a*x + 3*x + y
    assert (y*(x + 1)).extract_additively(x + 1) is None
    assert ((y + 1)*(x + 1) + 3).extract_additively(x + 1) == \
        y*(x + 1) + 3
    assert ((x + y)*(x + 1) + x + y + 3).extract_additively(x + y) == \
        x*(x + y) + 3
    assert (x + y + 2*((x + y)*(x + 1)) + 3).extract_additively((x + y)*(x + 1)) == \
        x + y + (x + 1)*(x + y) + 3
    assert ((y + 1)*(x + 2*y + 1) + 3).extract_additively(y + 1) == \
        (x + 2*y)*(y + 1) + 3
    assert (-x - x*I).extract_additively(-x) == -I*x
    # extraction does not leave artificats, now
    assert (4*x*(y + 1) + y).extract_additively(x) == x*(4*y + 3) + y

    n = Symbol("n", integer=True)
    assert (Integer(-3)).could_extract_minus_sign() is True
    # without a sign assumption on n these two must simply disagree
    assert (-n*x + x).could_extract_minus_sign() != \
        (n*x - x).could_extract_minus_sign()
    assert (x - y).could_extract_minus_sign() != \
        (-x + y).could_extract_minus_sign()
    assert (1 - x - y).could_extract_minus_sign() is True
    assert (1 - x + y).could_extract_minus_sign() is False
    assert ((-x - x*y)/y).could_extract_minus_sign() is False
    assert ((x + x*y)/(-y)).could_extract_minus_sign() is True
    assert ((x + x*y)/y).could_extract_minus_sign() is False
    assert ((-x - y)/(x + y)).could_extract_minus_sign() is False

    # a function whose negation is itself can never extract a minus sign
    class sign_invariant(DefinedFunction, Expr):
        nargs = 1
        def __neg__(self):
            return self
    foo = sign_invariant(x)
    assert foo == -foo
    assert foo.could_extract_minus_sign() is False
    assert (x - y).could_extract_minus_sign() is False
    assert (-x + y).could_extract_minus_sign() is True
    assert (x - 1).could_extract_minus_sign() is False
    assert (1 - x).could_extract_minus_sign() is True
    assert (sqrt(2) - 1).could_extract_minus_sign() is True
    assert (1 - sqrt(2)).could_extract_minus_sign() is False

    # check that result is canonical
    eq = (3*x + 15*y).extract_multiplicatively(3)
    assert eq is not None and eq.args == eq.func(*eq.args).args
def test_nan_extractions():
    """Test that nothing can be extracted (additively or multiplicatively)
    from NaN."""
    for r in (1, 0, I, nan):
        assert nan.extract_additively(r) is None
        assert nan.extract_multiplicatively(r) is None
def test_coeff():
    """Test Expr.coeff for commutative and noncommutative expressions,
    including the ``right`` keyword and the order-0 (``n=0``) case."""
    assert (x + 1).coeff(x + 1) == 1
    assert (3*x).coeff(0) == 0
    assert (z*(1 + x)*x**2).coeff(1 + x) == z*x**2
    assert (1 + 2*x*x**(1 + x)).coeff(x*x**(1 + x)) == 2
    assert (1 + 2*x**(y + z)).coeff(x**(y + z)) == 2
    assert (3 + 2*x + 4*x**2).coeff(1) == 0
    assert (3 + 2*x + 4*x**2).coeff(-1) == 0
    assert (3 + 2*x + 4*x**2).coeff(x) == 2
    assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
    assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
    assert (-x/8 + x*y).coeff(x) == Rational(-1, 8) + y
    assert (-x/8 + x*y).coeff(-x) == S.One/8
    assert (4*x).coeff(2*x) == 0
    assert (2*x).coeff(2*x) == 1
    assert (-oo*x).coeff(x*oo) == -1
    assert (10*x).coeff(x, 0) == 0
    assert (10*x).coeff(10*x, 0) == 0
    n1, n2 = symbols('n1 n2', commutative=False, seq=True)
    assert (n1*n2).coeff(n1) == 1
    assert (n1*n2).coeff(n2) == n1
    assert (n1*n2 + x*n1).coeff(n1) == 1  # 1*n1*(n2+x)
    assert (n2*n1 + x*n1).coeff(n1) == n2 + x
    assert (n2*n1 + x*n1**2).coeff(n1) == n2
    assert (n1**x).coeff(n1) == 0
    assert (n1*n2 + n2*n1).coeff(n1) == 0
    # right=1/0 selects the factor to the right/left of the match
    assert (2*(n1 + n2)*n2).coeff(n1 + n2, right=1) == n2
    assert (2*(n1 + n2)*n2).coeff(n1 + n2, right=0) == 2

    assert (2*f(x) + 3*f(x).diff(x)).coeff(f(x)) == 2

    expr = z*(x + y)**2
    expr2 = z*(x + y)**2 + z*(2*x + 2*y)**2
    assert expr.coeff(z) == (x + y)**2
    assert expr.coeff(x + y) == 0
    assert expr2.coeff(z) == (x + y)**2 + (2*x + 2*y)**2

    assert (x + y + 3*z).coeff(1) == x + y
    assert (-x + 2*y).coeff(-1) == x
    assert (x - 2*y).coeff(-1) == 2*y
    assert (3 + 2*x + 4*x**2).coeff(1) == 0
    assert (-x - 2*y).coeff(2) == -y
    assert (x + sqrt(2)*x).coeff(sqrt(2)) == x
    assert (3 + 2*x + 4*x**2).coeff(x) == 2
    assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
    assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
    assert (z*(x + y)**2).coeff((x + y)**2) == z
    assert (z*(x + y)**2).coeff(x + y) == 0
    assert (2 + 2*x + (x + 1)*y).coeff(x + 1) == y

    assert (x + 2*y + 3).coeff(1) == x
    # coeff(x, 0) gives the part independent of x
    assert (x + 2*y + 3).coeff(x, 0) == 2*y + 3
    assert (x**2 + 2*y + 3*x).coeff(x**2, 0) == 2*y + 3*x
    assert x.coeff(0, 0) == 0
    assert x.coeff(x, 0) == 0

    n, m, o, l = symbols('n m o l', commutative=False)
    assert n.coeff(n) == 1
    assert y.coeff(n) == 0
    assert (3*n).coeff(n) == 3
    assert (2 + n).coeff(x*m) == 0
    assert (2*x*n*m).coeff(x) == 2*n*m
    assert (2 + n).coeff(x*m*n + y) == 0
    assert (2*x*n*m).coeff(3*n) == 0
    assert (n*m + m*n*m).coeff(n) == 1 + m
    assert (n*m + m*n*m).coeff(n, right=True) == m  # = (1 + m)*n*m
    assert (n*m + m*n).coeff(n) == 0
    assert (n*m + o*m*n).coeff(m*n) == o
    assert (n*m + o*m*n).coeff(m*n, right=True) == 1
    assert (n*m + n*m*n).coeff(n*m, right=True) == 1 + n  # = n*m*(n + 1)
    assert (x*y).coeff(z, 0) == x*y
    assert (x*n + y*n + z*m).coeff(n) == x + y
    assert (n*m + n*o + o*l).coeff(n, right=True) == m + o
    assert (x*n*m*n + y*n*m*o + z*l).coeff(m, right=True) == x*n + y*o
    assert (x*n*m*n + x*n*m*o + z*l).coeff(m, right=True) == n + o
    assert (x*n*m*n + x*n*m*o + z*l).coeff(m) == x*n
def test_coeff2():
    """Test Expr.coeff with a first-order Derivative as the target."""
    r, kappa = symbols('r, kappa')
    psi = Function("psi")
    g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2))
    g = g.expand()
    assert g.coeff(psi(r).diff(r)) == 2/r
def test_coeff2_0():
    """Test Expr.coeff with a second-order Derivative as the target."""
    r, kappa = symbols('r, kappa')
    psi = Function("psi")
    g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2))
    g = g.expand()
    assert g.coeff(psi(r).diff(r, 2)) == 1
def test_coeff_expand():
    """Test that Expr.coeff does not expand the returned coefficient."""
    expr = z*(x + y)**2
    expr2 = z*(x + y)**2 + z*(2*x + 2*y)**2
    assert expr.coeff(z) == (x + y)**2
    assert expr2.coeff(z) == (x + y)**2 + (2*x + 2*y)**2
def test_integrate():
    """Test the Expr.integrate convenience method."""
    assert x.integrate(x) == x**2/2
    assert x.integrate((x, 0, 1)) == S.Half
def test_as_base_exp():
    """Test Expr.as_base_exp: return (base, exponent) without rewriting."""
    assert x.as_base_exp() == (x, S.One)
    assert (x*y*z).as_base_exp() == (x*y*z, S.One)
    assert (x + y + z).as_base_exp() == (x + y + z, S.One)
    assert ((x + y)**z).as_base_exp() == (x + y, z)
    # x**2*y**2 auto-combines to (x*y)**2, but x**z*y**z does not (z unknown)
    assert (x**2*y**2).as_base_exp() == (x*y, 2)
    assert (x**z*y**z).as_base_exp() == (x**z*y**z, S.One)
def test_issue_4963():
    """Regression test: unevaluated Mul/Pow must still expose
    is_commutative."""
    assert hasattr(Mul(x, y), "is_commutative")
    assert hasattr(Mul(x, y, evaluate=False), "is_commutative")
    assert hasattr(Pow(x, y), "is_commutative")
    assert hasattr(Pow(x, y, evaluate=False), "is_commutative")
    expr = Mul(Pow(2, 2, evaluate=False), 3, evaluate=False) + 1
    assert hasattr(expr, "is_commutative")
def test_action_verbs():
    """Test that method forms (expr.simplify() etc.) agree with the
    corresponding module-level functions (simplify(expr) etc.)."""
    assert nsimplify(1/(exp(3*pi*x/5) + 1)) == \
        (1/(exp(3*pi*x/5) + 1)).nsimplify()
    assert ratsimp(1/x + 1/y) == (1/x + 1/y).ratsimp()
    assert trigsimp(log(x), deep=True) == (log(x)).trigsimp(deep=True)
    assert radsimp(1/(2 + sqrt(2))) == (1/(2 + sqrt(2))).radsimp()
    assert radsimp(1/(a + b*sqrt(c)), symbolic=False) == \
        (1/(a + b*sqrt(c))).radsimp(symbolic=False)
    assert powsimp(x**y*x**z*y**z, combine='all') == \
        (x**y*x**z*y**z).powsimp(combine='all')
    assert (x**t*y**t).powsimp(force=True) == (x*y)**t
    assert simplify(x**y*x**z*y**z) == (x**y*x**z*y**z).simplify()
    assert together(1/x + 1/y) == (1/x + 1/y).together()
    assert collect(a*x**2 + b*x**2 + a*x - b*x + c, x) == \
        (a*x**2 + b*x**2 + a*x - b*x + c).collect(x)
    assert apart(y/(y + 2)/(y + 1), y) == (y/(y + 2)/(y + 1)).apart(y)
    assert combsimp(y/(x + 2)/(x + 1)) == (y/(x + 2)/(x + 1)).combsimp()
    assert gammasimp(gamma(x)/gamma(x-5)) == (gamma(x)/gamma(x-5)).gammasimp()
    assert factor(x**2 + 5*x + 6) == (x**2 + 5*x + 6).factor()
    assert refine(sqrt(x**2)) == sqrt(x**2).refine()
    assert cancel((x**2 + 5*x + 6)/(x + 2)) == ((x**2 + 5*x + 6)/(x + 2)).cancel()
def test_as_powers_dict():
    """Test Expr.as_powers_dict: map base -> exponent, 0 for absent bases."""
    assert x.as_powers_dict() == {x: 1}
    assert (x**y*z).as_powers_dict() == {x: y, z: 1}
    assert Mul(2, 2, evaluate=False).as_powers_dict() == {S(2): S(2)}
    # missing keys default to exponent 0
    assert (x*y).as_powers_dict()[z] == 0
    assert (x + y).as_powers_dict()[z] == 0
def test_as_coefficients_dict():
    """Test Expr.as_coefficients_dict with and without explicit symbols."""
    check = [S.One, x, y, x*y, 1]
    assert [Add(3*x, 2*x, y, 3).as_coefficients_dict()[i] for i in check] == \
        [3, 5, 1, 0, 3]
    # unevaluated Add gives the same collected coefficients
    assert [Add(3*x, 2*x, y, 3, evaluate=False).as_coefficients_dict()[i]
            for i in check] == [3, 5, 1, 0, 3]
    assert [(3*x*y).as_coefficients_dict()[i] for i in check] == \
        [0, 0, 0, 3, 0]
    assert [(3.0*x*y).as_coefficients_dict()[i] for i in check] == \
        [0, 0, 0, 3.0, 0]
    assert (3.0*x*y).as_coefficients_dict()[3.0*x*y] == 0
    # with explicit symbols, non-matching factors stay in the keys
    eq = x*(x + 1)*a + x*b + c/x
    assert eq.as_coefficients_dict(x) == {x: b, 1/x: c,
        x*(x + 1): a}
    assert eq.expand().as_coefficients_dict(x) == {x**2: a, x: a + b, 1/x: c}
    assert x.as_coefficients_dict() == {x: S.One}
def test_args_cnc():
    """Test Expr.args_cnc: split into [commutative, noncommutative] factor
    lists, optionally returning the commutative part as a set."""
    A = symbols('A', commutative=False)
    assert (x + A).args_cnc() == \
        [[], [x + A]]
    assert (x + a).args_cnc() == \
        [[a + x], []]
    assert (x*a).args_cnc() == \
        [[a, x], []]
    assert (x*y*A*(A + 1)).args_cnc(cset=True) == \
        [{x, y}, [A, 1 + A]]
    assert Mul(x, x, evaluate=False).args_cnc(cset=True, warn=False) == \
        [{x}, []]
    assert Mul(x, x**2, evaluate=False).args_cnc(cset=True, warn=False) == \
        [{x, x**2}, []]
    # duplicate commutative factors with cset=True raise unless warn=False
    raises(ValueError, lambda: Mul(x, x, evaluate=False).args_cnc(cset=True))
    assert Mul(x, y, x, evaluate=False).args_cnc() == \
        [[x, y, x], []]
    # always split -1 from leading number
    assert (-1.*x).args_cnc() == [[-1, 1.0, x], []]
def test_new_rawargs():
    """Test Basic._new_rawargs: rebuild an Add/Mul from raw args, with
    is_commutative recomputed (or reused when reeval=False)."""
    n = Symbol('n', commutative=False)
    a = x + n
    assert isinstance(a, Add)
    assert a.is_commutative is False
    assert a._new_rawargs(x).is_commutative
    assert a._new_rawargs(x, y).is_commutative
    assert a._new_rawargs(x, n).is_commutative is False
    assert a._new_rawargs(x, y, n).is_commutative is False
    m = x*n
    assert isinstance(m, Mul)
    assert m.is_commutative is False
    assert m._new_rawargs(x).is_commutative
    assert m._new_rawargs(n).is_commutative is False
    assert m._new_rawargs(x, y).is_commutative
    assert m._new_rawargs(x, n).is_commutative is False
    assert m._new_rawargs(x, y, n).is_commutative is False
    assert m._new_rawargs(x, n, reeval=False).is_commutative is False
    # a single arg is returned as-is (identity preserved)
    assert m._new_rawargs(S.One) is S.One
def test_issue_5226():
    """Regression test: empty Add/Mul yield identities; a single-arg
    unevaluated Mul keeps its argument's head."""
    assert Add(evaluate=False) == 0
    assert Mul(evaluate=False) == 1
    assert Mul(x + y, evaluate=False).is_Add
def test_free_symbols():
    """Test the free_symbols property (bound integration variables are
    excluded; units contribute none)."""
    # free_symbols should return the free symbols of an object
    assert S.One.free_symbols == set()
    assert x.free_symbols == {x}
    assert Integral(x, (x, 1, y)).free_symbols == {y}
    assert (-Integral(x, (x, 1, y))).free_symbols == {y}
    assert meter.free_symbols == set()
    assert (meter**x).free_symbols == {x}
def test_has_free():
    """Test Basic.has_free: like has(), but bound symbols don't count."""
    assert x.has_free(x)
    assert not x.has_free(y)
    assert (x + y).has_free(x)
    assert (x + y).has_free(*(x, z))
    assert f(x).has_free(x)
    assert f(x).has_free(f(x))
    assert Integral(f(x), (f(x), 1, y)).has_free(y)
    # f(x) and x are bound by the integration limits here
    assert not Integral(f(x), (f(x), 1, y)).has_free(x)
    assert not Integral(f(x), (f(x), 1, y)).has_free(f(x))
    # simple extraction
    assert (x + 1 + y).has_free(x + 1)
    assert not (x + 2 + y).has_free(x + 1)
    assert (2 + 3*x*y).has_free(3*x)
    raises(TypeError, lambda: x.has_free({x, y}))
    s = FiniteSet(1, 2)
    assert Piecewise((s, x > 3), (4, True)).has_free(s)
    assert not Piecewise((1, x > 3), (4, True)).has_free(s)
    # can't make set of these, but fallback will handle
    raises(TypeError, lambda: x.has_free(y, []))
def test_has_xfree():
    """Test Basic.has_xfree: exact free-subexpression membership test that
    only accepts a set argument."""
    assert (x + 1).has_xfree({x})
    assert ((x + 1)**2).has_xfree({x + 1})
    assert not (x + y + 1).has_xfree({x + 1})
    raises(TypeError, lambda: x.has_xfree(x))
    raises(TypeError, lambda: x.has_xfree([x]))
def test_issue_5300():
    """Regression test: radical simplification with a noncommutative
    symbol factor."""
    x = Symbol('x', commutative=False)
    assert x*sqrt(2)/sqrt(6) == x*sqrt(3)/3
def test_floordiv():
    """Test that // on symbolic expressions produces floor(x/y)."""
    from sympy.functions.elementary.integers import floor
    assert x // y == floor(x / y)
def test_as_coeff_Mul():
    """Test Expr.as_coeff_Mul: split into (numeric coeff, remaining Mul)."""
    assert Integer(3).as_coeff_Mul() == (Integer(3), Integer(1))
    assert Rational(3, 4).as_coeff_Mul() == (Rational(3, 4), Integer(1))
    assert Float(5.0).as_coeff_Mul() == (Float(5.0), Integer(1))
    assert Float(0.0).as_coeff_Mul() == (Float(0.0), Integer(1))
    assert (Integer(3)*x).as_coeff_Mul() == (Integer(3), x)
    assert (Rational(3, 4)*x).as_coeff_Mul() == (Rational(3, 4), x)
    assert (Float(5.0)*x).as_coeff_Mul() == (Float(5.0), x)
    assert (Integer(3)*x*y).as_coeff_Mul() == (Integer(3), x*y)
    assert (Rational(3, 4)*x*y).as_coeff_Mul() == (Rational(3, 4), x*y)
    assert (Float(5.0)*x*y).as_coeff_Mul() == (Float(5.0), x*y)
    assert (x).as_coeff_Mul() == (S.One, x)
    assert (x*y).as_coeff_Mul() == (S.One, x*y)
    # with rational=True only the sign is pulled off an infinite coefficient
    assert (-oo*x).as_coeff_Mul(rational=True) == (-1, oo*x)
def test_as_coeff_Add():
    """Test Expr.as_coeff_Add: split into (numeric term, remaining Add)."""
    assert Integer(3).as_coeff_Add() == (Integer(3), Integer(0))
    assert Rational(3, 4).as_coeff_Add() == (Rational(3, 4), Integer(0))
    assert Float(5.0).as_coeff_Add() == (Float(5.0), Integer(0))
    assert (Integer(3) + x).as_coeff_Add() == (Integer(3), x)
    assert (Rational(3, 4) + x).as_coeff_Add() == (Rational(3, 4), x)
    assert (Float(5.0) + x).as_coeff_Add() == (Float(5.0), x)
    # with rational=True a Float is not extracted
    assert (Float(5.0) + x).as_coeff_Add(rational=True) == (0, Float(5.0) + x)
    assert (Integer(3) + x + y).as_coeff_Add() == (Integer(3), x + y)
    assert (Rational(3, 4) + x + y).as_coeff_Add() == (Rational(3, 4), x + y)
    assert (Float(5.0) + x + y).as_coeff_Add() == (Float(5.0), x + y)
    assert (x).as_coeff_Add() == (S.Zero, x)
    assert (x*y).as_coeff_Add() == (S.Zero, x*y)
def test_expr_sorting():
    """Test that default_sort_key produces the documented canonical order
    for expressions, functions, tuples, lists, dicts, sets and Dummies.

    Each list below is written in its expected sorted order and must be
    invariant under sorting.
    """
    exprs = [1/x**2, 1/x, sqrt(sqrt(x)), sqrt(x), x, sqrt(x)**3, x**2]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [x, 2*x, 2*x**2, 2*x**3, x**n, 2*x**n, sin(x), sin(x)**n,
             sin(x**2), cos(x), cos(x**2), tan(x)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [x + 1, x**2 + x + 1, x**3 + x**2 + x + 1]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [S(4), x - 3*I/2, x + 3*I/2, x - 4*I + 1, x + 4*I + 1]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [f(x), g(x), exp(x), sin(x), cos(x), factorial(x)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [Tuple(x, y), Tuple(x, z), Tuple(x, y, z)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [[3], [1, 2]]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [[1, 2], [2, 3]]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [[1, 2], [1, 2, 3]]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [{x: -y}, {x: y}]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [{1}, {1, 2}]
    assert sorted(exprs, key=default_sort_key) == exprs
    # Dummies with the same name still sort deterministically
    a, b = exprs = [Dummy('x'), Dummy('x')]
    assert sorted([b, a], key=default_sort_key) == exprs
def test_as_ordered_factors():
    """Test Expr.as_ordered_factors ordering; noncommutative factors keep
    their multiplication order."""
    assert x.as_ordered_factors() == [x]
    assert (2*x*x**n*sin(x)*cos(x)).as_ordered_factors() \
        == [Integer(2), x, x**n, sin(x), cos(x)]
    args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
    expr = Mul(*args)
    assert expr.as_ordered_factors() == args
    A, B = symbols('A,B', commutative=False, seq=True)
    assert (A*B).as_ordered_factors() == [A, B]
    assert (B*A).as_ordered_factors() == [B, A]
def test_as_ordered_terms():
    """Test Expr.as_ordered_terms, including monomial orders (lex, grlex
    and their reversals) and complex-number term ordering."""
    assert x.as_ordered_terms() == [x]
    assert (sin(x)**2*cos(x) + sin(x)*cos(x)**2 + 1).as_ordered_terms() \
        == [sin(x)**2*cos(x), sin(x)*cos(x)**2, 1]
    args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
    expr = Add(*args)
    assert expr.as_ordered_terms() == args
    assert (1 + 4*sqrt(3)*pi*x).as_ordered_terms() == [4*pi*x*sqrt(3), 1]
    assert ( 2 + 3*I).as_ordered_terms() == [2, 3*I]
    assert (-2 + 3*I).as_ordered_terms() == [-2, 3*I]
    assert ( 2 - 3*I).as_ordered_terms() == [2, -3*I]
    assert (-2 - 3*I).as_ordered_terms() == [-2, -3*I]
    assert ( 4 + 3*I).as_ordered_terms() == [4, 3*I]
    assert (-4 + 3*I).as_ordered_terms() == [-4, 3*I]
    assert ( 4 - 3*I).as_ordered_terms() == [4, -3*I]
    assert (-4 - 3*I).as_ordered_terms() == [-4, -3*I]
    e = x**2*y**2 + x*y**4 + y + 2
    assert e.as_ordered_terms(order="lex") == [x**2*y**2, x*y**4, y, 2]
    assert e.as_ordered_terms(order="grlex") == [x*y**4, x**2*y**2, y, 2]
    assert e.as_ordered_terms(order="rev-lex") == [2, y, x*y**4, x**2*y**2]
    assert e.as_ordered_terms(order="rev-grlex") == [2, y, x**2*y**2, x*y**4]
    # data=True also returns the internal (term, key) representation
    k = Symbol('k')
    assert k.as_ordered_terms(data=True) == ([(k, ((1.0, 0.0), (1,), ()))], [k])  # type: ignore
def test_sort_key_atomic_expr():
    """Test sort_key on atomic unit expressions (signed unit vs. unit)."""
    from sympy.physics.units import m, s
    assert sorted([-m, s], key=lambda arg: arg.sort_key()) == [-m, s]
def test_eval_interval():
    """Test Expr._eval_interval (F(b) - F(a)), including limits at
    infinity and required NotImplementedError/ValueError behavior."""
    assert exp(x)._eval_interval(*Tuple(x, 0, 1)) == exp(1) - exp(0)

    # issue 4199
    a = x/y
    raises(NotImplementedError, lambda: a._eval_interval(x, S.Zero, oo)._eval_interval(y, oo, S.Zero))
    raises(NotImplementedError, lambda: a._eval_interval(x, S.Zero, oo)._eval_interval(y, S.Zero, oo))
    a = x - y
    raises(NotImplementedError, lambda: a._eval_interval(x, S.One, oo)._eval_interval(y, oo, S.One))
    raises(ValueError, lambda: x._eval_interval(x, None, None))
    a = -y*Heaviside(x - y)
    assert a._eval_interval(x, -oo, oo) == -y
    assert a._eval_interval(x, oo, -oo) == y
def test_eval_interval_zoo():
    """Test that _eval_interval falls back to a limit when direct
    substitution would give zoo."""
    # Test that limit is used when zoo is returned
    assert Si(1/x)._eval_interval(x, S.Zero, S.One) == -pi/2 + Si(1)
def test_primitive():
    """Test Expr.primitive: extract the positive rational content so that
    expr == content * primitive_part."""
    assert (3*(x + 1)**2).primitive() == (3, (x + 1)**2)
    assert (6*x + 2).primitive() == (2, 3*x + 1)
    assert (x/2 + 3).primitive() == (S.Half, x + 6)
    eq = (6*x + 2)*(x/2 + 3)
    assert eq.primitive()[0] == 1
    eq = (2 + 2*x)**2
    assert eq.primitive()[0] == 1
    # Floats contribute no rational content on their own
    assert (4.0*x).primitive() == (1, 4.0*x)
    assert (4.0*x + y/2).primitive() == (S.Half, 8.0*x + y)
    assert (-2*x).primitive() == (2, -x)
    assert Add(5*z/7, 0.5*x, 3*y/2, evaluate=False).primitive() == \
        (S.One/14, 7.0*x + 21*y + 10*z)
    # infinite terms absorb the extracted factor
    for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
        assert (i + x/3).primitive() == \
            (S.One/3, i + x)
    assert (S.Infinity + 2*x/3 + 4*y/7).primitive() == \
        (S.One/21, 14*x + 12*y + oo)
    assert S.Zero.primitive() == (S.One, S.Zero)
def test_issue_5843():
    """Regression test: extract_multiplicatively with non-monomial
    factors."""
    a = 1 + x
    assert (2*a).extract_multiplicatively(a) == 2
    assert (4*a).extract_multiplicatively(2*a) == 2
    assert ((3*a)*(2*a)).extract_multiplicatively(a) == 6*a
def test_is_constant():
    """Test the Expr.is_constant() method for sums, trig identities,
    powers, zero-assumption symbols and units.

    Note: ``is_constant`` is a *method*, not a property, so every check
    must call it; a bare ``expr.is_constant`` would only assert the
    truthiness of the bound-method object.
    """
    from sympy.solvers.solvers import checksol
    assert Sum(x, (x, 1, 10)).is_constant() is True
    assert Sum(x, (x, 1, n)).is_constant() is False
    # constant with respect to symbols not appearing in the free symbols
    assert Sum(x, (x, 1, n)).is_constant(y) is True
    assert Sum(x, (x, 1, n)).is_constant(n) is False
    # x is bound by the summation, so the sum is constant wrt x
    assert Sum(x, (x, 1, n)).is_constant(x) is True
    # a hidden constant: simplifies to zero for any x
    eq = a*cos(x)**2 + a*sin(x)**2 - a
    assert eq.is_constant() is True
    assert eq.subs({x: pi, a: 2}) == eq.subs({x: pi, a: 3}) == 0
    assert x.is_constant() is False
    assert x.is_constant(y) is True
    assert log(x/y).is_constant() is False
    assert checksol(x, x, Sum(x, (x, 1, n))) is False
    # is_constant must be *called*: f(1) has no free symbols, so it is
    # constant (the original test asserted the bound method, which is
    # always truthy regardless of the result)
    assert f(1).is_constant()
    assert checksol(x, x, f(x)) is False
    assert Pow(x, S.Zero, evaluate=False).is_constant() is True  # == 1
    assert Pow(S.Zero, x, evaluate=False).is_constant() is False  # == 0 or 1
    assert (2**x).is_constant() is False
    assert Pow(S(2), S(3), evaluate=False).is_constant() is True
    # symbols assumed zero make the whole expression a constant (zero)
    z1, z2 = symbols('z1 z2', zero=True)
    assert (z1 + 2*z2).is_constant() is True
    assert meter.is_constant() is True
    assert (3*meter).is_constant() is True
    assert (x*meter).is_constant() is False
def test_equals():
    """Test Expr.equals: numeric/structural equality up to simplification,
    returning True, False, or None when undecidable."""
    assert (-3 - sqrt(5) + (-sqrt(10)/2 - sqrt(2)/2)**2).equals(0)
    assert (x**2 - 1).equals((x + 1)*(x - 1))
    assert (cos(x)**2 + sin(x)**2).equals(1)
    assert (a*cos(x)**2 + a*sin(x)**2).equals(a)
    r = sqrt(2)
    assert (-1/(r + r*x) + 1/r/(1 + x)).equals(0)
    assert factorial(x + 1).equals((x + 1)*factorial(x))
    assert sqrt(3).equals(2*sqrt(3)) is False
    assert (sqrt(5)*sqrt(3)).equals(sqrt(3)) is False
    assert (sqrt(5) + sqrt(3)).equals(0) is False
    assert (sqrt(5) + pi).equals(0) is False
    assert meter.equals(0) is False
    assert (3*meter**2).equals(0) is False
    eq = -(-1)**(S(3)/4)*6**(S.One/4) + (-6)**(S.One/4)*I
    if eq != 0:  # if canonicalization makes this zero, skip the test
        assert eq.equals(0)
    assert sqrt(x).equals(0) is False

    # from integrate(x*sqrt(1 + 2*x), x);
    # diff is zero only when assumptions allow
    i = 2*sqrt(2)*x**(S(5)/2)*(1 + 1/(2*x))**(S(5)/2)/5 + \
        2*sqrt(2)*x**(S(3)/2)*(1 + 1/(2*x))**(S(5)/2)/(-6 - 3/x)
    ans = sqrt(2*x + 1)*(6*x**2 + x - 1)/15
    diff = i - ans
    assert diff.equals(0) is None  # should be False, but previously this was False due to wrong intermediate result
    assert diff.subs(x, Rational(-1, 2)/2) == 7*sqrt(2)/120
    # there are regions for x for which the expression is True, for
    # example, when x < -1/2 or x > 0 the expression is zero
    p = Symbol('p', positive=True)
    assert diff.subs(x, p).equals(0) is True
    assert diff.subs(x, -1).equals(0) is True

    # prove via minimal_polynomial or self-consistency
    eq = sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3)) - sqrt(10 + 6*sqrt(3))
    assert eq.equals(0)
    q = 3**Rational(1, 3) + 3
    p = expand(q**3)**Rational(1, 3)
    assert (p - q).equals(0)

    # issue 6829
    # eq = q*x + q/4 + x**4 + x**3 + 2*x**2 - S.One/3
    # z = eq.subs(x, solve(eq, x)[0])
    q = symbols('q')
    z = (q*(-sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) -
        S(13)/12)/2 - sqrt((2*q - S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S.One/3) - S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S.One/3) - S(13)/6)/2 - S.One/4) + q/4 + (-sqrt(-2*(-(q
        - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) - S(13)/12)/2 - sqrt((2*q
        - S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) -
        S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) -
        S(13)/6)/2 - S.One/4)**4 + (-sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S.One/3) - S(13)/12)/2 - sqrt((2*q -
        S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) -
        S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) -
        S(13)/6)/2 - S.One/4)**3 + 2*(-sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S.One/3) - S(13)/12)/2 - sqrt((2*q -
        S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) -
        S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) -
        S(13)/6)/2 - S.One/4)**2 - Rational(1, 3))
    assert z.equals(0)
def test_random():
    """Test Expr._random: a random numeric sample, or None when the
    expression cannot be sampled."""
    from sympy.functions.combinatorial.numbers import lucas
    from sympy.simplify.simplify import posify
    assert posify(x)[0]._random() is not None
    assert lucas(n)._random(2, -2, 0, -1, 1) is None

    # issue 8662
    assert Piecewise((Max(x, y), z))._random() is None
def test_round():
assert str(Float('0.1249999').round(2)) == '0.12'
d20 = 12345678901234567890
ans = S(d20).round(2)
assert ans.is_Integer and ans == d20
ans = S(d20).round(-2)
assert ans.is_Integer and ans == 12345678901234567900
assert str(S('1/7').round(4)) == '0.1429'
assert str(S('.[12345]').round(4)) == '0.1235'
assert str(S('.1349').round(2)) == '0.13'
n = S(12345)
ans = n.round()
assert ans.is_Integer
assert ans == n
ans = n.round(1)
assert ans.is_Integer
assert ans == n
ans = n.round(4)
assert ans.is_Integer
assert ans == n
assert n.round(-1) == 12340
r = Float(str(n)).round(-4)
assert r == 10000.0
assert n.round(-5) == 0
assert str((pi + sqrt(2)).round(2)) == '4.56'
assert (10*(pi + sqrt(2))).round(-1) == 50.0
raises(TypeError, lambda: round(x + 2, 2))
assert str(S(2.3).round(1)) == '2.3'
# rounding in SymPy (as in Decimal) should be
# exact for the given precision; we check here
# that when a 5 follows the last digit that
# the rounded digit will be even.
for i in range(-99, 100):
# construct a decimal that ends in 5, e.g. 123 -> 0.1235
s = str(abs(i))
p = len(s) # we are going to round to the last digit of i
n = '0.%s5' % s # put a 5 after i's digits
j = p + 2 # 2 for '0.'
if i < 0: # 1 for '-'
j += 1
n = '-' + n
v = str(Float(n).round(p))[:j] # pertinent digits
if v.endswith('.'):
continue # it ends with 0 which is even
L = int(v[-1]) # last digit
assert L % 2 == 0, (n, '->', v)
assert (Float(.3, 3) + 2*pi).round() == 7
assert (Float(.3, 3) + 2*pi*100).round() == 629
assert (pi + 2*E*I).round() == 3 + 5*I
# don't let request for extra precision give more than
# what is known (in this case, only 3 digits)
assert str((Float(.03, 3) + 2*pi/100).round(5)) == '0.0928'
assert str((Float(.03, 3) + 2*pi/100).round(4)) == '0.0928'
assert S.Zero.round() == 0
a = (Add(1, Float('1.' + '9'*27, ''), evaluate=False))
assert a.round(10) == Float('3.000000000000000000000000000', '')
assert a.round(25) == Float('3.000000000000000000000000000', '')
assert a.round(26) == Float('3.000000000000000000000000000', '')
assert a.round(27) == Float('2.999999999999999999999999999', '')
assert a.round(30) == Float('2.999999999999999999999999999', '')
#assert a.round(10) == Float('3.0000000000', '')
#assert a.round(25) == Float('3.0000000000000000000000000', '')
#assert a.round(26) == Float('3.00000000000000000000000000', '')
#assert a.round(27) == Float('2.999999999999999999999999999', '')
#assert a.round(30) == Float('2.999999999999999999999999999', '')
# XXX: Should round set the precision of the result?
# The previous version of the tests above is this but they only pass
# because Floats with unequal precision compare equal:
#
# assert a.round(10) == Float('3.0000000000', '')
# assert a.round(25) == Float('3.0000000000000000000000000', '')
# assert a.round(26) == Float('3.00000000000000000000000000', '')
# assert a.round(27) == Float('2.999999999999999999999999999', '')
# assert a.round(30) == Float('2.999999999999999999999999999', '')
raises(TypeError, lambda: x.round())
raises(TypeError, lambda: f(1).round())
# exact magnitude of 10
assert str(S.One.round()) == '1'
assert str(S(100).round()) == '100'
# applied to real and imaginary portions
assert (2*pi + E*I).round() == 6 + 3*I
assert (2*pi + I/10).round() == 6
assert (pi/10 + 2*I).round() == 2*I
# the lhs re and im parts are Float with dps of 2
# and those on the right have dps of 15 so they won't compare
# equal unless we use string or compare components (which will
# then coerce the floats to the same precision) or re-create
# the floats
assert str((pi/10 + E*I).round(2)) == '0.31 + 2.72*I'
assert str((pi/10 + E*I).round(2).as_real_imag()) == '(0.31, 2.72)'
assert str((pi/10 + E*I).round(2)) == '0.31 + 2.72*I'
# issue 6914
assert (I**(I + 3)).round(3) == Float('-0.208', '')*I
# issue 8720
assert S(-123.6).round() == -124
assert S(-1.5).round() == -2
assert S(-100.5).round() == -100
assert S(-1.5 - 10.5*I).round() == -2 - 10*I
# issue 7961
assert str(S(0.006).round(2)) == '0.01'
assert str(S(0.00106).round(4)) == '0.0011'
# issue 8147
assert S.NaN.round() is S.NaN
assert S.Infinity.round() is S.Infinity
assert S.NegativeInfinity.round() is S.NegativeInfinity
assert S.ComplexInfinity.round() is S.ComplexInfinity
# check that types match
for i in range(2):
fi = float(i)
# 2 args
assert all(type(round(i, p)) is int for p in (-1, 0, 1))
assert all(S(i).round(p).is_Integer for p in (-1, 0, 1))
assert all(type(round(fi, p)) is float for p in (-1, 0, 1))
assert all(S(fi).round(p).is_Float for p in (-1, 0, 1))
# 1 arg (p is None)
assert type(round(i)) is int
assert S(i).round().is_Integer
assert type(round(fi)) is int
assert S(fi).round().is_Integer
# issue 25698
n = 6000002
assert int(n*(log(n) + log(log(n)))) == 110130079
one = cos(2)**2 + sin(2)**2
eq = exp(one*I*pi)
qr, qi = eq.as_real_imag()
assert qi.round(2) == 0.0
assert eq.round(2) == -1.0
eq = one - 1/S(10**120)
assert S.true not in (eq > 1, eq < 1)
assert int(eq) == int(.9) == 0
assert int(-eq) == int(-.9) == 0
# https://github.com/sympy/sympy/issues/28279
phi = (1+sqrt(5))/2
def a(n):
return int(2**n *log(phi)/log(10)-Rational(1, 2)*log(5)/log(10))+1
a857 = int("200829212952178927690909380949249948846461002549293765663931"
"695493228490311245625332991209824760595495085562557651697891"
"081559764342218116754386067315347927714197626480253219546461"
"109143700506913638072400645037349873738547576011048498684505"
"520181585966267100")
assert a(857) == a857
def test_held_expression_UnevaluatedExpr():
x = symbols("x")
he = UnevaluatedExpr(1/x)
e1 = x*he
assert isinstance(e1, Mul)
assert e1.args == (x, he)
assert e1.doit() == 1
assert UnevaluatedExpr(Derivative(x, x)).doit(deep=False
) == Derivative(x, x)
assert UnevaluatedExpr(Derivative(x, x)).doit() == 1
xx = Mul(x, x, evaluate=False)
assert xx != x**2
ue2 = UnevaluatedExpr(xx)
assert isinstance(ue2, UnevaluatedExpr)
assert ue2.args == (xx,)
assert ue2.doit() == x**2
assert ue2.doit(deep=False) == xx
x2 = UnevaluatedExpr(2)*2
assert type(x2) is Mul
assert x2.args == (2, UnevaluatedExpr(2))
def test_round_exception_nostr():
# Don't use the string form of the expression in the round exception, as
# it's too slow
s = Symbol('bad')
try:
s.round()
except TypeError as e:
assert 'bad' not in str(e)
else:
# Did not raise
raise AssertionError("Did not raise")
def test_extract_branch_factor():
assert exp_polar(2.0*I*pi).extract_branch_factor() == (1, 1)
def test_identity_removal():
assert Add.make_args(x + 0) == (x,)
assert Mul.make_args(x*1) == (x,)
def test_float_0():
assert Float(0.0) + 1 == Float(1.0)
@XFAIL
def test_float_0_fail():
assert Float(0.0)*x == Float(0.0)
assert (x + Float(0.0)).is_Add
def test_issue_6325():
ans = (b**2 + z**2 - (b*(a + b*t) + z*(c + t*z))**2/(
(a + b*t)**2 + (c + t*z)**2))/sqrt((a + b*t)**2 + (c + t*z)**2)
e = sqrt((a + b*t)**2 + (c + z*t)**2)
assert diff(e, t, 2) == ans
assert e.diff(t, 2) == ans
assert diff(e, t, 2, simplify=False) != ans
def test_issue_7426():
f1 = a % c
f2 = x % z
assert f1.equals(f2) is None
def test_issue_11122():
x = Symbol('x', extended_positive=False)
assert unchanged(Gt, x, 0) # (x > 0)
# (x > 0) should remain unevaluated after PR #16956
x = Symbol('x', positive=False, real=True)
assert (x > 0) is S.false
def test_issue_10651():
x = Symbol('x', real=True)
e1 = (-1 + x)/(1 - x)
e3 = (4*x**2 - 4)/((1 - x)*(1 + x))
e4 = 1/(cos(x)**2) - (tan(x))**2
x = Symbol('x', positive=True)
e5 = (1 + x)/x
assert e1.is_constant() is None
assert e3.is_constant() is None
assert e4.is_constant() is None
assert e5.is_constant() is False
def test_issue_10161():
x = symbols('x', real=True)
assert x*abs(x)*abs(x) == x**3
def test_issue_10755():
x = symbols('x')
raises(TypeError, lambda: int(log(x)))
raises(TypeError, lambda: log(x).round(2))
def test_issue_11877():
x = symbols('x')
assert integrate(log(S.Half - x), (x, 0, S.Half)) == Rational(-1, 2) -log(2)/2
@pytest.mark.parametrize('expr', [I / 3, I / 200])
def test_issue_28221(expr):
with pytest.raises(TypeError, match="Cannot convert non-comparable expression to int"):
int(expr)
def test_normal():
x = symbols('x')
e = Mul(S.Half, 1 + x, evaluate=False)
assert e.normal() == e
def test_expr():
x = symbols('x')
raises(TypeError, lambda: tan(x).series(x, 2, oo, "+"))
def test_ExprBuilder():
eb = ExprBuilder(Mul)
eb.args.extend([x, x])
assert eb.build() == x**2
def test_issue_22020():
from sympy.parsing.sympy_parser import parse_expr
x = parse_expr("log((2*V/3-V)/C)/-(R+r)*C")
y = parse_expr("log((2*V/3-V)/C)/-(R+r)*2")
assert x.equals(y) is False
def test_non_string_equality():
# Expressions should not compare equal to strings
x = symbols('x')
one = sympify(1)
assert (x == 'x') is False
assert (x != 'x') is True
assert (one == '1') is False
assert (one != '1') is True
assert (x + 1 == 'x + 1') is False
assert (x + 1 != 'x + 1') is True
# Make sure == doesn't try to convert the resulting expression to a string
# (e.g., by calling sympify() instead of _sympify())
class BadRepr:
def __repr__(self):
raise RuntimeError
assert (x == BadRepr()) is False
assert (x != BadRepr()) is True
def test_21494():
from sympy.testing.pytest import warns_deprecated_sympy
with warns_deprecated_sympy():
assert x.expr_free_symbols == {x}
with warns_deprecated_sympy():
assert Basic().expr_free_symbols == set()
with warns_deprecated_sympy():
assert S(2).expr_free_symbols == {S(2)}
with warns_deprecated_sympy():
assert Indexed("A", x).expr_free_symbols == {Indexed("A", x)}
with warns_deprecated_sympy():
assert Subs(x, x, 0).expr_free_symbols == set()
def test_Expr__eq__iterable_handling():
assert x != range(3)
@pytest.mark.skipif(
sys.version_info < (3, 12),
reason = "Format works for Python version >= 3.12"
)
def test_format():
assert '{:1.2f}'.format(S.Zero) == '0.00'
assert '{:+3.0f}'.format(S(3)) == ' +3'
assert '{:23.20f}'.format(pi) == ' 3.14159265358979323846'
assert '{:50.48f}'.format(exp(sin(1))) == '2.319776824715853173956590377503266813254904772376'
def test_issue_24045():
assert powsimp(exp(a)/((c*a - c*b)*(Float(1.0)*c*a - Float(1.0)*c*b))) # doesn't raise
def test__unevaluated_Mul():
A, B = symbols('A B', commutative=False)
assert _unevaluated_Mul(x, A, B, S(2), A).args == (2, x, A, B, A)
assert _unevaluated_Mul(-x*A*B, S(2), A).args == (-2, x, A, B, A)
def test_Float_zero_division_error():
# issue 27165
assert Float('1.7567e-1417').round(15) == Float(0)
| CustomMul |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/base_streams.py | {
"start": 9491,
"end": 13373
} | class ____(FBMarketingStream, CheckpointMixin, ABC):
"""Base class for incremental streams"""
cursor_field = "updated_time"
def __init__(self, start_date: Optional[datetime], end_date: Optional[datetime], **kwargs):
super().__init__(**kwargs)
self._start_date = AirbyteDateTime.from_datetime(start_date) if start_date else None
self._end_date = AirbyteDateTime.from_datetime(end_date) if end_date else None
self._state = {}
@property
def state(self):
return self._state
@state.setter
def state(self, value: Mapping[str, Any]):
self._state.update(**value)
def _get_updated_state(
self,
current_stream_state: MutableMapping[str, Any],
latest_record: Mapping[str, Any],
):
"""Update stream state from latest record"""
account_id = latest_record["account_id"]
state_for_accounts = self._transform_state_from_one_account_format(current_stream_state, ["include_deleted"])
state_for_accounts = self._transform_state_from_old_deleted_format(state_for_accounts)
account_state = self.get_account_state(account_id, state_for_accounts)
potentially_new_records_in_the_past = self._filter_statuses and (
set(self._filter_statuses) - set(account_state.get("filter_statuses", []))
)
record_value = latest_record[self.cursor_field]
state_value = account_state.get(self.cursor_field) or record_value
max_cursor = max(ab_datetime_parse(state_value), ab_datetime_parse(record_value))
if potentially_new_records_in_the_past:
max_cursor = record_value
state_for_accounts.setdefault(account_id, {})[self.cursor_field] = str(max_cursor)
state_for_accounts[account_id]["filter_statuses"] = self._filter_statuses
return state_for_accounts
def request_params(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
"""Include state filter"""
params = super().request_params(**kwargs)
params = deep_merge(params, self._state_filter(stream_state=stream_state or {}))
return params
def _state_filter(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
"""Additional filters associated with state if any set"""
state_value = stream_state.get(self.cursor_field)
if stream_state and state_value:
filter_value = ab_datetime_parse(state_value)
elif self._start_date:
filter_value = self._start_date
else:
# if start_date is not specified then do not use date filters
return {}
potentially_new_records_in_the_past = set(self._filter_statuses) - set(stream_state.get("filter_statuses", []))
if potentially_new_records_in_the_past:
self.logger.info(f"Ignoring bookmark for {self.name} because `filter_statuses` were changed.")
if self._start_date:
filter_value = self._start_date
else:
# if start_date is not specified then do not use date filters
return {}
return {
"filtering": [
{
"field": f"{self.entity_prefix}.{self.cursor_field}",
"operator": "GREATER_THAN",
"value": int(filter_value.timestamp()),
},
],
}
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
for record in super().read_records(sync_mode, cursor_field, stream_slice, stream_state):
self.state = self._get_updated_state(self.state, record)
yield record
| FBMarketingIncrementalStream |
python | neetcode-gh__leetcode | python/0791-custom-sort-string.py | {
"start": 0,
"end": 532
} | class ____:
def customSortString(self, order: str, s: str) -> str:
char_count_of_s = {}
for i in s:
char_count_of_s[i] = char_count_of_s.get(i, 0) + 1
satisfied_string = ""
for char in order:
if char in char_count_of_s:
satisfied_string += char * char_count_of_s[char]
del char_count_of_s[char]
for key,val in char_count_of_s.items():
satisfied_string += key * val
return satisfied_string
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/function7.py | {
"start": 588,
"end": 723
} | class ____:
def write(self, a: str, b: str):
pass
def make_writer2(w: _Writer2):
pass
make_writer2(Writer2())
| Writer2 |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/synapse.py | {
"start": 10438,
"end": 16064
} | class ____(BaseAzureSynapseHook):
"""
A hook to interact with Azure Synapse Pipeline.
:param azure_synapse_conn_id: The :ref:`Azure Synapse connection id<howto/connection:synapse>`.
:param azure_synapse_workspace_dev_endpoint: The Azure Synapse Workspace development endpoint.
"""
default_conn_name: str = "azure_synapse_connection"
def __init__(
self,
azure_synapse_workspace_dev_endpoint: str,
azure_synapse_conn_id: str = default_conn_name,
**kwargs,
):
self._conn: ArtifactsClient | None = None
self.azure_synapse_workspace_dev_endpoint = azure_synapse_workspace_dev_endpoint
super().__init__(azure_synapse_conn_id=azure_synapse_conn_id, **kwargs)
def _get_field(self, extras, name):
return get_field(
conn_id=self.conn_id,
conn_type=self.conn_type,
extras=extras,
field_name=name,
)
def get_conn(self) -> ArtifactsClient:
if self._conn is not None:
return self._conn
conn = self.get_connection(self.conn_id)
extras = conn.extra_dejson
tenant = self._get_field(extras, "tenantId")
credential: Credentials
if not conn.login or not conn.password:
managed_identity_client_id = self._get_field(extras, "managed_identity_client_id")
workload_identity_tenant_id = self._get_field(extras, "workload_identity_tenant_id")
credential = get_sync_default_azure_credential(
managed_identity_client_id=managed_identity_client_id,
workload_identity_tenant_id=workload_identity_tenant_id,
)
else:
if not tenant:
raise ValueError("A Tenant ID is required when authenticating with Client ID and Secret.")
credential = ClientSecretCredential(
client_id=conn.login, client_secret=conn.password, tenant_id=tenant
)
self._conn = self._create_client(credential, self.azure_synapse_workspace_dev_endpoint)
if self._conn is not None:
return self._conn
raise ValueError("Failed to create ArtifactsClient")
@staticmethod
def _create_client(credential: Credentials, endpoint: str) -> ArtifactsClient:
return ArtifactsClient(credential=credential, endpoint=endpoint)
def run_pipeline(self, pipeline_name: str, **config: Any) -> CreateRunResponse:
"""
Run a Synapse pipeline.
:param pipeline_name: The pipeline name.
:param config: Extra parameters for the Synapse Artifact Client.
:return: The pipeline run Id.
"""
return self.get_conn().pipeline.create_pipeline_run(pipeline_name, **config)
def get_pipeline_run(self, run_id: str) -> PipelineRun:
"""
Get the pipeline run.
:param run_id: The pipeline run identifier.
:return: The pipeline run.
"""
return self.get_conn().pipeline_run.get_pipeline_run(run_id=run_id)
def get_pipeline_run_status(self, run_id: str) -> str:
"""
Get a pipeline run's current status.
:param run_id: The pipeline run identifier.
:return: The status of the pipeline run.
"""
pipeline_run_status = self.get_pipeline_run(
run_id=run_id,
).status
return str(pipeline_run_status)
def refresh_conn(self) -> ArtifactsClient:
self._conn = None
return self.get_conn()
def wait_for_pipeline_run_status(
self,
run_id: str,
expected_statuses: str | set[str],
check_interval: int = 60,
timeout: int = 60 * 60 * 24 * 7,
) -> bool:
"""
Wait for a pipeline run to match an expected status.
:param run_id: The pipeline run identifier.
:param expected_statuses: The desired status(es) to check against a pipeline run's current status.
:param check_interval: Time in seconds to check on a pipeline run's status.
:param timeout: Time in seconds to wait for a pipeline to reach a terminal status or the expected
status.
:return: Boolean indicating if the pipeline run has reached the ``expected_status``.
"""
pipeline_run_status = self.get_pipeline_run_status(run_id=run_id)
executed_after_token_refresh = True
start_time = time.monotonic()
while (
pipeline_run_status not in AzureSynapsePipelineRunStatus.TERMINAL_STATUSES
and pipeline_run_status not in expected_statuses
):
if start_time + timeout < time.monotonic():
raise AzureSynapsePipelineRunException(
f"Pipeline run {run_id} has not reached a terminal status after {timeout} seconds."
)
# Wait to check the status of the pipeline run based on the ``check_interval`` configured.
time.sleep(check_interval)
try:
pipeline_run_status = self.get_pipeline_run_status(run_id=run_id)
executed_after_token_refresh = True
except ServiceRequestError:
if executed_after_token_refresh:
self.refresh_conn()
else:
raise
return pipeline_run_status in expected_statuses
def cancel_run_pipeline(self, run_id: str) -> None:
"""
Cancel the pipeline run.
:param run_id: The pipeline run identifier.
"""
self.get_conn().pipeline_run.cancel_pipeline_run(run_id)
| AzureSynapsePipelineHook |
python | ethereum__web3.py | web3/method.py | {
"start": 7800,
"end": 8570
} | class ____:
def __init__(
self,
method: Method[Callable[..., Any]],
old_name: str | None = None,
new_name: str | None = None,
msg: str | None = None,
) -> None:
self.method = method
self.old_name = old_name
self.new_name = new_name
self.msg = msg
def __get__(
self, obj: Optional["Module"] = None, obj_type: type["Module"] | None = None
) -> Any:
message = f"{self.old_name} is deprecated in favor of {self.new_name}"
if self.msg is not None:
message = self.msg
warnings.warn(
message,
category=DeprecationWarning,
stacklevel=2,
)
return self.method.__get__(obj, obj_type)
| DeprecatedMethod |
python | fluentpython__example-code | 14-it-generator/isis2json/subfield.py | {
"start": 1767,
"end": 2809
} | class ____(object):
''' Represent an Isis field, with subfields, using
Python native datastructures
>>> author = CompositeString('John Tenniel^xillustrator',
... subkeys='x')
>>> unicode(author)
u'John Tenniel^xillustrator'
'''
def __init__(self, isis_raw, subkeys=None, encoding=DEFAULT_ENCODING):
if not isinstance(isis_raw, basestring):
raise TypeError('%r value must be unicode or str instance' % isis_raw)
self.__isis_raw = isis_raw.decode(encoding)
self.__expanded = expand(self.__isis_raw, subkeys)
def __getitem__(self, key):
for subfield in self.__expanded:
if subfield[0] == key:
return subfield[1]
else:
raise KeyError(key)
def __iter__(self):
return (subfield[0] for subfield in self.__expanded)
def items(self):
return self.__expanded
def __unicode__(self):
return self.__isis_raw
def __str__(self):
return str(self.__isis_raw)
| CompositeString |
python | weaviate__weaviate-python-client | weaviate/collections/classes/generative.py | {
"start": 5925,
"end": 7290
} | class ____(_GenerativeConfigRuntime):
generative: Union[GenerativeSearches, _EnumLikeStr] = Field(
default=GenerativeSearches.DATABRICKS, frozen=True, exclude=True
)
endpoint: AnyHttpUrl
frequency_penalty: Optional[float]
log_probs: Optional[bool]
max_tokens: Optional[int]
model: Optional[str]
n: Optional[int]
presence_penalty: Optional[float]
stop: Optional[List[str]]
temperature: Optional[float]
top_log_probs: Optional[int]
top_p: Optional[float]
def _to_grpc(self, opts: _GenerativeConfigRuntimeOptions) -> generative_pb2.GenerativeProvider:
self._validate_multi_modal(opts)
return generative_pb2.GenerativeProvider(
return_metadata=opts.return_metadata,
databricks=generative_pb2.GenerativeDatabricks(
endpoint=_parse_anyhttpurl(self.endpoint),
frequency_penalty=self.frequency_penalty,
log_probs=self.log_probs or False,
max_tokens=self.max_tokens,
model=self.model,
n=self.n,
presence_penalty=self.presence_penalty,
stop=_to_text_array(self.stop),
temperature=self.temperature,
top_log_probs=self.top_log_probs,
top_p=self.top_p,
),
)
| _GenerativeDatabricks |
python | doocs__leetcode | solution/0500-0599/0510.Inorder Successor in BST II/Solution.py | {
"start": 177,
"end": 513
} | class ____:
def inorderSuccessor(self, node: "Node") -> "Optional[Node]":
if node.right:
node = node.right
while node.left:
node = node.left
return node
while node.parent and node.parent.right is node:
node = node.parent
return node.parent
| Solution |
python | spack__spack | lib/spack/spack/util/compression.py | {
"start": 16987,
"end": 17166
} | class ____(CompressedFileTypeInterface):
_MAGIC_NUMBER_LZW = b"\x1f\x9d"
_MAGIC_NUMBER_LZH = b"\x1f\xa0"
extension = "Z"
name = "compress'd data"
| ZCompressedFileType |
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_estimator_checks.py | {
"start": 3848,
"end": 4143
} | class ____(BaseEstimator):
def __init__(self, key=0):
self.key = key
def fit(self, X, y=None):
X, y = validate_data(self, X, y)
return self
def predict(self, X):
X = check_array(X)
self.key = 1000
return np.ones(X.shape[0])
| ChangesDict |
python | pyca__cryptography | src/cryptography/hazmat/primitives/hashes.py | {
"start": 2175,
"end": 2263
} | class ____(HashAlgorithm):
name = "sha1"
digest_size = 20
block_size = 64
| SHA1 |
python | keras-team__keras | keras/src/ops/linalg_test.py | {
"start": 311,
"end": 6624
} | class ____(testing.TestCase):
def test_cholesky(self):
x = KerasTensor([None, 20, 20])
out = linalg.cholesky(x)
self.assertEqual(out.shape, (None, 20, 20))
x = KerasTensor([None, None, 20])
with self.assertRaises(ValueError):
linalg.cholesky(x)
x = KerasTensor([None, 20, 15])
with self.assertRaises(ValueError):
linalg.cholesky(x)
def test_cholesky_inverse(self):
x = KerasTensor([None, 20, 20])
out = linalg.cholesky_inverse(x)
self.assertEqual(out.shape, (None, 20, 20))
x = KerasTensor([None, None, 20])
with self.assertRaises(ValueError):
linalg.cholesky_inverse(x)
x = KerasTensor([None, 20, 15])
with self.assertRaises(ValueError):
linalg.cholesky_inverse(x)
def test_det(self):
x = KerasTensor([None, 20, 20])
out = linalg.det(x)
self.assertEqual(out.shape, (None,))
x = KerasTensor([None, None, 20])
with self.assertRaises(ValueError):
linalg.det(x)
x = KerasTensor([None, 20, 15])
with self.assertRaises(ValueError):
linalg.det(x)
def test_eig(self):
x = KerasTensor([None, 20, 20])
w, v = linalg.eig(x)
self.assertEqual(w.shape, (None, 20))
self.assertEqual(v.shape, (None, 20, 20))
def test_eigh(self):
x = KerasTensor([None, 20, 20])
w, v = linalg.eigh(x)
self.assertEqual(w.shape, (None, 20))
self.assertEqual(v.shape, (None, 20, 20))
def test_inv(self):
x = KerasTensor([None, 20, 20])
out = linalg.inv(x)
self.assertEqual(out.shape, (None, 20, 20))
x = KerasTensor([None, None, 20])
with self.assertRaises(ValueError):
linalg.inv(x)
x = KerasTensor([None, 20, 15])
with self.assertRaises(ValueError):
linalg.inv(x)
def test_lu_factor(self):
if testing.jax_uses_gpu():
self.skipTest("Skipping test with JAX + GPU due to temporary error")
x = KerasTensor([None, 4, 3])
lu, p = linalg.lu_factor(x)
self.assertEqual(lu.shape, (None, 4, 3))
self.assertEqual(p.shape, (None, 3))
x = KerasTensor([None, 2, 3])
lu, p = linalg.lu_factor(x)
self.assertEqual(lu.shape, (None, 2, 3))
self.assertEqual(p.shape, (None, 2))
def test_norm(self):
x = KerasTensor((None, 3))
self.assertEqual(linalg.norm(x).shape, ())
x = KerasTensor((None, 3, 3))
self.assertEqual(linalg.norm(x, axis=1).shape, (None, 3))
self.assertEqual(
linalg.norm(x, axis=1, keepdims=True).shape, (None, 1, 3)
)
def test_qr(self):
x = KerasTensor((None, 4, 3), dtype="float32")
q, r = linalg.qr(x, mode="reduced")
qref, rref = np.linalg.qr(np.ones((2, 4, 3)), mode="reduced")
qref_shape = (None,) + qref.shape[1:]
rref_shape = (None,) + rref.shape[1:]
self.assertEqual(q.shape, qref_shape)
self.assertEqual(r.shape, rref_shape)
q, r = linalg.qr(x, mode="complete")
qref, rref = np.linalg.qr(np.ones((2, 4, 3)), mode="complete")
qref_shape = (None,) + qref.shape[1:]
rref_shape = (None,) + rref.shape[1:]
self.assertEqual(q.shape, qref_shape)
self.assertEqual(r.shape, rref_shape)
def test_qr_invalid_mode(self):
# backend agnostic error message
x = np.array([[1, 2], [3, 4]])
invalid_mode = "invalid_mode"
with self.assertRaisesRegex(
ValueError, "Expected one of {'reduced', 'complete'}."
):
linalg.qr(x, mode=invalid_mode)
def test_solve(self):
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, 20, 5])
out = linalg.solve(a, b)
self.assertEqual(out.shape, (None, 20, 5))
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, 20])
out = linalg.solve(a, b)
self.assertEqual(out.shape, (None, 20))
a = KerasTensor([None, None, 20])
b = KerasTensor([None, 20, 5])
with self.assertRaises(ValueError):
linalg.solve(a, b)
a = KerasTensor([None, 20, 15])
b = KerasTensor([None, 20, 5])
with self.assertRaises(ValueError):
linalg.solve(a, b)
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, None, 5])
with self.assertRaises(ValueError):
linalg.solve(a, b)
def test_solve_triangular(self):
if testing.jax_uses_gpu():
self.skipTest("Skipping test with JAX + GPU due to temporary error")
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, 20, 5])
out = linalg.solve_triangular(a, b)
self.assertEqual(out.shape, (None, 20, 5))
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, 20])
out = linalg.solve_triangular(a, b)
self.assertEqual(out.shape, (None, 20))
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, 20, 5])
out = linalg.solve_triangular(a, b, lower=True)
self.assertEqual(out.shape, (None, 20, 5))
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, 20])
out = linalg.solve_triangular(a, b, lower=True)
self.assertEqual(out.shape, (None, 20))
a = KerasTensor([None, 20, 15])
b = KerasTensor([None, 20, 5])
with self.assertRaises(ValueError):
linalg.solve_triangular(a, b)
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, None, 5])
with self.assertRaises(ValueError):
linalg.solve_triangular(a, b)
def test_svd(self):
x = KerasTensor((None, 3, 2))
u, s, v = linalg.svd(x)
self.assertEqual(u.shape, (None, 3, 3))
self.assertEqual(s.shape, (None, 2))
self.assertEqual(v.shape, (None, 2, 2))
u, s, v = linalg.svd(x, full_matrices=False)
self.assertEqual(u.shape, (None, 3, 2))
self.assertEqual(s.shape, (None, 2))
self.assertEqual(v.shape, (None, 2, 2))
s = linalg.svd(x, compute_uv=False)
self.assertEqual(s.shape, (None, 2))
| LinalgOpsDynamicShapeTest |
python | langchain-ai__langchain | libs/core/langchain_core/document_loaders/base.py | {
"start": 618,
"end": 3614
} | class ____(ABC): # noqa: B024
"""Interface for Document Loader.
Implementations should implement the lazy-loading method using generators
to avoid loading all documents into memory at once.
`load` is provided just for user convenience and should not be overridden.
"""
# Sub-classes should not implement this method directly. Instead, they
# should implement the lazy load method.
def load(self) -> list[Document]:
"""Load data into `Document` objects.
Returns:
The documents.
"""
return list(self.lazy_load())
async def aload(self) -> list[Document]:
"""Load data into `Document` objects.
Returns:
The documents.
"""
return [document async for document in self.alazy_load()]
def load_and_split(
self, text_splitter: TextSplitter | None = None
) -> list[Document]:
"""Load `Document` and split into chunks. Chunks are returned as `Document`.
!!! danger
Do not override this method. It should be considered to be deprecated!
Args:
text_splitter: `TextSplitter` instance to use for splitting documents.
Defaults to `RecursiveCharacterTextSplitter`.
Raises:
ImportError: If `langchain-text-splitters` is not installed
and no `text_splitter` is provided.
Returns:
List of `Document`.
"""
if text_splitter is None:
if not _HAS_TEXT_SPLITTERS:
msg = (
"Unable to import from langchain_text_splitters. Please specify "
"text_splitter or install langchain_text_splitters with "
"`pip install -U langchain-text-splitters`."
)
raise ImportError(msg)
text_splitter_: TextSplitter = RecursiveCharacterTextSplitter()
else:
text_splitter_ = text_splitter
docs = self.load()
return text_splitter_.split_documents(docs)
# Attention: This method will be upgraded into an abstractmethod once it's
# implemented in all the existing subclasses.
def lazy_load(self) -> Iterator[Document]:
"""A lazy loader for `Document`.
Yields:
The `Document` objects.
"""
if type(self).load != BaseLoader.load:
return iter(self.load())
msg = f"{self.__class__.__name__} does not implement lazy_load()"
raise NotImplementedError(msg)
async def alazy_load(self) -> AsyncIterator[Document]:
"""A lazy loader for `Document`.
Yields:
The `Document` objects.
"""
iterator = await run_in_executor(None, self.lazy_load)
done = object()
while True:
doc = await run_in_executor(None, next, iterator, done)
if doc is done:
break
yield doc # type: ignore[misc]
| BaseLoader |
python | ansible__ansible | test/units/module_utils/facts/test_ansible_collector.py | {
"start": 11046,
"end": 11464
} | class ____(collector.BaseFactCollector):
name = 'concat_collected'
def collect(self, module=None, collected_facts=None):
collected_facts = collected_facts or {}
fact_dict = {}
con_cat_list = []
for key, value in collected_facts.items():
con_cat_list.append(value)
fact_dict['concat_fact'] = '-'.join(con_cat_list)
return fact_dict
| ConCatFactCollector |
python | psf__black | src/black/ranges.py | {
"start": 16588,
"end": 20594
} | class ____:
"""1-based lines mapping from original source to modified source.
Lines [original_start, original_end] from original source
are mapped to [modified_start, modified_end].
The ranges are inclusive on both ends.
"""
original_start: int
original_end: int
modified_start: int
modified_end: int
# Whether this range corresponds to a changed block, or an unchanged block.
is_changed_block: bool
def _calculate_lines_mappings(
original_source: str,
modified_source: str,
) -> Sequence[_LinesMapping]:
"""Returns a sequence of _LinesMapping by diffing the sources.
For example, given the following diff:
import re
- def func(arg1,
- arg2, arg3):
+ def func(arg1, arg2, arg3):
pass
It returns the following mappings:
original -> modified
(1, 1) -> (1, 1), is_changed_block=False (the "import re" line)
(2, 3) -> (2, 2), is_changed_block=True (the diff)
(4, 4) -> (3, 3), is_changed_block=False (the "pass" line)
You can think of this visually as if it brings up a side-by-side diff, and tries
to map the line ranges from the left side to the right side:
(1, 1)->(1, 1) 1. import re 1. import re
(2, 3)->(2, 2) 2. def func(arg1, 2. def func(arg1, arg2, arg3):
3. arg2, arg3):
(4, 4)->(3, 3) 4. pass 3. pass
Args:
original_source: the original source.
modified_source: the modified source.
"""
matcher = difflib.SequenceMatcher(
None,
original_source.splitlines(keepends=True),
modified_source.splitlines(keepends=True),
)
matching_blocks = matcher.get_matching_blocks()
lines_mappings: list[_LinesMapping] = []
# matching_blocks is a sequence of "same block of code ranges", see
# https://docs.python.org/3/library/difflib.html#difflib.SequenceMatcher.get_matching_blocks
# Each block corresponds to a _LinesMapping with is_changed_block=False,
# and the ranges between two blocks corresponds to a _LinesMapping with
# is_changed_block=True,
# NOTE: matching_blocks is 0-based, but _LinesMapping is 1-based.
for i, block in enumerate(matching_blocks):
if i == 0:
if block.a != 0 or block.b != 0:
lines_mappings.append(
_LinesMapping(
original_start=1,
original_end=block.a,
modified_start=1,
modified_end=block.b,
is_changed_block=False,
)
)
else:
previous_block = matching_blocks[i - 1]
lines_mappings.append(
_LinesMapping(
original_start=previous_block.a + previous_block.size + 1,
original_end=block.a,
modified_start=previous_block.b + previous_block.size + 1,
modified_end=block.b,
is_changed_block=True,
)
)
if i < len(matching_blocks) - 1:
lines_mappings.append(
_LinesMapping(
original_start=block.a + 1,
original_end=block.a + block.size,
modified_start=block.b + 1,
modified_end=block.b + block.size,
is_changed_block=False,
)
)
return lines_mappings
def _find_lines_mapping_index(
original_line: int,
lines_mappings: Sequence[_LinesMapping],
start_index: int,
) -> int:
"""Returns the original index of the lines mappings for the original line."""
index = start_index
while index < len(lines_mappings):
mapping = lines_mappings[index]
if mapping.original_start <= original_line <= mapping.original_end:
return index
index += 1
return index
| _LinesMapping |
python | scrapy__scrapy | tests/test_pipeline_media.py | {
"start": 7971,
"end": 14314
} | class ____(TestBaseMediaPipeline):
pipeline_class = MockedMediaPipeline
def _errback(self, result):
self.pipe._mockcalled.append("request_errback")
return result
@inlineCallbacks
def test_result_succeed(self):
rsp = Response("http://url1")
req = Request(
"http://url1",
meta={"response": rsp},
errback=self._errback,
)
item = {"requests": req}
new_item = yield self.pipe.process_item(item)
assert new_item["results"] == [(True, {})]
assert self.pipe._mockcalled == [
"get_media_requests",
"media_to_download",
"media_downloaded",
"item_completed",
]
@inlineCallbacks
def test_result_failure(self):
self.pipe.LOG_FAILED_RESULTS = False
exc = Exception("foo")
fail = Failure(exc)
req = Request(
"http://url1",
meta={"response": fail},
errback=self._errback,
)
item = {"requests": req}
new_item = yield self.pipe.process_item(item)
assert len(new_item["results"]) == 1
assert new_item["results"][0][0] is False
assert isinstance(new_item["results"][0][1], Failure)
assert new_item["results"][0][1].value == exc
assert self.pipe._mockcalled == [
"get_media_requests",
"media_to_download",
"media_failed",
"request_errback",
"item_completed",
]
@inlineCallbacks
def test_mix_of_success_and_failure(self):
self.pipe.LOG_FAILED_RESULTS = False
rsp1 = Response("http://url1")
req1 = Request("http://url1", meta={"response": rsp1})
exc = Exception("foo")
fail = Failure(exc)
req2 = Request("http://url2", meta={"response": fail})
item = {"requests": [req1, req2]}
new_item = yield self.pipe.process_item(item)
assert len(new_item["results"]) == 2
assert new_item["results"][0] == (True, {})
assert new_item["results"][1][0] is False
assert isinstance(new_item["results"][1][1], Failure)
assert new_item["results"][1][1].value == exc
m = self.pipe._mockcalled
# only once
assert m[0] == "get_media_requests" # first hook called
assert m.count("get_media_requests") == 1
assert m.count("item_completed") == 1
assert m[-1] == "item_completed" # last hook called
# twice, one per request
assert m.count("media_to_download") == 2
# one to handle success and other for failure
assert m.count("media_downloaded") == 1
assert m.count("media_failed") == 1
@inlineCallbacks
def test_get_media_requests(self):
# returns single Request (without callback)
req = Request("http://url")
item = {"requests": req} # pass a single item
new_item = yield self.pipe.process_item(item)
assert new_item is item
assert self.fingerprint(req) in self.info.downloaded
# returns iterable of Requests
req1 = Request("http://url1")
req2 = Request("http://url2")
item = {"requests": iter([req1, req2])}
new_item = yield self.pipe.process_item(item)
assert new_item is item
assert self.fingerprint(req1) in self.info.downloaded
assert self.fingerprint(req2) in self.info.downloaded
@inlineCallbacks
def test_results_are_cached_across_multiple_items(self):
rsp1 = Response("http://url1")
req1 = Request("http://url1", meta={"response": rsp1})
item = {"requests": req1}
new_item = yield self.pipe.process_item(item)
assert new_item is item
assert new_item["results"] == [(True, {})]
# rsp2 is ignored, rsp1 must be in results because request fingerprints are the same
req2 = Request(
req1.url, meta={"response": Response("http://donot.download.me")}
)
item = {"requests": req2}
new_item = yield self.pipe.process_item(item)
assert new_item is item
assert self.fingerprint(req1) == self.fingerprint(req2)
assert new_item["results"] == [(True, {})]
@inlineCallbacks
def test_results_are_cached_for_requests_of_single_item(self):
rsp1 = Response("http://url1")
req1 = Request("http://url1", meta={"response": rsp1})
req2 = Request(
req1.url, meta={"response": Response("http://donot.download.me")}
)
item = {"requests": [req1, req2]}
new_item = yield self.pipe.process_item(item)
assert new_item is item
assert new_item["results"] == [(True, {}), (True, {})]
@inlineCallbacks
def test_wait_if_request_is_downloading(self):
def _check_downloading(response):
fp = self.fingerprint(req1)
assert fp in self.info.downloading
assert fp in self.info.waiting
assert fp not in self.info.downloaded
assert len(self.info.waiting[fp]) == 2
return response
rsp1 = Response("http://url")
def rsp1_func():
dfd = Deferred().addCallback(_check_downloading)
call_later(0.1, dfd.callback, rsp1)
return dfd
def rsp2_func():
pytest.fail("it must cache rsp1 result and must not try to redownload")
req1 = Request("http://url", meta={"response": rsp1_func})
req2 = Request(req1.url, meta={"response": rsp2_func})
item = {"requests": [req1, req2]}
new_item = yield self.pipe.process_item(item)
assert new_item["results"] == [(True, {}), (True, {})]
@inlineCallbacks
def test_use_media_to_download_result(self):
req = Request("http://url", meta={"result": "ITSME"})
item = {"requests": req}
new_item = yield self.pipe.process_item(item)
assert new_item["results"] == [(True, "ITSME")]
assert self.pipe._mockcalled == [
"get_media_requests",
"media_to_download",
"item_completed",
]
def test_key_for_pipe(self):
assert (
self.pipe._key_for_pipe("IMAGES", base_class_name="MediaPipeline")
== "MOCKEDMEDIAPIPELINE_IMAGES"
)
| TestMediaPipeline |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 7968,
"end": 8236
} | class ____(Message):
message = "'...'.format(...) has unused named argument(s): %s"
def __init__(self, filename, loc, extra_keywords):
Message.__init__(self, filename, loc)
self.message_args = (extra_keywords,)
| StringDotFormatExtraNamedArguments |
python | wandb__wandb | wandb/sdk/artifacts/_generated/project_artifacts.py | {
"start": 553,
"end": 748
} | class ____(GQLResult):
artifact_collection: Optional[
ProjectArtifactsProjectArtifactTypeArtifactCollection
] = Field(alias="artifactCollection")
| ProjectArtifactsProjectArtifactType |
python | pypa__warehouse | tests/unit/oidc/models/test_github.py | {
"start": 28619,
"end": 30855
} | class ____:
def test_reify_does_not_exist_yet(self, db_request):
pending_publisher = PendingGitHubPublisherFactory.create()
assert (
db_request.db.query(github.GitHubPublisher)
.filter_by(
repository_name=pending_publisher.repository_name,
repository_owner=pending_publisher.repository_owner,
repository_owner_id=pending_publisher.repository_owner_id,
workflow_filename=pending_publisher.workflow_filename,
environment=pending_publisher.environment,
)
.one_or_none()
is None
)
publisher = pending_publisher.reify(db_request.db)
# If an OIDC publisher for this pending publisher does not already exist,
# a new one is created and the pending publisher is marked for deletion.
assert isinstance(publisher, github.GitHubPublisher)
assert pending_publisher in db_request.db.deleted
assert publisher.repository_name == pending_publisher.repository_name
assert publisher.repository_owner == pending_publisher.repository_owner
assert publisher.repository_owner_id == pending_publisher.repository_owner_id
assert publisher.workflow_filename == pending_publisher.workflow_filename
assert publisher.environment == pending_publisher.environment
def test_reify_already_exists(self, db_request):
existing_publisher = GitHubPublisherFactory.create()
pending_publisher = PendingGitHubPublisherFactory.create(
repository_name=existing_publisher.repository_name,
repository_owner=existing_publisher.repository_owner,
repository_owner_id=existing_publisher.repository_owner_id,
workflow_filename=existing_publisher.workflow_filename,
environment=existing_publisher.environment,
)
publisher = pending_publisher.reify(db_request.db)
# If an OIDC publisher for this pending publisher already exists,
# it is returned and the pending publisher is marked for deletion.
assert existing_publisher == publisher
assert pending_publisher in db_request.db.deleted
| TestPendingGitHubPublisher |
python | numba__numba | numba/tests/test_np_randomgen.py | {
"start": 5988,
"end": 53649
} | class ____(MemoryLeakMixin, TestCase):
def check_numpy_parity(self, distribution_func,
bitgen_type=None, seed=None,
test_size=None, test_dtype=None,
ulp_prec=5):
distribution_func = numba.njit(distribution_func)
if seed is None:
seed = 1
if bitgen_type is None:
numba_rng_instance = np.random.default_rng(seed=seed)
numpy_rng_instance = np.random.default_rng(seed=seed)
else:
numba_rng_instance = Generator(bitgen_type(seed))
numpy_rng_instance = Generator(bitgen_type(seed))
# Check parity for different size cases
numba_res = distribution_func(numba_rng_instance,
test_size, test_dtype)
numpy_res = distribution_func.py_func(numpy_rng_instance,
test_size, test_dtype)
if (isinstance(numba_res, np.ndarray) and
np.issubdtype(numba_res.dtype, np.floating)) \
or isinstance(numba_res, float):
# Float scalars and arrays
np.testing.assert_array_max_ulp(numpy_res, numba_res,
maxulp=ulp_prec, dtype=test_dtype)
else:
# Bool/int scalars and arrays
np.testing.assert_equal(numba_res, numpy_res)
# Check if the end state of both BitGenerators is same
# after drawing the distributions
numba_gen_state = numba_rng_instance.bit_generator.state['state']
numpy_gen_state = numpy_rng_instance.bit_generator.state['state']
for _state_key in numpy_gen_state:
self.assertPreciseEqual(numba_gen_state[_state_key],
numpy_gen_state[_state_key])
def _test_bitgen_func_parity(self, func_name, bitgen_func, seed=1):
numba_rng_instance = np.random.default_rng(seed=seed)
numpy_rng_instance = np.random.default_rng(seed=seed)
numpy_func = getattr(numpy_rng_instance.bit_generator.ctypes, func_name)
numpy_res = numpy_func(numpy_rng_instance.bit_generator.ctypes.state)
numba_func = numba.njit(lambda x: bitgen_func(x.bit_generator))
numba_res = numba_func(numba_rng_instance)
self.assertPreciseEqual(numba_res, numpy_res)
def _check_invalid_types(self, dist_func, arg_list,
valid_args, invalid_args):
rng = np.random.default_rng()
for idx, _arg in enumerate(arg_list):
curr_args = valid_args.copy()
curr_args[idx] = invalid_args[idx]
curr_args = [rng] + curr_args
nb_dist_func = numba.njit(dist_func)
with self.assertRaises(TypingError) as raises:
nb_dist_func(*curr_args)
self.assertIn(
f'Argument {_arg} is not one of the expected type(s):',
str(raises.exception)
)
def test_npgen_boxing_unboxing(self):
rng_instance = np.random.default_rng()
numba_func = numba.njit(lambda x: x)
self.assertEqual(rng_instance, numba_func(rng_instance))
self.assertEqual(id(rng_instance), id(numba_func(rng_instance)))
def test_npgen_boxing_refcount(self):
rng_instance = np.random.default_rng()
no_box = numba.njit(lambda x:x.random())
do_box = numba.njit(lambda x:x)
y = do_box(rng_instance)
gc.collect()
ref_1 = sys.getrefcount(rng_instance)
del y
no_box(rng_instance)
gc.collect()
ref_2 = sys.getrefcount(rng_instance)
self.assertEqual(ref_1, ref_2 + 1)
def test_bitgen_funcs(self):
func_names = ["next_uint32", "next_uint64", "next_double"]
funcs = [next_uint32, next_uint64, next_double]
for _func, _func_name in zip(funcs, func_names):
with self.subTest(_func=_func, _func_name=_func_name):
self._test_bitgen_func_parity(_func_name, _func)
def test_integers(self):
test_sizes = [None, (), (100,), (10, 20, 30)]
test_dtypes = [np.int64, np.int32, np.int16, np.int8,
np.uint64, np.uint32, np.uint16, np.uint8]
bitgen_types = [None, MT19937]
# Test with no arguments
dist_func = lambda x, size, dtype:x.integers(0, 100)
with self.subTest():
self.check_numpy_parity(dist_func, test_size=None,
test_dtype=None, ulp_prec=0)
dist_func = lambda x, size, dtype:\
x.integers(5, 10, size=size, dtype=dtype)
for _size in test_sizes:
for _dtype in test_dtypes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _dtype=_dtype,
_bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, _dtype, 0)
# Checking dtype = bool seperately
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:\
x.integers(False, True, size=size, dtype=np.bool_)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size,
_bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, np.bool_, 0)
# Test dtype casting for high and low
dist_func = lambda x, size, dtype: \
x.integers(np.uint8(0), np.int64(100))
with self.subTest():
self.check_numpy_parity(dist_func, test_size=None,
test_dtype=None)
dist_func = lambda x, low, high, size, dtype, endpoint:\
x.integers(low=low, high=high, size=size,
dtype=dtype, endpoint=endpoint)
self._check_invalid_types(dist_func,
['low', 'high', 'size', 'dtype', 'endpoint'],
[1, 5, (1,), np.int64, True],
['x', 'x', ('x',), np.float64, 'x'])
# Testing .integers() dtype wise
def test_integers_cases(self):
cases = [
# low, high, dtype
(5, 6, np.uint64), # rng == 0 (rng stands for range)
(5, 100, np.uint64), # rng <= 0xFFFFFFFF
(0, 0xFFFFFFFFFF, np.uint64), # rng > 0xFFFFFFFF
(0, 0xFFFFFFFFFFFFFFFF - 1, np.uint64),# rng == 0xFFFFFFFFFFFFFFFF-1
(0, 0xFFFFFFFFFFFFFFFF, np.uint64), # rng == 0xFFFFFFFFFFFFFFFF
(5, 6, np.int64), # rng == 0
(5, 100, np.int64), # rng <= 0xFFFFFFFF
(0, 0xFFFFFFFFFF, np.int64), # rng > 0xFFFFFFFF
(0, 0xFFFFFFFFFFFFFFF - 1, np.int64), # rng == 0xFFFFFFFFFFFFFFF - 1
(0, 0xFFFFFFFFFFFFFFF, np.int64), # rng == 0xFFFFFFFFFFFFFFF
(-0xFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFF, np.int64), # min/max
(5, 6, np.uint32), # rng == 0
(5, 100, np.uint32), # rng < 0xFFFFFFFF
(0, 0xFFFFFFFF - 1, np.uint32), # rng == 0xFFFFFFFF - 1
(0, 0xFFFFFFFF, np.uint32), # rng == 0xFFFFFFFF
(5, 6, np.int32), # rng == 0
(5, 100, np.int32), # rng < 0xFFFFFFFF
(0, 0xFFFFFFF - 1, np.int32), # rng == 0xFFFFFFF - 1
(0, 0xFFFFFFF, np.int32), # rng == 0xFFFFFFF
(-0xFFFFFFF, 0xFFFFFFF, np.int32),
(5, 6, np.uint16), # rng == 0
(5, 100, np.uint16), # rng < 0xFFFF
(0, 0xFFFF - 1, np.uint16), # rng == 0xFFFF - 1
(0, 0xFFFF, np.uint16), # rng == 0xFFFF
(5, 6, np.int16), # rng == 0
(5, 10, np.int16), # rng < 0xFFF
(0, 0xFFF - 1, np.int16), # rng == 0xFFF - 1
(0, 0xFFF, np.int16), # rng == 0xFFF
(-0xFFF, 0xFFF, np.int16),
(5, 6, np.uint8), # rng == 0
(5, 10, np.uint8), # rng < 0xFF
(0, 0xFF - 1, np.uint8), # rng == 0xFF - 1
(0, 0xFF, np.uint8), # rng == 0xFF
(5, 6, np.int8), # rng == 0
(5, 10, np.int8), # rng < 0xF
(0, 0xF - 1, np.int8), # rng == 0xF-1
(0, 0xF, np.int8), # rng == 0xF
(-0xF, 0xF, np.int8),
]
size = (2, 3)
for low, high, dtype in cases:
with self.subTest(low=low, high=high, dtype=dtype):
dist_func = lambda x, size, dtype:\
x.integers(low, high, size=size, dtype=dtype)
self.check_numpy_parity(dist_func, None,
None, size, dtype, 0)
def test_random(self):
test_sizes = [None, (), (100,), (10, 20, 30)]
test_dtypes = [np.float32, np.float64]
bitgen_types = [None, MT19937]
# Test with no arguments
dist_func = lambda x, size, dtype:x.random()
with self.subTest():
self.check_numpy_parity(dist_func, test_size=None,
test_dtype=None)
dist_func = lambda x, size, dtype:x.random(size=size, dtype=dtype)
for _size in test_sizes:
for _dtype in test_dtypes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _dtype=_dtype,
_bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, _dtype)
dist_func = lambda x, size, dtype:\
x.random(size=size, dtype=dtype)
self._check_invalid_types(dist_func, ['size', 'dtype'],
[(1,), np.float64], [('x',), 0.])
def test_standard_normal(self):
test_sizes = [None, (), (100,), (10, 20, 30)]
test_dtypes = [np.float32, np.float64]
bitgen_types = [None, MT19937]
# Test with no arguments
dist_func = lambda x, size, dtype:x.standard_normal()
with self.subTest():
self.check_numpy_parity(dist_func, test_size=None,
test_dtype=None)
dist_func = lambda x, size, dtype:\
x.standard_normal(size=size, dtype=dtype)
for _size in test_sizes:
for _dtype in test_dtypes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _dtype=_dtype,
_bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, _dtype)
dist_func = lambda x, size, dtype:\
x.standard_normal(size=size, dtype=dtype)
self._check_invalid_types(dist_func, ['size', 'dtype'],
[(1,), np.float32], [('x',), 0])
def test_standard_exponential(self):
test_sizes = [None, (), (100,), (10, 20, 30)]
test_dtypes = [np.float32, np.float64]
bitgen_types = [None, MT19937]
# Test with no arguments
dist_func = lambda x, size, dtype:x.standard_exponential()
with self.subTest():
self.check_numpy_parity(dist_func, test_size=None,
test_dtype=None)
dist_func = lambda x, size, dtype:\
x.standard_exponential(size=size, dtype=dtype)
for _size in test_sizes:
for _dtype in test_dtypes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _dtype=_dtype,
_bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, _dtype)
dist_func = lambda x, method, size, dtype:\
x.standard_exponential(method=method, size=size, dtype=dtype)
self._check_invalid_types(dist_func, ['method', 'size', 'dtype'],
['zig', (1,), np.float32], [0, ('x',), 0])
def test_standard_exponential_inv(self):
test_sizes = [None, (), (100,), (10, 20, 30)]
test_dtypes = [np.float32, np.float64]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:\
x.standard_exponential(size=size, dtype=dtype, method='inv')
for _size in test_sizes:
for _dtype in test_dtypes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _dtype=_dtype,
_bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, _dtype)
def test_standard_gamma(self):
test_sizes = [None, (), (100,), (10, 20, 30)]
test_dtypes = [np.float32, np.float64]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype: \
x.standard_gamma(shape=5.0, size=size, dtype=dtype)
for _size in test_sizes:
for _dtype in test_dtypes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _dtype=_dtype,
_bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, _dtype,
adjusted_ulp_prec)
dist_func = lambda x, shape, size, dtype:\
x.standard_gamma(shape=shape, size=size, dtype=dtype)
self._check_invalid_types(dist_func, ['shape', 'size', 'dtype'],
[5.0, (1,), np.float32], ['x', ('x',), 0])
def test_normal(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
# Test with no arguments
dist_func = lambda x, size, dtype:x.normal()
with self.subTest():
self.check_numpy_parity(dist_func, test_size=None,
test_dtype=None,
ulp_prec=adjusted_ulp_prec)
dist_func = lambda x, size, dtype:x.normal(loc=1.5, scale=3, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, loc, scale, size:\
x.normal(loc=loc, scale=scale, size=size)
self._check_invalid_types(dist_func, ['loc', 'scale', 'size'],
[1.5, 3, (1,)], ['x', 'x', ('x',)])
def test_uniform(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
# Test with no arguments
dist_func = lambda x, size, dtype:x.uniform()
with self.subTest():
self.check_numpy_parity(dist_func, test_size=None,
test_dtype=None,
ulp_prec=adjusted_ulp_prec)
dist_func = lambda x, size, dtype:x.uniform(low=1.5, high=3, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, low, high, size:\
x.uniform(low=low, high=high, size=size)
self._check_invalid_types(dist_func, ['low', 'high', 'size'],
[1.5, 3, (1,)], ['x', 'x', ('x',)])
def test_exponential(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
# Test with no arguments
dist_func = lambda x, size, dtype:x.exponential()
with self.subTest():
self.check_numpy_parity(dist_func, test_size=None,
test_dtype=None)
dist_func = lambda x, size, dtype:x.exponential(scale=1.5, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None)
dist_func = lambda x, scale, size:\
x.exponential(scale=scale, size=size)
self._check_invalid_types(dist_func, ['scale', 'size'],
[1.5, (1,)], ['x', ('x',)])
def test_gamma(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:x.gamma(shape=5.0, scale=1.5,
size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, shape, scale, size:\
x.gamma(shape=shape, scale=scale, size=size)
self._check_invalid_types(dist_func, ['shape', 'scale', 'size'],
[5.0, 1.5, (1,)], ['x', 'x', ('x',)])
def test_beta(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:x.beta(a=1.5, b=2.5, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, a, b, size:x.beta(a=a, b=b, size=size)
self._check_invalid_types(dist_func, ['a', 'b', 'size'],
[5.0, 1.5, (1,)], ['x', 'x', ('x',)])
def test_f(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:x.f(dfnum=2, dfden=3, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, dfnum, dfden, size:\
x.f(dfnum=dfnum, dfden=dfden, size=size)
self._check_invalid_types(dist_func, ['dfnum', 'dfden', 'size'],
[5, 1, (1,)], ['x', 'x', ('x',)])
def test_chisquare(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:x.chisquare(df=2, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, df, size:\
x.chisquare(df=df, size=size)
self._check_invalid_types(dist_func, ['df', 'size'],
[2, (1,)], ['x', ('x',)])
def test_standard_cauchy(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
# Test with no arguments
dist_func = lambda x, size, dtype:x.standard_cauchy()
with self.subTest():
self.check_numpy_parity(dist_func, test_size=None,
test_dtype=None)
dist_func = lambda x, size, dtype:x.standard_cauchy(size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None)
dist_func = lambda x, size:x.standard_cauchy(size=size)
self._check_invalid_types(dist_func, ['size'],
[(1,)], [('x',)])
def test_pareto(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:x.pareto(a=1.0, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None)
dist_func = lambda x, a, size:x.pareto(a=a, size=size)
self._check_invalid_types(dist_func, ['a', 'size'],
[1, (1,)], ['x', ('x',)])
def test_weibull(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:x.weibull(a=1.0, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None)
dist_func = lambda x, a, size:x.weibull(a=a, size=size)
self._check_invalid_types(dist_func, ['a', 'size'],
[1, (1,)], ['x', ('x',)])
def test_power(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:x.power(a=0.75, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None)
dist_func = lambda x, a, size:x.power(a=a, size=size)
self._check_invalid_types(dist_func, ['a', 'size'],
[0.75, (1,)], ['x', ('x',)])
def test_laplace(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
# Test with no arguments
dist_func = lambda x, size, dtype:x.laplace()
with self.subTest():
self.check_numpy_parity(dist_func, test_size=None,
test_dtype=None,
ulp_prec=adjusted_ulp_prec)
dist_func = lambda x, size, dtype:\
x.laplace(loc=1.0, scale=1.5, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, loc, scale, size:\
x.laplace(loc=loc, scale=scale, size=size)
self._check_invalid_types(dist_func, ['loc', 'scale', 'size'],
[1.0, 1.5, (1,)], ['x', 'x', ('x',)])
def test_logistic(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
# Test with no arguments
dist_func = lambda x, size, dtype:x.logistic()
with self.subTest():
self.check_numpy_parity(dist_func, test_size=None,
test_dtype=None,
ulp_prec=adjusted_ulp_prec)
dist_func = lambda x, size, dtype:\
x.logistic(loc=1.0,scale=1.5, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, loc, scale, size:\
x.logistic(loc=loc, scale=scale, size=size)
self._check_invalid_types(dist_func, ['loc', 'scale', 'size'],
[1.0, 1.5, (1,)], ['x', 'x', ('x',)])
def test_lognormal(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
# Test with no arguments
dist_func = lambda x, size, dtype:x.lognormal()
with self.subTest():
self.check_numpy_parity(dist_func, test_size=None,
test_dtype=None,
ulp_prec=adjusted_ulp_prec)
dist_func = lambda x, size, dtype:\
x.lognormal(mean=5.0, sigma=1.5, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, mean, sigma, size:\
x.lognormal(mean=mean, sigma=sigma, size=size)
self._check_invalid_types(dist_func, ['mean', 'sigma', 'size'],
[1.0, 1.5, (1,)], ['x', 'x', ('x',)])
def test_rayleigh(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
# Test with no arguments
dist_func = lambda x, size, dtype:x.rayleigh()
with self.subTest():
self.check_numpy_parity(dist_func, test_size=None,
test_dtype=None)
dist_func = lambda x, size, dtype:x.rayleigh(scale=1.5, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None)
dist_func = lambda x, scale, size:x.rayleigh(scale=scale, size=size)
self._check_invalid_types(dist_func, ['scale', 'size'],
[1.5, (1,)], ['x', ('x',)])
def test_standard_t(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:x.standard_t(df=2, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, df, size:x.standard_t(df=df, size=size)
self._check_invalid_types(dist_func, ['df', 'size'],
[2, (1,)], ['x', ('x',)])
def test_wald(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:x.wald(mean=5.0, scale=1.5, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, mean, scale, size:\
x.wald(mean=mean, scale=scale, size=size)
self._check_invalid_types(dist_func, ['mean', 'scale', 'size'],
[1.0, 1.5, (1,)], ['x', 'x', ('x',)])
def test_geometric(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:x.geometric(p=0.75, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, p, size:x.geometric(p=p, size=size)
self._check_invalid_types(dist_func, ['p', 'size'],
[0.75, (1,)], ['x', ('x',)])
def test_zipf(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:x.zipf(a=1.5, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None)
dist_func = lambda x, a, size:x.zipf(a=a, size=size)
self._check_invalid_types(dist_func, ['a', 'size'],
[1, (1,)], ['x', ('x',)])
def test_triangular(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:\
x.triangular(left=0, mode=3, right=5, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None)
dist_func = lambda x, left, mode, right, size:\
x.triangular(left=left, mode=mode, right=right, size=size)
self._check_invalid_types(dist_func, ['left', 'mode', 'right', 'size'],
[0, 3, 5, (1,)], ['x', 'x', 'x', ('x',)])
def test_poisson(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:x.poisson(lam=15, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, lam, size:x.poisson(lam=lam, size=size)
self._check_invalid_types(dist_func, ['lam', 'size'],
[15, (1,)], ['x', ('x',)])
def test_negative_binomial(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:\
x.negative_binomial(n=1, p=0.1, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, n, p, size:\
x.negative_binomial(n=n, p=p, size=size)
self._check_invalid_types(dist_func, ['n', 'p', 'size'],
[1, 0.75, (1,)], ['x', 'x', ('x',)])
# NumPy tests at:
# https://github.com/numpy/numpy/blob/95e3e7f445407e4f355b23d6a9991d8774f0eb0c/numpy/random/tests/test_generator_mt19937.py#L936
# Written in following format for semblance with existing Generator tests.
def test_shuffle(self):
test_sizes = [(10, 20, 30)]
bitgen_types = [None, MT19937]
axes = [0, 1, 2]
for _size, _bitgen, _axis in itertools.product(test_sizes,
bitgen_types,
axes):
with self.subTest(_size=_size, _bitgen=_bitgen, _axis=_axis):
def dist_func(x, size, dtype):
arr = x.random(size=size)
x.shuffle(arr, axis=_axis)
return arr
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
0)
def test_shuffle_empty(self):
a = np.array([])
b = np.array([])
def dist_func(x, arr):
x.shuffle(arr)
return arr
nb_func = numba.njit(dist_func)
rng = lambda: np.random.default_rng(1)
self.assertPreciseEqual(dist_func(rng(), a), nb_func(rng(), b))
def test_shuffle_check(self):
self.disable_leak_check()
def dist_func(x, arr, axis):
x.shuffle(arr, axis=axis)
return arr
self._check_invalid_types(dist_func, ['x', 'axis'],
[np.array([3,4,5]), 0], ['x', 'x'])
rng = np.random.default_rng(1)
with self.assertRaises(IndexError) as raises:
numba.njit(dist_func)(rng, np.array([3,4,5]), 2)
self.assertIn(
'Axis is out of bounds for the given array',
str(raises.exception)
)
# NumPy tests at:
# https://github.com/numpy/numpy/blob/95e3e7f445407e4f355b23d6a9991d8774f0eb0c/numpy/random/tests/test_generator_mt19937.py#L1030
# Written in following format for semblance with existing Generator tests.
def test_permutation(self):
test_sizes = [(10, 20, 30)]
bitgen_types = [None, MT19937]
axes = [0, 1, 2, -1, -2]
for _size, _bitgen, _axis in itertools.product(test_sizes,
bitgen_types,
axes):
with self.subTest(_size=_size, _bitgen=_bitgen, _axis=_axis):
def dist_func(x, size, dtype):
arr = x.random(size=size)
return x.permutation(arr, axis=1)
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
0)
# Test that permutation is actually done on a copy of the array
dist_func = numba.njit(lambda rng, arr: rng.permutation(arr))
rng = np.random.default_rng()
arr = rng.random(size=(10, 20))
arr_cpy = arr.copy()
dist_func(rng, arr)
self.assertPreciseEqual(arr, arr_cpy)
def test_permutation_exception(self):
self.disable_leak_check()
def dist_func(x, arr, axis):
return x.permutation(arr, axis=axis)
self._check_invalid_types(dist_func, ['x', 'axis'],
[np.array([3,4,5]), 0], ['x', 'x'])
rng = np.random.default_rng(1)
with self.assertRaises(IndexError) as raises:
numba.njit(dist_func)(rng, np.array([3,4,5]), 2)
self.assertIn(
'Axis is out of bounds for the given array',
str(raises.exception)
)
with self.assertRaises(IndexError) as raises:
numba.njit(dist_func)(rng, np.array([3,4,5]), -2)
self.assertIn(
'Axis is out of bounds for the given array',
str(raises.exception)
)
def test_permutation_empty(self):
a = np.array([])
b = np.array([])
def dist_func(x, arr):
return x.permutation(arr)
nb_func = numba.njit(dist_func)
rng = lambda: np.random.default_rng(1)
self.assertPreciseEqual(dist_func(rng(), a), nb_func(rng(), b))
def test_noncentral_chisquare(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:\
x.noncentral_chisquare(3.0, 20.0, size=size)
for _size, _bitgen in itertools.product(test_sizes, bitgen_types):
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None)
dist_func = lambda x, df, nonc, size:\
x.noncentral_chisquare(df=df, nonc=nonc, size=size)
valid_args = [3.0, 5.0, (1,)]
self._check_invalid_types(dist_func, ['df', 'nonc', 'size'],
valid_args, ['x', 'x', ('x',)])
# Test argument bounds
rng = np.random.default_rng()
valid_args = [rng] + valid_args
nb_dist_func = numba.njit(dist_func)
with self.assertRaises(ValueError) as raises:
curr_args = valid_args.copy()
# Change df to an invalid value
curr_args[1] = 0
nb_dist_func(*curr_args)
self.assertIn('df <= 0', str(raises.exception))
with self.assertRaises(ValueError) as raises:
curr_args = valid_args.copy()
# Change nonc to an invalid value
curr_args[2] = -1
nb_dist_func(*curr_args)
self.assertIn('nonc < 0', str(raises.exception))
# Exceptions leak references
self.disable_leak_check()
def test_noncentral_f(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:\
x.noncentral_f(3.0, 20.0, 3.0, size=size)
for _size, _bitgen in itertools.product(test_sizes, bitgen_types):
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, dfnum, dfden, nonc, size:\
x.noncentral_f(dfnum=dfnum, dfden=dfden, nonc=nonc, size=size)
valid_args = [3.0, 5.0, 3.0, (1,)]
self._check_invalid_types(dist_func, ['dfnum', 'dfden', 'nonc', 'size'],
valid_args, ['x', 'x', 'x', ('x',)])
# Test argument bounds
rng = np.random.default_rng()
valid_args = [rng] + valid_args
nb_dist_func = numba.njit(dist_func)
with self.assertRaises(ValueError) as raises:
curr_args = valid_args.copy()
# Change dfnum to an invalid value
curr_args[1] = 0
nb_dist_func(*curr_args)
self.assertIn('dfnum <= 0', str(raises.exception))
with self.assertRaises(ValueError) as raises:
curr_args = valid_args.copy()
# Change dfden to an invalid value
curr_args[2] = 0
nb_dist_func(*curr_args)
self.assertIn('dfden <= 0', str(raises.exception))
with self.assertRaises(ValueError) as raises:
curr_args = valid_args.copy()
# Change nonc to an invalid value
curr_args[3] = -1
nb_dist_func(*curr_args)
self.assertIn('nonc < 0', str(raises.exception))
# Exceptions leak references
self.disable_leak_check()
def test_logseries(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:\
x.logseries(0.3, size=size)
for _size, _bitgen in itertools.product(test_sizes, bitgen_types):
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None)
dist_func = lambda x, p, size:\
x.logseries(p=p, size=size)
valid_args = [0.3, (1,)]
self._check_invalid_types(dist_func, ['p', 'size'],
valid_args, ['x', ('x',)])
# Test argument bounds
rng = np.random.default_rng(1)
valid_args = [rng] + valid_args
nb_dist_func = numba.njit(dist_func)
for _p in [-0.1, 1, np.nan]:
with self.assertRaises(ValueError) as raises:
curr_args = valid_args.copy()
# Change p to an invalid negative, positive and nan value
curr_args[1] = _p
nb_dist_func(*curr_args)
self.assertIn('p < 0, p >= 1 or p is NaN', str(raises.exception))
# Exceptions leak references
self.disable_leak_check()
def test_binomial(self):
# For this test dtype argument is never used, so we pass [None] as dtype
# to make sure it runs only once with default system type.
test_sizes = [None, (), (100,), (10, 20, 30)]
bitgen_types = [None, MT19937]
dist_func = lambda x, size, dtype:\
x.binomial(n=1, p=0.1, size=size)
for _size in test_sizes:
for _bitgen in bitgen_types:
with self.subTest(_size=_size, _bitgen=_bitgen):
self.check_numpy_parity(dist_func, _bitgen,
None, _size, None,
adjusted_ulp_prec)
dist_func = lambda x, n, p, size:\
x.binomial(n=n, p=p, size=size)
self._check_invalid_types(dist_func, ['n', 'p', 'size'],
[1, 0.75, (1,)], ['x', 'x', ('x',)])
def test_binomial_cases(self):
cases = [
(1, 0.1), # p <= 0.5 && n * p <= 30
(50, 0.9), # p > 0.5 && n * p <= 30
(100, 0.4), # p <= 0.5 && n * p > 30
(100, 0.9) # p > 0.5 && n * p > 30
]
size = None
for n, p in cases:
with self.subTest(n=n, p=p):
dist_func = lambda x, size, dtype:\
x.binomial(n, p, size=size)
self.check_numpy_parity(dist_func, None,
None, size, None, 0)
def test_binomial_specific_issues(self):
# The algorithm for "binomial" is quite involved. This test contains
# subtests for specific issues reported on the issue tracker.
# testing specific bugs found in binomial.
with self.subTest("infinite loop issue #9493"):
# This specific generator state caused a "hang" as noted in #9493
gen1 = np.random.default_rng(0)
gen2 = np.random.default_rng(0)
@numba.jit
def foo(gen):
return gen.binomial(700, 0.1, 100)
got = foo(gen1)
expected = foo.py_func(gen2)
self.assertPreciseEqual(got, expected)
with self.subTest("issue with midrange value branch #9493/#9734"):
# The use of 301 is specific to trigger use of random_binomial_btpe
# with an input state that caused incorrect values to be computed.
gen1 = np.random.default_rng(0)
gen2 = np.random.default_rng(0)
@numba.jit
def foo(gen):
return gen.binomial(301, 0.1, 100)
got = foo(gen1)
expected = foo.py_func(gen2)
self.assertPreciseEqual(got, expected)
| TestRandomGenerators |
python | kamyu104__LeetCode-Solutions | Python/analyze-user-website-visit-pattern.py | {
"start": 71,
"end": 666
} | class ____(object):
def mostVisitedPattern(self, username, timestamp, website):
"""
:type username: List[str]
:type timestamp: List[int]
:type website: List[str]
:rtype: List[str]
"""
lookup = collections.defaultdict(list)
A = zip(timestamp, username, website)
A.sort()
for t, u, w in A:
lookup[u].append(w)
count = sum([collections.Counter(set(itertools.combinations(lookup[u], 3))) for u in lookup], collections.Counter())
return list(min(count, key=lambda x: (-count[x], x)))
| Solution |
python | indygreg__python-build-standalone | cpython-windows/build.py | {
"start": 6579,
"end": 66041
} | class ____(Exception):
"""Represents a missing search string when replacing content in a file."""
def static_replace_in_file(p: pathlib.Path, search, replace):
"""Replace occurrences of a string in a file.
The updated file contents are written out in place.
"""
with p.open("rb") as fh:
data = fh.read()
# Build should be as deterministic as possible. Assert that wanted changes
# actually occur.
if search not in data:
raise NoSearchStringError("search string (%s) not in %s" % (search, p))
log("replacing `%s` with `%s` in %s" % (search, replace, p))
data = data.replace(search, replace)
with p.open("wb") as fh:
fh.write(data)
OPENSSL_PROPS_REMOVE_RULES_LEGACY = b"""
<ItemGroup>
<_SSLDLL Include="$(opensslOutDir)\libcrypto$(_DLLSuffix).dll" />
<_SSLDLL Include="$(opensslOutDir)\libcrypto$(_DLLSuffix).pdb" />
<_SSLDLL Include="$(opensslOutDir)\libssl$(_DLLSuffix).dll" />
<_SSLDLL Include="$(opensslOutDir)\libssl$(_DLLSuffix).pdb" />
</ItemGroup>
<Target Name="_CopySSLDLL" Inputs="@(_SSLDLL)" Outputs="@(_SSLDLL->'$(OutDir)%(Filename)%(Extension)')" AfterTargets="Build">
<Copy SourceFiles="@(_SSLDLL)" DestinationFolder="$(OutDir)" />
</Target>
<Target Name="_CleanSSLDLL" BeforeTargets="Clean">
<Delete Files="@(_SSLDLL->'$(OutDir)%(Filename)%(Extension)')" TreatErrorsAsWarnings="true" />
</Target>
"""
OPENSSL_PROPS_REMOVE_RULES = b"""
<ItemGroup>
<_SSLDLL Include="$(opensslOutDir)\libcrypto$(_DLLSuffix).dll" />
<_SSLDLL Include="$(opensslOutDir)\libcrypto$(_DLLSuffix).pdb" />
<_SSLDLL Include="$(opensslOutDir)\libssl$(_DLLSuffix).dll" />
<_SSLDLL Include="$(opensslOutDir)\libssl$(_DLLSuffix).pdb" />
</ItemGroup>
<Target Name="_CopySSLDLL"
Inputs="@(_SSLDLL)"
Outputs="@(_SSLDLL->'$(OutDir)%(Filename)%(Extension)')"
Condition="$(SkipCopySSLDLL) == ''"
AfterTargets="Build">
<Copy SourceFiles="@(_SSLDLL)" DestinationFolder="$(OutDir)" />
</Target>
<Target Name="_CleanSSLDLL" Condition="$(SkipCopySSLDLL) == ''" BeforeTargets="Clean">
<Delete Files="@(_SSLDLL->'$(OutDir)%(Filename)%(Extension)')" TreatErrorsAsWarnings="true" />
</Target>
"""
LIBFFI_PROPS_REMOVE_RULES = b"""
<Target Name="_CopyLIBFFIDLL" Inputs="@(_LIBFFIDLL)" Outputs="@(_LIBFFIDLL->'$(OutDir)%(Filename)%(Extension)')" AfterTargets="Build">
<Copy SourceFiles="@(_LIBFFIDLL)" DestinationFolder="$(OutDir)" />
</Target>
"""
def hack_props(
td: pathlib.Path,
pcbuild_path: pathlib.Path,
arch: str,
python_version: str,
zlib_entry: str,
):
# TODO can we pass props into msbuild.exe?
# Our dependencies are in different directories from what CPython's
# build system expects. Modify the config file appropriately.
bzip2_version = DOWNLOADS["bzip2"]["version"]
sqlite_version = DOWNLOADS["sqlite"]["version"]
xz_version = DOWNLOADS["xz"]["version"]
zlib_version = DOWNLOADS[zlib_entry]["version"]
zstd_version = DOWNLOADS["zstd"]["version"]
mpdecimal_version = DOWNLOADS["mpdecimal"]["version"]
if meets_python_minimum_version(python_version, "3.14") or arch == "arm64":
tcltk_commit = DOWNLOADS["tk-windows-bin"]["git_commit"]
else:
tcltk_commit = DOWNLOADS["tk-windows-bin-8612"]["git_commit"]
sqlite_path = td / ("sqlite-autoconf-%s" % sqlite_version)
bzip2_path = td / ("bzip2-%s" % bzip2_version)
libffi_path = td / "libffi"
tcltk_path = td / ("cpython-bin-deps-%s" % tcltk_commit)
xz_path = td / ("xz-%s" % xz_version)
zlib_prefix = "cpython-source-deps-" if zlib_entry == "zlib-ng" else ""
zlib_path = td / ("%s%s-%s" % (zlib_prefix, zlib_entry, zlib_version))
zstd_path = td / ("cpython-source-deps-zstd-%s" % zstd_version)
mpdecimal_path = td / ("mpdecimal-%s" % mpdecimal_version)
openssl_root = td / "openssl" / arch
openssl_libs_path = openssl_root / "lib"
openssl_include_path = openssl_root / "include"
python_props_path = pcbuild_path / "python.props"
lines = []
with python_props_path.open("rb") as fh:
for line in fh:
line = line.rstrip()
# The syntax of these lines changed in 3.10+. 3.10 backport commit
# 3139ea33ed84190e079d6ff4859baccdad778dae. Once we drop support for
# Python 3.9 we can pass these via properties instead of editing the
# properties file.
if b"<bz2Dir" in line:
line = b"<bz2Dir>%s\\</bz2Dir>" % bzip2_path
elif b"<libffiOutDir" in line:
line = b"<libffiOutDir>%s\\</libffiOutDir>" % libffi_path
elif b"<lzmaDir" in line:
line = b"<lzmaDir>%s\\</lzmaDir>" % xz_path
elif b"<opensslIncludeDir" in line:
line = (
b"<opensslIncludeDir>%s</opensslIncludeDir>" % openssl_include_path
)
elif b"<opensslOutDir" in line:
line = b"<opensslOutDir>%s\\</opensslOutDir>" % openssl_libs_path
elif b"<sqlite3Dir" in line:
line = b"<sqlite3Dir>%s\\</sqlite3Dir>" % sqlite_path
elif b"<zlibDir" in line:
line = b"<zlibDir>%s\\</zlibDir>" % zlib_path
# On 3.14+, it's zlib-ng and the name changed
elif b"<zlibNgDir" in line:
line = b"<zlibNgDir>%s\\</zlibNgDir>" % zlib_path
elif b"<zstdDir" in line:
line = b"<zstdDir>%s\\</zstdDir>" % zstd_path
elif b"<mpdecimalDir" in line:
line = b"<mpdecimalDir>%s\\</mpdecimalDir>" % mpdecimal_path
lines.append(line)
with python_props_path.open("wb") as fh:
fh.write(b"\n".join(lines))
tcltkprops_path = pcbuild_path / "tcltk.props"
# Later versions of 3.10 and 3.11 enabled support for defining paths via properties.
# See CPython commit 3139ea33ed84190e079d6ff4859baccdad778dae.
# Once we drop support for CPython 3.9 we can replace this with passing properties.
try:
static_replace_in_file(
tcltkprops_path,
rb"""<tcltkDir Condition="$(tcltkDir) == ''">$(ExternalsDir)tcltk-$(TclVersion)\$(ArchName)\</tcltkDir>""",
rb"<tcltkDir>%s\$(ArchName)\</tcltkDir>" % tcltk_path,
)
except NoSearchStringError:
static_replace_in_file(
tcltkprops_path,
rb"<tcltkDir>$(ExternalsDir)tcltk-$(TclMajorVersion).$(TclMinorVersion).$(TclPatchLevel).$(TclRevision)\$(ArchName)\</tcltkDir>",
rb"<tcltkDir>%s\$(ArchName)\</tcltkDir>" % tcltk_path,
)
# We want to statically link against OpenSSL. This requires using our own
# OpenSSL build. This requires some hacking of various files.
openssl_props = pcbuild_path / "openssl.props"
if arch == "amd64":
suffix = b"-x64"
elif arch == "win32":
suffix = b""
elif arch == "arm64":
suffix = b""
else:
raise Exception("unhandled architecture: %s" % arch)
try:
# CPython 3.11+ builds with OpenSSL 3.x by default.
static_replace_in_file(
openssl_props,
b"<_DLLSuffix>-3</_DLLSuffix>",
b"<_DLLSuffix>-3%s</_DLLSuffix>" % suffix,
)
except NoSearchStringError:
static_replace_in_file(
openssl_props,
b"<_DLLSuffix>-1_1</_DLLSuffix>",
b"<_DLLSuffix>-1_1%s</_DLLSuffix>" % suffix,
)
libffi_props = pcbuild_path / "libffi.props"
# Always use libffi-8 / 3.4.2. (Python < 3.11 use libffi-7 by default.)
try:
static_replace_in_file(
libffi_props,
rb"""<_LIBFFIDLL Include="$(libffiOutDir)\libffi-7.dll" />""",
rb"""<_LIBFFIDLL Include="$(libffiOutDir)\libffi-8.dll" />""",
)
static_replace_in_file(
libffi_props,
rb"<AdditionalDependencies>libffi-7.lib;%(AdditionalDependencies)</AdditionalDependencies>",
rb"<AdditionalDependencies>libffi-8.lib;%(AdditionalDependencies)</AdditionalDependencies>",
)
except NoSearchStringError:
pass
def hack_project_files(
td: pathlib.Path,
cpython_source_path: pathlib.Path,
build_directory: str,
python_version: str,
zlib_entry: str,
arch: str,
):
"""Hacks Visual Studio project files to work with our build."""
pcbuild_path = cpython_source_path / "PCbuild"
hack_props(
td,
pcbuild_path,
build_directory,
python_version,
zlib_entry,
)
# `--include-tcltk` is forced off on arm64, undo that
# See https://github.com/python/cpython/pull/132650
try:
static_replace_in_file(
cpython_source_path / "PC" / "layout" / "main.py",
rb'if ns.arch in ("arm32", "arm64"):',
rb'if ns.arch == "arm32":',
)
except NoSearchStringError:
pass
# Our SQLite directory is named weirdly. This throws off version detection
# in the project file. Replace the parsing logic with a static string.
sqlite3_version = DOWNLOADS["sqlite"]["actual_version"].encode("ascii")
sqlite3_version_parts = sqlite3_version.split(b".")
sqlite3_path = pcbuild_path / "sqlite3.vcxproj"
static_replace_in_file(
sqlite3_path,
rb"<_SqliteVersion>$([System.Text.RegularExpressions.Regex]::Match(`$(sqlite3Dir)`, `((\d+)\.(\d+)\.(\d+)\.(\d+))\\?$`).Groups)</_SqliteVersion>",
rb"<_SqliteVersion>%s</_SqliteVersion>" % sqlite3_version,
)
static_replace_in_file(
sqlite3_path,
rb"<SqliteVersion>$(_SqliteVersion.Split(`;`)[1])</SqliteVersion>",
rb"<SqliteVersion>%s</SqliteVersion>" % sqlite3_version,
)
static_replace_in_file(
sqlite3_path,
rb"<SqliteMajorVersion>$(_SqliteVersion.Split(`;`)[2])</SqliteMajorVersion>",
rb"<SqliteMajorVersion>%s</SqliteMajorVersion>" % sqlite3_version_parts[0],
)
static_replace_in_file(
sqlite3_path,
rb"<SqliteMinorVersion>$(_SqliteVersion.Split(`;`)[3])</SqliteMinorVersion>",
rb"<SqliteMinorVersion>%s</SqliteMinorVersion>" % sqlite3_version_parts[1],
)
static_replace_in_file(
sqlite3_path,
rb"<SqliteMicroVersion>$(_SqliteVersion.Split(`;`)[4])</SqliteMicroVersion>",
rb"<SqliteMicroVersion>%s</SqliteMicroVersion>" % sqlite3_version_parts[2],
)
static_replace_in_file(
sqlite3_path,
rb"<SqlitePatchVersion>$(_SqliteVersion.Split(`;`)[5])</SqlitePatchVersion>",
rb"<SqlitePatchVersion>%s</SqlitePatchVersion>" % sqlite3_version_parts[3],
)
# Please try to keep these in sync with cpython-unix/build-sqlite.sh
sqlite_build_flags = {
b"SQLITE_ENABLE_DBSTAT_VTAB",
b"SQLITE_ENABLE_FTS3",
b"SQLITE_ENABLE_FTS3_PARENTHESIS",
b"SQLITE_ENABLE_FTS4",
b"SQLITE_ENABLE_FTS5",
b"SQLITE_ENABLE_GEOPOLY",
b"SQLITE_ENABLE_RTREE",
}
with sqlite3_path.open("rb") as fh:
data = fh.read()
sqlite_preprocessor_regex = (
rb"<PreprocessorDefinitions>(SQLITE_ENABLE.*)</PreprocessorDefinitions>"
)
m = re.search(sqlite_preprocessor_regex, data)
if m is None:
raise NoSearchStringError(
"search string (%s) not in %s" % (sqlite_preprocessor_regex, sqlite3_path)
)
current_flags = set(m.group(1).split(b";"))
data = (
data[: m.start(1)]
+ b";".join(sqlite_build_flags - current_flags)
+ b";"
+ data[m.start(1) :]
)
with sqlite3_path.open("wb") as fh:
fh.write(data)
# Our version of the xz sources may be newer than what's in cpython-source-deps.
# The source files and locations may have changed. Hack the project file
# accordingly.
#
# CPython updates xz occasionally. When these changes make it into a release
# these modification to the project file are not needed.
# The most recent change was an update to version 5.8.1:
# https://github.com/python/cpython/pull/141022
try:
liblzma_path = pcbuild_path / "liblzma.vcxproj"
static_replace_in_file(
liblzma_path,
rb"$(lzmaDir)windows/vs2019;$(lzmaDir)src/liblzma/common;",
rb"$(lzmaDir)windows;$(lzmaDir)src/liblzma/common;",
)
static_replace_in_file(
liblzma_path,
b'<ClCompile Include="$(lzmaDir)src\\liblzma\\check\\crc32_fast.c" />\r\n <ClCompile Include="$(lzmaDir)src\\liblzma\\check\\crc32_table.c" />\r\n',
b'<ClCompile Include="$(lzmaDir)src\\liblzma\\check\\crc32_fast.c" />\r\n ',
)
static_replace_in_file(
liblzma_path,
b'<ClCompile Include="$(lzmaDir)src\\liblzma\\check\\crc64_fast.c" />\r\n <ClCompile Include="$(lzmaDir)src\\liblzma\\check\\crc64_table.c" />\r\n',
b'<ClCompile Include="$(lzmaDir)src\\liblzma\\check\\crc64_fast.c" />\r\n ',
)
static_replace_in_file(
liblzma_path,
b'<ClCompile Include="$(lzmaDir)src\\liblzma\\simple\\arm.c" />',
b'<ClCompile Include="$(lzmaDir)src\\liblzma\\simple\\arm.c" />\r\n <ClCompile Include="$(lzmaDir)src\\liblzma\\simple\\arm64.c" />',
)
static_replace_in_file(
liblzma_path,
rb'<ClInclude Include="$(lzmaDir)windows\vs2019\config.h" />',
rb'<ClInclude Include="$(lzmaDir)windows\config.h" />',
)
except NoSearchStringError:
pass
# Our logic for rewriting extension projects gets confused by _sqlite.vcxproj not
# having a `<PreprocessorDefinitions>` line in 3.10+. So adjust that.
try:
static_replace_in_file(
pcbuild_path / "_sqlite3.vcxproj",
rb"<AdditionalIncludeDirectories>$(sqlite3Dir);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>",
b"<AdditionalIncludeDirectories>$(sqlite3Dir);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r\n <PreprocessorDefinitions>%(PreprocessorDefinitions)</PreprocessorDefinitions>",
)
except NoSearchStringError:
pass
# Our custom OpenSSL build has applink.c in a different location from the
# binary OpenSSL distribution. This is no longer relevant for 3.12+ per
# https://github.com/python/cpython/pull/131839, so we allow it to fail.swe
try:
ssl_proj = pcbuild_path / "_ssl.vcxproj"
static_replace_in_file(
ssl_proj,
rb'<ClCompile Include="$(opensslIncludeDir)\applink.c">',
rb'<ClCompile Include="$(opensslIncludeDir)\openssl\applink.c">',
)
except NoSearchStringError:
pass
# Python 3.12+ uses the the pre-built tk-windows-bin 8.6.12 which doesn't
# have a standalone zlib DLL, so we remove references to it. For Python
# 3.14+, we're using tk-windows-bin 8.6.14 which includes a prebuilt zlib
# DLL, so we skip this patch there.
# On arm64, we use the new version of tk-windows-bin for all versions.
if meets_python_minimum_version(python_version, "3.12") and (
meets_python_maximum_version(python_version, "3.13") or arch == "arm64"
):
try:
static_replace_in_file(
pcbuild_path / "_tkinter.vcxproj",
rb'<_TclTkDLL Include="$(tcltkdir)\bin\$(tclZlibDllName)" />',
rb"",
)
except NoSearchStringError:
pass
# We don't need to produce python_uwp.exe and its *w variant. Or the
# python3.dll, pyshellext, or pylauncher.
# Cut them from the build to save time and so their presence doesn't
# interfere with packaging up the build artifacts.
pcbuild_proj = pcbuild_path / "pcbuild.proj"
static_replace_in_file(
pcbuild_proj,
b'<Projects2 Include="python_uwp.vcxproj;pythonw_uwp.vcxproj" Condition="$(IncludeUwp)" />',
b"",
)
static_replace_in_file(
pcbuild_proj,
b'<Projects Include="pylauncher.vcxproj;pywlauncher.vcxproj" />',
b"",
)
static_replace_in_file(
pcbuild_proj, b'<Projects Include="pyshellext.vcxproj" />', b""
)
# Ditto for freeze_importlib, which isn't needed since we don't modify
# the frozen importlib baked into the source distribution (
# Python/importlib.h and Python/importlib_external.h).
#
# But Python 3.11 refactored the frozen module project handling and if
# we attempt to disable this project there we get a build failure due to
# a missing /Python/frozen_modules/getpath.h file. So we skip this on
# newer Python.
try:
static_replace_in_file(
pcbuild_proj,
b"""<Projects2 Condition="$(Platform) != 'ARM' and $(Platform) != 'ARM64'" Include="_freeze_importlib.vcxproj" />""",
b"",
)
except NoSearchStringError:
pass
def run_msbuild(
msbuild: pathlib.Path,
pcbuild_path: pathlib.Path,
configuration: str,
platform: str,
python_version: str,
windows_sdk_version: str,
freethreaded: bool,
):
args = [
str(msbuild),
str(pcbuild_path / "pcbuild.proj"),
"/target:Build",
"/property:Configuration=%s" % configuration,
"/property:Platform=%s" % platform,
"/maxcpucount",
"/nologo",
"/verbosity:normal",
"/property:IncludeExternals=true",
"/property:IncludeSSL=true",
"/property:IncludeTkinter=true",
"/property:IncludeTests=true",
"/property:OverrideVersion=%s" % python_version,
"/property:IncludeCTypes=true",
# We pin the Windows 10 SDK version to make builds more deterministic.
# This can also work around known incompatibilities with the Windows 11
# SDK as of at least CPython 3.9.7.
f"/property:DefaultWindowsSDKVersion={windows_sdk_version}",
]
if freethreaded:
args.append("/property:DisableGil=true")
exec_and_log(args, str(pcbuild_path), os.environ)
def build_openssl_for_arch(
perl_path,
arch: str,
openssl_archive,
openssl_version: str,
nasm_archive,
build_root: pathlib.Path,
*,
jom_archive,
):
nasm_version = DOWNLOADS["nasm-windows-bin"]["version"]
log("extracting %s to %s" % (openssl_archive, build_root))
extract_tar_to_directory(openssl_archive, build_root)
log("extracting %s to %s" % (nasm_archive, build_root))
extract_zip_to_directory(nasm_archive, build_root)
log("extracting %s to %s" % (jom_archive, build_root))
extract_zip_to_directory(jom_archive, build_root / "jom")
nasm_path = build_root / ("nasm-%s" % nasm_version)
jom_path = build_root / "jom"
env = dict(os.environ)
# Add Perl and nasm paths to front of PATH.
env["PATH"] = "%s;%s;%s;%s" % (perl_path.parent, nasm_path, jom_path, env["PATH"])
source_root = build_root / ("openssl-%s" % openssl_version)
# uplink.c tries to find the OPENSSL_Applink function exported from the current
# executable. However, it is exported from _ssl[_d].pyd in shared builds. So
# update its sounce to look for it from there.
static_replace_in_file(
source_root / "ms" / "uplink.c",
b"((h = GetModuleHandle(NULL)) == NULL)",
b'((h = GetModuleHandleA("_ssl.pyd")) == NULL) if ((h = GetModuleHandleA("_ssl_d.pyd")) == NULL) if ((h = GetModuleHandle(NULL)) == NULL)',
)
if arch == "x86":
configure = "VC-WIN32"
prefix = "32"
elif arch == "amd64":
configure = "VC-WIN64A"
prefix = "64"
elif arch == "arm64":
configure = "VC-WIN64-ARM"
prefix = "arm64"
else:
raise Exception("unhandled architecture: %s" % arch)
# The official CPython OpenSSL builds hack ms/uplink.c to change the
# ``GetModuleHandle(NULL)`` invocation to load things from _ssl.pyd
# instead. But since we statically link the _ssl extension, this hackery
# is not required.
# Set DESTDIR to affect install location.
dest_dir = build_root / "install"
env["DESTDIR"] = str(dest_dir)
install_root = dest_dir / prefix
exec_and_log(
[
str(perl_path),
"Configure",
configure,
"no-idea",
"no-mdc2",
"no-tests",
"--prefix=/%s" % prefix,
],
source_root,
{
**env,
"CFLAGS": env.get("CFLAGS", "") + " /FS",
},
)
# exec_and_log(["nmake"], source_root, env)
exec_and_log(
[str(jom_path / "jom"), "/J", str(multiprocessing.cpu_count())],
source_root,
env,
)
# We don't care about accessory files, docs, etc. So just run `install_sw`
# target to get the main files.
exec_and_log(["nmake", "install_sw"], source_root, env)
# Copy the _static libraries as well.
for l in ("crypto", "ssl"):
basename = "lib%s_static.lib" % l
source = source_root / basename
dest = install_root / "lib" / basename
log("copying %s to %s" % (source, dest))
shutil.copyfile(source, dest)
# Copy `applink.c` to the include directory.
source_applink = source_root / "ms" / "applink.c"
dest_applink = install_root / "include" / "openssl" / "applink.c"
log("copying %s to %s" % (source_applink, dest_applink))
shutil.copyfile(source_applink, dest_applink)
def build_openssl(
entry: str,
perl_path: pathlib.Path,
arch: str,
dest_archive: pathlib.Path,
):
"""Build OpenSSL from sources using the Perl executable specified."""
openssl_version = DOWNLOADS[entry]["version"]
# First ensure the dependencies are in place.
openssl_archive = download_entry(entry, BUILD)
nasm_archive = download_entry("nasm-windows-bin", BUILD)
jom_archive = download_entry("jom-windows-bin", BUILD)
with tempfile.TemporaryDirectory(prefix="openssl-build-") as td:
td = pathlib.Path(td)
root_32 = td / "x86"
root_64 = td / "x64"
root_arm64 = td / "arm64"
if arch == "x86":
root_32.mkdir()
build_openssl_for_arch(
perl_path,
"x86",
openssl_archive,
openssl_version,
nasm_archive,
root_32,
jom_archive=jom_archive,
)
elif arch == "amd64":
root_64.mkdir()
build_openssl_for_arch(
perl_path,
"amd64",
openssl_archive,
openssl_version,
nasm_archive,
root_64,
jom_archive=jom_archive,
)
elif arch == "arm64":
root_arm64.mkdir()
build_openssl_for_arch(
perl_path,
"arm64",
openssl_archive,
openssl_version,
nasm_archive,
root_arm64,
jom_archive=jom_archive,
)
else:
raise Exception("unhandled architecture: %s" % arch)
install = td / "out"
if arch == "x86":
shutil.copytree(root_32 / "install" / "32", install / "openssl" / "win32")
elif arch == "arm64":
shutil.copytree(
root_arm64 / "install" / "arm64", install / "openssl" / "arm64"
)
else:
shutil.copytree(root_64 / "install" / "64", install / "openssl" / "amd64")
with dest_archive.open("wb") as fh:
create_tar_from_directory(fh, install)
def build_libffi(
python: str,
arch: str,
sh_exe: pathlib.Path,
msvc_version: str,
dest_archive: pathlib.Path,
):
with tempfile.TemporaryDirectory(prefix="libffi-build-") as td:
td = pathlib.Path(td)
ffi_source_path = td / "libffi"
# As of April 15, 2020, the libffi source release on GitHub doesn't
# have patches that we need to build. https://bugs.python.org/issue40293
# tracks getting a proper release. Until then, git clone the repo.
subprocess.run(
[
"git.exe",
"-c",
"core.autocrlf=input",
"clone",
"--single-branch",
"--branch",
"libffi",
"https://github.com/python/cpython-source-deps.git",
str(ffi_source_path),
],
check=True,
)
subprocess.run(
[
"git.exe",
"-c",
"core.autocrlf=input",
"checkout",
"16fad4855b3d8c03b5910e405ff3a04395b39a98",
],
cwd=ffi_source_path,
check=True,
)
# We build libffi by running the build script that CPython ships.
python_archive = download_entry(python, BUILD)
extract_tar_to_directory(python_archive, td)
python_entry = DOWNLOADS[python]
prepare_libffi = (
td
/ ("Python-%s" % python_entry["version"])
/ "PCbuild"
/ "prepare_libffi.bat"
)
env = dict(os.environ)
env["LIBFFI_SOURCE"] = str(ffi_source_path)
env["VCVARSALL"] = str(find_vcvarsall_path(msvc_version))
env["SH"] = str(sh_exe)
args = [str(prepare_libffi), "-pdb"]
if arch == "x86":
args.append("-x86")
artifacts_path = ffi_source_path / "i686-pc-cygwin"
elif arch == "arm64":
args.append("-arm64")
artifacts_path = ffi_source_path / "aarch64-w64-cygwin"
elif arch == "amd64":
args.append("-x64")
artifacts_path = ffi_source_path / "x86_64-w64-cygwin"
else:
raise Exception("unhandled architecture: %s" % arch)
subprocess.run(args, env=env, check=True)
out_dir = td / "out" / "libffi"
out_dir.mkdir(parents=True)
for f in os.listdir(artifacts_path / ".libs"):
if f.endswith((".lib", ".exp", ".dll", ".pdb")):
shutil.copyfile(artifacts_path / ".libs" / f, out_dir / f)
shutil.copytree(artifacts_path / "include", out_dir / "include")
shutil.copyfile(
artifacts_path / "fficonfig.h", out_dir / "include" / "fficonfig.h"
)
with dest_archive.open("wb") as fh:
create_tar_from_directory(fh, td / "out")
RE_ADDITIONAL_DEPENDENCIES = re.compile(
"<AdditionalDependencies>([^<]+)</AdditionalDependencies>"
)
def collect_python_build_artifacts(
    pcbuild_path: pathlib.Path,
    out_dir: pathlib.Path,
    python_majmin: str,
    arch: str,
    config: str,
    openssl_entry: str,
    zlib_entry: str,
    freethreaded: bool,
):
    """Collect build artifacts from Python.

    Copies them into an output directory and returns a data structure describing
    the files.

    Args:
        pcbuild_path: CPython's ``PCbuild`` directory.
        out_dir: destination directory artifacts are copied under
            (``build/core``, ``build/extensions/<ext>``, ``build/lib``).
        python_majmin: major+minor version digits, e.g. ``"313"``.
        arch: MSBuild output directory name (``win32``/``amd64``/``arm64``).
        config: build configuration used (e.g. ``Release``/``PGUpdate``).
        openssl_entry: DOWNLOADS key for the OpenSSL variant that was used.
        zlib_entry: DOWNLOADS key for the zlib variant (``zlib``/``zlib-ng``).
        freethreaded: whether this is a free-threaded build (changes the ABI
            tag and the import library suffix).

    Exits the process via ``sys.exit(1)`` when expected build directories are
    missing or unknown project directories are encountered.
    """
    outputs_path = pcbuild_path / arch
    intermediates_path = (
        pcbuild_path / "obj" / ("%s%s_%s" % (python_majmin, arch, config))
    )

    if not outputs_path.exists():
        log("%s does not exist" % outputs_path)
        sys.exit(1)

    if not intermediates_path.exists():
        log("%s does not exist" % intermediates_path)
        sys.exit(1)

    # Things we want to collect:
    # 1. object files that contribute to libpython
    # 2. libraries for dependencies

    # The build throws everything in the same directory hierarchy, so we can't
    # easily filter by path to identify e.g. core versus extensions. We rely on
    # tagging projects instead. We validate that all directories are known to
    # us.

    # Projects that aren't relevant to us.
    ignore_projects = {
        # We don't care about build artifacts for the python executable.
        "python",
        "pythonw",
        # Used to bootstrap interpreter.
        "_freeze_module",
        # Don't care about venvlauncher executable.
        "venvlauncher",
        "venvwlauncher",
        # Test extensions.
        "_ctypes_test",
        "_testbuffer",
        "_testcapi",
        "_testclinic_limited",
        "_testclinic",
        "_testconsole",
        "_testembed",
        "_testimportmultiple",
        "_testinternalcapi",
        "_testlimitedcapi",
        "_testmultiphase",
        "_testsinglephase",
        "xxlimited_35",
        "xxlimited",
    }

    other_projects = {"pythoncore"}
    other_projects.add("python3dll")

    # Projects providing dependencies.
    depends_projects = set()

    # Projects that provide extensions.
    extension_projects = set()

    dirs = {p for p in os.listdir(intermediates_path)}

    for extension, entry in CONVERT_TO_BUILTIN_EXTENSIONS.items():
        if extension not in dirs:
            if entry.get("ignore_missing"):
                continue
            else:
                log("extension not present: %s" % extension)
                sys.exit(1)

        extension_projects.add(extension)

    depends_projects |= {
        "liblzma",
        "sqlite3",
    }
    if zlib_entry == "zlib-ng":
        depends_projects |= {"zlib-ng"}

    known_projects = (
        ignore_projects | other_projects | depends_projects | extension_projects
    )

    unknown = dirs - known_projects
    if unknown:
        log(
            "encountered build directory for unknown projects: %s"
            % ", ".join(sorted(unknown))
        )
        sys.exit(1)

    res = {"core": {"objs": []}, "extensions": {}}

    res["object_file_format"] = "coff"

    def process_project(project: str, dest_dir: pathlib.Path):
        """Copy a project's .obj files into dest_dir, yielding each filename."""
        for f in sorted(os.listdir(intermediates_path / project)):
            p = intermediates_path / project / f
            dest = dest_dir / p.name

            if p.suffix == ".obj":
                log("copying object file %s to %s" % (p, dest_dir))
                shutil.copyfile(p, dest)
                yield f

    def find_additional_dependencies(project: str):
        """Return the link libraries declared by a project's .vcxproj.

        Only the first <AdditionalDependencies> element found is used; an
        empty set is returned when the project declares none.
        """
        vcproj = pcbuild_path / ("%s.vcxproj" % project)
        with vcproj.open("r", encoding="utf8") as fh:
            for line in fh:
                m = RE_ADDITIONAL_DEPENDENCIES.search(line)

                if not m:
                    continue

                depends = set(m.group(1).split(";"))
                # Drop the MSBuild placeholder token; it is not a library.
                depends.discard("%(AdditionalDependencies)")

                return depends

        return set()

    if arch == "amd64":
        abi_platform = "win_amd64"
    elif arch == "win32":
        abi_platform = "win32"
    elif arch == "arm64":
        abi_platform = "win_arm64"
    else:
        raise Exception("unhandled architecture: %s" % arch)

    if freethreaded:
        # Free-threaded builds tag extension/lib filenames, e.g. ".cp313t-win_amd64".
        abi_tag = ".cp%st-%s" % (python_majmin, abi_platform)
        lib_suffix = "t"
    else:
        abi_tag = ""
        lib_suffix = ""

    # Copy object files for core sources into their own directory.
    core_dir = out_dir / "build" / "core"
    core_dir.mkdir(parents=True)

    for obj in process_project("pythoncore", core_dir):
        res["core"]["objs"].append("build/core/%s" % obj)

    # Copy config.c into output directory, next to its object file.
    shutil.copyfile(
        pcbuild_path / ".." / "PC" / "config.c", out_dir / "build" / "core" / "config.c"
    )

    assert "build/core/config.obj" in res["core"]["objs"]
    res["inittab_object"] = "build/core/config.obj"
    res["inittab_source"] = "build/core/config.c"
    res["inittab_cflags"] = ["-DNDEBUG", "-DPy_BUILD_CORE"]

    exts = ("lib", "exp")

    for ext in exts:
        source = outputs_path / ("python%s%s.%s" % (python_majmin, lib_suffix, ext))
        dest = core_dir / ("python%s%s.%s" % (python_majmin, lib_suffix, ext))
        log("copying %s" % source)
        shutil.copyfile(source, dest)

    res["core"]["shared_lib"] = "install/python%s%s.dll" % (python_majmin, lib_suffix)

    # We hack up pythoncore.vcxproj and the list in it when this function
    # runs isn't totally accurate. We hardcode the list from the CPython
    # distribution.
    # TODO pull from unaltered file
    res["core"]["links"] = [
        {"name": "version", "system": True},
        {"name": "ws2_32", "system": True},
        # In addition to the ones explicitly in the project, there are some
        # implicit link libraries not present. We list those as well.
        {"name": "Ole32", "system": True},
        {"name": "OleAut32", "system": True},
        {"name": "User32", "system": True},
        # Presence of pathcch drops support for Windows 7.
        {"name": "pathcch", "system": True},
    ]

    # Copy files for extensions into their own directories.
    for ext in sorted(extension_projects):
        dest_dir = out_dir / "build" / "extensions" / ext
        dest_dir.mkdir(parents=True)

        additional_depends = find_additional_dependencies(ext)
        additional_depends -= CONVERT_TO_BUILTIN_EXTENSIONS.get(ext, {}).get(
            "ignore_additional_depends", set()
        )

        entry = {
            "in_core": False,
            "objs": [],
            "init_fn": "PyInit_%s" % ext,
            "shared_lib": None,
            "static_lib": None,
            # n[:-4] strips the ".lib" suffix to get the bare library name.
            "links": [
                {"name": n[:-4], "system": True} for n in sorted(additional_depends)
            ],
            "variant": "default",
        }

        for obj in process_project(ext, dest_dir):
            entry["objs"].append("build/extensions/%s/%s" % (ext, obj))

        for lib in CONVERT_TO_BUILTIN_EXTENSIONS.get(ext, {}).get("shared_depends", []):
            entry["links"].append(
                {"name": lib, "path_dynamic": "install/DLLs/%s.dll" % lib}
            )

        for lib in CONVERT_TO_BUILTIN_EXTENSIONS.get(ext, {}).get(
            "shared_depends_%s" % arch, []
        ):
            entry["links"].append(
                {"name": lib, "path_dynamic": "install/DLLs/%s.dll" % lib}
            )

        if ext in EXTENSION_TO_LIBRARY_DOWNLOADS_ENTRY:
            licenses = set()
            license_paths = set()
            license_public_domain = False

            for name in EXTENSION_TO_LIBRARY_DOWNLOADS_ENTRY[ext]:
                if name == "openssl":
                    name = openssl_entry

                if name == "zlib":
                    name = zlib_entry

                # On 3.14+ and aarch64, we use the latest tcl/tk version
                if ext == "_tkinter" and (python_majmin == "314" or arch == "arm64"):
                    name = name.replace("-8612", "")

                download_entry = DOWNLOADS[name]

                # This will raise if no license metadata defined. This is
                # intentional because EXTENSION_TO_LIBRARY_DOWNLOADS_ENTRY is
                # manually curated and we want to fail fast.
                licenses |= set(download_entry["licenses"])
                license_paths.add("licenses/%s" % download_entry["license_file"])
                # NOTE(review): overwritten on each iteration, so only the last
                # entry's value sticks when an extension has multiple
                # downloads — confirm this is intended.
                license_public_domain = download_entry.get("license_public_domain")

            entry["licenses"] = list(sorted(licenses))
            entry["license_paths"] = list(sorted(license_paths))
            entry["license_public_domain"] = license_public_domain

        res["extensions"][ext] = [entry]

        # Copy the extension static library.
        ext_static = outputs_path / ("%s%s.lib" % (ext, abi_tag))
        dest = dest_dir / ("%s%s.lib" % (ext, abi_tag))
        log("copying static extension %s" % ext_static)
        shutil.copyfile(ext_static, dest)

        res["extensions"][ext][0]["shared_lib"] = "install/DLLs/%s%s.pyd" % (
            ext,
            abi_tag,
        )

    lib_dir = out_dir / "build" / "lib"
    lib_dir.mkdir()

    # Copy libraries for dependencies into the lib directory.
    for depend in sorted(depends_projects):
        static_source = outputs_path / ("%s.lib" % depend)
        static_dest = lib_dir / ("%s.lib" % depend)

        log("copying link library %s" % static_source)
        shutil.copyfile(static_source, static_dest)

        # A dependency may also produce a DLL; copy it when present.
        shared_source = outputs_path / ("%s.dll" % depend)
        if shared_source.exists():
            shared_dest = lib_dir / ("%s.dll" % depend)
            log("copying shared library %s" % shared_source)
            shutil.copyfile(shared_source, shared_dest)

    return res
def build_cpython(
    python_entry_name: str,
    target_triple: str,
    arch: str,
    build_options: str,
    msvc_version: str,
    windows_sdk_version: str,
    openssl_archive,
    libffi_archive,
    openssl_entry: str,
) -> pathlib.Path:
    """Build CPython for Windows and package it into an uncompressed tar.

    Downloads all source dependencies, unpacks them plus the prebuilt
    OpenSSL/libffi archives into a temp directory, runs the MSBuild-based
    CPython build (optionally with PGO), assembles a release layout via
    CPython's ``PC/layout`` tool, installs pip (and setuptools on <= 3.11),
    collects build artifacts, and writes a ``PYTHON.json`` metadata document.

    Args:
        python_entry_name: DOWNLOADS key of the CPython release to build.
        target_triple: target triple recorded in the distribution metadata.
        arch: architecture (``x86``/``amd64``/``arm64``).
        build_options: ``+``-delimited options; ``pgo`` and ``freethreaded``
            are recognized.
        msvc_version: Visual Studio version used to locate MSBuild.
        windows_sdk_version: Windows SDK version passed to MSBuild.
        openssl_archive: path to the prebuilt OpenSSL tar archive.
        libffi_archive: path to the prebuilt libffi tar archive.
        openssl_entry: DOWNLOADS key of the OpenSSL variant in the archive.

    Returns:
        Path to the produced tar archive under ``BUILD``.
    """
    parsed_build_options = set(build_options.split("+"))
    pgo = "pgo" in parsed_build_options
    freethreaded = "freethreaded" in parsed_build_options

    msbuild = find_msbuild(msvc_version)
    log("found MSBuild at %s" % msbuild)

    # The python.props file keys off MSBUILD, so it needs to be set.
    os.environ["MSBUILD"] = str(msbuild)

    python_archive = download_entry(python_entry_name, BUILD)
    entry = DOWNLOADS[python_entry_name]
    python_version = entry["version"]

    # 3.14+ switched from zlib to the zlib-ng fork.
    zlib_entry = (
        "zlib-ng" if meets_python_minimum_version(python_version, "3.14") else "zlib"
    )

    bzip2_archive = download_entry("bzip2", BUILD)
    sqlite_archive = download_entry("sqlite", BUILD)
    xz_archive = download_entry("xz", BUILD)
    zlib_archive = download_entry(zlib_entry, BUILD)

    setuptools_wheel = download_entry("setuptools", BUILD)
    pip_wheel = download_entry("pip", BUILD)

    # On CPython 3.14+, we use the latest tcl/tk version which has additional
    # runtime dependencies, so we are conservative and use the old version
    # elsewhere. The old version isn't built for arm64, so we use the new
    # version there too
    tk_bin_entry = (
        "tk-windows-bin"
        if meets_python_minimum_version(python_version, "3.14") or arch == "arm64"
        else "tk-windows-bin-8612"
    )
    tk_bin_archive = download_entry(
        tk_bin_entry, BUILD, local_name="tk-windows-bin.tar.gz"
    )

    # On CPython 3.14+, zstd is included
    if meets_python_minimum_version(python_version, "3.14"):
        zstd_archive = download_entry("zstd", BUILD)
    else:
        zstd_archive = None

    # CPython 3.13+ no longer uses a bundled `mpdecimal` version so we build it
    if meets_python_minimum_version(python_version, "3.13"):
        mpdecimal_archive = download_entry("mpdecimal", BUILD)
    else:
        # TODO: Consider using the built mpdecimal for earlier versions as well,
        # as we do for Unix builds.
        mpdecimal_archive = None

    if freethreaded:
        # Free-threaded builds produce version-suffixed executables, e.g.
        # python3.13t.exe.
        (major, minor, _) = python_version.split(".")
        python_exe = f"python{major}.{minor}t.exe"
        pythonw_exe = f"pythonw{major}.{minor}t.exe"
    else:
        python_exe = "python.exe"
        pythonw_exe = "pythonw.exe"

    if arch == "amd64":
        build_platform = "x64"
        build_directory = "amd64"
    elif arch == "x86":
        build_platform = "win32"
        build_directory = "win32"
    elif arch == "arm64":
        build_platform = "arm64"
        build_directory = "arm64"
    else:
        raise Exception("unhandled architecture: %s" % arch)

    # ignore_cleanup_errors only exists on 3.12+; Windows file locking can make
    # tempdir removal fail spuriously.
    tempdir_opts = (
        {"ignore_cleanup_errors": True} if sys.version_info >= (3, 12) else {}
    )
    with tempfile.TemporaryDirectory(prefix="python-build-", **tempdir_opts) as td:
        td = pathlib.Path(td)

        # Extract all source archives concurrently; entries may be None
        # (e.g. zstd/mpdecimal on older Pythons) and are skipped.
        with concurrent.futures.ThreadPoolExecutor(10) as e:
            fs = []
            for a in (
                python_archive,
                bzip2_archive,
                mpdecimal_archive,
                openssl_archive,
                sqlite_archive,
                tk_bin_archive,
                xz_archive,
                zlib_archive,
                zstd_archive,
            ):
                if a is None:
                    continue

                log("extracting %s to %s" % (a, td))
                fs.append(e.submit(extract_tar_to_directory, a, td))

            for f in fs:
                # Propagate any extraction exception.
                f.result()

        # Copy the config.h file used by upstream CPython for xz 5.8.1
        # https://github.com/python/cpython-source-deps/blob/665d407bd6bc941944db2152e4b5dca388ea586e/windows/config.h
        xz_version = DOWNLOADS["xz"]["version"]
        xz_path = td / ("xz-%s" % xz_version)
        config_src = SUPPORT / "xz-support" / "config.h"
        config_dest = xz_path / "windows" / "config.h"
        log(f"copying {config_src} to {config_dest}")
        shutil.copyfile(config_src, config_dest)

        extract_tar_to_directory(libffi_archive, td)

        # We need all the OpenSSL library files in the same directory to appease
        # install rules.
        openssl_arch = {"amd64": "amd64", "x86": "win32", "arm64": "arm64"}[arch]
        openssl_root = td / "openssl" / openssl_arch
        openssl_bin_path = openssl_root / "bin"
        openssl_lib_path = openssl_root / "lib"

        for f in sorted(os.listdir(openssl_bin_path)):
            if not f.startswith("lib"):
                continue

            source = openssl_bin_path / f
            dest = openssl_lib_path / f
            log("copying %s to %s" % (source, dest))
            shutil.copyfile(source, dest)

        # Delete the tk nmake helper, it's not needed and links msvc
        if tk_bin_entry == "tk-windows-bin":
            tcltk_commit: str = DOWNLOADS[tk_bin_entry]["git_commit"]
            tcltk_path = td / ("cpython-bin-deps-%s" % tcltk_commit)
            (
                tcltk_path
                / build_directory
                / "lib"
                / "nmake"
                / "x86_64-w64-mingw32-nmakehlp.exe"
            ).unlink()

        cpython_source_path = td / ("Python-%s" % python_version)
        pcbuild_path = cpython_source_path / "PCbuild"

        out_dir = td / "out"

        build_dir = out_dir / "python" / "build"
        build_dir.mkdir(parents=True)

        # Parse config.c before we hack it up: we want a pristine copy.
        config_c_path = cpython_source_path / "PC" / "config.c"

        with config_c_path.open("r", encoding="utf8") as fh:
            config_c = fh.read()

        builtin_extensions = parse_config_c(config_c)

        hack_project_files(
            td,
            cpython_source_path,
            build_directory,
            python_version=python_version,
            zlib_entry=zlib_entry,
            arch=arch,
        )

        if pgo:
            # PGO pass 1: build an instrumented interpreter.
            run_msbuild(
                msbuild,
                pcbuild_path,
                configuration="PGInstrument",
                platform=build_platform,
                python_version=python_version,
                windows_sdk_version=windows_sdk_version,
                freethreaded=freethreaded,
            )

            # build-windows.py sets some environment variables which cause the
            # test harness to pick up the wrong `test` module. We unset these
            # so things work as expected.
            env = dict(os.environ)
            paths = [
                p for p in env["PATH"].split(";") if p != str(BUILD / "venv" / "bin")
            ]
            env["PATH"] = ";".join(paths)
            del env["PYTHONPATH"]

            env["PYTHONHOME"] = str(cpython_source_path)

            # For some reason, .pgc files aren't being created if we invoke the
            # test harness normally (all tests) or with -j to perform parallel
            # test execution. We work around this by invoking the test harness
            # separately for each test.
            instrumented_python = (
                pcbuild_path / build_directory / "instrumented" / python_exe
            )

            tests = subprocess.run(
                [str(instrumented_python), "-m", "test", "--list-tests"],
                cwd=cpython_source_path,
                env=env,
                check=False,
                stdout=subprocess.PIPE,
            ).stdout

            tests = [l.strip() for l in tests.decode("utf-8").splitlines() if l.strip()]

            for test in sorted(tests):
                # Only look at specific tests, to keep runtime down.
                if test not in PGO_TESTS:
                    continue

                # test_regrtest hangs for some reason. It is the test for the
                # test harness itself and isn't exercising useful code. Skip it.
                if test == "test_regrtest":
                    continue

                exec_and_log(
                    [
                        str(instrumented_python),
                        "-m",
                        "test",
                        # --pgo simply disables some tests, quiets output, and ignores the
                        # exit code. We could disable it if we wanted more verbose test
                        # output...
                        "--pgo",
                        test,
                    ],
                    str(pcbuild_path),
                    env,
                    exit_on_error=False,
                )

            # PGO pass 2: rebuild using the collected profile data.
            run_msbuild(
                msbuild,
                pcbuild_path,
                configuration="PGUpdate",
                platform=build_platform,
                python_version=python_version,
                windows_sdk_version=windows_sdk_version,
                freethreaded=freethreaded,
            )
            artifact_config = "PGUpdate"

        else:
            run_msbuild(
                msbuild,
                pcbuild_path,
                configuration="Release",
                platform=build_platform,
                python_version=python_version,
                windows_sdk_version=windows_sdk_version,
                freethreaded=freethreaded,
            )
            artifact_config = "Release"

        install_dir = out_dir / "python" / "install"

        # The PC/layout directory contains a script for copying files into
        # a release-like directory. Use that for assembling the standalone
        # build.

        # It doesn't clean up the temp directory it creates. So pass one to it
        # under our tempdir.
        layout_tmp = td / "layouttmp"
        layout_tmp.mkdir()

        args = [
            str(cpython_source_path / "python.bat"),
            str(cpython_source_path / "PC" / "layout"),
            "-vv",
            "--source",
            str(cpython_source_path),
            "--build",
            str(pcbuild_path / build_directory),
            "--copy",
            str(install_dir),
            "--temp",
            str(layout_tmp),
            "--include-dev",
            "--include-symbols",
            "--include-tests",
            "--include-venv",
        ]

        if freethreaded:
            args.append("--include-freethreaded")

        # CPython 3.12 removed distutils.
        if not meets_python_minimum_version(python_version, "3.12"):
            args.append("--include-distutils")

        args.extend(["--include-idle", "--include-stable", "--include-tcltk"])

        exec_and_log(
            args,
            pcbuild_path,
            os.environ,
        )

        # We install pip by using pip to install itself. This leverages a feature
        # where Python can automatically recognize wheel/zip files on sys.path and
        # import their contents. According to
        # https://github.com/pypa/pip/issues/11146 running pip from a wheel is not
        # supported. But it has historically worked and is simple. So do this until
        # it stops working and we need to switch to running pip from the filesystem.
        pip_env = dict(os.environ)
        pip_env["PYTHONPATH"] = str(pip_wheel)

        # Install pip and setuptools.
        exec_and_log(
            [
                str(install_dir / python_exe),
                "-m",
                "pip",
                "install",
                "--no-cache-dir",
                "--no-index",
                str(pip_wheel),
            ],
            td,
            pip_env,
        )

        # Setuptools is only installed for Python 3.11 and older, for parity with
        # `ensurepip` and `venv`: https://github.com/python/cpython/pull/101039
        if meets_python_maximum_version(python_version, "3.11"):
            exec_and_log(
                [
                    str(install_dir / python_exe),
                    "-m",
                    "pip",
                    "install",
                    "--no-cache-dir",
                    "--no-index",
                    str(setuptools_wheel),
                ],
                td,
                pip_env,
            )

        # The executables in the Scripts/ directory don't work because they reference
        # python.dll in the wrong path. You can run these via e.g. `python.exe -m pip`.
        # So just delete them for now.
        for filename in sorted(os.listdir(install_dir / "Scripts")):
            assert filename.startswith("pip") and filename.endswith(".exe")
            p = install_dir / "Scripts" / filename
            log("removing non-functional executable: %s" % p)
            os.unlink(p)

        # But this leaves the Scripts directory empty, which we don't want. So
        # create a placeholder file to ensure the directory is created on archive
        # extract.
        with (install_dir / "Scripts" / ".empty").open("ab"):
            pass

        # Now copy the build artifacts into the output directory.
        build_info = collect_python_build_artifacts(
            pcbuild_path,
            out_dir / "python",
            "".join(entry["version"].split(".")[0:2]),
            build_directory,
            artifact_config,
            openssl_entry=openssl_entry,
            zlib_entry=zlib_entry,
            freethreaded=freethreaded,
        )

        # Record the built-in extensions parsed from the pristine config.c.
        for ext, init_fn in sorted(builtin_extensions.items()):
            if ext in build_info["extensions"]:
                log("built-in extension should not have a build entry: %s" % ext)
                sys.exit(1)

            build_info["extensions"][ext] = [
                {
                    "in_core": True,
                    "objs": [],
                    "init_fn": init_fn,
                    "links": [],
                    "shared_lib": None,
                    "static_lib": None,
                    "variant": "default",
                }
            ]

        for extension, entries in build_info["extensions"].items():
            for record in entries:
                record["required"] = extension in REQUIRED_EXTENSIONS

        # Copy OpenSSL libraries as a one-off.
        for lib in ("crypto", "ssl"):
            name = "lib%s.lib" % lib
            source = td / "openssl" / build_directory / "lib" / name
            dest = out_dir / "python" / "build" / "lib" / name
            log("copying %s to %s" % (source, dest))
            shutil.copyfile(source, dest)

        # Create a `python.exe` copy when an alternative executable is built, e.g., when
        # free-threading is enabled the name is `python3.13t.exe`.
        canonical_python_exe = install_dir / "python.exe"
        if not canonical_python_exe.exists():
            shutil.copy2(
                install_dir / python_exe,
                canonical_python_exe,
            )

        # Create a `pythonw.exe` copy when an alternative executable is built, e.g., when
        # free-threading is enabled the name is `pythonw3.13t.exe`.
        canonical_pythonw_exe = install_dir / "pythonw.exe"
        if not canonical_pythonw_exe.exists():
            shutil.copy2(
                install_dir / pythonw_exe,
                canonical_pythonw_exe,
            )

        # CPython 3.13 removed `run_tests.py`, we provide a compatibility script
        # for now.
        if meets_python_minimum_version(python_version, "3.13"):
            shutil.copyfile(
                SUPPORT / "run_tests-13.py",
                out_dir / "python" / "build" / "run_tests.py",
            )
        else:
            shutil.copyfile(
                cpython_source_path / "Tools" / "scripts" / "run_tests.py",
                out_dir / "python" / "build" / "run_tests.py",
            )

        licenses_dir = out_dir / "python" / "licenses"
        licenses_dir.mkdir()
        for f in sorted(os.listdir(ROOT)):
            if f.startswith("LICENSE.") and f.endswith(".txt"):
                shutil.copyfile(ROOT / f, licenses_dir / f)

        extension_module_loading = ["builtin", "shared-library"]

        # Patches to CPython above (search for __declspec) always force
        # __declspec(dllexport), even for static distributions.
        python_symbol_visibility = "dllexport"

        crt_features = ["vcruntime:140"]

        if pgo:
            optimizations = "pgo"
        else:
            optimizations = "noopt"

        # Create PYTHON.json file describing this distribution.
        python_info = {
            "version": "8",
            "target_triple": target_triple,
            "optimizations": optimizations,
            "build_options": build_options,
            "python_tag": entry["python_tag"],
            "python_version": python_version,
            "python_symbol_visibility": python_symbol_visibility,
            "python_stdlib_test_packages": sorted(STDLIB_TEST_PACKAGES),
            "python_extension_module_loading": extension_module_loading,
            "libpython_link_mode": "shared",
            "crt_features": crt_features,
            "build_info": build_info,
            "licenses": entry["licenses"],
            "license_path": "licenses/LICENSE.cpython.txt",
            "run_tests": "build/run_tests.py",
        }

        # Collect information from running Python script.
        metadata_path = td / "metadata.json"
        env = dict(os.environ)
        env["ROOT"] = str(out_dir / "python")
        subprocess.run(
            [
                str(canonical_python_exe),
                str(SUPPORT / "generate_metadata.py"),
                str(metadata_path),
            ],
            env=env,
            check=True,
        )
        with metadata_path.open("rb") as fh:
            metadata = json.load(fh)
        python_info.update(metadata)

        python_info["tcl_library_path"] = "install/tcl"
        python_info["tcl_library_paths"] = [
            "dde1.4",
            "reg1.3",
            "tcl8.6",
            "tk8.6",
            "tcl8",
            "tix8.4.3",
        ]

        validate_python_json(python_info, extension_modules=None)

        with (out_dir / "python" / "PYTHON.json").open("w", encoding="utf8") as fh:
            json.dump(python_info, fh, sort_keys=True, indent=4)

        dest_path = BUILD / (
            "cpython-%s-%s-%s.tar"
            % (
                entry["version"],
                target_triple,
                build_options,
            )
        )

        # Build the tar in memory, normalize it, then stream it to disk.
        data = io.BytesIO()
        create_tar_from_directory(data, td / "out")
        data.seek(0)

        data = normalize_tar_archive(data)

        with dest_path.open("wb") as fh:
            while True:
                chunk = data.read(32768)
                if not chunk:
                    break

                fh.write(chunk)

        return dest_path
def fetch_strawberry_perl() -> pathlib.Path:
    """Download Strawberry Perl and unpack it under the build directory.

    Returns the directory the archive was extracted into.
    """
    archive = download_entry("strawberryperl", BUILD)

    dest_dir = BUILD / "strawberry-perl"
    dest_dir.mkdir(exist_ok=True)

    with zipfile.ZipFile(archive) as zf:
        zf.extractall(dest_dir)

    return dest_dir
def main() -> None:
    """Command-line entry point.

    Parses arguments, then builds (or reuses cached) OpenSSL and libffi
    archives, builds CPython, and compresses the final archive into DIST.
    All log output is mirrored into BUILD/build.log.
    """
    BUILD.mkdir(exist_ok=True)

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--vs",
        choices={"2019", "2022"},
        default="2022",
        help="Visual Studio version to use",
    )
    parser.add_argument(
        "--python",
        choices={
            "cpython-3.10",
            "cpython-3.11",
            "cpython-3.12",
            "cpython-3.13",
            "cpython-3.14",
            "cpython-3.15",
        },
        default="cpython-3.11",
        help="Python distribution to build",
    )
    optimizations = {"noopt", "pgo"}
    parser.add_argument(
        "--options",
        choices=optimizations.union({f"freethreaded+{o}" for o in optimizations}),
        default="noopt",
        help="Build options to apply when compiling Python",
    )
    parser.add_argument(
        "--sh", required=True, help="Path to sh.exe in a cygwin or mingw installation"
    )
    parser.add_argument(
        "--windows-sdk-version",
        default="10.0.26100.0",
        help="Windows SDK version to build with",
    )

    args = parser.parse_args()

    build_options = args.options

    log_path = BUILD / "build.log"

    with log_path.open("wb") as log_fh:
        LOG_FH[0] = log_fh

        # The Platform environment variable selects the target architecture.
        # NOTE(review): presumably set by the Visual Studio developer
        # environment (vcvarsall) — confirm against CI configuration.
        if os.environ.get("Platform") == "x86":
            target_triple = "i686-pc-windows-msvc"
            arch = "x86"
        elif os.environ.get("Platform") == "arm64":
            target_triple = "aarch64-pc-windows-msvc"
            arch = "arm64"
        elif os.environ.get("Platform") == "x64":
            target_triple = "x86_64-pc-windows-msvc"
            arch = "amd64"
        else:
            raise Exception("unhandled architecture: %s" % os.environ.get("Platform"))

        # TODO need better dependency checking.

        # CPython 3.11+ have native support for OpenSSL 3.x. We anticipate this
        # will change in a future minor release once OpenSSL 1.1 goes out of support.
        # But who knows.
        if args.python == "cpython-3.10":
            openssl_entry = "openssl-1.1"
        else:
            openssl_entry = "openssl-3.5"

        # OpenSSL and libffi archives are cached on disk and only rebuilt when
        # missing.
        openssl_archive = BUILD / (
            "%s-%s-%s.tar" % (openssl_entry, target_triple, build_options)
        )
        if not openssl_archive.exists():
            perl_path = fetch_strawberry_perl() / "perl" / "bin" / "perl.exe"
            LOG_PREFIX[0] = "openssl"
            build_openssl(
                openssl_entry,
                perl_path,
                arch,
                dest_archive=openssl_archive,
            )

        libffi_archive = BUILD / ("libffi-%s-%s.tar" % (target_triple, build_options))
        if not libffi_archive.exists():
            build_libffi(
                args.python,
                arch,
                pathlib.Path(args.sh),
                args.vs,
                libffi_archive,
            )

        LOG_PREFIX[0] = "cpython"
        tar_path = build_cpython(
            args.python,
            target_triple,
            arch,
            build_options=build_options,
            msvc_version=args.vs,
            windows_sdk_version=args.windows_sdk_version,
            openssl_archive=openssl_archive,
            libffi_archive=libffi_archive,
            openssl_entry=openssl_entry,
        )

        if "PYBUILD_RELEASE_TAG" in os.environ:
            release_tag = os.environ["PYBUILD_RELEASE_TAG"]
        else:
            release_tag = release_tag_from_git()

        # Create, e.g., `cpython-3.10.13+20240224-x86_64-pc-windows-msvc-pgo.tar.zst`.
        compress_python_archive(
            tar_path,
            DIST,
            "%s-%s" % (tar_path.stem, release_tag),
        )


if __name__ == "__main__":
    # main() returns None, so this exits with status 0 on success.
    sys.exit(main())
| NoSearchStringError |
python | PyCQA__pylint | tests/pyreverse/functional/class_diagrams/attributes/duplicates.py | {
"start": 60,
"end": 260
} | class ____():
example1: int
example2: int
def __init__(self):
self.example1 = 1
self.example2 = 2
# Test for https://github.com/pylint-dev/pylint/issues/8522
| DuplicateFields |
python | huggingface__transformers | src/transformers/models/electra/modeling_electra.py | {
"start": 54146,
"end": 58613
} | class ____(ElectraPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"generator_lm_head.weight": "electra.embeddings.word_embeddings.weight"}
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `ElectraForCausalLM` as a standalone, add `is_decoder=True.`")
self.electra = ElectraModel(config)
self.generator_predictions = ElectraGeneratorPredictions(config)
self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
self.post_init()
def get_output_embeddings(self):
return self.generator_lm_head
def set_output_embeddings(self, new_embeddings):
self.generator_lm_head = new_embeddings
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Example:
```python
>>> from transformers import AutoTokenizer, ElectraForCausalLM, ElectraConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/electra-base-generator")
>>> config = ElectraConfig.from_pretrained("google/electra-base-generator")
>>> config.is_decoder = True
>>> model = ElectraForCausalLM.from_pretrained("google/electra-base-generator", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
if labels is not None:
use_cache = False
outputs: BaseModelOutputWithPastAndCrossAttentions = self.electra(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
return_dict=True,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.generator_lm_head(self.generator_predictions(hidden_states[:, slice_indices, :]))
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
__all__ = [
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
]
| ElectraForCausalLM |
python | catalyst-team__catalyst | catalyst/callbacks/metrics/classification.py | {
"start": 269,
"end": 3873
} | class ____(BatchMetricCallback):
"""Multiclass PrecisionRecallF1Support metric callback.
Args:
input_key: input key to use for metric calculation, specifies our `y_pred`
target_key: output key to use for metric calculation, specifies our `y_true`
num_classes: number of classes
zero_division: value to set in case of zero division during metrics
(precision, recall) computation; should be one of 0 or 1
log_on_batch: boolean flag to log computed metrics every batch
compute_per_class_metrics: boolean flag to compute per-class metrics
(default: SETTINGS.compute_per_class_metrics or False).
prefix: metric prefix
suffix: metric suffix
Examples:
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl
# sample data
num_samples, num_features, num_classes = int(1e4), int(1e1), 4
X = torch.rand(num_samples, num_features)
y = (torch.rand(num_samples,) * num_classes).to(torch.int64)
# pytorch loaders
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, num_classes)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2])
# model training
runner = dl.SupervisedRunner(
input_key="features",
output_key="logits",
target_key="targets",
loss_key="loss"
)
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
logdir="./logdir",
num_epochs=3,
valid_loader="valid",
valid_metric="accuracy03",
minimize_valid_metric=False,
verbose=True,
callbacks=[
dl.AccuracyCallback(
input_key="logits", target_key="targets", num_classes=num_classes
),
dl.PrecisionRecallF1SupportCallback(
input_key="logits", target_key="targets", num_classes=num_classes
),
dl.AUCCallback(input_key="logits", target_key="targets"),
],
)
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
input_key: str,
target_key: str,
num_classes: Optional[int] = None,
zero_division: int = 0,
log_on_batch: bool = True,
compute_per_class_metrics: bool = SETTINGS.compute_per_class_metrics,
prefix: str = None,
suffix: str = None,
):
"""Init."""
super().__init__(
metric=MulticlassPrecisionRecallF1SupportMetric(
zero_division=zero_division,
prefix=prefix,
suffix=suffix,
compute_per_class_metrics=compute_per_class_metrics,
num_classes=num_classes,
),
input_key=input_key,
target_key=target_key,
log_on_batch=log_on_batch,
)
| PrecisionRecallF1SupportCallback |
python | huggingface__transformers | src/transformers/models/focalnet/modeling_focalnet.py | {
"start": 24187,
"end": 27566
} | class ____(FocalNetPreTrainedModel):
def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether to use a mask token for masked image modeling.
"""
super().__init__(config)
self.config = config
self.num_stages = len(config.depths)
self.num_features = int(config.embed_dim * 2 ** (self.num_stages - 1))
self.embeddings = FocalNetEmbeddings(config, use_mask_token=use_mask_token)
self.encoder = FocalNetEncoder(config, self.embeddings.patch_grid)
self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
bool_masked_pos: Optional[torch.BoolTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, FocalNetModelOutput]:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
encoder_outputs = self.encoder(
embedding_output,
input_dimensions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = None
if self.pooler is not None:
pooled_output = self.pooler(sequence_output.transpose(1, 2))
pooled_output = torch.flatten(pooled_output, 1)
if not return_dict:
output = (sequence_output, pooled_output) + encoder_outputs[1:]
return output
return FocalNetModelOutput(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
)
@auto_docstring(
custom_intro="""
FocalNet Model with a decoder on top for masked image modeling.
This follows the same implementation as in [SimMIM](https://huggingface.co/papers/2111.09886).
<Tip>
Note that we provide a script to pre-train this model on custom data in our [examples
directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).
</Tip>
"""
)
| FocalNetModel |
python | Netflix__metaflow | metaflow/unbounded_foreach.py | {
"start": 86,
"end": 387
} | class ____(object):
"""
Plugins that wish to support `UnboundedForeach` need their special
input(s) subclass this class.
This is used by the runtime to detect the difference between bounded
and unbounded foreach, based on the variable passed to `foreach`.
"""
| UnboundedForeachInput |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 48379,
"end": 49294
} | class ____(themeable):
"""
y-axis major-tick length
Parameters
----------
theme_element : float | complex
Value in points. A negative value creates the ticks
inside the plot panel. A complex value (e.g. `3j`)
creates ticks that span both in and out of the panel.
"""
def apply_ax(self, ax: Axes):
super().apply_ax(ax)
value: float | complex = self.properties["value"]
try:
visible = ax.yaxis.get_major_ticks()[0].tick1line.get_visible()
except IndexError:
value = 0
else:
if not visible:
value = 0
if isinstance(value, (float, int)):
tickdir = "in" if value < 0 else "out"
else:
tickdir = "inout"
ax.yaxis.set_tick_params(
which="major", length=abs(value), tickdir=tickdir
)
| axis_ticks_length_major_y |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_pathconverter.py | {
"start": 8194,
"end": 8794
} | class ____(TestAbsolute):
"""Test absolute paths with file:// scheme."""
extension = ["pymdownx.pathconverter"]
extension_configs = {
"pymdownx.pathconverter": {
"base_path": "/Some/fake/path",
"absolute": True,
"file_scheme": True,
}
}
def test_relative_path(self):
"""Test relative path."""
self.check_markdown(
r'',
r'<p><img alt="picture" src="file:///Some/fake/path/test_extensions/_assets/bg.png" /></p>'
)
| TestAbsoluteFileScheme |
python | pdm-project__pdm | src/pdm/termui.py | {
"start": 2922,
"end": 3141
} | class ____(enum.IntEnum):
QUIET = -1
NORMAL = 0
DETAIL = 1
DEBUG = 2
LOG_LEVELS = {
Verbosity.NORMAL: logging.WARN,
Verbosity.DETAIL: logging.INFO,
Verbosity.DEBUG: logging.DEBUG,
}
| Verbosity |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linalg_grad_test.py | {
"start": 3520,
"end": 12302
} | class ____(test_lib.TestCase):
pass # Filled in below
def _GetMatrixBinaryFunctorGradientTest(functor_,
dtype_,
shape_,
float32_tol_fudge=1.0,
**kwargs_):
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
@test_util.run_without_tensor_float_32(
'Tests `tf.linalg.lstsq`, which call matmul. Additionally, calls ops '
'which do matmul in their gradient, such as MatrixSolveLs.')
# TODO(b/164254522): With TensorFloat-32, some tests fails with extremely high
# absolute and relative differences when calling assertAllClose. For example,
# the test test_MatrixSolveLsGradient_float32_10_10_1e-06 of class
# MatrixBinaryFunctorGradientTest fails with a max absolute difference of
# 0.883 and a max relative difference of 736892. We should consider disabling
# TensorFloat-32 within `tf.linalg.lstsq and perhaps other linear algebra
# functions, even if TensorFloat-32 is allowed globally.
def Test(self):
def RandomInput():
np.random.seed(1)
return np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
fixed = RandomInput()
# Optimal stepsize for central difference is O(epsilon^{1/3}).
epsilon = np.finfo(dtype_).eps
delta = epsilon**(1.0 / 3.0)
# tolerance obtained by looking at actual differences using
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
tol = 1e-6 if dtype_ == np.float64 else float32_tol_fudge * 0.05
# check gradient w.r.t. left argument.
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda x: functor_(x, fixed, **kwargs_), [RandomInput()], delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
# check gradient w.r.t. right argument.
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda y: functor_(fixed, y, **kwargs_), [RandomInput()], delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
def _GetBandedTriangularSolveGradientTest(
functor_,
dtype_,
shape_,
float32_tol_fudge=1.0, # pylint: disable=redefined-outer-name
**kwargs_):
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def Test(self):
n = shape_[-1]
np.random.seed(1)
# Make sure invertible.
a_np = np.random.uniform(low=1.0, high=2.0, size=shape_).astype(dtype_)
a = constant_op.constant(a_np)
b_np = np.random.uniform(low=-1.0, high=1.0, size=[n, n]).astype(dtype_)
b = constant_op.constant(b_np)
epsilon = np.finfo(dtype_).eps
delta = epsilon**(1.0 / 3.0)
# tolerance obtained by looking at actual differences using
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
tol = 1e-6 if dtype_ == np.float64 else float32_tol_fudge * 0.05
# check gradient w.r.t. left argument.
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda x: functor_(x, b, **kwargs_), [a], delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
# check gradient w.r.t. right argument.
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda y: functor_(a, y, **kwargs_), [b], delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
if __name__ == '__main__':
# Tests for gradients of binary matrix operations.
for dtype in np.float32, np.float64:
for size in 2, 5, 10:
# We skip the rank 4, size 10 case: it is slow and conceptually covered
# by the other cases.
for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
for adjoint in False, True:
shape = extra + (size, size)
name = '%s_%s_adj_%s' % (dtype.__name__, '_'.join(map(
str, shape)), str(adjoint))
_AddTest(
MatrixBinaryFunctorGradientTest, 'MatrixSolveGradient', name,
_GetMatrixBinaryFunctorGradientTest(
linalg_ops.matrix_solve, dtype, shape, adjoint=adjoint))
for lower in True, False:
name = '%s_low_%s' % (name, lower)
_AddTest(
MatrixBinaryFunctorGradientTest,
'MatrixTriangularSolveGradient', name,
_GetMatrixBinaryFunctorGradientTest(
linalg_ops.matrix_triangular_solve,
dtype,
shape,
float32_tol_fudge=4.0,
adjoint=adjoint,
lower=lower))
band_shape = extra + (size // 2 + 1, size)
name = '%s_%s_adj_%s_low_%s' % (dtype.__name__, '_'.join(
map(str, band_shape)), str(adjoint), lower)
_AddTest(
MatrixBinaryFunctorGradientTest,
'BandedTriangularSolveGradient', name,
_GetBandedTriangularSolveGradientTest(
linalg_ops.banded_triangular_solve,
dtype,
band_shape,
float32_tol_fudge=4.0,
adjoint=adjoint,
lower=lower))
# Tests for gradients of unary matrix operations.
for dtype in np.float32, np.float64:
for size in 2, 5, 10:
# We skip the rank 4, size 10 case: it is slow and conceptually covered
# by the other cases.
for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
shape = extra + (size, size)
name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
# _AddTest(
# MatrixUnaryFunctorGradientTest, 'MatrixInverseGradient', name,
# _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_inverse,
# dtype, shape))
# _AddTest(
# MatrixUnaryFunctorGradientTest,
# 'MatrixAdjointInverseGradient', name,
# _GetMatrixUnaryFunctorGradientTest(
# lambda x: linalg_ops.matrix_inverse(x, adjoint=True),
# dtype, shape))
# if True: # not test_lib.is_built_with_rocm():
# TODO(b/417809163):
# re-enable this test when upstream issues are resolved
# see commit msg for details
# _AddTest(
# MatrixUnaryFunctorGradientTest, 'MatrixExponentialGradient', name,
# _GetMatrixUnaryFunctorGradientTest(linalg_impl.matrix_exponential,
# dtype, shape))
# _AddTest(
# MatrixUnaryFunctorGradientTest,
# 'MatrixDeterminantGradient', name,
# _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_determinant,
# dtype, shape))
# _AddTest(
# MatrixUnaryFunctorGradientTest,
# 'LogMatrixDeterminantGradient',
# name,
# _GetMatrixUnaryFunctorGradientTest(lambda x:
# linalg_ops.log_matrix_determinant(x)[1], dtype, shape))
# The numerical Jacobian is consistently invalid for these four shapes
# because the matrix square root of the perturbed input doesn't exist
if shape in {(2, 5, 5), (3, 5, 5), (3, 10, 10), (3, 2, 5, 5)}:
# Alternative shape that consistently produces a valid numerical
# Jacobian
shape = extra + (size + 1, size + 1)
name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
# _AddTest(
# MatrixUnaryFunctorGradientTest, 'MatrixSquareRootGradient', name,
# _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_square_root,
# dtype, shape))
# Tests for gradients of matrix_solve_ls
for dtype in np.float32, np.float64:
for rows in 2, 5, 10:
for cols in 2, 5, 10:
for l2_regularization in 1e-6, 0.001, 1.0:
shape = (rows, cols)
name = '%s_%s_%s' % (dtype.__name__, '_'.join(map(
str, shape)), l2_regularization)
float32_tol_fudge = 5.1 if l2_regularization == 1e-6 else 4.0
_AddTest(
MatrixBinaryFunctorGradientTest,
'MatrixSolveLsGradient',
name,
# pylint: disable=long-lambda,g-long-lambda
_GetMatrixBinaryFunctorGradientTest(
(lambda a, b, l=l2_regularization: linalg_ops.matrix_solve_ls(
a, b, l)), dtype, shape, float32_tol_fudge))
test_lib.main()
| MatrixBinaryFunctorGradientTest |
python | sanic-org__sanic | sanic/models/futures.py | {
"start": 756,
"end": 852
} | class ____(NamedTuple):
listener: ListenerType
event: str
priority: int
| FutureListener |
python | kamyu104__LeetCode-Solutions | Python/brace-expansion.py | {
"start": 1309,
"end": 2769
} | class ____(object):
def expand(self, S): # nested is fine
"""
:type S: str
:rtype: List[str]
"""
def form_words(options):
words = []
total = 1
for opt in options:
total *= len(opt)
for i in xrange(total):
tmp = []
for opt in reversed(options):
i, c = divmod(i, len(opt))
tmp.append(opt[c])
tmp.reverse()
words.append("".join(tmp))
words.sort()
return words
def generate_option(expr, i):
option_set = set()
while i[0] != len(expr) and expr[i[0]] != "}":
i[0] += 1 # { or ,
for option in generate_words(expr, i):
option_set.add(option)
i[0] += 1 # }
option = list(option_set)
option.sort()
return option
def generate_words(expr, i):
options = []
while i[0] != len(expr) and expr[i[0]] not in ",}":
tmp = []
if expr[i[0]] not in "{,}":
tmp.append(expr[i[0]])
i[0] += 1 # a-z
elif expr[i[0]] == "{":
tmp = generate_option(expr, i)
options.append(tmp)
return form_words(options)
return generate_words(S, [0])
| Solution2 |
python | ray-project__ray | python/ray/tests/test_batch_node_provider_unit.py | {
"start": 773,
"end": 3619
} | class ____(BatchingNodeProvider):
"""Mock implementation of a BatchingNodeProvider."""
def __init__(
self,
provider_config: Dict[str, Any],
cluster_name: str,
) -> None:
BatchingNodeProvider.__init__(self, provider_config, cluster_name)
# Fake cluster manager state:
self._node_data_dict: Dict[NodeID, NodeData] = {}
self._add_node(node_type="head", node_kind=NODE_KIND_HEAD)
# Allow unit test to the control output of safe_to_scale.
self._safe_to_scale_flag = True
self._scale_request_submitted_count = 0
# Track non_terminated_nodes_calls for use in test_autoscaler.py
self.num_non_terminated_nodes_calls = 0
def get_node_data(self) -> Dict[NodeID, NodeData]:
self.num_non_terminated_nodes_calls += 1
return self._node_data_dict
def set_node_replica_index(self, node_id, replica_index):
self._node_data_dict[node_id].replica_index = replica_index
def submit_scale_request(self, scale_request: ScaleRequest) -> None:
"""Simulate modification of cluster state by an external cluster manager."""
self._scale_request_submitted_count += 1
# Delete workers.
for node_id in self.scale_request.workers_to_delete:
del self._node_data_dict[node_id]
# Get counts of workers after the deletion.
cur_num_workers = self._cur_num_workers(self._node_data_dict)
for node_type in self.scale_request.desired_num_workers:
# How many nodes to add.
diff = (
self.scale_request.desired_num_workers[node_type]
- cur_num_workers[node_type]
)
# The next assertion validates the node provider and the test structure.
# After removing nodes to terminate, there should be no more
# nodes to terminate!
assert diff >= 0, diff
for _ in range(diff):
self._add_node(node_type, NODE_KIND_WORKER)
def _add_node(self, node_type, node_kind):
new_node_id = str(uuid4())
self._node_data_dict[new_node_id] = NodeData(
kind=node_kind, ip=str(uuid4()), status=STATUS_UP_TO_DATE, type=node_type
)
def non_terminated_node_ips(self, tag_filters):
"""This method is used in test_autoscaler.py."""
return [
node_data.ip
for node_id, node_data in self._node_data_dict.items()
if tag_filters.items() <= self.node_tags(node_id).items()
]
def safe_to_scale(self) -> bool:
return self.safe_to_scale_flag
def _assert_worker_counts(
self, expected_worker_counts: Dict[NodeType, int]
) -> None:
assert self._cur_num_workers(self._node_data_dict) == expected_worker_counts
| MockBatchingNodeProvider |
python | ray-project__ray | python/ray/_private/authentication/grpc_authentication_client_interceptor.py | {
"start": 2892,
"end": 4774
} | class ____(
aiogrpc.UnaryUnaryClientInterceptor,
aiogrpc.UnaryStreamClientInterceptor,
aiogrpc.StreamUnaryClientInterceptor,
aiogrpc.StreamStreamClientInterceptor,
):
"""Async gRPC client interceptor that adds authentication metadata."""
def _intercept_call_details(self, client_call_details):
"""Helper method to add authentication metadata to client call details."""
metadata = list(client_call_details.metadata or [])
metadata.extend(_get_authentication_metadata_tuple())
return _ClientCallDetails(
method=client_call_details.method,
timeout=client_call_details.timeout,
metadata=metadata,
credentials=client_call_details.credentials,
wait_for_ready=getattr(client_call_details, "wait_for_ready", None),
compression=getattr(client_call_details, "compression", None),
)
async def intercept_unary_unary(self, continuation, client_call_details, request):
new_details = self._intercept_call_details(client_call_details)
return await continuation(new_details, request)
async def intercept_unary_stream(self, continuation, client_call_details, request):
new_details = self._intercept_call_details(client_call_details)
return await continuation(new_details, request)
async def intercept_stream_unary(
self, continuation, client_call_details, request_iterator
):
new_details = self._intercept_call_details(client_call_details)
return await continuation(new_details, request_iterator)
async def intercept_stream_stream(
self, continuation, client_call_details, request_iterator
):
new_details = self._intercept_call_details(client_call_details)
return await continuation(new_details, request_iterator)
| AsyncAuthenticationMetadataClientInterceptor |
python | huggingface__transformers | src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py | {
"start": 19302,
"end": 36545
} | class ____(DeepseekVLImageProcessor):
r"""
Constructs a DEEPSEEK_VL_HYBRID image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
high_res_size (`dict`, *optional*, defaults to `{"height": 1024, "width": 1024}`):
Size of the high resolution output image after resizing. Can be overridden by the `high_res_size` parameter in the `preprocess`
method.
min_size (`int`, *optional*, defaults to 14):
The minimum allowed size for the resized image. Ensures that neither the height nor width
falls below this value after resizing.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
high_res_resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `high_res_resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
Can be overridden by the `image_std` parameter in the `preprocess` method.
high_res_image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
Mean to use if normalizing the high resolution image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `high_res_image_mean` parameter in the `preprocess` method.
high_res_image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
Standard deviation to use if normalizing the high resolution image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `high_res_image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image to square or not.
"""
model_input_names = ["pixel_values", "high_res_pixel_values"]
valid_kwargs = DeepseekVLHybridImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
high_res_size: Optional[dict[str, int]] = None,
min_size: int = 14,
resample: PILImageResampling = PILImageResampling.BICUBIC,
high_res_resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
high_res_image_mean: Optional[Union[float, list[float]]] = None,
high_res_image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: Optional[bool] = None,
do_pad: bool = True,
**kwargs,
) -> None:
high_res_size = high_res_size if high_res_size is not None else {"height": 1024, "width": 1024}
high_res_size = get_size_dict(high_res_size, default_to_square=True)
self.high_res_size = high_res_size
self.high_res_image_mean = high_res_image_mean if high_res_image_mean is not None else OPENAI_CLIP_MEAN
self.high_res_image_std = high_res_image_std if high_res_image_std is not None else OPENAI_CLIP_STD
self.resample = resample
self.high_res_resample = high_res_resample
super().__init__(
do_resize=do_resize,
size=size,
min_size=min_size,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_convert_rgb=do_convert_rgb,
do_pad=do_pad,
**kwargs,
)
if high_res_image_mean is None:
self.high_res_background_color = (127, 127, 127)
else:
self.high_res_background_color = tuple(int(x * 255) for x in high_res_image_mean)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
high_res_size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
high_res_resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
high_res_image_mean: Optional[Union[float, list[float]]] = None,
high_res_image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
do_convert_rgb: Optional[bool] = None,
do_pad: Optional[bool] = None,
background_color: Optional[tuple[int, int, int]] = None,
):
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
resizing.
high_res_size (`Dict[str, int]`, *optional*, defaults to `self.high_res_size`):
Dictionary in the format `{"height": h, "width": w}` specifying the size of the high resolution output image after
resizing.
resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
an effect if `do_resize` is set to `True`.
high_res_resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BICUBIC`. Only has
an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
high_res_image_mean (`float` or `List[float]`, *optional*, defaults to `self.high_res_image_mean`):
Image mean to use if `do_normalize` is set to `True`.
high_res_image_std (`float` or `List[float]`, *optional*, defaults to `self.high_res_image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image to square or not.
background_color (`tuple[int, int, int]`):
The background color to use for the padding.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
resample = resample if resample is not None else self.resample
high_res_resample = high_res_resample if high_res_resample is not None else self.high_res_resample
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
high_res_image_mean = high_res_image_mean if high_res_image_mean is not None else self.high_res_image_mean
high_res_image_std = high_res_image_std if high_res_image_std is not None else self.high_res_image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
do_pad = do_pad if do_pad is not None else self.do_pad
background_color = background_color if background_color is not None else self.background_color
size = size if size is not None else self.size
size_dict = get_size_dict(size)
high_res_size = high_res_size if high_res_size is not None else self.high_res_size
high_res_size_dict = get_size_dict(high_res_size)
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
all_images = []
all_high_res_images = []
for image in images:
# high_res_image: resize (high) -> rescale -> normalize (high)
# low_res_image: resize (high) -> rescale -> resize (low) -> normalize (low)
high_res_image = image
if do_resize:
high_res_image = self.resize(
image=high_res_image,
size=high_res_size_dict,
resample=high_res_resample,
input_data_format=input_data_format,
)
if do_pad:
# Expand and pad the images to obtain a square image of dimensions `size x size`
high_res_image = self.pad_to_square(
image=high_res_image,
background_color=background_color,
input_data_format=input_data_format,
)
image = self.resize(
image=high_res_image,
size=size_dict,
resample=resample,
input_data_format=input_data_format,
)
if do_pad:
image = self.pad_to_square(
image=image,
background_color=background_color,
input_data_format=input_data_format,
)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
high_res_image = self.rescale(
image=high_res_image, scale=rescale_factor, input_data_format=input_data_format
)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
high_res_image = self.normalize(
image=high_res_image,
mean=high_res_image_mean,
std=high_res_image_std,
input_data_format=input_data_format,
)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
high_res_image = to_channel_dimension_format(
high_res_image, data_format, input_channel_dim=input_data_format
)
all_images.append(image)
all_high_res_images.append(high_res_image)
data = {"pixel_values": all_images, "high_res_pixel_values": all_high_res_images}
return BatchFeature(data=data, tensor_type=return_tensors)
| DeepseekVLHybridImageProcessor |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 85181,
"end": 87017
} | class ____(CIntType):
to_py_function = "__Pyx_PyBool_FromLong"
from_py_function = "__Pyx_PyObject_IsTrue"
exception_check = 1 # for C++ bool
default_format_spec = ''
def can_coerce_to_pystring(self, env, format_spec=None):
return not format_spec or super().can_coerce_to_pystring(env, format_spec)
def convert_to_pystring(self, cvalue, code, format_spec=None, name_type=None):
if format_spec:
return super().convert_to_pystring(cvalue, code, format_spec, name_type)
if name_type is None:
name_type = self
# NOTE: no caching here as the string constant cnames depend on the current module
utility_code_name = "__Pyx_PyUnicode_FromBInt_" + name_type.specialization_name()
to_pyunicode_utility = TempitaUtilityCode.load_cached(
"CBIntToPyUnicode", "TypeConversion.c", context={
"TRUE_CONST": code.get_py_string_const(StringEncoding.EncodedString("True")),
"FALSE_CONST": code.get_py_string_const(StringEncoding.EncodedString("False")),
"TO_PY_FUNCTION": utility_code_name,
})
code.globalstate.use_utility_code(to_pyunicode_utility)
return "%s(%s)" % (utility_code_name, cvalue)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if for_display:
base_code = 'bool'
elif pyrex:
base_code = 'bint'
else:
base_code = public_decl('int', dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def specialization_name(self):
return "bint"
def __repr__(self):
return "<CNumericType bint>"
def __str__(self):
return 'bint'
def py_type_name(self):
return "bool"
| CBIntType |
python | skorch-dev__skorch | skorch/tests/test_hf.py | {
"start": 42583,
"end": 54533
} | class ____:
# Note: Since we mock away the HfApi, we cannot be sure that these tests
# wouldn't miss certain types of bugs. Alternatively, we could not use the
# mock but in this case, we would create real uploads (and need to have a
# valid token), which we want to avoid. Other than that, we could try to
# patch more specific functions used by the Hub API, e.g. requests.post, but
# that is more difficult to get right and the success of the patching
# depends on implementation details of the Hub API. Therefore, the current
# approach seems to be most reasonable.
# If any changes to HfHubStorage are made, please test them "end-to-end"
# using the Hugging_Face_Model_Checkpoint.ipynb in this repo and a real
# token.
@pytest.fixture
def net(self, classifier_module):
from skorch import NeuralNetClassifier
net = NeuralNetClassifier(
classifier_module,
max_epochs=3,
)
return net
@pytest.fixture
def data(self, classifier_data):
X, y = classifier_data
# actual training not important, thus only 100 samples for speed
return X[:100], y[:100]
@pytest.fixture
def mock_hf_api(self):
# We cannot use a mock or a class defined in the fixture, since neither
# can be pickled.
hf_api = MockHfApi()
yield hf_api
# pylint: disable=pointless-statement
hf_api.saved.close()
@pytest.fixture
def hf_hub_storer_cls(self):
from skorch.hf import HfHubStorage
return HfHubStorage
def test_kwargs_passed_to_upload(self, net, data, mock_hf_api, hf_hub_storer_cls):
from skorch.callbacks import TrainEndCheckpoint
params = {
'path_in_repo': 'my-model',
'repo_id': 'my-user/my-repo',
'token': 'my-token',
'some_argument': 'foobar',
}
storer = hf_hub_storer_cls(mock_hf_api, **params)
checkpoint = TrainEndCheckpoint(
f_pickle=storer,
f_params=None,
f_optimizer=None,
f_criterion=None,
f_history=None,
)
net.set_params(callbacks=[checkpoint])
net.fit(*data)
assert len(mock_hf_api.calls) == storer._call_count == 1
_, kwargs = mock_hf_api.calls[0]
assert kwargs == params
def test_train_end_checkpoint_pickle(
self, net, data, mock_hf_api, hf_hub_storer_cls
):
from skorch.callbacks import TrainEndCheckpoint
storer = hf_hub_storer_cls(
mock_hf_api, path_in_repo='my-model', repo_id='my-user/my-repo', token='123'
)
checkpoint = TrainEndCheckpoint(
f_pickle=storer,
f_params=None,
f_optimizer=None,
f_criterion=None,
f_history=None,
)
net.set_params(callbacks=[checkpoint])
net.fit(*data)
assert len(mock_hf_api.calls) == storer._call_count == 1
obj, _ = mock_hf_api.calls[0]
assert isinstance(obj, io.IOBase)
def test_train_end_checkpoint_torch_save(
self, net, data, mock_hf_api, hf_hub_storer_cls
):
# f_pickle uses pickle but f_params et al use torch.save, which works a
# bit differently. Therefore, we need to test both.
from skorch.callbacks import TrainEndCheckpoint
storer = hf_hub_storer_cls(
mock_hf_api,
path_in_repo='weights.pt',
repo_id='my-user/my-repo',
buffered=True,
)
checkpoint = TrainEndCheckpoint(
f_params=storer,
f_optimizer=None,
f_criterion=None,
f_history=None,
)
net.set_params(callbacks=[checkpoint])
net.fit(*data)
assert len(mock_hf_api.calls) == storer._call_count == 1
obj, _ = mock_hf_api.calls[0]
assert isinstance(obj, io.IOBase)
def test_checkpoint_pickle(self, net, data, mock_hf_api, hf_hub_storer_cls):
# Checkpoint saves the model multiple times
from skorch.callbacks import Checkpoint
storer = hf_hub_storer_cls(
mock_hf_api, path_in_repo='my-model', repo_id='my-user/my-repo', token='123'
)
checkpoint = Checkpoint(
f_pickle=storer,
f_params=None,
f_optimizer=None,
f_criterion=None,
f_history=None,
)
net.set_params(callbacks=[checkpoint], max_epochs=10)
net.fit(*data)
# each time the valid loss improves, there should be a checkpoint
num_checkpoints_expected = sum(net.history[:, 'valid_loss_best'])
num_checkpoints_actual = len(mock_hf_api.calls)
assert num_checkpoints_actual == num_checkpoints_expected
def test_checkpoint_torch_save(self, net, data, mock_hf_api, hf_hub_storer_cls):
from skorch.callbacks import Checkpoint
storer = hf_hub_storer_cls(
mock_hf_api, path_in_repo='my-model', repo_id='my-user/my-repo', token='123'
)
checkpoint = Checkpoint(
f_params=None,
f_optimizer=storer,
f_criterion=None,
f_history=None,
)
net.set_params(callbacks=[checkpoint], max_epochs=10)
net.fit(*data)
# each time the valid loss improves, there should be a checkpoint
num_checkpoints_expected = sum(net.history[:, 'valid_loss_best'])
num_checkpoints_actual = len(mock_hf_api.calls)
assert num_checkpoints_actual > 0 # sanity check
assert num_checkpoints_actual == num_checkpoints_expected
@pytest.mark.parametrize('storage', ['memory', 'str', 'path'])
def test_saved_net_is_same(
self, net, data, mock_hf_api, hf_hub_storer_cls, storage, tmp_path
):
# Check that the pickled net has the same params after loading, both for
# in-memory and on disk
from skorch.callbacks import TrainEndCheckpoint
if storage == 'memory':
local_storage = None
elif storage == 'str':
local_storage = str(tmp_path / 'my-net.pkl')
else:
local_storage = tmp_path / 'my-net.pkl'
storer = hf_hub_storer_cls(
mock_hf_api,
path_in_repo='my-model',
repo_id='my-user/my-repo',
local_storage=local_storage,
)
checkpoint = TrainEndCheckpoint(
f_pickle=storer,
f_params=None,
f_optimizer=None,
f_criterion=None,
f_history=None,
)
net.set_params(callbacks=[checkpoint])
net.fit(*data)
net_loaded = pickle.loads(mock_hf_api.saved.read())
assert len(net_loaded.module_.state_dict()) == len(net.module_.state_dict())
for key, original in net_loaded.module_.state_dict().items():
original = net.module_.state_dict()[key]
loaded = net_loaded.module_.state_dict()[key]
torch.testing.assert_close(loaded, original)
@pytest.mark.parametrize('storage', ['memory', 'str', 'path'])
def test_saved_params_is_same(
self, net, data, mock_hf_api, hf_hub_storer_cls, storage, tmp_path
):
# check that the module parameters are the same after loading, both for
# in-memory and on disk
from skorch.callbacks import TrainEndCheckpoint
if storage == 'memory':
local_storage = None
elif storage == 'str':
local_storage = str(tmp_path / 'my-weights.pt')
else:
local_storage = tmp_path / 'my-weights.pt'
storer = hf_hub_storer_cls(
mock_hf_api,
path_in_repo='my-model',
repo_id='my-user/my-repo',
local_storage=local_storage,
)
checkpoint = TrainEndCheckpoint(
f_params=storer,
f_optimizer=None,
f_criterion=None,
f_history=None,
)
net.set_params(callbacks=[checkpoint])
net.fit(*data)
state_dict_before = net.module_.state_dict()
state_dict_after = torch.load(mock_hf_api.saved)
assert len(state_dict_before) == len(state_dict_after)
for key, original in state_dict_before.items():
loaded = state_dict_after[key]
torch.testing.assert_close(loaded, original)
def test_latest_url_attribute(self, net, data, hf_hub_storer_cls):
# Check that the URL returned by the HF API is stored as latest_url. In
# the mock, it is formatted by the call count so that we can check that
# it's not always just returning the same URL
from skorch.callbacks import TrainEndCheckpoint
url = 'my-return-url-{}'
mock_hf_api = MockHfApi(return_url=url)
storer = hf_hub_storer_cls(
mock_hf_api, path_in_repo='my-model', repo_id='my-user/my-repo', token='123'
)
checkpoint = TrainEndCheckpoint(
f_params=storer,
f_optimizer=None,
f_criterion=None,
f_history=None,
)
net.set_params(callbacks=[checkpoint])
net.fit(*data)
assert storer.latest_url_ == 'my-return-url-0'
net.partial_fit(*data)
assert storer.latest_url_ == 'my-return-url-1'
def test_verbose_print_output(self, net, data, hf_hub_storer_cls):
from skorch.callbacks import TrainEndCheckpoint
printed = []
def _print(s):
printed.append(s)
url = 'my-return-url'
mock_hf_api = MockHfApi(return_url=url)
storer = hf_hub_storer_cls(
mock_hf_api,
path_in_repo='my-model',
repo_id='my-user/my-repo',
verbose=1,
sink=_print,
)
checkpoint = TrainEndCheckpoint(
f_params=storer,
f_optimizer=None,
f_criterion=None,
f_history=None,
)
net.set_params(callbacks=[checkpoint])
net.fit(*data)
assert len(printed) == 1
text = printed[0]
expected = "Uploaded file to my-return-url"
assert text == expected
def test_templated_name(self, net, data, mock_hf_api, hf_hub_storer_cls):
from skorch.callbacks import Checkpoint
storer = hf_hub_storer_cls(
mock_hf_api,
path_in_repo='my-model-{}',
repo_id='my-user/my-repo',
)
checkpoint = Checkpoint(
f_params=storer,
f_optimizer=None,
f_criterion=None,
f_history=None,
)
net.set_params(callbacks=[checkpoint], max_epochs=10)
net.fit(*data)
for i, (_, kwargs) in enumerate(mock_hf_api.calls):
path_in_repo = kwargs['path_in_repo']
expected = f'my-model-{i}'
assert path_in_repo == expected
def test_with_load_init_state_callback(
self, net, data, mock_hf_api, hf_hub_storer_cls
):
from skorch.callbacks import LoadInitState, TrainEndCheckpoint
params = {
'path_in_repo': 'my-model',
'repo_id': 'my-user/my-repo',
}
storer = hf_hub_storer_cls(mock_hf_api, **params)
checkpoint = TrainEndCheckpoint(
f_pickle=None,
f_params=storer,
f_optimizer=None,
f_criterion=None,
f_history=None,
)
net.set_params(callbacks=[checkpoint])
net.fit(*data)
load_state = LoadInitState(checkpoint)
net.set_params(max_epochs=0, callbacks=[load_state])
# we don't check the exact method that raises (seek, tell, read), as
# that is an implementation detail of pytorch
msg = r"is not \(yet\) implemented"
with pytest.raises(NotImplementedError, match=msg):
net.fit(*data)
| TestHfHubStorage |
python | ansible__ansible | lib/ansible/plugins/action/set_stats.py | {
"start": 894,
"end": 2476
} | class ____(ActionBase):
TRANSFERS_FILES = False
_VALID_ARGS = frozenset(('aggregate', 'data', 'per_host'))
_requires_connection = False
# TODO: document this in non-empty set_stats.py module
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
stats = {'data': {}, 'per_host': False, 'aggregate': True}
if self._task.args:
data = self._task.args.get('data', {})
if not isinstance(data, dict):
data = self._templar.template(data)
if not isinstance(data, dict):
result['failed'] = True
result['msg'] = "The 'data' option needs to be a dictionary/hash"
return result
# set boolean options, defaults are set above in stats init
for opt in ['per_host', 'aggregate']:
val = self._task.args.get(opt, None)
if val is not None:
if not isinstance(val, bool):
stats[opt] = boolean(self._templar.template(val), strict=False)
else:
stats[opt] = val
for (k, v) in data.items():
k = self._templar.template(k)
validate_variable_name(k)
stats['data'][k] = self._templar.template(v)
result['changed'] = False
result['ansible_stats'] = stats
return result
| ActionModule |
python | doocs__leetcode | solution/0100-0199/0104.Maximum Depth of Binary Tree/Solution.py | {
"start": 192,
"end": 397
} | class ____:
def maxDepth(self, root: TreeNode) -> int:
if root is None:
return 0
l, r = self.maxDepth(root.left), self.maxDepth(root.right)
return 1 + max(l, r)
| Solution |
python | huggingface__transformers | src/transformers/models/vitdet/modeling_vitdet.py | {
"start": 21059,
"end": 23063
} | class ____(nn.Module):
def __init__(self, config: VitDetConfig) -> None:
super().__init__()
self.config = config
depth = config.num_hidden_layers
# stochastic depth decay rule
drop_path_rate = [x.item() for x in torch.linspace(0, config.drop_path_rate, depth, device="cpu")]
layers = []
for i in range(depth):
layers.append(
VitDetLayer(
config,
drop_path_rate=drop_path_rate[i],
window_size=config.window_size if i in config.window_block_indices else 0,
use_residual_block=i in config.residual_block_indices,
)
)
self.layer = nn.ModuleList(layers)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
) -> Union[tuple, BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@auto_docstring
| VitDetEncoder |
python | pypa__setuptools | setuptools/_vendor/jaraco/collections/__init__.py | {
"start": 1365,
"end": 2998
} | class ____(collections.abc.Mapping):
"""
Project a set of keys over a mapping
>>> sample = {'a': 1, 'b': 2, 'c': 3}
>>> prj = Projection(['a', 'c', 'd'], sample)
>>> dict(prj)
{'a': 1, 'c': 3}
Projection also accepts an iterable or callable or pattern.
>>> iter_prj = Projection(iter('acd'), sample)
>>> call_prj = Projection(lambda k: ord(k) in (97, 99, 100), sample)
>>> pat_prj = Projection(re.compile(r'[acd]'), sample)
>>> prj == iter_prj == call_prj == pat_prj
True
Keys should only appear if they were specified and exist in the space.
Order is retained.
>>> list(prj)
['a', 'c']
Attempting to access a key not in the projection
results in a KeyError.
>>> prj['b']
Traceback (most recent call last):
...
KeyError: 'b'
Use the projection to update another dict.
>>> target = {'a': 2, 'b': 2}
>>> target.update(prj)
>>> target
{'a': 1, 'b': 2, 'c': 3}
Projection keeps a reference to the original dict, so
modifying the original dict may modify the Projection.
>>> del sample['a']
>>> dict(prj)
{'c': 3}
"""
def __init__(self, keys: _Matchable, space: Mapping):
self._match = _dispatch(keys)
self._space = space
def __getitem__(self, key):
if not self._match(key):
raise KeyError(key)
return self._space[key]
def _keys_resolved(self):
return filter(self._match, self._space)
def __iter__(self):
return self._keys_resolved()
def __len__(self):
return len(tuple(self._keys_resolved()))
| Projection |
python | wandb__wandb | wandb/automations/_generated/create_generic_webhook_integration.py | {
"start": 542,
"end": 819
} | class ____(GQLResult):
integration: Union[
CreateGenericWebhookIntegrationCreateGenericWebhookIntegrationIntegrationIntegration,
WebhookIntegrationFields,
] = Field(discriminator="typename__")
| CreateGenericWebhookIntegrationCreateGenericWebhookIntegration |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/traversal_context.py | {
"start": 2911,
"end": 5737
} | class ____(ContextData):
config_type: ConfigType
do_post_process: bool
@staticmethod
def from_config_type(
config_type: ConfigType,
stack: EvaluationStackEntry,
do_post_process: bool,
) -> "TraversalContext":
return TraversalContext(
config_schema_snapshot=config_type.schema_snapshot,
config_type_snap=config_type.snapshot,
config_type=config_type,
stack=stack,
do_post_process=do_post_process,
)
def for_array(self, index: int) -> "TraversalContext":
check.int_param(index, "index")
return TraversalContext(
config_schema_snapshot=self.config_schema_snapshot,
config_type_snap=self.config_schema_snapshot.get_config_snap(
self.config_type_snap.inner_type_key
),
config_type=self.config_type.inner_type, # type: ignore
stack=self.stack.for_array_index(index),
do_post_process=self.do_post_process,
)
def for_map(self, key: object) -> "TraversalContext":
return TraversalContext(
config_schema_snapshot=self.config_schema_snapshot,
config_type_snap=self.config_schema_snapshot.get_config_snap(
self.config_type_snap.inner_type_key
),
config_type=self.config_type.inner_type, # type: ignore
stack=self.stack.for_map_value(key),
do_post_process=self.do_post_process,
)
def for_field(self, field_def: Field, field_name: str) -> "TraversalContext":
return TraversalContext(
config_schema_snapshot=self.config_schema_snapshot,
config_type_snap=self.config_schema_snapshot.get_config_snap(field_def.config_type.key),
config_type=field_def.config_type,
stack=self.stack.for_field(field_name),
do_post_process=self.do_post_process,
)
def for_nullable_inner_type(self) -> "TraversalContext":
return TraversalContext(
config_schema_snapshot=self.config_schema_snapshot,
config_type_snap=self.config_schema_snapshot.get_config_snap(
self.config_type_snap.inner_type_key
),
config_type=self.config_type.inner_type, # type: ignore
stack=self.stack,
do_post_process=self.do_post_process,
)
def for_new_config_type(self, config_type: ConfigType) -> "TraversalContext":
return TraversalContext(
config_schema_snapshot=self.config_schema_snapshot,
config_type_snap=self.config_schema_snapshot.get_config_snap(config_type.key),
config_type=config_type,
stack=self.stack,
do_post_process=self.do_post_process,
)
| TraversalContext |
python | plotly__plotly.py | plotly/graph_objs/scatterternary/marker/colorbar/title/_font.py | {
"start": 233,
"end": 9984
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterternary.marker.colorbar.title"
_path_str = "scatterternary.marker.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterternary
.marker.colorbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterternary.marker.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.marker.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | django__django | django/core/management/commands/diffsettings.py | {
"start": 285,
"end": 3564
} | class ____(BaseCommand):
help = """Displays differences between the current settings.py and Django's
default settings."""
requires_system_checks = []
def add_arguments(self, parser):
parser.add_argument(
"--all",
action="store_true",
help=(
'Display all settings, regardless of their value. In "hash" '
'mode, default values are prefixed by "###".'
),
)
parser.add_argument(
"--default",
metavar="MODULE",
help=(
"The settings module to compare the current settings against. Leave "
"empty to compare against Django's default settings."
),
)
parser.add_argument(
"--output",
default="hash",
choices=("hash", "unified"),
help=(
"Selects the output format. 'hash' mode displays each changed "
"setting, with the settings that don't appear in the defaults "
"followed by ###. 'unified' mode prefixes the default setting "
"with a minus sign, followed by the changed setting prefixed "
"with a plus sign."
),
)
def handle(self, **options):
from django.conf import Settings, global_settings, settings
# Because settings are imported lazily, we need to explicitly load
# them.
if not settings.configured:
settings._setup()
user_settings = module_to_dict(settings._wrapped)
default = options["default"]
default_settings = module_to_dict(
Settings(default) if default else global_settings
)
output_func = {
"hash": self.output_hash,
"unified": self.output_unified,
}[options["output"]]
return "\n".join(output_func(user_settings, default_settings, **options))
def output_hash(self, user_settings, default_settings, **options):
# Inspired by Postfix's "postconf -n".
output = []
for key in sorted(user_settings):
if key not in default_settings:
output.append("%s = %s ###" % (key, user_settings[key]))
elif user_settings[key] != default_settings[key]:
output.append("%s = %s" % (key, user_settings[key]))
elif options["all"]:
output.append("### %s = %s" % (key, user_settings[key]))
return output
def output_unified(self, user_settings, default_settings, **options):
output = []
for key in sorted(user_settings):
if key not in default_settings:
output.append(
self.style.SUCCESS("+ %s = %s" % (key, user_settings[key]))
)
elif user_settings[key] != default_settings[key]:
output.append(
self.style.ERROR("- %s = %s" % (key, default_settings[key]))
)
output.append(
self.style.SUCCESS("+ %s = %s" % (key, user_settings[key]))
)
elif options["all"]:
output.append(" %s = %s" % (key, user_settings[key]))
return output
| Command |
python | apache__airflow | providers/fab/src/airflow/providers/fab/auth_manager/views/user.py | {
"start": 3902,
"end": 4055
} | class ____(MultiResourceUserMixin, UserOAuthModelView):
"""Customize permission names for FAB's builtin UserOAuthModelView."""
| CustomUserOAuthModelView |
python | huggingface__transformers | src/transformers/models/mpnet/tokenization_mpnet.py | {
"start": 1150,
"end": 9013
} | class ____(TokenizersBackend):
r"""
Construct a MPNet tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab (`dict`, *optional*):
Dictionary mapping tokens to their IDs. If not provided, an empty vocab is initialized.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab: Optional[dict] = None,
do_lower_case=True,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="[UNK]",
pad_token="<pad>",
mask_token="<mask>",
tokenize_chinese_chars=True,
strip_accents=None,
**kwargs,
):
# Initialize vocab
if vocab is not None:
self._vocab = (
{token: idx for idx, (token, _score) in enumerate(vocab)} if isinstance(vocab, list) else vocab
)
else:
self._vocab = {}
# Initialize the tokenizer with WordPiece model
self._tokenizer = Tokenizer(WordPiece(self._vocab, unk_token=str(unk_token)))
# Set normalizer based on MPNetConverter logic
self._tokenizer.normalizer = normalizers.BertNormalizer(
clean_text=True,
handle_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
lowercase=do_lower_case,
)
# Set pre-tokenizer
self._tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
# Set decoder
self._tokenizer.decoder = decoders.WordPiece(prefix="##")
# Store do_lower_case for later use
self.do_lower_case = do_lower_case
# Handle special token initialization
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
# Store for later use
tokenizer_object = self._tokenizer
super().__init__(
tokenizer_object=tokenizer_object,
do_lower_case=do_lower_case,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
**kwargs,
)
# Set post_processor after super().__init__ to ensure we have token IDs
cls_str = str(self.cls_token)
sep_str = str(self.sep_token)
cls_token_id = self.cls_token_id if self.cls_token_id is not None else 0
sep_token_id = self.sep_token_id if self.sep_token_id is not None else 2
self._tokenizer.post_processor = processors.TemplateProcessing(
single=f"{cls_str}:0 $A:0 {sep_str}:0",
pair=f"{cls_str}:0 $A:0 {sep_str}:0 {sep_str}:0 $B:1 {sep_str}:1", # MPNet uses two [SEP] tokens
special_tokens=[
(cls_str, cls_token_id),
(sep_str, sep_token_id),
],
)
@property
def mask_token(self) -> str:
"""
`str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
having been set.
MPNet tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
comprise the space before the *<mask>*.
"""
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@mask_token.setter
def mask_token(self, value):
"""
Overriding the default behavior of the mask token to have it eat the space before it.
This is needed to preserve backward compatibility with all the previously used models based on MPNet.
"""
# Mask token behave like a normal word, i.e. include the space before it
# So we set lstrip to True
value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
self._mask_token = value
__all__ = ["MPNetTokenizer"]
| MPNetTokenizer |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 63969,
"end": 65382
} | class ____(gdb.Command):
'Look up the given python variable name, and print it'
def __init__(self):
gdb.Command.__init__ (self,
"py-locals",
gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
name = str(args)
frame = Frame.get_selected_python_frame()
if not frame:
print('Unable to locate python frame')
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
print(UNABLE_READ_INFO_PYTHON_FRAME)
return
for pyop_name, pyop_value in pyop_frame.iter_locals():
print('%s = %s' % (
pyop_name.proxyval(set()),
pyop_value.get_truncated_repr(MAX_OUTPUT_LEN),
))
PyLocals()
##################################################################
## added, not in CPython
##################################################################
import re
import warnings
import tempfile
import functools
import textwrap
import itertools
import traceback
def dont_suppress_errors(function):
"*sigh*, readline"
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except Exception:
traceback.print_exc()
raise
return wrapper
| PyLocals |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/context/system.py | {
"start": 6255,
"end": 6789
} | class ____(NamedTuple):
"""The data about a run that is available during both orchestration and execution.
This object does not contain any information that requires access to user code, such as the
pipeline definition and resources.
"""
job: IJob
dagster_run: DagsterRun
instance: "DagsterInstance"
execution_plan: "ExecutionPlan"
raise_on_error: bool = False
retry_mode: RetryMode = RetryMode.DISABLED
step_dependency_config: StepDependencyConfig = StepDependencyConfig.default()
| PlanData |
python | dagster-io__dagster | python_modules/libraries/dagster-k8s/dagster_k8s/client.py | {
"start": 1876,
"end": 2494
} | class ____(Exception):
def __init__(self, *args, **kwargs):
k8s_api_exception = check.inst_param(
kwargs.pop("k8s_api_exception"), "k8s_api_exception", Exception
)
original_exc_info = check.tuple_param(kwargs.pop("original_exc_info"), "original_exc_info")
check.invariant(original_exc_info[0] is not None)
super().__init__(args[0], *args[1:], **kwargs)
self.k8s_api_exception = check.opt_inst_param(
k8s_api_exception, "k8s_api_exception", Exception
)
self.original_exc_info = original_exc_info
| DagsterK8sUnrecoverableAPIError |
python | python-jsonschema__jsonschema | jsonschema/exceptions.py | {
"start": 8231,
"end": 8832
} | class ____(Exception):
"""
A validator was asked to validate an instance against an unknown type.
"""
def __init__(self, type, instance, schema):
self.type = type
self.instance = instance
self.schema = schema
def __str__(self):
prefix = 16 * " "
return dedent(
f"""\
Unknown type {self.type!r} for validator with schema:
{_pretty(self.schema, prefix=prefix)}
While checking instance:
{_pretty(self.instance, prefix=prefix)}
""".rstrip(),
)
| UnknownType |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictReadOnly2.py | {
"start": 2885,
"end": 2933
} | class ____(TypedDict):
a: ReadOnly[float]
| TD12 |
python | pytorch__pytorch | torch/_dynamo/variables/user_defined.py | {
"start": 82152,
"end": 84675
} | class ____(UserDefinedObjectVariable):
"""
Represents user defined objects that are subclasses of dict/OrderedDict.
Internally, it uses a ConstDictVariable to represent the dict part of the
variable tracker. For everything else, it falls back to
UserDefinedObjectVariable.
"""
def __init__(self, value, dict_vt=None, **kwargs):
super().__init__(value, **kwargs)
self._dict_vt = dict_vt
if self._dict_vt is None:
assert self.source is None, (
"dict_vt must be constructed by builder.py when source is present"
)
self._dict_vt = variables.ConstDictVariable(
{}, type(value), mutation_type=ValueMutationNew()
)
self._dict_methods = dict_methods
def call_method(
self,
tx,
name,
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
method = self._maybe_get_baseclass_method(name)
if method in self._dict_methods:
# Dict subclasses can override __missing__ to provide fallback
# behavior instead of raising a KeyError. This is used, for example,
# by collections.Counter.
try:
return self._dict_vt.call_method(tx, name, args, kwargs)
except ObservedKeyError:
if (
name == "__getitem__"
and issubclass(self.python_type(), dict)
and self._maybe_get_baseclass_method("__missing__")
):
return self.call_method(tx, "__missing__", args, kwargs)
else:
raise
return super().call_method(tx, name, args, kwargs)
def unpack_var_sequence(self, tx):
if type(self.value).__iter__ in (
dict.__iter__,
collections.OrderedDict.__iter__,
):
return self._dict_vt.unpack_var_sequence(tx)
raise NotImplementedError
def is_underlying_vt_modified(self, side_effects):
return side_effects.is_modified(self._dict_vt)
@property
def user_cls(self):
return self._dict_vt.user_cls
@property
def items(self):
return self._dict_vt.items
def install_dict_keys_match_guard(self):
return self._dict_vt.install_dict_keys_match_guard()
def install_dict_contains_guard(self):
return self._dict_vt.install_dict_contains_guard()
| UserDefinedDictVariable |
python | PrefectHQ__prefect | src/prefect/client/schemas/sorting.py | {
"start": 2184,
"end": 2384
} | class ____(AutoEnum):
"""Defines variables sorting options."""
CREATED_DESC = "CREATED_DESC"
UPDATED_DESC = "UPDATED_DESC"
NAME_DESC = "NAME_DESC"
NAME_ASC = "NAME_ASC"
| VariableSort |
python | kamyu104__LeetCode-Solutions | Python/longest-increasing-path-in-a-matrix.py | {
"start": 1633,
"end": 2694
} | class ____(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
directions = [(0, -1), (0, 1), (-1, 0), (1, 0)]
def longestpath(matrix, i, j, max_lengths):
if max_lengths[i][j]:
return max_lengths[i][j]
max_depth = 0
for di, dj in directions:
x, y = i+di, j+dj
if 0 <= x < len(matrix) and 0 <= y < len(matrix[0]) and \
matrix[x][y] < matrix[i][j]:
max_depth = max(max_depth, longestpath(matrix, x, y, max_lengths))
max_lengths[i][j] = max_depth + 1
return max_lengths[i][j]
if not matrix:
return 0
result = 0
max_lengths = [[0 for _ in xrange(len(matrix[0]))] for _ in xrange(len(matrix))]
for i in xrange(len(matrix)):
for j in xrange(len(matrix[0])):
result = max(result, longestpath(matrix, i, j, max_lengths))
return result
| Solution2 |
python | redis__redis-py | redis/exceptions.py | {
"start": 1540,
"end": 1663
} | class ____(LockError):
"Error trying to extend or release a lock that is not owned (anymore)"
pass
| LockNotOwnedError |
python | catalyst-team__catalyst | catalyst/contrib/data/sampler_inbatch.py | {
"start": 561,
"end": 1666
} | class ____(ABC):
"""An abstraction of inbatch triplet sampler."""
@abstractmethod
def _check_input_labels(self, labels: List[int]) -> None:
"""
Check if the batch labels list is valid for the sampler.
We expect you to implement this method to guarantee correct
performance of sampling method. You can pass it
but we strongly do not recommend you to do it.
Args:
labels: labels of the samples in the batch;
list or Tensor of shape (batch_size;)
"""
raise NotImplementedError()
@abstractmethod
def sample(self, features: Tensor, labels: TLabels) -> TTriplets:
"""
This method includes the logic of sampling/selecting triplets.
Args:
features: tensor of features
labels: labels of the samples in the batch, list or Tensor
of shape (batch_size;)
Returns: the batch of triplets
Raises:
NotImplementedError: you should implement it
"""
raise NotImplementedError()
| IInbatchTripletSampler |
python | pytorch__pytorch | torch/_dynamo/variables/user_defined.py | {
"start": 84675,
"end": 87148
} | class ____(UserDefinedObjectVariable):
"""
Represents user defined objects that are subclasses of set.
Internally, it uses a SetVariable to represent the set part of the
variable tracker. For everything else, it falls back to
UserDefinedObjectVariable.
"""
def __init__(self, value, set_vt=None, **kwargs):
super().__init__(value, **kwargs)
self._set_vt = set_vt
python_type = set if isinstance(value, set) else frozenset
self._set_methods = set_methods if python_type is set else frozenset_methods
if self._set_vt is None:
assert self.source is None, (
"set_vt must be constructed by builder.py when source is present"
)
if python_type is set:
# set is initialized later
self._set_vt = variables.SetVariable(
{}, mutation_type=ValueMutationNew()
)
else:
init_args = kwargs.get("init_args", {})
tx = torch._dynamo.symbolic_convert.InstructionTranslator.current_tx()
self._set_vt = variables.BuiltinVariable(python_type).call_function(
tx, init_args, {}
)
def call_method(
self,
tx,
name,
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
method = self._maybe_get_baseclass_method(name)
if method in self._set_methods:
return self._set_vt.call_method(tx, name, args, kwargs)
return super().call_method(tx, name, args, kwargs)
def as_python_constant(self):
return self._set_vt.as_python_constant()
def unpack_var_sequence(self, tx):
if inspect.getattr_static(self.value, "__iter__") in (
set.__iter__,
frozenset.__iter__,
):
return self._set_vt.unpack_var_sequence(tx)
raise NotImplementedError
@property
def set_items(self):
return self._set_vt.set_items
@property
def items(self):
return self._set_vt.items
def is_underlying_vt_modified(self, side_effects):
return side_effects.is_modified(self._set_vt)
def install_dict_keys_match_guard(self):
return self._set_vt.install_dict_keys_match_guard()
def install_dict_contains_guard(self):
return self._set_vt.install_dict_contains_guard()
| UserDefinedSetVariable |
python | tensorflow__tensorflow | tensorflow/python/eager/small_constants_optimizer_test.py | {
"start": 1382,
"end": 5962
} | class ____(test.TestCase):
@test_util.run_v2_only
def test_grappler_optimization(self):
@polymorphic_function.function
def brancher(inp):
x = constant_op.constant(1)
for _ in range(1000):
if inp:
x = x + constant_op.constant(1)
else:
x = x + constant_op.constant(2)
return x
@polymorphic_function.function
def brancher_true():
left = constant_op.constant(True)
x = constant_op.constant(1)
for _ in range(1000):
if left:
x = x + constant_op.constant(1)
else:
x = x + constant_op.constant(2)
return x
x = constant_op.constant(True)
self.assertEqual(brancher(x), brancher_true()) # Trace each function once.
benchmark = min(timeit.repeat(lambda: brancher(x), repeat=5, number=100))
opt_benchmark = min(timeit.repeat(brancher_true, repeat=5, number=100))
# Constant folded execution is usually 15 - 20 times faster. Here we check
# for a 3x speedup to account for various machines the test might run on.
self.assertLess(opt_benchmark * 3, benchmark)
@test_util.run_v2_only
def test_small_constants_optimization_with_grappler(self):
def func(inp):
x = constant_op.constant(1)
for _ in range(1000):
if inp:
x = x + constant_op.constant(1)
else:
x = x + constant_op.constant(2)
return x
brancher = polymorphic_function.function(func)
brancher_opt = polymorphic_function.function(
func, experimental_attributes={'runtime_constant_optimization': True}
)
# Trace each function once.
with ops.device_v2('CPU'):
x = constant_op.constant(True)
self.assertEqual(brancher(x), brancher_opt(x))
benchmark = min(timeit.repeat(lambda: brancher(x), repeat=5, number=100))
opt_benchmark = min(
timeit.repeat(lambda: brancher_opt(x), repeat=5, number=100)
)
# Constant folded execution is usually 15 - 20 times faster. Here we check
# for a 2x speedup to account for various machines the test might run on.
# Specially the kokoro machines seems to run much slower.
self.assertLess(opt_benchmark * 2, benchmark)
@test_util.run_v2_only
@test_util.run_gpu_only
def test_small_constants_optimization_disabled(self):
@polymorphic_function.function(
experimental_attributes={'runtime_constant_optimization': True}
)
def func(inp):
return inp
x = constant_op.constant(True)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
(
'Expecting boolean tensor to be on host when'
' small_constants_optimizer is enabled.'
),
):
func(x)
@test_util.run_v2_only
def test_small_constants_optimization_invalid_input(self):
@polymorphic_function.function(
experimental_attributes={'runtime_constant_optimization': True}
)
def func(inp):
return inp
with ops.device_v2('CPU'):
x = constant_op.constant([True, True])
# runtime_constant_optimization should not crash when the tf.function
# is passed in a boolean tensor having > 1 element.
self.assertAllEqual(func(x), x)
@test_util.run_v2_only
def test_small_constants_optimization_without_grappler(self):
def func(inp):
x = constant_op.constant(1)
for _ in range(1000):
if inp:
x = x + constant_op.constant(1)
else:
x = x + constant_op.constant(2)
return x
brancher = polymorphic_function.function(func)
brancher_opt = polymorphic_function.function(
func, experimental_attributes={'runtime_constant_optimization': True}
)
# Trace each function once.
with ops.device_v2('CPU'):
x = constant_op.constant(True)
self.assertEqual(brancher(x), brancher_opt(x))
# Disable grappler and check that performance is still good with
# small_constants_optimizer.
with options({'disable_meta_optimizer': True}):
benchmark = min(timeit.repeat(lambda: brancher(x), repeat=5, number=100))
opt_benchmark = min(
timeit.repeat(lambda: brancher_opt(x), repeat=5, number=100)
)
# Constant folded execution is usually 150x times faster (against a base
# that has no grappler optimization). Here we check
# for a 5x speedup to account for various machines the test might run on.
# Specially the kokoro machines seems to run much slower.
self.assertLess(opt_benchmark * 5, benchmark)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| FunctionTest |
python | encode__django-rest-framework | tests/test_validation.py | {
"start": 8049,
"end": 8215
} | class ____(TestCase):
def test_regex_repr(self):
serializer_repr = repr(RegexSerializer())
assert serializer_repr == expected_repr
| TestRegexSerializer |
python | plotly__plotly.py | plotly/express/_core.py | {
"start": 2070,
"end": 116298
} | class ____(object):
__slots__ = [
"template",
"width",
"height",
"color_discrete_sequence",
"color_discrete_map",
"color_continuous_scale",
"symbol_sequence",
"symbol_map",
"line_dash_sequence",
"line_dash_map",
"pattern_shape_sequence",
"pattern_shape_map",
"size_max",
"category_orders",
"labels",
]
def __init__(self):
self.reset()
def reset(self):
self.template = None
self.width = None
self.height = None
self.color_discrete_sequence = None
self.color_discrete_map = {}
self.color_continuous_scale = None
self.symbol_sequence = None
self.symbol_map = {}
self.line_dash_sequence = None
self.line_dash_map = {}
self.pattern_shape_sequence = None
self.pattern_shape_map = {}
self.size_max = 20
self.category_orders = {}
self.labels = {}
defaults = PxDefaults()
del PxDefaults
MAPBOX_TOKEN = None
def set_mapbox_access_token(token):
"""
Arguments:
token: A Mapbox token to be used in `plotly.express.scatter_mapbox` and \
`plotly.express.line_mapbox` figures. See \
https://docs.mapbox.com/help/how-mapbox-works/access-tokens/ for more details
"""
global MAPBOX_TOKEN
MAPBOX_TOKEN = token
def get_trendline_results(fig):
"""
Extracts fit statistics for trendlines (when applied to figures generated with
the `trendline` argument set to `"ols"`).
Arguments:
fig: the output of a `plotly.express` charting call
Returns:
A `pandas.DataFrame` with a column "px_fit_results" containing the `statsmodels`
results objects, along with columns identifying the subset of the data the
trendline was fit on.
"""
return fig._px_trendlines
Mapping = namedtuple(
"Mapping",
[
"show_in_trace_name",
"grouper",
"val_map",
"sequence",
"updater",
"variable",
"facet",
],
)
TraceSpec = namedtuple("TraceSpec", ["constructor", "attrs", "trace_patch", "marginal"])
def get_label(args, column):
try:
return args["labels"][column]
except Exception:
return column
def invert_label(args, column):
"""Invert mapping.
Find key corresponding to value column in dict args["labels"].
Returns `column` if the value does not exist.
"""
reversed_labels = {value: key for (key, value) in args["labels"].items()}
try:
return reversed_labels[column]
except Exception:
return column
def _is_continuous(df: nw.DataFrame, col_name: str) -> bool:
if nw.dependencies.is_pandas_like_dataframe(df_native := df.to_native()):
# fastpath for pandas: Narwhals' Series.dtype has a bit of overhead, as it
# tries to distinguish between true "object" columns, and "string" columns
# disguised as "object". But here, we deal with neither.
return df_native[col_name].dtype.kind in "ifc"
return df.get_column(col_name).dtype.is_numeric()
def _to_unix_epoch_seconds(s: nw.Series) -> nw.Series:
dtype = s.dtype
if dtype == nw.Date:
return s.dt.timestamp("ms") / 1_000
if dtype == nw.Datetime:
if dtype.time_unit in ("s", "ms"):
return s.dt.timestamp("ms") / 1_000
elif dtype.time_unit == "us":
return s.dt.timestamp("us") / 1_000_000
elif dtype.time_unit == "ns":
return s.dt.timestamp("ns") / 1_000_000_000
else:
msg = "Unexpected dtype, please report a bug"
raise ValueError(msg)
else:
msg = f"Expected Date or Datetime, got {dtype}"
raise TypeError(msg)
def _generate_temporary_column_name(n_bytes, columns) -> str:
"""Wraps of Narwhals generate_temporary_column_name to generate a token
which is guaranteed to not be in columns, nor in [col + token for col in columns]
"""
counter = 0
while True:
# This is guaranteed to not be in columns by Narwhals
token = nw.generate_temporary_column_name(n_bytes, columns=columns)
# Now check that it is not in the [col + token for col in columns] list
if token not in {f"{c}{token}" for c in columns}:
return token
counter += 1
if counter > 100:
msg = (
"Internal Error: Plotly was not able to generate a column name with "
f"{n_bytes=} and not in {columns}.\n"
"Please report this to "
"https://github.com/plotly/plotly.py/issues/new and we will try to "
"replicate and fix it."
)
raise AssertionError(msg)
def get_decorated_label(args, column, role):
original_label = label = get_label(args, column)
if "histfunc" in args and (
(role == "z")
or (role == "x" and "orientation" in args and args["orientation"] == "h")
or (role == "y" and "orientation" in args and args["orientation"] == "v")
):
histfunc = args["histfunc"] or "count"
if histfunc != "count":
label = "%s of %s" % (histfunc, label)
else:
label = "count"
if "histnorm" in args and args["histnorm"] is not None:
if label == "count":
label = args["histnorm"]
else:
histnorm = args["histnorm"]
if histfunc == "sum":
if histnorm == "probability":
label = "%s of %s" % ("fraction", label)
elif histnorm == "percent":
label = "%s of %s" % (histnorm, label)
else:
label = "%s weighted by %s" % (histnorm, original_label)
elif histnorm == "probability":
label = "%s of sum of %s" % ("fraction", label)
elif histnorm == "percent":
label = "%s of sum of %s" % ("percent", label)
else:
label = "%s of %s" % (histnorm, label)
if "barnorm" in args and args["barnorm"] is not None:
label = "%s (normalized as %s)" % (label, args["barnorm"])
return label
def make_mapping(args, variable):
if variable == "line_group" or variable == "animation_frame":
return Mapping(
show_in_trace_name=False,
grouper=args[variable],
val_map={},
sequence=[""],
variable=variable,
updater=(lambda trace, v: v),
facet=None,
)
if variable == "facet_row" or variable == "facet_col":
letter = "x" if variable == "facet_col" else "y"
return Mapping(
show_in_trace_name=False,
variable=letter,
grouper=args[variable],
val_map={},
sequence=[i for i in range(1, 1000)],
updater=(lambda trace, v: v),
facet="row" if variable == "facet_row" else "col",
)
(parent, variable, *other_variables) = variable.split(".")
vprefix = variable
arg_name = variable
if variable == "color":
vprefix = "color_discrete"
if variable == "dash":
arg_name = "line_dash"
vprefix = "line_dash"
if variable in ["pattern", "shape"]:
arg_name = "pattern_shape"
vprefix = "pattern_shape"
if args[vprefix + "_map"] == "identity":
val_map = IdentityMap()
else:
val_map = args[vprefix + "_map"].copy()
return Mapping(
show_in_trace_name=True,
variable=variable,
grouper=args[arg_name],
val_map=val_map,
sequence=args[vprefix + "_sequence"],
updater=lambda trace, v: trace.update(
{parent: {".".join([variable] + other_variables): v}}
),
facet=None,
)
def make_trace_kwargs(args, trace_spec, trace_data, mapping_labels, sizeref):
"""Populates a dict with arguments to update trace
Parameters
----------
args : dict
args to be used for the trace
trace_spec : NamedTuple
which kind of trace to be used (has constructor, marginal etc.
attributes)
trace_data : pandas DataFrame
data
mapping_labels : dict
to be used for hovertemplate
sizeref : float
marker sizeref
Returns
-------
trace_patch : dict
dict to be used to update trace
fit_results : dict
fit information to be used for trendlines
"""
trace_data: nw.DataFrame
df: nw.DataFrame = args["data_frame"]
if "line_close" in args and args["line_close"]:
trace_data = nw.concat([trace_data, trace_data.head(1)], how="vertical")
trace_patch = trace_spec.trace_patch.copy() or {}
fit_results = None
hover_header = ""
for attr_name in trace_spec.attrs:
attr_value = args[attr_name]
attr_label = get_decorated_label(args, attr_value, attr_name)
if attr_name == "dimensions":
dims = [
(name, trace_data.get_column(name))
for name in trace_data.columns
if ((not attr_value) or (name in attr_value))
and (trace_spec.constructor != go.Parcoords or _is_continuous(df, name))
and (
trace_spec.constructor != go.Parcats
or (attr_value is not None and name in attr_value)
or nw.to_py_scalar(df.get_column(name).n_unique())
<= args["dimensions_max_cardinality"]
)
]
trace_patch["dimensions"] = [
dict(label=get_label(args, name), values=column)
for (name, column) in dims
]
if trace_spec.constructor == go.Splom:
for d in trace_patch["dimensions"]:
d["axis"] = dict(matches=True)
mapping_labels["%{xaxis.title.text}"] = "%{x}"
mapping_labels["%{yaxis.title.text}"] = "%{y}"
elif attr_value is not None:
if attr_name == "size":
if "marker" not in trace_patch:
trace_patch["marker"] = dict()
trace_patch["marker"]["size"] = trace_data.get_column(attr_value)
trace_patch["marker"]["sizemode"] = "area"
trace_patch["marker"]["sizeref"] = sizeref
mapping_labels[attr_label] = "%{marker.size}"
elif attr_name == "marginal_x":
if trace_spec.constructor == go.Histogram:
mapping_labels["count"] = "%{y}"
elif attr_name == "marginal_y":
if trace_spec.constructor == go.Histogram:
mapping_labels["count"] = "%{x}"
elif attr_name == "trendline":
if (
args["x"]
and args["y"]
and len(
trace_data.select(nw.col(args["x"], args["y"])).drop_nulls()
)
> 1
):
# sorting is bad but trace_specs with "trendline" have no other attrs
sorted_trace_data = trace_data.sort(by=args["x"], nulls_last=True)
y = sorted_trace_data.get_column(args["y"])
x = sorted_trace_data.get_column(args["x"])
if x.dtype == nw.Datetime or x.dtype == nw.Date:
# convert to unix epoch seconds
x = _to_unix_epoch_seconds(x)
elif not x.dtype.is_numeric():
try:
x = x.cast(nw.Float64())
except ValueError:
raise ValueError(
"Could not convert value of 'x' ('%s') into a numeric type. "
"If 'x' contains stringified dates, please convert to a datetime column."
% args["x"]
)
if not y.dtype.is_numeric():
try:
y = y.cast(nw.Float64())
except ValueError:
raise ValueError(
"Could not convert value of 'y' into a numeric type."
)
# preserve original values of "x" in case they're dates
# otherwise numpy/pandas can mess with the timezones
# NB this means trendline functions must output one-to-one with the input series
# i.e. we can't do resampling, because then the X values might not line up!
non_missing = ~(x.is_null() | y.is_null())
trace_patch["x"] = sorted_trace_data.filter(non_missing).get_column(
args["x"]
)
if (
trace_patch["x"].dtype == nw.Datetime
and trace_patch["x"].dtype.time_zone is not None
):
# Remove time zone so that local time is displayed
trace_patch["x"] = (
trace_patch["x"].dt.replace_time_zone(None).to_numpy()
)
else:
trace_patch["x"] = trace_patch["x"].to_numpy()
trendline_function = trendline_functions[attr_value]
y_out, hover_header, fit_results = trendline_function(
args["trendline_options"],
sorted_trace_data.get_column(args["x"]), # narwhals series
x.to_numpy(), # numpy array
y.to_numpy(), # numpy array
args["x"],
args["y"],
non_missing.to_numpy(), # numpy array
)
assert len(y_out) == len(trace_patch["x"]), (
"missing-data-handling failure in trendline code"
)
trace_patch["y"] = y_out
mapping_labels[get_label(args, args["x"])] = "%{x}"
mapping_labels[get_label(args, args["y"])] = "%{y} <b>(trend)</b>"
elif attr_name.startswith("error"):
error_xy = attr_name[:7]
arr = "arrayminus" if attr_name.endswith("minus") else "array"
if error_xy not in trace_patch:
trace_patch[error_xy] = {}
trace_patch[error_xy][arr] = trace_data.get_column(attr_value)
elif attr_name == "custom_data":
if len(attr_value) > 0:
# here we store a data frame in customdata, and it's serialized
# as a list of row lists, which is what we want
trace_patch["customdata"] = trace_data.select(nw.col(attr_value))
elif attr_name == "hover_name":
if trace_spec.constructor not in [
go.Histogram,
go.Histogram2d,
go.Histogram2dContour,
]:
trace_patch["hovertext"] = trace_data.get_column(attr_value)
if hover_header == "":
hover_header = "<b>%{hovertext}</b><br><br>"
elif attr_name == "hover_data":
if trace_spec.constructor not in [
go.Histogram,
go.Histogram2d,
go.Histogram2dContour,
]:
hover_is_dict = isinstance(attr_value, dict)
customdata_cols = args.get("custom_data") or []
for col in attr_value:
if hover_is_dict and not attr_value[col]:
continue
if col in [
args.get("x"),
args.get("y"),
args.get("z"),
args.get("base"),
]:
continue
try:
position = args["custom_data"].index(col)
except (ValueError, AttributeError, KeyError):
position = len(customdata_cols)
customdata_cols.append(col)
attr_label_col = get_decorated_label(args, col, None)
mapping_labels[attr_label_col] = "%%{customdata[%d]}" % (
position
)
if len(customdata_cols) > 0:
# here we store a data frame in customdata, and it's serialized
# as a list of row lists, which is what we want
# dict.fromkeys(customdata_cols) allows to deduplicate column
# names, yet maintaining the original order.
trace_patch["customdata"] = trace_data.select(
*[nw.col(c) for c in dict.fromkeys(customdata_cols)]
)
elif attr_name == "color":
if trace_spec.constructor in [
go.Choropleth,
go.Choroplethmap,
go.Choroplethmapbox,
]:
trace_patch["z"] = trace_data.get_column(attr_value)
trace_patch["coloraxis"] = "coloraxis1"
mapping_labels[attr_label] = "%{z}"
elif trace_spec.constructor in [
go.Sunburst,
go.Treemap,
go.Icicle,
go.Pie,
go.Funnelarea,
]:
if "marker" not in trace_patch:
trace_patch["marker"] = dict()
if args.get("color_is_continuous"):
trace_patch["marker"]["colors"] = trace_data.get_column(
attr_value
)
trace_patch["marker"]["coloraxis"] = "coloraxis1"
mapping_labels[attr_label] = "%{color}"
else:
trace_patch["marker"]["colors"] = []
if args["color_discrete_map"] is not None:
mapping = args["color_discrete_map"].copy()
else:
mapping = {}
for cat in trace_data.get_column(attr_value).to_list():
# although trace_data.get_column(attr_value) is a Narwhals
# Series, which is an iterable, explicitly calling a to_list()
# makes sure that the elements we loop over are python objects
# in all cases, since depending on the backend this may not be
# the case (e.g. PyArrow)
if mapping.get(cat) is None:
mapping[cat] = args["color_discrete_sequence"][
len(mapping) % len(args["color_discrete_sequence"])
]
trace_patch["marker"]["colors"].append(mapping[cat])
else:
colorable = "marker"
if trace_spec.constructor in [go.Parcats, go.Parcoords]:
colorable = "line"
if colorable not in trace_patch:
trace_patch[colorable] = dict()
trace_patch[colorable]["color"] = trace_data.get_column(attr_value)
trace_patch[colorable]["coloraxis"] = "coloraxis1"
mapping_labels[attr_label] = "%%{%s.color}" % colorable
elif attr_name == "animation_group":
trace_patch["ids"] = trace_data.get_column(attr_value)
elif attr_name == "locations":
trace_patch[attr_name] = trace_data.get_column(attr_value)
mapping_labels[attr_label] = "%{location}"
elif attr_name == "values":
trace_patch[attr_name] = trace_data.get_column(attr_value)
_label = "value" if attr_label == "values" else attr_label
mapping_labels[_label] = "%{value}"
elif attr_name == "parents":
trace_patch[attr_name] = trace_data.get_column(attr_value)
_label = "parent" if attr_label == "parents" else attr_label
mapping_labels[_label] = "%{parent}"
elif attr_name == "ids":
trace_patch[attr_name] = trace_data.get_column(attr_value)
_label = "id" if attr_label == "ids" else attr_label
mapping_labels[_label] = "%{id}"
elif attr_name == "names":
if trace_spec.constructor in [
go.Sunburst,
go.Treemap,
go.Icicle,
go.Pie,
go.Funnelarea,
]:
trace_patch["labels"] = trace_data.get_column(attr_value)
_label = "label" if attr_label == "names" else attr_label
mapping_labels[_label] = "%{label}"
else:
trace_patch[attr_name] = trace_data.get_column(attr_value)
else:
trace_patch[attr_name] = trace_data.get_column(attr_value)
mapping_labels[attr_label] = "%%{%s}" % attr_name
elif (trace_spec.constructor == go.Histogram and attr_name in ["x", "y"]) or (
trace_spec.constructor in [go.Histogram2d, go.Histogram2dContour]
and attr_name == "z"
):
# ensure that stuff like "count" gets into the hoverlabel
mapping_labels[attr_label] = "%%{%s}" % attr_name
if trace_spec.constructor not in [go.Parcoords, go.Parcats]:
# Modify mapping_labels according to hover_data keys
# if hover_data is a dict
mapping_labels_copy = OrderedDict(mapping_labels)
if args["hover_data"] and isinstance(args["hover_data"], dict):
for k, v in mapping_labels.items():
# We need to invert the mapping here
k_args = invert_label(args, k)
if k_args in args["hover_data"]:
formatter = args["hover_data"][k_args][0]
if formatter:
if isinstance(formatter, str):
mapping_labels_copy[k] = v.replace("}", "%s}" % formatter)
else:
_ = mapping_labels_copy.pop(k)
hover_lines = [k + "=" + v for k, v in mapping_labels_copy.items()]
trace_patch["hovertemplate"] = hover_header + "<br>".join(hover_lines)
trace_patch["hovertemplate"] += "<extra></extra>"
return trace_patch, fit_results
def configure_axes(args, constructor, fig, orders):
    """Dispatch axis configuration to the handler matching the trace type.

    Trace constructors without a registered handler leave the figure's axes
    untouched.
    """
    axis_handlers = {
        go.Scatter3d: configure_3d_axes,
        go.Scatterternary: configure_ternary_axes,
        go.Scatterpolar: configure_polar_axes,
        go.Scatterpolargl: configure_polar_axes,
        go.Barpolar: configure_polar_axes,
        go.Scattermap: configure_map,
        go.Choroplethmap: configure_map,
        go.Densitymap: configure_map,
        go.Scattermapbox: configure_mapbox,
        go.Choroplethmapbox: configure_mapbox,
        go.Densitymapbox: configure_mapbox,
        go.Scattergeo: configure_geo,
        go.Choropleth: configure_geo,
    }
    # all cartesian constructors share one handler
    axis_handlers.update((c, configure_cartesian_axes) for c in cartesians)
    handler = axis_handlers.get(constructor)
    if handler is not None:
        handler(args, fig, orders)
def set_cartesian_axis_opts(args, axis, letter, orders):
    """Apply log scaling, explicit range and category ordering to one
    cartesian axis (`letter` is "x" or "y")."""
    is_log = args.get("log_" + letter)
    axis_range = args.get("range_" + letter)
    if is_log:
        axis["type"] = "log"
        if axis_range:
            # log axes express their range as log10 of the data bounds
            axis["range"] = [math.log(bound, 10) for bound in axis_range]
    elif axis_range:
        axis["range"] = axis_range
    if args[letter] in orders:
        ordered = orders[args[letter]]
        axis["categoryorder"] = "array"
        if isinstance(axis, go.layout.XAxis):
            axis["categoryarray"] = ordered
        else:
            # y axes are drawn bottom-up, so reverse to read top-down
            axis["categoryarray"] = list(reversed(ordered))
def configure_cartesian_marginal_axes(args, fig, orders):
    """Configure axes of a cartesian figure that includes marginal subplots.

    Hides tick labels/lines on the marginal axes, matches them to the main
    axes, applies log/grid settings, and places axis titles only on the
    non-marginal subplots.
    """
    nrows = len(fig._grid_ref)
    ncols = len(fig._grid_ref[0])
    # Set y-axis titles and axis options in the left-most column
    for yaxis in fig.select_yaxes(col=1):
        set_cartesian_axis_opts(args, yaxis, "y", orders)
    # Set x-axis titles and axis options in the bottom-most row
    for xaxis in fig.select_xaxes(row=1):
        set_cartesian_axis_opts(args, xaxis, "x", orders)
    # Configure axis ticks on marginal subplots
    if args["marginal_x"]:
        fig.update_yaxes(
            showticklabels=False, showline=False, ticks="", range=None, row=nrows
        )
        if args["template"].layout.yaxis.showgrid is None:
            # only histogram marginals get a y-grid by default
            fig.update_yaxes(showgrid=args["marginal_x"] == "histogram", row=nrows)
        if args["template"].layout.xaxis.showgrid is None:
            fig.update_xaxes(showgrid=True, row=nrows)
    if args["marginal_y"]:
        fig.update_xaxes(
            showticklabels=False, showline=False, ticks="", range=None, col=ncols
        )
        if args["template"].layout.xaxis.showgrid is None:
            # only histogram marginals get an x-grid by default
            fig.update_xaxes(showgrid=args["marginal_y"] == "histogram", col=ncols)
        if args["template"].layout.yaxis.showgrid is None:
            fig.update_yaxes(showgrid=True, col=ncols)
    # Add axis titles to non-marginal subplots
    y_title = get_decorated_label(args, args["y"], "y")
    if args["marginal_x"]:
        fig.update_yaxes(title_text=y_title, row=1, col=1)
    else:
        for row in range(1, nrows + 1):
            fig.update_yaxes(title_text=y_title, row=row, col=1)
    x_title = get_decorated_label(args, args["x"], "x")
    if args["marginal_y"]:
        fig.update_xaxes(title_text=x_title, row=1, col=1)
    else:
        for col in range(1, ncols + 1):
            fig.update_xaxes(title_text=x_title, row=1, col=col)
    # Configure axis type across all x-axes
    if "log_x" in args and args["log_x"]:
        fig.update_xaxes(type="log")
    # Configure axis type across all y-axes
    if "log_y" in args and args["log_y"]:
        fig.update_yaxes(type="log")
    # Configure matching and axis type for marginal y-axes
    # NOTE(review): even rows/cols appear to hold the marginal subplots here
    # — confirm against the grid built by the figure constructor.
    matches_y = "y" + str(ncols + 1)
    if args["marginal_x"]:
        for row in range(2, nrows + 1, 2):
            fig.update_yaxes(matches=matches_y, type=None, row=row)
    if args["marginal_y"]:
        for col in range(2, ncols + 1, 2):
            fig.update_xaxes(matches="x2", type=None, col=col)
def configure_cartesian_axes(args, fig, orders):
    """Set titles, category ordering, log/date axis types and ranges on the
    cartesian axes of `fig`.

    Delegates entirely to the marginal variant when marginal subplots are
    requested.
    """
    if ("marginal_x" in args and args["marginal_x"]) or (
        "marginal_y" in args and args["marginal_y"]
    ):
        configure_cartesian_marginal_axes(args, fig, orders)
        return
    # Set y-axis titles and axis options in the left-most column
    y_title = get_decorated_label(args, args["y"], "y")
    for yaxis in fig.select_yaxes(col=1):
        yaxis.update(title_text=y_title)
        set_cartesian_axis_opts(args, yaxis, "y", orders)
    # Set x-axis titles and axis options in the bottom-most row
    x_title = get_decorated_label(args, args["x"], "x")
    for xaxis in fig.select_xaxes(row=1):
        # timeline figures do not get an x-axis title here
        if "is_timeline" not in args:
            xaxis.update(title_text=x_title)
        set_cartesian_axis_opts(args, xaxis, "x", orders)
    # Configure axis type across all x-axes
    if "log_x" in args and args["log_x"]:
        fig.update_xaxes(type="log")
    # Configure axis type across all y-axes
    if "log_y" in args and args["log_y"]:
        fig.update_yaxes(type="log")
    if "is_timeline" in args:
        fig.update_xaxes(type="date")
    if "ecdfmode" in args:
        # ECDF plots anchor the cumulative axis at zero
        if args["orientation"] == "v":
            fig.update_yaxes(rangemode="tozero")
        else:
            fig.update_xaxes(rangemode="tozero")
def configure_ternary_axes(args, fig, orders):
    """Label the a/b/c axes of every ternary subplot (`orders` is unused)."""
    axis_titles = {
        axis_name: dict(title_text=get_label(args, args[key]))
        for key, axis_name in (("a", "aaxis"), ("b", "baxis"), ("c", "caxis"))
    }
    fig.update_ternaries(**axis_titles)
def configure_polar_axes(args, fig, orders):
    """Configure polar subplots: angular direction/rotation, category
    ordering, radial log scaling and ranges, and the angular sector."""
    patch = dict(
        angularaxis=dict(direction=args["direction"], rotation=args["start_angle"]),
        radialaxis=dict(),
    )
    for var, axis_name in (("r", "radialaxis"), ("theta", "angularaxis")):
        if args[var] in orders:
            axis = patch[axis_name]
            axis["categoryorder"] = "array"
            axis["categoryarray"] = orders[args[var]]
    radial = patch["radialaxis"]
    if args["log_r"]:
        radial["type"] = "log"
        if args["range_r"]:
            # log axes take log10-space range bounds
            radial["range"] = [math.log(bound, 10) for bound in args["range_r"]]
    elif args["range_r"]:
        radial["range"] = args["range_r"]
    if args["range_theta"]:
        patch["sector"] = args["range_theta"]
    fig.update_polars(patch)
def configure_3d_axes(args, fig, orders):
    """Configure the x/y/z axes of 3D scenes: titles, log scaling, explicit
    ranges and category ordering."""
    patch = {}
    for letter in ("x", "y", "z"):
        axis = dict(title_text=get_label(args, args[letter]))
        if args["log_" + letter]:
            axis["type"] = "log"
            if args["range_" + letter]:
                # log axes take log10-space range bounds
                axis["range"] = [
                    math.log(bound, 10) for bound in args["range_" + letter]
                ]
        elif args["range_" + letter]:
            axis["range"] = args["range_" + letter]
        if args[letter] in orders:
            axis["categoryorder"] = "array"
            axis["categoryarray"] = orders[args[letter]]
        patch[letter + "axis"] = axis
    fig.update_scenes(patch)
def configure_mapbox(args, fig, orders):
    """Configure mapbox subplots: access token, center, zoom and style
    (`orders` is unused)."""
    center = args["center"]
    have_coords = "lat" in args and "lon" in args
    if not center and have_coords:
        # default to the mean coordinate of the plotted points
        df = args["data_frame"]
        center = dict(lat=df[args["lat"]].mean(), lon=df[args["lon"]].mean())
    fig.update_mapboxes(
        accesstoken=MAPBOX_TOKEN,
        center=center,
        zoom=args["zoom"],
        style=args["mapbox_style"],
    )
def configure_map(args, fig, orders):
    """Configure tile-map subplots: center, zoom and style (`orders` is
    unused)."""
    center = args["center"]
    have_coords = "lat" in args and "lon" in args
    if not center and have_coords:
        # default to the mean coordinate of the plotted points
        df = args["data_frame"]
        center = dict(lat=df[args["lat"]].mean(), lon=df[args["lon"]].mean())
    fig.update_maps(center=center, zoom=args["zoom"], style=args["map_style"])
def configure_geo(args, fig, orders):
    """Configure geo subplots from the relevant px arguments (`orders` is
    unused)."""
    geo_options = dict(
        center=args["center"],
        scope=args["scope"],
        fitbounds=args["fitbounds"],
        visible=args["basemap_visible"],
        projection=dict(type=args["projection"]),
    )
    fig.update_geos(**geo_options)
def configure_animation_controls(args, constructor, fig):
    """Attach play/stop buttons and a frame slider to `fig` when an
    `animation_frame` argument was given and the figure has more than one
    frame."""
    def frame_args(duration):
        # Shared animate() options; Scatter frames skip the full redraw.
        return {
            "frame": {"duration": duration, "redraw": constructor != go.Scatter},
            "mode": "immediate",
            "fromcurrent": True,
            "transition": {"duration": duration, "easing": "linear"},
        }
    if "animation_frame" in args and args["animation_frame"] and len(fig.frames) > 1:
        fig.layout.updatemenus = [
            {
                "buttons": [
                    {
                        # play button
                        "args": [None, frame_args(500)],
                        "label": "▶",
                        "method": "animate",
                    },
                    {
                        # stop button
                        "args": [[None], frame_args(0)],
                        "label": "◼",
                        "method": "animate",
                    },
                ],
                "direction": "left",
                "pad": {"r": 10, "t": 70},
                "showactive": False,
                "type": "buttons",
                "x": 0.1,
                "xanchor": "right",
                "y": 0,
                "yanchor": "top",
            }
        ]
        # one slider step per frame, labelled by the frame's name
        fig.layout.sliders = [
            {
                "active": 0,
                "yanchor": "top",
                "xanchor": "left",
                "currentvalue": {
                    "prefix": get_label(args, args["animation_frame"]) + "="
                },
                "pad": {"b": 10, "t": 60},
                "len": 0.9,
                "x": 0.1,
                "y": 0,
                "steps": [
                    {
                        "args": [[f.name], frame_args(0)],
                        "label": f.name,
                        "method": "animate",
                    }
                    for f in fig.frames
                ],
            }
        ]
def make_trace_spec(args, constructor, attrs, trace_patch):
    """Build the list of TraceSpec objects for one px figure.

    Returns the base trace spec first, followed by any marginal trace specs
    (histogram/violin/box/rug) and, when trendlines are scoped per-trace, a
    trendline spec.
    """
    if constructor in [go.Scatter, go.Scatterpolar]:
        # Swap in the WebGL trace types when explicitly requested, or in
        # "auto" render mode for large, non-animated, non-spline datasets.
        if "render_mode" in args and (
            args["render_mode"] == "webgl"
            or (
                args["render_mode"] == "auto"
                and len(args["data_frame"]) > 1000
                and args.get("line_shape") != "spline"
                and args["animation_frame"] is None
            )
        ):
            if constructor == go.Scatter:
                constructor = go.Scattergl
                if "orientation" in trace_patch:
                    del trace_patch["orientation"]
            else:
                constructor = go.Scatterpolargl
    # Create base trace specification
    result = [TraceSpec(constructor, attrs, trace_patch, None)]
    # Add marginal trace specifications
    for letter in ["x", "y"]:
        if "marginal_" + letter in args and args["marginal_" + letter]:
            trace_spec = None
            axis_map = dict(
                xaxis="x1" if letter == "x" else "x2",
                yaxis="y1" if letter == "y" else "y2",
            )
            if args["marginal_" + letter] == "histogram":
                trace_spec = TraceSpec(
                    constructor=go.Histogram,
                    attrs=[letter, "marginal_" + letter],
                    trace_patch=dict(opacity=0.5, bingroup=letter, **axis_map),
                    marginal=letter,
                )
            elif args["marginal_" + letter] == "violin":
                trace_spec = TraceSpec(
                    constructor=go.Violin,
                    attrs=[letter, "hover_name", "hover_data"],
                    trace_patch=dict(scalegroup=letter),
                    marginal=letter,
                )
            elif args["marginal_" + letter] == "box":
                trace_spec = TraceSpec(
                    constructor=go.Box,
                    attrs=[letter, "hover_name", "hover_data"],
                    trace_patch=dict(notched=True),
                    marginal=letter,
                )
            elif args["marginal_" + letter] == "rug":
                # a rug is a fully-transparent box plot showing only its points
                symbols = {"x": "line-ns-open", "y": "line-ew-open"}
                trace_spec = TraceSpec(
                    constructor=go.Box,
                    attrs=[letter, "hover_name", "hover_data"],
                    trace_patch=dict(
                        fillcolor="rgba(255,255,255,0)",
                        line={"color": "rgba(255,255,255,0)"},
                        boxpoints="all",
                        jitter=0,
                        hoveron="points",
                        marker={"symbol": symbols[letter]},
                    ),
                    marginal=letter,
                )
            # give marginals a fixed marker color from the continuous scale
            if "color" in attrs or "color" not in args:
                if "marker" not in trace_spec.trace_patch:
                    trace_spec.trace_patch["marker"] = dict()
                first_default_color = args["color_continuous_scale"][0]
                trace_spec.trace_patch["marker"]["color"] = first_default_color
            result.append(trace_spec)
    # Add trendline trace specifications
    if args.get("trendline") and args.get("trendline_scope", "trace") == "trace":
        result.append(make_trendline_spec(args, constructor))
    return result
def make_trendline_spec(args, constructor):
    """Build the TraceSpec for a trendline overlay trace.

    Keeps the WebGL renderer when the base trace already uses it; otherwise
    falls back to a plain SVG scatter.
    """
    if constructor == go.Scattergl:  # could be contour
        trend_constructor = go.Scattergl
    else:
        trend_constructor = go.Scatter
    spec = TraceSpec(
        constructor=trend_constructor,
        attrs=["trendline"],
        trace_patch=dict(mode="lines"),
        marginal=None,
    )
    if args["trendline_color_override"]:
        spec.trace_patch["line"] = dict(color=args["trendline_color_override"])
    return spec
def one_group(x):
    """Grouping key that maps every value to a single, unnamed group."""
    return ""
def apply_default_cascade(args):
    """Fill in unspecified `args` in place, cascading through px.defaults,
    then the active template, then hard-coded fallbacks."""
    # first we apply px.defaults to unspecified args
    for param in defaults.__slots__:
        if param in args and args[param] is None:
            args[param] = getattr(defaults, param)
    # load the default template if set, otherwise "plotly"
    if args["template"] is None:
        if pio.templates.default is not None:
            args["template"] = pio.templates.default
        else:
            args["template"] = "plotly"
    try:
        # retrieve the actual template if we were given a name
        args["template"] = pio.templates[args["template"]]
    except Exception:
        # otherwise try to build a real template
        args["template"] = go.layout.Template(args["template"])
    # if colors not set explicitly or in px.defaults, defer to a template
    # if the template doesn't have one, we set some final fallback defaults
    if "color_continuous_scale" in args:
        if (
            args["color_continuous_scale"] is None
            and args["template"].layout.colorscale.sequential
        ):
            args["color_continuous_scale"] = [
                x[1] for x in args["template"].layout.colorscale.sequential
            ]
        if args["color_continuous_scale"] is None:
            args["color_continuous_scale"] = sequential.Viridis
    if "color_discrete_sequence" in args:
        if args["color_discrete_sequence"] is None and args["template"].layout.colorway:
            args["color_discrete_sequence"] = args["template"].layout.colorway
        if args["color_discrete_sequence"] is None:
            args["color_discrete_sequence"] = qualitative.D3
    # if symbol_sequence/line_dash_sequence not set explicitly or in px.defaults,
    # see if we can defer to template. If not, set reasonable defaults
    if "symbol_sequence" in args:
        if args["symbol_sequence"] is None and args["template"].data.scatter:
            args["symbol_sequence"] = [
                scatter.marker.symbol for scatter in args["template"].data.scatter
            ]
        if not args["symbol_sequence"] or not any(args["symbol_sequence"]):
            args["symbol_sequence"] = ["circle", "diamond", "square", "x", "cross"]
    if "line_dash_sequence" in args:
        if args["line_dash_sequence"] is None and args["template"].data.scatter:
            args["line_dash_sequence"] = [
                scatter.line.dash for scatter in args["template"].data.scatter
            ]
        if not args["line_dash_sequence"] or not any(args["line_dash_sequence"]):
            args["line_dash_sequence"] = [
                "solid",
                "dot",
                "dash",
                "longdash",
                "dashdot",
                "longdashdot",
            ]
    if "pattern_shape_sequence" in args:
        if args["pattern_shape_sequence"] is None and args["template"].data.bar:
            args["pattern_shape_sequence"] = [
                bar.marker.pattern.shape for bar in args["template"].data.bar
            ]
        if not args["pattern_shape_sequence"] or not any(
            args["pattern_shape_sequence"]
        ):
            args["pattern_shape_sequence"] = ["", "/", "\\", "x", "+", "."]
def _check_name_not_reserved(field_name, reserved_names):
if field_name not in reserved_names:
return field_name
else:
raise NameError(
"A name conflict was encountered for argument '%s'. "
"A column or index with name '%s' is ambiguous." % (field_name, field_name)
)
def _get_reserved_col_names(args):
    """
    This function builds a list of columns of the data_frame argument used
    as arguments, either as str/int arguments or given as columns
    (pandas series type).
    """
    df: nw.DataFrame = args["data_frame"]
    reserved_names = set()
    for field in args:
        if field not in all_attrables:
            continue
        names = args[field] if field in array_attrables else [args[field]]
        if names is None:
            continue
        for arg in names:
            if arg is None:
                continue
            elif isinstance(arg, str):  # no need to add ints since kw arg are not ints
                reserved_names.add(arg)
            elif nw.dependencies.is_into_series(arg):
                # a Series argument only reserves its name when it equals the
                # data_frame column of the same name
                arg_series = nw.from_native(arg, series_only=True)
                arg_name = arg_series.name
                if arg_name and arg_name in df.columns:
                    in_df = (arg_series == df.get_column(arg_name)).all()
                    if in_df:
                        reserved_names.add(arg_name)
            elif arg is nw.maybe_get_index(df) and arg.name is not None:
                # the data_frame's own (named) index reserves its name too
                reserved_names.add(arg.name)
    return reserved_names
def _is_col_list(columns, arg, is_pd_like, native_namespace):
"""Returns True if arg looks like it's a list of columns or references to columns
in df_input, and False otherwise (in which case it's assumed to be a single column
or reference to a column).
"""
if arg is None or isinstance(arg, str) or isinstance(arg, int):
return False
if is_pd_like and isinstance(arg, native_namespace.MultiIndex):
return False # just to keep existing behaviour for now
try:
iter(arg)
except TypeError:
return False # not iterable
for c in arg:
if isinstance(c, str) or isinstance(c, int):
if columns is None or c not in columns:
return False
else:
try:
iter(c)
except TypeError:
return False # not iterable
return True
def _isinstance_listlike(x):
"""Returns True if x is an iterable which can be transformed into a pandas Series,
False for the other types of possible values of a `hover_data` dict.
A tuple of length 2 is a special case corresponding to a (format, data) tuple.
"""
if (
isinstance(x, str)
or (isinstance(x, tuple) and len(x) == 2)
or isinstance(x, bool)
or x is None
):
return False
else:
return True
def _escape_col_name(columns, col_name, extra):
if columns is None:
return col_name
while col_name in columns or col_name in extra:
col_name = "_" + col_name
return col_name
def to_named_series(x, name=None, native_namespace=None):
    """Assuming x is list-like or even an existing Series, returns a new Series named `name`."""
    # With `pass_through=True`, the original object will be returned if unable to convert
    # to a Narwhals Series.
    x = nw.from_native(x, series_only=True, pass_through=True)
    if isinstance(x, nw.Series):
        return x.rename(name)
    elif native_namespace is not None:
        return nw.new_series(name=name, values=x, native_namespace=native_namespace)
    else:
        # no namespace to build the Series in: fall back to pandas, which is
        # therefore a hard requirement on this path
        try:
            import pandas as pd
            return nw.new_series(name=name, values=x, native_namespace=pd)
        except ImportError:
            msg = "Pandas installation is required if no dataframe is provided."
            raise NotImplementedError(msg)
def process_args_into_dataframe(
    args, wide_mode, var_name, value_name, is_pd_like, native_namespace
):
    """
    After this function runs, the `all_attrables` keys of `args` all contain only
    references to columns of `df_output`. This function handles the extraction of data
    from `args["attrable"]` and column-name-generation as appropriate, and adds the
    data to `df_output` and then replaces `args["attrable"]` with the appropriate
    reference.

    Returns a `(df_output, wide_id_vars)` tuple: the assembled narwhals
    DataFrame and the set of column names that act as wide-mode id variables.
    """
    df_input: nw.DataFrame | None = args["data_frame"]
    df_provided = df_input is not None
    # we use a dict instead of a dataframe directly so that it doesn't cause
    # PerformanceWarning by pandas by repeatedly setting the columns.
    # a dict is used instead of a list as the columns needs to be overwritten.
    df_output = {}
    constants = {}
    ranges = []
    wide_id_vars = set()
    reserved_names = _get_reserved_col_names(args) if df_provided else set()
    # Case of functions with a "dimensions" kw: scatter_matrix, parcats, parcoords
    if "dimensions" in args and args["dimensions"] is None:
        if not df_provided:
            raise ValueError(
                "No data were provided. Please provide data either with the `data_frame` or with the `dimensions` argument."
            )
        else:
            df_output = {col: df_input.get_column(col) for col in df_input.columns}
    # hover_data is a dict
    hover_data_is_dict = (
        "hover_data" in args
        and args["hover_data"]
        and isinstance(args["hover_data"], dict)
    )
    # If dict, convert all values of hover_data to tuples to simplify processing
    if hover_data_is_dict:
        for k in args["hover_data"]:
            if _isinstance_listlike(args["hover_data"][k]):
                args["hover_data"][k] = (True, args["hover_data"][k])
            if not isinstance(args["hover_data"][k], tuple):
                args["hover_data"][k] = (args["hover_data"][k], None)
            if df_provided and args["hover_data"][k][1] is not None and k in df_input:
                raise ValueError(
                    "Ambiguous input: values for '%s' appear both in hover_data and data_frame"
                    % k
                )
    # Loop over possible arguments
    for field_name in all_attrables:
        # Massaging variables
        argument_list = (
            [args.get(field_name)]
            if field_name not in array_attrables
            else args.get(field_name)
        )
        # argument not specified, continue
        # The original also tested `or argument_list is [None]` but
        # that clause is always False, so it has been removed. The
        # alternative fix would have been to test that `argument_list`
        # is of length 1 and its sole element is `None`, but that
        # feels pedantic. All tests pass with the change below; let's
        # see if the world decides we were wrong.
        if argument_list is None:
            continue
        # Argument name: field_name if the argument is not a list
        # Else we give names like ["hover_data_0, hover_data_1"] etc.
        field_list = (
            [field_name]
            if field_name not in array_attrables
            else [field_name + "_" + str(i) for i in range(len(argument_list))]
        )
        # argument_list and field_list ready, iterate over them
        # Core of the loop starts here
        for i, (argument, field) in enumerate(zip(argument_list, field_list)):
            # running length of the columns collected so far (0 when none yet)
            length = len(df_output[next(iter(df_output))]) if len(df_output) else 0
            if argument is None:
                continue
            col_name = None
            # Case of multiindex
            if is_pd_like and isinstance(argument, native_namespace.MultiIndex):
                raise TypeError(
                    f"Argument '{field}' is a {native_namespace.__name__} MultiIndex. "
                    f"{native_namespace.__name__} MultiIndex is not supported by plotly "
                    "express at the moment."
                )
            # ----------------- argument is a special value ----------------------
            if isinstance(argument, (Constant, Range)):
                col_name = _check_name_not_reserved(
                    str(argument.label) if argument.label is not None else field,
                    reserved_names,
                )
                if isinstance(argument, Constant):
                    constants[col_name] = argument.value
                else:
                    ranges.append(col_name)
            # ----------------- argument is likely a col name ----------------------
            elif isinstance(argument, str) or not hasattr(argument, "__len__"):
                if (
                    field_name == "hover_data"
                    and hover_data_is_dict
                    and args["hover_data"][str(argument)][1] is not None
                ):
                    # hover_data has onboard data
                    # previously-checked to have no name-conflict with data_frame
                    col_name = str(argument)
                    real_argument = args["hover_data"][col_name][1]
                    if length and (real_length := len(real_argument)) != length:
                        raise ValueError(
                            "All arguments should have the same length. "
                            "The length of hover_data key `%s` is %d, whereas the "
                            "length of previously-processed arguments %s is %d"
                            % (
                                argument,
                                real_length,
                                str(list(df_output.keys())),
                                length,
                            )
                        )
                    df_output[col_name] = to_named_series(
                        real_argument, col_name, native_namespace
                    )
                elif not df_provided:
                    raise ValueError(
                        "String or int arguments are only possible when a "
                        "DataFrame or an array is provided in the `data_frame` "
                        "argument. No DataFrame was provided, but argument "
                        "'%s' is of type str or int." % field
                    )
                # Check validity of column name
                elif argument not in df_input.columns:
                    if wide_mode and argument in (value_name, var_name):
                        continue
                    else:
                        err_msg = (
                            "Value of '%s' is not the name of a column in 'data_frame'. "
                            "Expected one of %s but received: %s"
                            % (field, str(list(df_input.columns)), argument)
                        )
                        if argument == "index":
                            err_msg += "\n To use the index, pass it in directly as `df.index`."
                        raise ValueError(err_msg)
                elif length and (actual_len := len(df_input)) != length:
                    raise ValueError(
                        "All arguments should have the same length. "
                        "The length of column argument `df[%s]` is %d, whereas the "
                        "length of previously-processed arguments %s is %d"
                        % (
                            field,
                            actual_len,
                            str(list(df_output.keys())),
                            length,
                        )
                    )
                else:
                    col_name = str(argument)
                    df_output[col_name] = to_named_series(
                        df_input.get_column(argument), col_name
                    )
            # ----------------- argument is likely a column / array / list.... -------
            else:
                if df_provided and hasattr(argument, "name"):
                    if is_pd_like and argument is nw.maybe_get_index(df_input):
                        if argument.name is None or argument.name in df_input.columns:
                            col_name = "index"
                        else:
                            col_name = argument.name
                        col_name = _escape_col_name(
                            df_input.columns, col_name, [var_name, value_name]
                        )
                    else:
                        # reuse the column name only when the passed Series
                        # matches the df column of the same name exactly
                        if (
                            argument.name is not None
                            and argument.name in df_input.columns
                            and (
                                to_named_series(
                                    argument, argument.name, native_namespace
                                )
                                == df_input.get_column(argument.name)
                            ).all()
                        ):
                            col_name = argument.name
                if col_name is None:  # numpy array, list...
                    col_name = _check_name_not_reserved(field, reserved_names)
                if length and (len_arg := len(argument)) != length:
                    raise ValueError(
                        "All arguments should have the same length. "
                        "The length of argument `%s` is %d, whereas the "
                        "length of previously-processed arguments %s is %d"
                        % (field, len_arg, str(list(df_output.keys())), length)
                    )
                df_output[str(col_name)] = to_named_series(
                    x=argument,
                    name=str(col_name),
                    native_namespace=native_namespace,
                )
            # Finally, update argument with column name now that column exists
            assert col_name is not None, (
                "Data-frame processing failure, likely due to a internal bug. "
                "Please report this to "
                "https://github.com/plotly/plotly.py/issues/new and we will try to "
                "replicate and fix it."
            )
            if field_name not in array_attrables:
                args[field_name] = str(col_name)
            elif isinstance(args[field_name], dict):
                pass
            else:
                args[field_name][i] = str(col_name)
            if field_name != "wide_variable":
                wide_id_vars.add(str(col_name))
    length = len(df_output[next(iter(df_output))]) if len(df_output) else 0
    if native_namespace is None:
        try:
            import pandas as pd
            native_namespace = pd
        except ImportError:
            msg = "Pandas installation is required if no dataframe is provided."
            raise NotImplementedError(msg)
    if ranges:
        import numpy as np
        # Range placeholders materialize as 0..length-1 integer columns
        range_series = nw.new_series(
            name="__placeholder__",
            values=np.arange(length),
            native_namespace=native_namespace,
        )
        df_output.update(
            {col_name: range_series.alias(col_name) for col_name in ranges}
        )
    df_output.update(
        {
            # constant is single value. repeat by len to avoid creating NaN on concatenating
            col_name: nw.new_series(
                name=col_name,
                values=[constants[col_name]] * length,
                native_namespace=native_namespace,
            )
            for col_name in constants
        }
    )
    if df_output:
        df_output = nw.from_dict(df_output)
    else:
        try:
            import pandas as pd
        except ImportError:
            msg = "Pandas installation is required."
            raise NotImplementedError(msg)
        df_output = nw.from_native(pd.DataFrame({}), eager_only=True)
    return df_output, wide_id_vars
def build_dataframe(args, constructor):
"""
Constructs a dataframe and modifies `args` in-place.
The argument values in `args` can be either strings corresponding to
existing columns of a dataframe, or data arrays (lists, numpy arrays,
pandas columns, series).
Parameters
----------
args : OrderedDict
arguments passed to the px function and subsequently modified
constructor : graph_object trace class
the trace type selected for this figure
"""
# make copies of all the fields via dict() and list()
for field in args:
if field in array_attrables and args[field] is not None:
if isinstance(args[field], dict):
args[field] = dict(args[field])
elif field in ["custom_data", "hover_data"] and isinstance(
args[field], str
):
args[field] = [args[field]]
else:
args[field] = list(args[field])
# Cast data_frame argument to DataFrame (it could be a numpy array, dict etc.)
df_provided = args["data_frame"] is not None
# Flag that indicates if the resulting data_frame after parsing is pandas-like
# (in terms of resulting Narwhals DataFrame).
# True if pandas, modin.pandas or cudf DataFrame/Series instance, or converted from
# PySpark to pandas.
is_pd_like = False
# Flag that indicates if data_frame needs to be converted to PyArrow.
# True if Ibis, DuckDB, Vaex, or implements __dataframe__
needs_interchanging = False
# If data_frame is provided, we parse it into a narwhals DataFrame, while accounting
# for compatibility with pandas specific paths (e.g. Index/MultiIndex case).
if df_provided:
# data_frame is pandas-like DataFrame (pandas, modin.pandas, cudf)
if nw.dependencies.is_pandas_like_dataframe(args["data_frame"]):
columns = args["data_frame"].columns # This can be multi index
args["data_frame"] = nw.from_native(args["data_frame"], eager_only=True)
is_pd_like = True
# data_frame is pandas-like Series (pandas, modin.pandas, cudf)
elif nw.dependencies.is_pandas_like_series(args["data_frame"]):
args["data_frame"] = nw.from_native(
args["data_frame"], series_only=True
).to_frame()
columns = args["data_frame"].columns
is_pd_like = True
# data_frame is any other DataFrame object natively supported via Narwhals.
# With `pass_through=True`, the original object will be returned if unable to convert
# to a Narwhals DataFrame, making this condition False.
elif isinstance(
data_frame := nw.from_native(
args["data_frame"], eager_or_interchange_only=True, pass_through=True
),
nw.DataFrame,
):
args["data_frame"] = data_frame
needs_interchanging = nw.get_level(data_frame) == "interchange"
columns = args["data_frame"].columns
# data_frame is any other Series object natively supported via Narwhals.
# With `pass_through=True`, the original object will be returned if unable to convert
# to a Narwhals Series, making this condition False.
elif isinstance(
series := nw.from_native(
args["data_frame"], series_only=True, pass_through=True
),
nw.Series,
):
args["data_frame"] = series.to_frame()
columns = args["data_frame"].columns
# data_frame is PySpark: it does not support interchange protocol and it is not
# integrated in Narwhals. We use its native method to convert it to pandas.
elif hasattr(args["data_frame"], "toPandas"):
args["data_frame"] = nw.from_native(
args["data_frame"].toPandas(), eager_only=True
)
columns = args["data_frame"].columns
is_pd_like = True
# data_frame is some other object type (e.g. dict, list, ...)
# We try to import pandas, and then try to instantiate a pandas dataframe from
# this such object
else:
try:
import pandas as pd
try:
args["data_frame"] = nw.from_native(
pd.DataFrame(args["data_frame"])
)
columns = args["data_frame"].columns
is_pd_like = True
except Exception:
msg = (
f"Unable to convert data_frame of type {type(args['data_frame'])} "
"to pandas DataFrame. Please provide a supported dataframe type "
"or a type that can be passed to pd.DataFrame."
)
raise NotImplementedError(msg)
except ImportError:
msg = (
f"Attempting to convert data_frame of type {type(args['data_frame'])} "
"to pandas DataFrame, but Pandas is not installed. "
"Convert it to supported dataframe type or install pandas."
)
raise NotImplementedError(msg)
# data_frame is not provided
else:
columns = None
df_input: nw.DataFrame | None = args["data_frame"]
index = (
nw.maybe_get_index(df_input)
if df_provided and not needs_interchanging
else None
)
native_namespace = (
nw.get_native_namespace(df_input)
if df_provided and not needs_interchanging
else None
)
# now we handle special cases like wide-mode or x-xor-y specification
# by rearranging args to tee things up for process_args_into_dataframe to work
no_x = args.get("x") is None
no_y = args.get("y") is None
wide_x = (
False
if no_x
else _is_col_list(columns, args["x"], is_pd_like, native_namespace)
)
wide_y = (
False
if no_y
else _is_col_list(columns, args["y"], is_pd_like, native_namespace)
)
wide_mode = False
var_name = None # will likely be "variable" in wide_mode
wide_cross_name = None # will likely be "index" in wide_mode
value_name = None # will likely be "value" in wide_mode
hist2d_types = [go.Histogram2d, go.Histogram2dContour]
hist1d_orientation = constructor == go.Histogram or "ecdfmode" in args
if constructor in cartesians:
if wide_x and wide_y:
raise ValueError(
"Cannot accept list of column references or list of columns for both `x` and `y`."
)
if df_provided and no_x and no_y:
wide_mode = True
if is_pd_like and isinstance(columns, native_namespace.MultiIndex):
raise TypeError(
f"Data frame columns is a {native_namespace.__name__} MultiIndex. "
f"{native_namespace.__name__} MultiIndex is not supported by plotly "
"express at the moment."
)
args["wide_variable"] = list(columns)
if is_pd_like and isinstance(columns, native_namespace.Index):
var_name = columns.name
else:
var_name = None
if var_name in [None, "value", "index"] or var_name in columns:
var_name = "variable"
if constructor == go.Funnel:
wide_orientation = args.get("orientation") or "h"
else:
wide_orientation = args.get("orientation") or "v"
args["orientation"] = wide_orientation
args["wide_cross"] = None
elif wide_x != wide_y:
wide_mode = True
args["wide_variable"] = args["y"] if wide_y else args["x"]
if df_provided and is_pd_like and args["wide_variable"] is columns:
var_name = columns.name
if is_pd_like and isinstance(args["wide_variable"], native_namespace.Index):
args["wide_variable"] = list(args["wide_variable"])
if var_name in [None, "value", "index"] or (
df_provided and var_name in columns
):
var_name = "variable"
if hist1d_orientation:
wide_orientation = "v" if wide_x else "h"
else:
wide_orientation = "v" if wide_y else "h"
args["y" if wide_y else "x"] = None
args["wide_cross"] = None
if not no_x and not no_y:
wide_cross_name = "__x__" if wide_y else "__y__"
if wide_mode:
value_name = _escape_col_name(columns, "value", [])
var_name = _escape_col_name(columns, var_name, [])
# If the data_frame has interchange-only support levelin Narwhals, then we need to
# convert it to a full support level backend.
# Hence we convert requires Interchange to PyArrow.
if needs_interchanging:
if wide_mode:
args["data_frame"] = nw.from_native(
args["data_frame"].to_arrow(), eager_only=True
)
else:
# Save precious resources by only interchanging columns that are
# actually going to be plotted. This is tricky to do in the general case,
# because Plotly allows calls like `px.line(df, x='x', y=['y1', df['y1']])`,
# but interchange-only objects (e.g. DuckDB) don't typically have a concept
# of self-standing Series. It's more important to perform project pushdown
# here seeing as we're materialising to an (eager) PyArrow table.
necessary_columns = {
i for i in args.values() if isinstance(i, str) and i in columns
}
for field in args:
if args[field] is not None and field in array_attrables:
necessary_columns.update(i for i in args[field] if i in columns)
columns = list(necessary_columns)
args["data_frame"] = nw.from_native(
args["data_frame"].select(columns).to_arrow(), eager_only=True
)
import pyarrow as pa
native_namespace = pa
missing_bar_dim = None
if (
constructor in [go.Scatter, go.Bar, go.Funnel] + hist2d_types
and not hist1d_orientation
):
if not wide_mode and (no_x != no_y):
for ax in ["x", "y"]:
if args.get(ax) is None:
args[ax] = (
index
if index is not None
else Range(
label=_escape_col_name(columns, ax, [var_name, value_name])
)
)
if constructor == go.Bar:
missing_bar_dim = ax
else:
if args["orientation"] is None:
args["orientation"] = "v" if ax == "x" else "h"
if wide_mode and wide_cross_name is None:
if no_x != no_y and args["orientation"] is None:
args["orientation"] = "v" if no_x else "h"
if df_provided and is_pd_like and index is not None:
if isinstance(index, native_namespace.MultiIndex):
raise TypeError(
f"Data frame index is a {native_namespace.__name__} MultiIndex. "
f"{native_namespace.__name__} MultiIndex is not supported by "
"plotly express at the moment."
)
args["wide_cross"] = index
else:
args["wide_cross"] = Range(
label=_escape_col_name(columns, "index", [var_name, value_name])
)
no_color = False
if isinstance(args.get("color"), str) and args["color"] == NO_COLOR:
no_color = True
args["color"] = None
# now that things have been prepped, we do the systematic rewriting of `args`
df_output, wide_id_vars = process_args_into_dataframe(
args,
wide_mode,
var_name,
value_name,
is_pd_like,
native_namespace,
)
df_output: nw.DataFrame
# now that `df_output` exists and `args` contains only references, we complete
# the special-case and wide-mode handling by further rewriting args and/or mutating
# df_output
count_name = _escape_col_name(df_output.columns, "count", [var_name, value_name])
if not wide_mode and missing_bar_dim and constructor == go.Bar:
# now that we've populated df_output, we check to see if the non-missing
# dimension is categorical: if so, then setting the missing dimension to a
# constant 1 is a less-insane thing to do than setting it to the index by
# default and we let the normal auto-orientation-code do its thing later
other_dim = "x" if missing_bar_dim == "y" else "y"
if not _is_continuous(df_output, args[other_dim]):
args[missing_bar_dim] = count_name
df_output = df_output.with_columns(nw.lit(1).alias(count_name))
else:
# on the other hand, if the non-missing dimension is continuous, then we
# can use this information to override the normal auto-orientation code
if args["orientation"] is None:
args["orientation"] = "v" if missing_bar_dim == "x" else "h"
if constructor in hist2d_types:
del args["orientation"]
if wide_mode:
# at this point, `df_output` is semi-long/semi-wide, but we know which columns
# are which, so we melt it and reassign `args` to refer to the newly-tidy
# columns, keeping track of various names and manglings set up above
wide_value_vars = [c for c in args["wide_variable"] if c not in wide_id_vars]
del args["wide_variable"]
if wide_cross_name == "__x__":
wide_cross_name = args["x"]
elif wide_cross_name == "__y__":
wide_cross_name = args["y"]
else:
wide_cross_name = args["wide_cross"]
del args["wide_cross"]
dtype = None
for v in wide_value_vars:
v_dtype = df_output.get_column(v).dtype
v_dtype = "number" if v_dtype.is_numeric() else str(v_dtype)
if dtype is None:
dtype = v_dtype
elif dtype != v_dtype:
raise ValueError(
"Plotly Express cannot process wide-form data with columns of different type."
)
df_output = df_output.unpivot(
index=wide_id_vars,
on=wide_value_vars,
variable_name=var_name,
value_name=value_name,
)
assert len(df_output.columns) == len(set(df_output.columns)), (
"Wide-mode name-inference failure, likely due to a internal bug. "
"Please report this to "
"https://github.com/plotly/plotly.py/issues/new and we will try to "
"replicate and fix it."
)
df_output = df_output.with_columns(nw.col(var_name).cast(nw.String))
orient_v = wide_orientation == "v"
if hist1d_orientation:
args["x" if orient_v else "y"] = value_name
args["y" if orient_v else "x"] = wide_cross_name
args["color"] = args["color"] or var_name
elif constructor in [go.Scatter, go.Funnel] + hist2d_types:
args["x" if orient_v else "y"] = wide_cross_name
args["y" if orient_v else "x"] = value_name
if constructor != go.Histogram2d:
args["color"] = args["color"] or var_name
if "line_group" in args:
args["line_group"] = args["line_group"] or var_name
elif constructor == go.Bar:
if _is_continuous(df_output, value_name):
args["x" if orient_v else "y"] = wide_cross_name
args["y" if orient_v else "x"] = value_name
args["color"] = args["color"] or var_name
else:
args["x" if orient_v else "y"] = value_name
args["y" if orient_v else "x"] = count_name
df_output = df_output.with_columns(nw.lit(1).alias(count_name))
args["color"] = args["color"] or var_name
elif constructor in [go.Violin, go.Box]:
args["x" if orient_v else "y"] = wide_cross_name or var_name
args["y" if orient_v else "x"] = value_name
if hist1d_orientation and constructor == go.Scatter:
if args["x"] is not None and args["y"] is not None:
args["histfunc"] = "sum"
elif args["x"] is None:
args["histfunc"] = None
args["orientation"] = "h"
args["x"] = count_name
df_output = df_output.with_columns(nw.lit(1).alias(count_name))
else:
args["histfunc"] = None
args["orientation"] = "v"
args["y"] = count_name
df_output = df_output.with_columns(nw.lit(1).alias(count_name))
if no_color:
args["color"] = None
args["data_frame"] = df_output
return args
def _check_dataframe_all_leaves(df: nw.DataFrame) -> None:
    """Validate the ``path`` columns used for sunburst/treemap/icicle input.

    Raises ValueError when either (a) a null path entry has non-null
    children — i.e. a "gap" in the middle of a path — or (b) a row is a
    prefix of another row, meaning a non-leaf node appears as its own row.
    """
    cols = df.columns
    # Sort so that rows sharing a prefix become adjacent; null-padded
    # (shorter) paths sort after their siblings.
    df_sorted = df.sort(by=cols, descending=False, nulls_last=True)
    null_mask = df_sorted.select(nw.all().is_null())
    df_sorted = df_sorted.select(nw.all().cast(nw.String()))
    # Boolean series: rows that contain at least one null anywhere.
    null_indices_mask = null_mask.select(
        null_mask=nw.any_horizontal(nw.all())
    ).get_column("null_mask")

    null_mask_filtered = null_mask.filter(null_indices_mask)
    if not null_mask_filtered.is_empty():
        for col_idx in range(1, null_mask_filtered.shape[1]):
            # For each row, if a True value is encountered, then check that
            # all values in subsequent columns are also True
            null_entries_with_non_null_children = (
                ~null_mask_filtered[:, col_idx] & null_mask_filtered[:, col_idx - 1]
            )
            if nw.to_py_scalar(null_entries_with_non_null_children.any()):
                row_idx = null_entries_with_non_null_children.to_list().index(True)
                raise ValueError(
                    "None entries cannot have not-None children",
                    df_sorted.row(row_idx),
                )

    # Replace nulls with "" so each row can be compared as one
    # concatenated string below.
    fill_series = nw.new_series(
        name="fill_value",
        values=[""] * len(df_sorted),
        dtype=nw.String(),
        native_namespace=nw.get_native_namespace(df_sorted),
    )
    df_sorted = df_sorted.with_columns(
        **{
            c: df_sorted.get_column(c).zip_with(~null_mask.get_column(c), fill_series)
            for c in cols
        }
    )

    # Conversion to list is due to python native vs pyarrow scalars
    row_strings = (
        df_sorted.select(
            row_strings=nw.concat_str(cols, separator="", ignore_nulls=False)
        )
        .get_column("row_strings")
        .to_list()
    )

    null_indices = set(null_indices_mask.arg_true().to_list())
    # Adjacent rows where one path string contains the other indicate a
    # prefix relationship: the null-padded row is a non-leaf, which is
    # not allowed.
    for i, (current_row, next_row) in enumerate(
        zip(row_strings[:-1], row_strings[1:]), start=1
    ):
        if (next_row in current_row) and (i in null_indices):
            raise ValueError(
                "Non-leaves rows are not permitted in the dataframe \n",
                df_sorted.row(i),
                "is not a leaf.",
            )
def process_dataframe_hierarchy(args):
    """
    Build dataframe for sunburst, treemap, or icicle when the path argument is provided.

    Transforms the user's ``path`` columns into the ``labels``/``parent``/``id``
    columns expected by the hierarchical trace constructors, aggregating
    ``values`` (or a synthetic count) and ``color`` at every level of the
    hierarchy: continuous colors become count-weighted means, while discrete
    colors/extra columns collapse to their single value when unambiguous and
    to the "(?)" placeholder otherwise. Mutates and returns ``args``.
    """
    df: nw.DataFrame = args["data_frame"]
    # Reversed so that path[0] is the deepest level (leaves first).
    path = args["path"][::-1]
    _check_dataframe_all_leaves(df[path[::-1]])
    discrete_color = not _is_continuous(df, args["color"]) if args["color"] else False

    df = df.lazy()
    # Work on copies of the path columns so the originals remain available
    # for hover/custom data.
    new_path = [col_name + "_path_copy" for col_name in path]
    df = df.with_columns(
        nw.col(col_name).alias(new_col_name)
        for new_col_name, col_name in zip(new_path, path)
    )
    path = new_path
    # ------------ Define aggregation functions --------------------------------
    agg_f = {}
    if args["values"]:
        try:
            df = df.with_columns(nw.col(args["values"]).cast(nw.Float64()))

        except Exception:  # pandas, Polars and pyarrow exception types are different
            raise ValueError(
                "Column `%s` of `df` could not be converted to a numerical data type."
                % args["values"]
            )

        if args["color"] and args["color"] == args["values"]:
            # Duplicate the column so it can serve both as values (summed)
            # and as color (weighted mean).
            new_value_col_name = args["values"] + "_sum"
            df = df.with_columns(nw.col(args["values"]).alias(new_value_col_name))
            args["values"] = new_value_col_name
        count_colname = args["values"]
    else:
        # we need a count column for the first groupby and the weighted mean of color
        # trick to be sure the col name is unused: take the sum of existing names
        columns = df.collect_schema().names()
        count_colname = (
            "count" if "count" not in columns else "".join([str(el) for el in columns])
        )
        # we can modify df because it's a copy of the px argument
        df = df.with_columns(nw.lit(1).alias(count_colname))
        args["values"] = count_colname

    # Since count_colname is always in agg_f, it can be used later to normalize color
    # in the continuous case after some gymnastic
    agg_f[count_colname] = nw.sum(count_colname)

    discrete_aggs = []
    continuous_aggs = []

    n_unique_token = _generate_temporary_column_name(
        n_bytes=16, columns=df.collect_schema().names()
    )

    # In theory, for discrete columns aggregation, we should have a way to do
    # `.agg(nw.col(x).unique())` in group_by and successively unpack/parse it as:
    # ```
    # (nw.when(nw.col(x).list.len()==1)
    # .then(nw.col(x).list.first())
    # .otherwise(nw.lit("(?)"))
    # )
    # ```
    # which replicates the original pandas only codebase:
    # ```
    # def discrete_agg(x):
    #     uniques = x.unique()
    #     return uniques[0] if len(uniques) == 1 else "(?)"
    #
    # df.groupby(path[i:]).agg(...)
    # ```
    # However this is not possible, therefore the following workaround is provided.
    # We make two aggregations for the same column:
    # - take the max value
    # - take the number of unique values
    # Finally, after the group by statement, it is unpacked via:
    # ```
    # (nw.when(nw.col(col_n_unique) == 1)
    # .then(nw.col(col_max_value))  # which is the unique value
    # .otherwise(nw.lit("(?)"))
    # )
    # ```
    if args["color"]:
        if discrete_color:
            discrete_aggs.append(args["color"])
            agg_f[args["color"]] = nw.col(args["color"]).max()
            agg_f[f"{args['color']}{n_unique_token}"] = (
                nw.col(args["color"])
                .n_unique()
                .alias(f"{args['color']}{n_unique_token}")
            )
        else:
            # This first needs to be multiplied by `count_colname`
            continuous_aggs.append(args["color"])

            agg_f[args["color"]] = nw.sum(args["color"])

    #  Other columns (for color, hover_data, custom_data etc.)
    cols = list(set(df.collect_schema().names()).difference(path))
    df = df.with_columns(nw.col(c).cast(nw.String()) for c in cols if c not in agg_f)

    for col in cols:  # for hover_data, custom_data etc.
        if col not in agg_f:
            # Similar trick as above
            discrete_aggs.append(col)
            agg_f[col] = nw.col(col).max()
            agg_f[f"{col}{n_unique_token}"] = (
                nw.col(col).n_unique().alias(f"{col}{n_unique_token}")
            )
    # Avoid collisions with reserved names - columns in the path have been copied already
    cols = list(set(cols) - set(["labels", "parent", "id"]))
    # ----------------------------------------------------------------------------

    all_trees = []

    if args["color"] and not discrete_color:
        # Pre-multiply so summing then dividing by the count yields the
        # count-weighted mean color at each hierarchy level.
        df = df.with_columns(
            (nw.col(args["color"]) * nw.col(count_colname)).alias(args["color"])
        )

    def post_agg(dframe: nw.LazyFrame, continuous_aggs, discrete_aggs) -> nw.LazyFrame:
        """
        - continuous_aggs is either [] or [args["color"]]
        - discrete_aggs is either [args["color"], <rest_of_cols>] or [<rest_of cols>]
        """
        return dframe.with_columns(
            *[nw.col(col) / nw.col(count_colname) for col in continuous_aggs],
            *[
                (
                    nw.when(nw.col(f"{col}{n_unique_token}") == 1)
                    .then(nw.col(col))
                    .otherwise(nw.lit("(?)"))
                    .alias(col)
                )
                for col in discrete_aggs
            ],
        ).drop([f"{col}{n_unique_token}" for col in discrete_aggs])

    # One grouped aggregation per hierarchy level, from leaves up to root.
    for i, level in enumerate(path):
        dfg = (
            df.group_by(path[i:], drop_null_keys=True)
            .agg(**agg_f)
            .pipe(post_agg, continuous_aggs, discrete_aggs)
        )

        # Path label massaging
        df_tree = dfg.with_columns(
            *cols,
            labels=nw.col(level).cast(nw.String()),
            parent=nw.lit(""),
            id=nw.col(level).cast(nw.String()),
        )
        if i < len(path) - 1:
            # Prefix parent/id with the ancestor path, joined by "/".
            _concat_str_token = _generate_temporary_column_name(
                n_bytes=16, columns=[*cols, "labels", "parent", "id"]
            )
            df_tree = (
                df_tree.with_columns(
                    nw.concat_str(
                        [
                            nw.col(path[j]).cast(nw.String())
                            for j in range(len(path) - 1, i, -1)
                        ],
                        separator="/",
                    ).alias(_concat_str_token)
                )
                .with_columns(
                    parent=nw.concat_str(
                        [nw.col(_concat_str_token), nw.col("parent")], separator="/"
                    ),
                    id=nw.concat_str(
                        [nw.col(_concat_str_token), nw.col("id")], separator="/"
                    ),
                )
                .drop(_concat_str_token)
            )

        # strip "/" if at the end of the string, equivalent to `.str.rstrip`
        df_tree = df_tree.with_columns(
            parent=nw.col("parent").str.replace("/?$", "").str.replace("^/?", "")
        )

        all_trees.append(df_tree.select(*["labels", "parent", "id", *cols]))

    df_all_trees = nw.maybe_reset_index(nw.concat(all_trees, how="vertical").collect())

    # we want to make sure than (?) is the first color of the sequence
    if args["color"] and discrete_color:
        sort_col_name = "sort_color_if_discrete_color"
        while sort_col_name in df_all_trees.columns:
            sort_col_name += "0"
        df_all_trees = df_all_trees.with_columns(
            nw.col(args["color"]).cast(nw.String()).alias(sort_col_name)
        ).sort(by=sort_col_name, nulls_last=True)

    # Now modify arguments
    args["data_frame"] = df_all_trees
    args["path"] = None
    args["ids"] = "id"
    args["names"] = "labels"
    args["parents"] = "parent"
    if args["color"]:
        if not args["hover_data"]:
            args["hover_data"] = [args["color"]]
        elif isinstance(args["hover_data"], dict):
            if not args["hover_data"].get(args["color"]):
                args["hover_data"][args["color"]] = (True, None)
        else:
            args["hover_data"].append(args["color"])
    return args
def process_dataframe_timeline(args):
    """Rewire px.timeline() arguments into the bar-trace form.

    Converts the ``x_start``/``x_end`` columns to datetimes when needed,
    replaces the ``x_end`` column with the bar duration in milliseconds,
    and points ``x`` at that duration with ``base`` set to the start.
    Mutates and returns ``args``.

    Raises ValueError when either endpoint column is missing, and
    TypeError when a column cannot be converted to datetimes.
    """
    args["is_timeline"] = True
    if args["x_start"] is None or args["x_end"] is None:
        raise ValueError("Both x_start and x_end are required")

    df: nw.DataFrame = args["data_frame"]
    schema = df.schema
    start_col, end_col = args["x_start"], args["x_end"]

    # Any endpoint column that is not already a date/datetime must be
    # parsed from strings before we can subtract.
    needs_parsing = [
        c
        for c in (start_col, end_col)
        if schema[c] != nw.Datetime and schema[c] != nw.Date
    ]
    if needs_parsing:
        try:
            df = df.with_columns(nw.col(needs_parsing).str.to_datetime())
        except Exception as exc:
            raise TypeError(
                "Both x_start and x_end must refer to data convertible to datetimes."
            ) from exc

    # note that we are not adding any columns to the data frame here, so no risk of overwrite
    duration = (nw.col(end_col) - nw.col(start_col)).dt.total_milliseconds()
    args["data_frame"] = df.with_columns(duration.alias(end_col))
    args["x"] = end_col
    args["base"] = start_col
    del args["x_start"], args["x_end"]
    return args
def process_dataframe_pie(args, trace_patch):
    """Reorder pie sectors to honor a user-supplied category order.

    When ``category_orders`` covers the ``names`` column, sort the data
    frame rows to match it (requested labels first, in the given order,
    then remaining labels in first-appearance order) and switch the trace
    to unsorted/clockwise so sectors render in row order.

    Returns the (possibly updated) ``(args, trace_patch)`` pair.
    """
    import numpy as np

    names = args.get("names")
    if names is None:
        return args, trace_patch
    requested_order = args["category_orders"].get(names, {}).copy()
    if not requested_order:
        return args, trace_patch

    df: nw.DataFrame = args["data_frame"]

    # Row order will drive sector order, so disable Plotly's own sorting.
    trace_patch["sort"] = False
    trace_patch["direction"] = "clockwise"

    observed = df.get_column(names).unique(maintain_order=True).to_list()
    # Requested labels first (dropping any absent from the data), then the
    # remaining observed labels in order of appearance.
    full_order = [
        label
        for label in OrderedDict.fromkeys(list(requested_order) + observed)
        if label in observed
    ]

    # Sort args['data_frame'] by mapping each label to its rank in
    # `full_order` via a temporary helper column, then drop the helper.
    rank_col = nw.generate_temporary_column_name(8, df.columns)
    ranked = df.with_columns(
        nw.col(names)
        .replace_strict(full_order, np.arange(len(full_order)), return_dtype=nw.UInt32)
        .alias(rank_col)
    )
    args["data_frame"] = ranked.sort(rank_col).drop(rank_col)
    return args, trace_patch
def infer_config(args, constructor, trace_patch, layout_patch):
    """Derive trace specs, grouping mappings, sizeref and colorbar visibility.

    Inspects ``args`` (as produced by build_dataframe) together with the
    trace ``constructor`` to decide which attributes are passed straight to
    traces and which induce grouping (color/symbol/dash/facets/animation),
    and patches orientation-, histogram-, marginal-, ecdf- and
    trendline-related settings into ``args``/``trace_patch``/``layout_patch``
    in place.

    Returns ``(trace_specs, grouped_mappings, sizeref, show_colorbar)``.
    """
    attrs = [k for k in direct_attrables + array_attrables if k in args]
    grouped_attrs = []
    df: nw.DataFrame = args["data_frame"]

    # Compute sizeref
    sizeref = 0
    if "size" in args and args["size"]:
        sizeref = (
            nw.to_py_scalar(df.get_column(args["size"]).max()) / args["size_max"] ** 2
        )

    # Compute color attributes and grouping attributes
    if "color" in args:
        if "color_continuous_scale" in args:
            if "color_discrete_sequence" not in args:
                attrs.append("color")
            else:
                # Constructor accepts both: continuous data maps to a scale,
                # discrete data induces one trace per color value.
                if args["color"] and _is_continuous(df, args["color"]):
                    attrs.append("color")
                    args["color_is_continuous"] = True
                elif constructor in [go.Sunburst, go.Treemap, go.Icicle]:
                    attrs.append("color")
                    args["color_is_continuous"] = False
                else:
                    grouped_attrs.append("marker.color")
        elif "line_group" in args or constructor == go.Histogram2dContour:
            grouped_attrs.append("line.color")
        elif constructor in [go.Pie, go.Funnelarea]:
            attrs.append("color")
            if args["color"]:
                if args["hover_data"] is None:
                    args["hover_data"] = []
                args["hover_data"].append(args["color"])
        else:
            grouped_attrs.append("marker.color")

        show_colorbar = bool(
            "color" in attrs
            and args["color"]
            and constructor not in [go.Pie, go.Funnelarea]
            and (
                constructor not in [go.Treemap, go.Sunburst, go.Icicle]
                or args.get("color_is_continuous")
            )
        )
    else:
        show_colorbar = False

    if "line_dash" in args:
        grouped_attrs.append("line.dash")

    if "symbol" in args:
        grouped_attrs.append("marker.symbol")

    if "pattern_shape" in args:
        if constructor in [go.Scatter]:
            grouped_attrs.append("fillpattern.shape")
        else:
            grouped_attrs.append("marker.pattern.shape")

    if "orientation" in args:
        has_x = args["x"] is not None
        has_y = args["y"] is not None
        if args["orientation"] is None:
            # Infer orientation from which axes were provided and, failing
            # that, from which axis holds continuous data; default to "v".
            if constructor in [go.Histogram, go.Scatter]:
                if has_y and not has_x:
                    args["orientation"] = "h"
            elif constructor in [go.Violin, go.Box, go.Bar, go.Funnel]:
                if has_x and not has_y:
                    args["orientation"] = "h"

        if args["orientation"] is None and has_x and has_y:
            x_is_continuous = _is_continuous(df, args["x"])
            y_is_continuous = _is_continuous(df, args["y"])
            if x_is_continuous and not y_is_continuous:
                args["orientation"] = "h"
            if y_is_continuous and not x_is_continuous:
                args["orientation"] = "v"

        if args["orientation"] is None:
            args["orientation"] = "v"

        if constructor == go.Histogram:
            if has_x and has_y and args["histfunc"] is None:
                args["histfunc"] = trace_patch["histfunc"] = "sum"

            # nbins applies to the binned (independent) axis only.
            orientation = args["orientation"]
            nbins = args["nbins"]
            trace_patch["nbinsx"] = nbins if orientation == "v" else None
            trace_patch["nbinsy"] = None if orientation == "v" else nbins
            trace_patch["bingroup"] = "x" if orientation == "v" else "y"
        trace_patch["orientation"] = args["orientation"]

        if constructor in [go.Violin, go.Box]:
            mode = "boxmode" if constructor == go.Box else "violinmode"
            if layout_patch[mode] is None and args["color"] is not None:
                # Overlay when color coincides with the categorical axis,
                # since the traces then occupy distinct positions anyway.
                if args["y"] == args["color"] and args["orientation"] == "h":
                    layout_patch[mode] = "overlay"
                elif args["x"] == args["color"] and args["orientation"] == "v":
                    layout_patch[mode] = "overlay"
            if layout_patch[mode] is None:
                layout_patch[mode] = "group"

    if (
        constructor == go.Histogram2d
        and args["z"] is not None
        and args["histfunc"] is None
    ):
        args["histfunc"] = trace_patch["histfunc"] = "sum"

    if args.get("text_auto", False) is not False:
        if constructor in [go.Histogram2d, go.Histogram2dContour]:
            letter = "z"
        elif constructor == go.Bar:
            letter = "y" if args["orientation"] == "v" else "x"
        else:
            letter = "value"
        if args["text_auto"] is True:
            trace_patch["texttemplate"] = "%{" + letter + "}"
        else:
            # text_auto carries a d3 format string in this case.
            trace_patch["texttemplate"] = "%{" + letter + ":" + args["text_auto"] + "}"

    if constructor in [go.Histogram2d, go.Densitymap, go.Densitymapbox]:
        show_colorbar = True
        trace_patch["coloraxis"] = "coloraxis1"

    if "opacity" in args:
        if args["opacity"] is None:
            if "barmode" in args and args["barmode"] == "overlay":
                trace_patch["marker"] = dict(opacity=0.5)
        elif constructor in [
            go.Densitymap,
            go.Densitymapbox,
            go.Pie,
            go.Funnel,
            go.Funnelarea,
        ]:
            trace_patch["opacity"] = args["opacity"]
        else:
            trace_patch["marker"] = dict(opacity=args["opacity"])
    if (
        "line_group" in args or "line_dash" in args
    ):  # px.line, px.line_*, px.area, px.ecdf
        modes = set()
        if args.get("lines", True):
            modes.add("lines")
        if args.get("text") or args.get("symbol") or args.get("markers"):
            modes.add("markers")
        if args.get("text"):
            modes.add("text")
        if len(modes) == 0:
            modes.add("lines")
        trace_patch["mode"] = "+".join(sorted(modes))
    elif constructor != go.Splom and (
        "symbol" in args or constructor in [go.Scattermap, go.Scattermapbox]
    ):
        trace_patch["mode"] = "markers" + ("+text" if args["text"] else "")

    if "line_shape" in args:
        trace_patch["line"] = dict(shape=args["line_shape"])
    elif "ecdfmode" in args:
        trace_patch["line"] = dict(
            shape="vh" if args["ecdfmode"] == "reversed" else "hv"
        )

    if "geojson" in args:
        trace_patch["featureidkey"] = args["featureidkey"]
        trace_patch["geojson"] = (
            args["geojson"]
            if not hasattr(args["geojson"], "__geo_interface__")  # for geopandas
            else args["geojson"].__geo_interface__
        )

    # Compute marginal attribute: copy to appropriate marginal_*
    if "marginal" in args:
        position = "marginal_x" if args["orientation"] == "v" else "marginal_y"
        other_position = "marginal_x" if args["orientation"] == "h" else "marginal_y"
        args[position] = args["marginal"]
        args[other_position] = None

    # Ignore facet rows and columns when data frame is empty so as to prevent nrows/ncols equaling 0
    if df.is_empty():
        args["facet_row"] = args["facet_col"] = None

    # If both marginals and faceting are specified, faceting wins
    if args.get("facet_col") is not None and args.get("marginal_y") is not None:
        args["marginal_y"] = None

    if args.get("facet_row") is not None and args.get("marginal_x") is not None:
        args["marginal_x"] = None

    # facet_col_wrap only works if no marginals or row faceting is used
    if (
        args.get("marginal_x") is not None
        or args.get("marginal_y") is not None
        or args.get("facet_row") is not None
    ):
        args["facet_col_wrap"] = 0

    if "trendline" in args and args["trendline"] is not None:
        if args["trendline"] not in trendline_functions:
            raise ValueError(
                "Value '%s' for `trendline` must be one of %s"
                % (args["trendline"], trendline_functions.keys())
            )

    if "trendline_options" in args and args["trendline_options"] is None:
        args["trendline_options"] = dict()

    if "ecdfnorm" in args:
        if args.get("ecdfnorm", None) not in [None, "percent", "probability"]:
            raise ValueError(
                "`ecdfnorm` must be one of None, 'percent' or 'probability'. "
                + "'%s' was provided." % args["ecdfnorm"]
            )
        args["histnorm"] = args["ecdfnorm"]

    # Compute applicable grouping attributes
    grouped_attrs.extend([k for k in group_attrables if k in args])

    # Create grouped mappings
    grouped_mappings = [make_mapping(args, a) for a in grouped_attrs]

    # Create trace specs
    trace_specs = make_trace_spec(args, constructor, attrs, trace_patch)
    return trace_specs, grouped_mappings, sizeref, show_colorbar
def get_groups_and_orders(args, grouper):
    """
    `orders` is the user-supplied ordering with the remaining data-frame-supplied
    ordering appended if the column is used for grouping. It includes anything the user
    gave, for any variable, including values not present in the dataset. It's a dict
    where the keys are e.g. "x" or "color"

    `groups` is the dicts of groups, ordered by the order above. Its keys are
    tuples like [("value1", ""), ("value2", "")] where each tuple contains the name
    of a single dimension-group
    """
    orders = {} if "category_orders" not in args else args["category_orders"].copy()
    df: nw.DataFrame = args["data_frame"]
    # figure out orders and what the single group name would be if there were one
    single_group_name = []
    # Cache per-column unique values: the same column may appear multiple
    # times in `grouper` (e.g. color and facet_col on the same column).
    unique_cache = dict()
    for i, col in enumerate(grouper):
        if col == one_group:
            # Sentinel dimension: always a single, unnamed group.
            single_group_name.append("")
        else:
            if col not in unique_cache:
                unique_cache[col] = (
                    df.get_column(col).unique(maintain_order=True).to_list()
                )
            uniques = unique_cache[col]
            if len(uniques) == 1:
                single_group_name.append(uniques[0])
            if col not in orders:
                orders[col] = uniques
            else:
                # User-supplied order first, then any remaining observed
                # values in first-appearance order, de-duplicated.
                orders[col] = list(OrderedDict.fromkeys(list(orders[col]) + uniques))

    if len(single_group_name) == len(grouper):
        # we have a single group, so we can skip all group-by operations!
        groups = {tuple(single_group_name): df}
    else:
        # Only the real (non-sentinel) dimensions participate in group_by.
        required_grouper = [group for group in orders if group in grouper]
        grouped = dict(df.group_by(required_grouper, drop_null_keys=True).__iter__())

        # Sort group keys by each value's position in `orders`; values
        # missing from `orders` sort first (index -1).
        sorted_group_names = sorted(
            grouped.keys(),
            key=lambda values: [
                orders[group].index(value) if value in orders[group] else -1
                for group, value in zip(required_grouper, values)
            ],
        )

        # calculate the full group_names by inserting "" in the tuple index for one_group groups
        full_sorted_group_names = [
            tuple(
                [
                    (
                        ""
                        if col == one_group
                        else sub_group_names[required_grouper.index(col)]
                    )
                    for col in grouper
                ]
            )
            for sub_group_names in sorted_group_names
        ]

        groups = {
            sf: grouped[s] for sf, s in zip(full_sorted_group_names, sorted_group_names)
        }
    return groups, orders
def make_figure(args, constructor, trace_patch=None, layout_patch=None):
    """Core figure builder shared by every Plotly Express function.

    Normalizes ``args`` into a tidy data frame, splits it into groups (one
    trace per group per trace spec), assigns each trace to its subplot cell
    (facets, marginals, wrapped columns) and animation frame, then assembles
    and returns the final ``go.Figure`` with layout, colorbar, trendlines
    and animation controls applied.
    """
    trace_patch = trace_patch or {}
    layout_patch = layout_patch or {}
    apply_default_cascade(args)

    args = build_dataframe(args, constructor)
    # Constructor-specific preprocessing steps rewrite args in place.
    if constructor in [go.Treemap, go.Sunburst, go.Icicle] and args["path"] is not None:
        args = process_dataframe_hierarchy(args)
    if constructor in [go.Pie]:
        args, trace_patch = process_dataframe_pie(args, trace_patch)
    if constructor == "timeline":
        constructor = go.Bar
        args = process_dataframe_timeline(args)

    # If we have marginal histograms, set barmode to "overlay"
    if "histogram" in [args.get("marginal_x"), args.get("marginal_y")]:
        layout_patch["barmode"] = "overlay"

    trace_specs, grouped_mappings, sizeref, show_colorbar = infer_config(
        args, constructor, trace_patch, layout_patch
    )
    grouper = [x.grouper or one_group for x in grouped_mappings] or [one_group]
    groups, orders = get_groups_and_orders(args, grouper)

    # Populate each mapping's value map and size the facet grid.
    col_labels = []
    row_labels = []
    nrows = ncols = 1
    for m in grouped_mappings:
        if m.grouper not in orders:
            m.val_map[""] = m.sequence[0]
        else:
            sorted_values = orders[m.grouper]
            if m.facet == "col":
                prefix = get_label(args, args["facet_col"]) + "="
                col_labels = [prefix + str(s) for s in sorted_values]
                ncols = len(col_labels)
            if m.facet == "row":
                prefix = get_label(args, args["facet_row"]) + "="
                row_labels = [prefix + str(s) for s in sorted_values]
                nrows = len(row_labels)
            for val in sorted_values:
                if val not in m.val_map:  # always False if it's an IdentityMap
                    # Cycle through the style sequence for each new value.
                    m.val_map[val] = m.sequence[len(m.val_map) % len(m.sequence)]

    subplot_type = _subplot_type_for_trace_type(constructor().type)

    trace_names_by_frame = {}
    frames = OrderedDict()
    trendline_rows = []
    trace_name_labels = None
    facet_col_wrap = args.get("facet_col_wrap", 0)
    for group_name, group in groups.items():
        # Map each grouping dimension's value into hover labels, the trace
        # name and (if animated) the frame name.
        mapping_labels = OrderedDict()
        trace_name_labels = OrderedDict()
        frame_name = ""
        for col, val, m in zip(grouper, group_name, grouped_mappings):
            if col != one_group:
                key = get_label(args, col)
                if not isinstance(m.val_map, IdentityMap):
                    mapping_labels[key] = str(val)
                    if m.show_in_trace_name:
                        trace_name_labels[key] = str(val)
                if m.variable == "animation_frame":
                    frame_name = val
        trace_name = ", ".join(trace_name_labels.values())
        if frame_name not in trace_names_by_frame:
            trace_names_by_frame[frame_name] = set()
        trace_names = trace_names_by_frame[frame_name]

        for trace_spec in trace_specs:
            # Create the trace
            trace = trace_spec.constructor(name=trace_name)
            if trace_spec.constructor not in [
                go.Parcats,
                go.Parcoords,
                go.Choropleth,
                go.Choroplethmap,
                go.Choroplethmapbox,
                go.Densitymap,
                go.Densitymapbox,
                go.Histogram2d,
                go.Sunburst,
                go.Treemap,
                go.Icicle,
            ]:
                # Show each trace name in the legend only once per frame.
                trace.update(
                    legendgroup=trace_name,
                    showlegend=(trace_name != "" and trace_name not in trace_names),
                )

            # Set 'offsetgroup' only in group barmode (or if no barmode is set)
            barmode = layout_patch.get("barmode")
            if trace_spec.constructor in [go.Bar, go.Box, go.Violin, go.Histogram] and (
                barmode == "group" or barmode is None
            ):
                trace.update(alignmentgroup=True, offsetgroup=trace_name)
            trace_names.add(trace_name)

            # Init subplot row/col
            trace._subplot_row = 1
            trace._subplot_col = 1
            for i, m in enumerate(grouped_mappings):
                val = group_name[i]
                try:
                    m.updater(trace, m.val_map[val])  # covers most cases
                except ValueError:
                    # this catches some odd cases like marginals
                    if (
                        trace_spec != trace_specs[0]
                        and (
                            trace_spec.constructor in [go.Violin, go.Box]
                            and m.variable in ["symbol", "pattern", "dash"]
                        )
                        or (
                            trace_spec.constructor in [go.Histogram]
                            and m.variable in ["symbol", "dash"]
                        )
                    ):
                        pass
                    elif (
                        trace_spec != trace_specs[0]
                        and trace_spec.constructor in [go.Histogram]
                        and m.variable == "color"
                    ):
                        trace.update(marker=dict(color=m.val_map[val]))
                    elif (
                        trace_spec.constructor
                        in [go.Choropleth, go.Choroplethmap, go.Choroplethmapbox]
                        and m.variable == "color"
                    ):
                        # Choropleths have no discrete marker color: emulate
                        # it with a constant-z trace and a flat colorscale.
                        trace.update(
                            z=[1] * len(group),
                            colorscale=[m.val_map[val]] * 2,
                            showscale=False,
                            showlegend=True,
                        )
                    else:
                        raise

                # Find row for trace, handling facet_row and marginal_x
                if m.facet == "row":
                    row = m.val_map[val]
                else:
                    if (
                        args.get("marginal_x") is not None  # there is a marginal
                        and trace_spec.marginal != "x"  # and we're not it
                    ):
                        row = 2
                    else:
                        row = 1

                # Find col for trace, handling facet_col and marginal_y
                if m.facet == "col":
                    col = m.val_map[val]
                    if facet_col_wrap:  # assumes no facet_row, no marginals
                        row = 1 + ((col - 1) // facet_col_wrap)
                        col = 1 + ((col - 1) % facet_col_wrap)
                else:
                    if trace_spec.marginal == "y":
                        col = 2
                    else:
                        col = 1

                if row > 1:
                    trace._subplot_row = row

                if col > 1:
                    trace._subplot_col = col
            if (
                trace_specs[0].constructor == go.Histogram2dContour
                and trace_spec.constructor == go.Box
                and trace.line.color
            ):
                trace.update(marker=dict(color=trace.line.color))

            if "ecdfmode" in args:
                # Build the empirical CDF per group: sort along the base
                # axis, cumulatively sum the variable, then apply the
                # requested mode (reversed/complementary) and normalization.
                base = args["x"] if args["orientation"] == "v" else args["y"]
                var = args["x"] if args["orientation"] == "h" else args["y"]
                ascending = args.get("ecdfmode", "standard") != "reversed"
                group = group.sort(by=base, descending=not ascending, nulls_last=True)
                group_sum = group.get_column(
                    var
                ).sum()  # compute here before next line mutates
                group = group.with_columns(nw.col(var).cum_sum().alias(var))
                if not ascending:
                    group = group.sort(by=base, descending=False, nulls_last=True)

                if args.get("ecdfmode", "standard") == "complementary":
                    group = group.with_columns((group_sum - nw.col(var)).alias(var))

                if args["ecdfnorm"] == "probability":
                    group = group.with_columns(nw.col(var) / group_sum)
                elif args["ecdfnorm"] == "percent":
                    group = group.with_columns((nw.col(var) / group_sum) * 100.0)

            patch, fit_results = make_trace_kwargs(
                args, trace_spec, group, mapping_labels.copy(), sizeref
            )
            trace.update(patch)
            if fit_results is not None:
                trendline_rows.append(mapping_labels.copy())
                trendline_rows[-1]["px_fit_results"] = fit_results
            if frame_name not in frames:
                frames[frame_name] = dict(data=[], name=frame_name)
            frames[frame_name]["data"].append(trace)
    frame_list = [f for f in frames.values()]
    if len(frame_list) > 1:
        frame_list = sorted(
            frame_list, key=lambda f: orders[args["animation_frame"]].index(f["name"])
        )

    if show_colorbar:
        colorvar = (
            "z"
            if constructor in [go.Histogram2d, go.Densitymap, go.Densitymapbox]
            else "color"
        )
        range_color = args["range_color"] or [None, None]

        colorscale_validator = ColorscaleValidator("colorscale", "make_figure")
        layout_patch["coloraxis1"] = dict(
            colorscale=colorscale_validator.validate_coerce(
                args["color_continuous_scale"]
            ),
            cmid=args["color_continuous_midpoint"],
            cmin=range_color[0],
            cmax=range_color[1],
            colorbar=dict(
                title_text=get_decorated_label(args, args[colorvar], colorvar)
            ),
        )
    for v in ["height", "width"]:
        if args[v]:
            layout_patch[v] = args[v]
    layout_patch["legend"] = dict(tracegroupgap=0)
    if trace_name_labels:
        layout_patch["legend"]["title_text"] = ", ".join(trace_name_labels)
    if args["title"]:
        layout_patch["title_text"] = args["title"]
    elif args["template"].layout.margin.t is None:
        layout_patch["margin"] = {"t": 60}
    if args["subtitle"]:
        layout_patch["title_subtitle_text"] = args["subtitle"]
    if (
        "size" in args
        and args["size"]
        and args["template"].layout.legend.itemsizing is None
    ):
        layout_patch["legend"]["itemsizing"] = "constant"

    if facet_col_wrap:
        nrows = math.ceil(ncols / facet_col_wrap)
        ncols = min(ncols, facet_col_wrap)

    if args.get("marginal_x") is not None:
        nrows += 1

    if args.get("marginal_y") is not None:
        ncols += 1

    fig = init_figure(
        args, subplot_type, frame_list, nrows, ncols, col_labels, row_labels
    )

    # Position traces in subplots
    for frame in frame_list:
        for trace in frame["data"]:
            if isinstance(trace, go.Splom):
                # Special case that is not compatible with make_subplots
                continue
            _set_trace_grid_reference(
                trace,
                fig.layout,
                fig._grid_ref,
                # Subplot rows are numbered bottom-up in the grid ref.
                nrows - trace._subplot_row + 1,
                trace._subplot_col,
            )

    # Add traces, layout and frames to figure
    fig.add_traces(frame_list[0]["data"] if len(frame_list) > 0 else [])
    fig.update_layout(layout_patch)
    if "template" in args and args["template"] is not None:
        fig.update_layout(template=args["template"], overwrite=True)
    for f in frame_list:
        f["name"] = str(f["name"])
    fig.frames = frame_list if len(frames) > 1 else []

    if args.get("trendline") and args.get("trendline_scope", "trace") == "overall":
        # Fit one trendline over all the data and repeat it in every subplot.
        trendline_spec = make_trendline_spec(args, constructor)
        trendline_trace = trendline_spec.constructor(
            name="Overall Trendline", legendgroup="Overall Trendline", showlegend=False
        )
        if "line" not in trendline_spec.trace_patch:  # no color override
            for m in grouped_mappings:
                if m.variable == "color":
                    next_color = m.sequence[len(m.val_map) % len(m.sequence)]
                    trendline_spec.trace_patch["line"] = dict(color=next_color)
        patch, fit_results = make_trace_kwargs(
            args, trendline_spec, args["data_frame"], {}, sizeref
        )
        trendline_trace.update(patch)
        fig.add_trace(
            trendline_trace, row="all", col="all", exclude_empty_subplots=True
        )
        fig.update_traces(selector=-1, showlegend=True)
        if fit_results is not None:
            trendline_rows.append(dict(px_fit_results=fit_results))

    if trendline_rows:
        try:
            import pandas as pd

            fig._px_trendlines = pd.DataFrame(trendline_rows)
        except ImportError:
            msg = "Trendlines require pandas to be installed."
            raise NotImplementedError(msg)
    else:
        fig._px_trendlines = []

    configure_axes(args, constructor, fig, orders)
    configure_animation_controls(args, constructor, fig)
    return fig
def init_figure(args, subplot_type, frame_list, nrows, ncols, col_labels, row_labels):
# Build subplot specs
specs = [[dict(type=subplot_type or "domain")] * ncols for _ in range(nrows)]
# Default row/column widths uniform
column_widths = [1.0] * ncols
row_heights = [1.0] * nrows
facet_col_wrap = args.get("facet_col_wrap", 0)
# Build column_widths/row_heights
if subplot_type == "xy":
if args.get("marginal_x") is not None:
if args["marginal_x"] == "histogram" or ("color" in args and args["color"]):
main_size = 0.74
else:
main_size = 0.84
row_heights = [main_size] * (nrows - 1) + [1 - main_size]
vertical_spacing = 0.01
elif facet_col_wrap:
vertical_spacing = args.get("facet_row_spacing") or 0.07
else:
vertical_spacing = args.get("facet_row_spacing") or 0.03
if args.get("marginal_y") is not None:
if args["marginal_y"] == "histogram" or ("color" in args and args["color"]):
main_size = 0.74
else:
main_size = 0.84
column_widths = [main_size] * (ncols - 1) + [1 - main_size]
horizontal_spacing = 0.005
else:
horizontal_spacing = args.get("facet_col_spacing") or 0.02
else:
# Other subplot types:
# 'scene', 'geo', 'polar', 'ternary', 'mapbox', 'domain', None
#
# We can customize subplot spacing per type once we enable faceting
# for all plot types
if facet_col_wrap:
vertical_spacing = args.get("facet_row_spacing") or 0.07
else:
vertical_spacing = args.get("facet_row_spacing") or 0.03
horizontal_spacing = args.get("facet_col_spacing") or 0.02
if facet_col_wrap:
subplot_labels = [None] * nrows * ncols
while len(col_labels) < nrows * ncols:
col_labels.append(None)
for i in range(nrows):
for j in range(ncols):
subplot_labels[i * ncols + j] = col_labels[(nrows - 1 - i) * ncols + j]
def _spacing_error_translator(e, direction, facet_arg):
"""
Translates the spacing errors thrown by the underlying make_subplots
routine into one that describes an argument adjustable through px.
"""
if ("%s spacing" % (direction,)) in e.args[0]:
e.args = (
e.args[0]
+ """
Use the {facet_arg} argument to adjust this spacing.""".format(facet_arg=facet_arg),
)
raise e
# Create figure with subplots
try:
fig = make_subplots(
rows=nrows,
cols=ncols,
specs=specs,
shared_xaxes="all",
shared_yaxes="all",
row_titles=[] if facet_col_wrap else list(reversed(row_labels)),
column_titles=[] if facet_col_wrap else col_labels,
subplot_titles=subplot_labels if facet_col_wrap else [],
horizontal_spacing=horizontal_spacing,
vertical_spacing=vertical_spacing,
row_heights=row_heights,
column_widths=column_widths,
start_cell="bottom-left",
)
except ValueError as e:
_spacing_error_translator(e, "Horizontal", "facet_col_spacing")
_spacing_error_translator(e, "Vertical", "facet_row_spacing")
raise
# Remove explicit font size of row/col titles so template can take over
for annot in fig.layout.annotations:
annot.update(font=None)
return fig
| PxDefaults |
python | coleifer__peewee | playhouse/pool.py | {
"start": 12397,
"end": 12598
} | class ____(PooledDatabase):
def _is_closed(self, conn):
try:
conn.total_changes
except:
return True
else:
return False
| _PooledSqliteDatabase |
python | huggingface__transformers | tests/models/clip/test_modeling_clip.py | {
"start": 20004,
"end": 23324
} | class ____(CLIPModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (CLIPModel,) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": CLIPModel, "image-feature-extraction": CLIPVisionModel} if is_torch_available() else {}
)
additional_model_inputs = ["pixel_values"]
test_resize_embeddings = False
test_attention_outputs = False
_is_composite = True
def setUp(self):
self.model_tester = CLIPModelTester(self)
common_properties = ["projection_dim", "logit_scale_init_value"]
self.config_tester = ConfigTester(
self, config_class=CLIPConfig, has_text_modality=False, common_properties=common_properties
)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="CLIPModel does not have input/output embeddings")
def test_model_get_set_embeddings(self):
pass
def test_load_vision_text_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Save CLIPConfig and check if we can load CLIPVisionConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
vision_config = CLIPVisionConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())
# Save CLIPConfig and check if we can load CLIPTextConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
text_config = CLIPTextConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())
@slow
def test_model_from_pretrained(self):
model_name = "openai/clip-vit-base-patch32"
model = CLIPModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
@slow
@is_flaky()
def test_eager_matches_sdpa_inference(self, *args):
# adding only flaky decorator here and call the parent test method
return getattr(ModelTesterMixin, self._testMethodName)(self)
def test_sdpa_can_dispatch_composite_models(self):
super().test_sdpa_can_dispatch_composite_models()
def test_sdpa_can_dispatch_on_flash(self):
self.skipTest(reason="CLIP text tower has two attention masks: `causal_attention_mask` and `attention_mask`")
@pytest.mark.torch_compile_test
def test_sdpa_can_compile_dynamic(self):
self.skipTest(reason="CLIP model can't be compiled dynamic, error in clip_loss`")
| CLIPModelTest |
python | falconry__falcon | falcon/errors.py | {
"start": 90238,
"end": 92447
} | class ____(HTTPBadRequest):
"""400 Bad Request.
One of the headers in the request is invalid.
`msg` and `header_name` are the only positional arguments allowed,
the other arguments are defined as keyword-only.
Args:
msg (str): A description of why the value is invalid.
header_name (str): The name of the invalid header.
Keyword Args:
headers (dict or list): A ``dict`` of header names and values
to set, or a ``list`` of (*name*, *value*) tuples. Both *name* and
*value* must be of type ``str`` or ``StringType``, and only
character values 0x00 through 0xFF may be used on platforms that
use wide characters.
Note:
The Content-Type header, if present, will be overridden. If
you wish to return custom error messages, you can create
your own HTTP error class, and install an error handler
to convert it into an appropriate HTTP response for the
client
Note:
Falcon can process a list of ``tuple`` slightly faster
than a ``dict``.
href (str): A URL someone can visit to find out more information
(default ``None``). Unicode characters are percent-encoded.
href_text (str): If href is given, use this as the friendly
title/description for the link (default 'API documentation
for this error').
code (int): An internal code that customers can reference in their
support request or to help them when searching for knowledge
base articles related to this error (default ``None``).
"""
def __init__(
self,
msg: str,
header_name: str,
*,
headers: HeaderArg | None = None,
**kwargs: HTTPErrorKeywordArguments,
):
description = 'The value provided for the "{0}" header is invalid. {1}'
description = description.format(header_name, msg)
super().__init__(
title='Invalid header value',
description=description,
headers=headers,
**kwargs,
)
| HTTPInvalidHeader |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 5914,
"end": 6349
} | class ____(graphene.ObjectType):
op = graphene.NonNull(GrapheneObjectStoreOperationType)
class Meta:
interfaces = (GrapheneDisplayableEvent,)
name = "ObjectStoreOperationResult"
def resolve_metadataEntries(self, _graphene_info: ResolveInfo):
from dagster_graphql.implementation.events import _to_metadata_entries
return _to_metadata_entries(self.metadata)
| GrapheneObjectStoreOperationResult |
python | numba__numba | numba/cuda/compiler.py | {
"start": 2387,
"end": 3193
} | class ____(LoweringPass):
_name = "cuda_backend"
def __init__(self):
LoweringPass.__init__(self)
def run_pass(self, state):
"""
Back-end: Packages lowering output in a compile result
"""
lowered = state['cr']
signature = typing.signature(state.return_type, *state.args)
state.cr = cuda_compile_result(
typing_context=state.typingctx,
target_context=state.targetctx,
typing_error=state.status.fail_reason,
type_annotation=state.type_annotation,
library=state.library,
call_helper=lowered.call_helper,
signature=signature,
fndesc=lowered.fndesc,
)
return True
@register_pass(mutates_CFG=False, analysis_only=False)
| CUDABackend |
python | explosion__spaCy | spacy/tests/parser/test_ner.py | {
"start": 28882,
"end": 29176
} | class ____:
def __init__(self, nlp, start, end, name="my_blocker"):
self.start = start
self.end = end
self.name = name
def __call__(self, doc):
doc.set_ents([], blocked=[doc[self.start : self.end]], default="unmodified")
return doc
| BlockerComponent1 |
python | PrefectHQ__prefect | src/prefect/cli/cloud/ip_allowlist.py | {
"start": 3779,
"end": 9283
} | class ____(BaseModel):
raw: str
parsed: IPvAnyNetwork
def parse_ip_network_argument(val: str) -> IPNetworkArg:
return IPNetworkArg(
raw=val,
parsed=val,
)
IP_ARGUMENT = Annotated[
IPNetworkArg,
typer.Argument(
parser=parse_ip_network_argument,
help="An IP address or range in CIDR notation. E.g. 192.168.1.0 or 192.168.1.0/24",
metavar="IP address or range",
),
]
@ip_allowlist_app.command()
async def add(
ctx: typer.Context,
ip_address_or_range: IP_ARGUMENT,
description: Optional[str] = typer.Option(
None,
"--description",
"-d",
help="A short description to annotate the entry with.",
),
):
"""Add a new IP entry to your account IP allowlist."""
new_entry = IPAllowlistEntry(
ip_network=ip_address_or_range.parsed, description=description, enabled=True
)
async with get_cloud_client(infer_cloud_url=True) as client:
ip_allowlist = await client.read_account_ip_allowlist()
existing_entry_with_same_ip = None
for entry in ip_allowlist.entries:
if entry.ip_network == ip_address_or_range.parsed:
existing_entry_with_same_ip = entry
break
if existing_entry_with_same_ip:
if not typer.confirm(
f"There's already an entry for this IP ({ip_address_or_range.raw}). Do you want to overwrite it?"
):
exit_with_error("Aborted.")
ip_allowlist.entries.remove(existing_entry_with_same_ip)
ip_allowlist.entries.append(new_entry)
try:
await client.update_account_ip_allowlist(ip_allowlist)
except PrefectHTTPStatusError as exc:
_handle_update_error(exc)
updated_ip_allowlist = await client.read_account_ip_allowlist()
_print_ip_allowlist_table(
updated_ip_allowlist, enabled=ctx.meta["enforce_ip_allowlist"]
)
@ip_allowlist_app.command()
async def remove(ctx: typer.Context, ip_address_or_range: IP_ARGUMENT):
"""Remove an IP entry from your account IP allowlist."""
async with get_cloud_client(infer_cloud_url=True) as client:
ip_allowlist = await client.read_account_ip_allowlist()
ip_allowlist.entries = [
entry
for entry in ip_allowlist.entries
if entry.ip_network != ip_address_or_range.parsed
]
try:
await client.update_account_ip_allowlist(ip_allowlist)
except PrefectHTTPStatusError as exc:
_handle_update_error(exc)
updated_ip_allowlist = await client.read_account_ip_allowlist()
_print_ip_allowlist_table(
updated_ip_allowlist, enabled=ctx.meta["enforce_ip_allowlist"]
)
@ip_allowlist_app.command()
async def toggle(ctx: typer.Context, ip_address_or_range: IP_ARGUMENT):
"""Toggle the enabled status of an individual IP entry in your account IP allowlist."""
async with get_cloud_client(infer_cloud_url=True) as client:
ip_allowlist = await client.read_account_ip_allowlist()
found_matching_entry = False
for entry in ip_allowlist.entries:
if entry.ip_network == ip_address_or_range.parsed:
entry.enabled = not entry.enabled
found_matching_entry = True
break
if not found_matching_entry:
exit_with_error(
f"No entry found with IP address `{ip_address_or_range.raw}`."
)
try:
await client.update_account_ip_allowlist(ip_allowlist)
except PrefectHTTPStatusError as exc:
_handle_update_error(exc)
updated_ip_allowlist = await client.read_account_ip_allowlist()
_print_ip_allowlist_table(
updated_ip_allowlist, enabled=ctx.meta["enforce_ip_allowlist"]
)
def _print_ip_allowlist_table(ip_allowlist: IPAllowlist, enabled: bool):
if not ip_allowlist.entries:
app.console.print(
Panel(
"IP allowlist is empty. Add an entry to secure access to your Prefect Cloud account.",
expand=False,
)
)
return
red_asterisk_if_not_enabled = "[red]*[/red]" if enabled is False else ""
table = Table(
title="IP Allowlist " + red_asterisk_if_not_enabled,
caption=f"{red_asterisk_if_not_enabled} Enforcement is "
f"[bold]{'ENABLED' if enabled else '[red]DISABLED[/red]'}[/bold].",
caption_style="not dim",
)
table.add_column("IP Address", style="cyan", no_wrap=True)
table.add_column("Description", style="blue", no_wrap=False)
table.add_column("Enabled", style="green", justify="right", no_wrap=True)
table.add_column("Last Seen", style="magenta", justify="right", no_wrap=True)
for entry in ip_allowlist.entries:
table.add_row(
str(entry.ip_network),
entry.description,
str(entry.enabled),
entry.last_seen or "Never",
style="dim" if not entry.enabled else None,
)
app.console.print(table)
def _handle_update_error(error: PrefectHTTPStatusError):
if error.response.status_code == 422 and (
details := (
error.response.json().get("detail")
or error.response.json().get("exception_detail")
)
):
exit_with_error(f"Error updating allowlist: {details}")
else:
raise error
| IPNetworkArg |
python | kubernetes-client__python | kubernetes/client/models/v1_http_get_action.py | {
"start": 383,
"end": 7067
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'host': 'str',
'http_headers': 'list[V1HTTPHeader]',
'path': 'str',
'port': 'object',
'scheme': 'str'
}
attribute_map = {
'host': 'host',
'http_headers': 'httpHeaders',
'path': 'path',
'port': 'port',
'scheme': 'scheme'
}
def __init__(self, host=None, http_headers=None, path=None, port=None, scheme=None, local_vars_configuration=None): # noqa: E501
"""V1HTTPGetAction - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._host = None
self._http_headers = None
self._path = None
self._port = None
self._scheme = None
self.discriminator = None
if host is not None:
self.host = host
if http_headers is not None:
self.http_headers = http_headers
if path is not None:
self.path = path
self.port = port
if scheme is not None:
self.scheme = scheme
@property
def host(self):
"""Gets the host of this V1HTTPGetAction. # noqa: E501
Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead. # noqa: E501
:return: The host of this V1HTTPGetAction. # noqa: E501
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""Sets the host of this V1HTTPGetAction.
Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead. # noqa: E501
:param host: The host of this V1HTTPGetAction. # noqa: E501
:type: str
"""
self._host = host
@property
def http_headers(self):
"""Gets the http_headers of this V1HTTPGetAction. # noqa: E501
Custom headers to set in the request. HTTP allows repeated headers. # noqa: E501
:return: The http_headers of this V1HTTPGetAction. # noqa: E501
:rtype: list[V1HTTPHeader]
"""
return self._http_headers
@http_headers.setter
def http_headers(self, http_headers):
"""Sets the http_headers of this V1HTTPGetAction.
Custom headers to set in the request. HTTP allows repeated headers. # noqa: E501
:param http_headers: The http_headers of this V1HTTPGetAction. # noqa: E501
:type: list[V1HTTPHeader]
"""
self._http_headers = http_headers
@property
def path(self):
"""Gets the path of this V1HTTPGetAction. # noqa: E501
Path to access on the HTTP server. # noqa: E501
:return: The path of this V1HTTPGetAction. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this V1HTTPGetAction.
Path to access on the HTTP server. # noqa: E501
:param path: The path of this V1HTTPGetAction. # noqa: E501
:type: str
"""
self._path = path
@property
def port(self):
"""Gets the port of this V1HTTPGetAction. # noqa: E501
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. # noqa: E501
:return: The port of this V1HTTPGetAction. # noqa: E501
:rtype: object
"""
return self._port
@port.setter
def port(self, port):
"""Sets the port of this V1HTTPGetAction.
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. # noqa: E501
:param port: The port of this V1HTTPGetAction. # noqa: E501
:type: object
"""
if self.local_vars_configuration.client_side_validation and port is None: # noqa: E501
raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
self._port = port
@property
def scheme(self):
"""Gets the scheme of this V1HTTPGetAction. # noqa: E501
Scheme to use for connecting to the host. Defaults to HTTP. # noqa: E501
:return: The scheme of this V1HTTPGetAction. # noqa: E501
:rtype: str
"""
return self._scheme
@scheme.setter
def scheme(self, scheme):
"""Sets the scheme of this V1HTTPGetAction.
Scheme to use for connecting to the host. Defaults to HTTP. # noqa: E501
:param scheme: The scheme of this V1HTTPGetAction. # noqa: E501
:type: str
"""
self._scheme = scheme
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1HTTPGetAction):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1HTTPGetAction):
return True
return self.to_dict() != other.to_dict()
| V1HTTPGetAction |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/screen_switch.py | {
"start": 130,
"end": 405
} | class ____(Screen):
BINDINGS = [("b", "switch_to_b", "Switch to screen B")]
def compose(self) -> ComposeResult:
yield Header()
yield Static("A")
yield Footer()
def action_switch_to_b(self):
self.app.switch_screen(ScreenB())
| ScreenA |
python | pennersr__django-allauth | allauth/mfa/internal/flows/trust.py | {
"start": 370,
"end": 2791
} | class ____:
fingerprint: str
at: int
def create_config_fingerprint(user: AbstractBaseUser) -> str:
"""
If the user changes anything about his security setup, we want to invalidate
any trust that was issued before.
"""
salt = "allauth.mfa.trust"
parts: List[str] = []
parts.append(str(user.pk))
parts.append(user.password)
for authenticator in Authenticator.objects.filter(user_id=user.pk).order_by("pk"):
parts.append(str(authenticator.pk))
parts.append(str(authenticator.type))
seed = authenticator.data.get("seed")
if seed is not None:
parts.append(str(seed))
return salted_hmac(salt, "|".join(parts), algorithm="sha256").hexdigest()
def decode_trust_cookie(request: HttpRequest) -> List[IssuedTrust]:
value = request.COOKIES.get(app_settings.TRUST_COOKIE_NAME)
if not value:
return []
signer = Signer()
try:
data = signer.unsign_object(value)
except BadSignature:
return []
trusts = [IssuedTrust(fingerprint=entry[0], at=entry[1]) for entry in data]
now = time.time()
trusts = list(
filter(
lambda t: t.at + app_settings.TRUST_COOKIE_AGE.total_seconds() > now, trusts
)
)
return trusts
def encode_trust_cookie(trusts: List[IssuedTrust]) -> str:
signer = Signer()
return signer.sign_object([(it.fingerprint, it.at) for it in trusts])
def trust_browser(
request: HttpRequest, user: AbstractBaseUser, response: HttpResponse
) -> None:
fingerprint = create_config_fingerprint(user)
trusts = decode_trust_cookie(request)
trusts.append(IssuedTrust(fingerprint=fingerprint, at=int(time.time())))
response.set_cookie(
app_settings.TRUST_COOKIE_NAME,
encode_trust_cookie(trusts),
max_age=app_settings.TRUST_COOKIE_AGE,
path=app_settings.TRUST_COOKIE_PATH,
domain=app_settings.TRUST_COOKIE_DOMAIN,
secure=app_settings.TRUST_COOKIE_SECURE,
httponly=app_settings.TRUST_COOKIE_HTTPONLY,
samesite=app_settings.TRUST_COOKIE_SAMESITE,
)
def is_trusted_browser(request: HttpRequest, user: AbstractBaseUser) -> bool:
if not app_settings._TRUST_STAGE_ENABLED:
return False
trusts = decode_trust_cookie(request)
fingerprint = create_config_fingerprint(user)
return any([t.fingerprint == fingerprint for t in trusts])
| IssuedTrust |
python | modin-project__modin | modin/core/dataframe/algebra/default2pandas/str.py | {
"start": 888,
"end": 1322
} | class ____(SeriesDefault):
"""Builder for default-to-pandas methods which is executed under `str` accessor."""
@classmethod
def frame_wrapper(cls, df):
"""
Get `str` accessor of the passed frame.
Parameters
----------
df : pandas.DataFrame
Returns
-------
pandas.core.strings.accessor.StringMethods
"""
return df.squeeze(axis=1).str
| StrDefault |
python | kamyu104__LeetCode-Solutions | Python/minimum-moves-to-make-array-complementary.py | {
"start": 33,
"end": 1005
} | class ____(object):
def minMoves(self, nums, limit):
"""
:type nums: List[int]
:type limit: int
:rtype: int
"""
diff = [0]*(2*(limit+1))
for i in xrange(len(nums)//2):
left, right = nums[i], nums[-1-i]
diff[min(left, right)+1] -= 1 # if target total grows to min(left, right)+1, one less move
diff[left+right] -= 1 # if target total grows to left+right, one less move
diff[left+right+1] += 1 # if target total grows to left+right+1, one more move
diff[max(left, right)+limit+1] += 1 # if target total grows to max(left, right)+limit+1, one more move
result = count = len(nums) # default is to move all nums
for total in xrange(2, 2*limit+1): # enumerate all possible target totals
count += diff[total]
result = min(result, count)
return result
| Solution |
python | huggingface__transformers | src/transformers/models/swiftformer/modeling_swiftformer.py | {
"start": 11951,
"end": 13779
} | class ____(nn.Module):
def __init__(self, config: SwiftFormerConfig) -> None:
super().__init__()
self.config = config
embed_dims = config.embed_dims
downsamples = config.downsamples
layer_depths = config.depths
# Transformer model
network = []
for i in range(len(layer_depths)):
stage = SwiftFormerStage(config=config, index=i)
network.append(stage)
if i >= len(layer_depths) - 1:
break
if downsamples[i] or embed_dims[i] != embed_dims[i + 1]:
# downsampling between two stages
network.append(SwiftFormerEmbeddings(config, index=i))
self.network = nn.ModuleList(network)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithNoAttention]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
all_hidden_states = (hidden_states,) if output_hidden_states else None
for block in self.network:
hidden_states = block(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
)
@auto_docstring
| SwiftFormerEncoder |
python | ansible__ansible | packaging/release.py | {
"start": 3739,
"end": 4436
} | class ____:
"""Display interface for sending output to the console."""
CLEAR = "\033[0m"
RED = "\033[31m"
BLUE = "\033[34m"
PURPLE = "\033[35m"
CYAN = "\033[36m"
def fatal(self, message: t.Any) -> None:
"""Print a fatal message to the console."""
self.show(f"FATAL: {message}", color=self.RED)
def warning(self, message: t.Any) -> None:
"""Print a warning message to the console."""
self.show(f"WARNING: {message}", color=self.PURPLE)
def show(self, message: t.Any, color: str | None = None) -> None:
"""Print a message to the console."""
print(f"{color or self.CLEAR}{message}{self.CLEAR}", flush=True)
| Display |
python | sympy__sympy | sympy/liealgebras/type_f.py | {
"start": 91,
"end": 4423
} | class ____(Standard_Cartan):
def __new__(cls, n):
if n != 4:
raise ValueError("n should be 4")
return Standard_Cartan.__new__(cls, "F", 4)
def dimension(self):
"""Dimension of the vector space V underlying the Lie algebra
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("F4")
>>> c.dimension()
4
"""
return 4
def basic_root(self, i, j):
"""Generate roots with 1 in ith position and -1 in jth position
"""
n = self.n
root = [0]*n
root[i] = 1
root[j] = -1
return root
def simple_root(self, i):
"""The ith simple root of F_4
Every lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then it is clear that every
element of Q can be written as a linear combination
of elements of D with all coefficients non-negative.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("F4")
>>> c.simple_root(3)
[0, 0, 0, 1]
"""
if i < 3:
return self.basic_root(i-1, i)
if i == 3:
root = [0]*4
root[3] = 1
return root
if i == 4:
root = [Rational(-1, 2)]*4
return root
def positive_roots(self):
"""Generate all the positive roots of A_n
This is half of all of the roots of F_4; by multiplying all the
positive roots by -1 we get the negative roots.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A3")
>>> c.positive_roots()
{1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0],
5: [0, 1, 0, -1], 6: [0, 0, 1, -1]}
"""
n = self.n
posroots = {}
k = 0
for i in range(0, n-1):
for j in range(i+1, n):
k += 1
posroots[k] = self.basic_root(i, j)
k += 1
root = self.basic_root(i, j)
root[j] = 1
posroots[k] = root
for i in range(0, n):
k += 1
root = [0]*n
root[i] = 1
posroots[k] = root
k += 1
root = [Rational(1, 2)]*n
posroots[k] = root
for i in range(1, 4):
k += 1
root = [Rational(1, 2)]*n
root[i] = Rational(-1, 2)
posroots[k] = root
posroots[k+1] = [Rational(1, 2), Rational(1, 2), Rational(-1, 2), Rational(-1, 2)]
posroots[k+2] = [Rational(1, 2), Rational(-1, 2), Rational(1, 2), Rational(-1, 2)]
posroots[k+3] = [Rational(1, 2), Rational(-1, 2), Rational(-1, 2), Rational(1, 2)]
posroots[k+4] = [Rational(1, 2), Rational(-1, 2), Rational(-1, 2), Rational(-1, 2)]
return posroots
def roots(self):
"""
Returns the total number of roots for F_4
"""
return 48
def cartan_matrix(self):
"""The Cartan matrix for F_4
The Cartan matrix matrix for a Lie algebra is
generated by assigning an ordering to the simple
roots, (alpha[1], ...., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>).
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('A4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, 0],
[ 0, -1, 2, -1],
[ 0, 0, -1, 2]])
"""
m = Matrix( 4, 4, [2, -1, 0, 0, -1, 2, -2, 0, 0,
-1, 2, -1, 0, 0, -1, 2])
return m
def basis(self):
"""
Returns the number of independent generators of F_4
"""
return 52
def dynkin_diagram(self):
diag = "0---0=>=0---0\n"
diag += " ".join(str(i) for i in range(1, 5))
return diag
| TypeF |
python | encode__django-rest-framework | tests/test_validators.py | {
"start": 34692,
"end": 34834
} | class ____(serializers.ModelSerializer):
class Meta:
model = UniqueForMonthModel
fields = '__all__'
| UniqueForMonthSerializer |
python | scipy__scipy | scipy/sparse/linalg/_special_sparse_arrays.py | {
"start": 27558,
"end": 30268
} | class ____(LinearOperator):
"""
Construct a stiffness matrix in various formats of Mikota pair.
The stiffness matrix `K` is square real tri-diagonal symmetric
positive definite with integer entries.
Parameters
----------
shape : tuple of int
The shape of the matrix.
dtype : dtype
Numerical type of the array. Default is ``np.int32``.
Methods
-------
toarray()
Construct a dense array from Mikota data
tosparse()
Construct a sparse array from Mikota data
tobanded()
The format for banded symmetric matrices,
i.e., (2, n) ndarray with 2 upper diagonals
placing the main diagonal at the bottom.
"""
def __init__(self, shape, dtype=np.int32):
self.shape = shape
self.dtype = dtype
super().__init__(dtype, shape)
# The matrix is constructed from its diagonals;
# we precompute these to avoid duplicating the computation
n = shape[0]
self._diag0 = np.arange(2 * n - 1, 0, -2, dtype=self.dtype)
self._diag1 = - np.arange(n - 1, 0, -1, dtype=self.dtype)
def tobanded(self):
return np.array([np.pad(self._diag1, (1, 0), 'constant'), self._diag0])
def tosparse(self):
from scipy.sparse import diags_array
return diags_array([self._diag1, self._diag0, self._diag1], offsets=[-1, 0, 1],
shape=self.shape, dtype=self.dtype)
def toarray(self):
return self.tosparse().toarray()
def _matvec(self, x):
"""
Construct matrix-free callable banded-matrix-vector multiplication by
the Mikota stiffness matrix without constructing or storing the matrix
itself using the knowledge of its entries and the 3-diagonal format.
"""
x = x.reshape(self.shape[0], -1)
result_dtype = np.promote_types(x.dtype, self.dtype)
kx = np.zeros_like(x, dtype=result_dtype)
d1 = self._diag1
d0 = self._diag0
kx[0, :] = d0[0] * x[0, :] + d1[0] * x[1, :]
kx[-1, :] = d1[-1] * x[-2, :] + d0[-1] * x[-1, :]
kx[1: -1, :] = (d1[:-1, None] * x[: -2, :]
+ d0[1: -1, None] * x[1: -1, :]
+ d1[1:, None] * x[2:, :])
return kx
def _matmat(self, x):
"""
Construct matrix-free callable matrix-matrix multiplication by
the Stiffness mass matrix without constructing or storing the matrix itself
by reusing the ``_matvec(x)`` that supports both 1D and 2D arrays ``x``.
"""
return self._matvec(x)
def _adjoint(self):
return self
def _transpose(self):
return self
| MikotaK |
python | ray-project__ray | rllib/examples/_old_api_stack/models/fast_model.py | {
"start": 406,
"end": 1779
} | class ____(TFModelV2):
"""An example for a non-Keras ModelV2 in tf that learns a single weight.
Defines all network architecture in `forward` (not `__init__` as it's
usually done for Keras-style TFModelV2s).
"""
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
super().__init__(obs_space, action_space, num_outputs, model_config, name)
# Have we registered our vars yet (see `forward`)?
self._registered = False
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
with tf1.variable_scope("model", reuse=tf1.AUTO_REUSE):
bias = tf1.get_variable(
dtype=tf.float32,
name="bias",
initializer=tf.keras.initializers.Zeros(),
shape=(),
)
output = bias + tf.zeros([tf.shape(input_dict["obs"])[0], self.num_outputs])
self._value_out = tf.reduce_mean(output, -1) # fake value
if not self._registered:
self.register_variables(
tf1.get_collection(
tf1.GraphKeys.TRAINABLE_VARIABLES, scope=".+/model/.+"
)
)
self._registered = True
return output, []
@override(ModelV2)
def value_function(self):
return tf.reshape(self._value_out, [-1])
| FastModel |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.