language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | torch/_subclasses/_fake_tensor_utils.py | {
"start": 463,
"end": 2272
} | class ____:
"""
Represents a SymNode without the associated ShapeEnv
"""
# n.b. keep the same protocol as SymNode
_expr: sympy.Expr
pytype: type
_hint: Optional[Union[int, float, bool]]
constant: Optional[Union[int, float, bool]]
fx_node: torch.fx.Node
@staticmethod
def from_node(node: SymNode) -> _DeconstructedSymNode:
return _DeconstructedSymNode(
node._expr,
node.pytype,
node._hint,
node.constant,
# pyrefly: ignore [bad-argument-type]
node.fx_node,
)
def extract(self, shape_env: ShapeEnv) -> SymNode:
return SymNode(
self._expr, shape_env, self.pytype, self._hint, self.constant, self.fx_node
)
def __str__(self) -> str:
return str(self._expr)
def __repr__(self) -> str:
return f"_DeconstructedSymNode{{{self._expr!r}, {self.pytype!r}, {self._hint!r}, {self.constant!r}, {self.fx_node!r}}}"
def __eq__(self, other: object) -> bool:
raise NotImplementedError
def __hash__(self) -> int:
raise NotImplementedError
# _value_eq to match SymNode
def _value_eq(self, other: object) -> bool:
if isinstance(other, (SymNode, _DeconstructedSymNode)):
return (
self._expr == other._expr
and self.pytype == other.pytype
and self._hint == other._hint
and self.constant == other.constant
and self.fx_node == other.fx_node
)
else:
return False
# _value_hash to match SymNode
def _value_hash(self) -> int:
return hash((self._expr, self.pytype, self._hint, self.constant, self.fx_node))
@dataclass(frozen=True, slots=True)
| _DeconstructedSymNode |
python | pydantic__pydantic | pydantic/v1/networks.py | {
"start": 15277,
"end": 15368
} | class ____(AnyUrl):
allowed_schemes = {'amqp', 'amqps'}
host_required = False
| AmqpDsn |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 30264,
"end": 30376
} | class ____(DataType):
"""Type representing DataType values that could not be determined statically."""
| Unknown |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/tools/_beta_functions.py | {
"start": 1053,
"end": 1427
} | class ____(ABC):
@abstractmethod
def to_dict(self) -> BetaToolUnionParam: ...
@abstractmethod
def call(self, input: object) -> BetaFunctionToolResultType: ...
@property
def name(self) -> str:
raw = self.to_dict()
if "mcp_server_name" in raw:
return raw["mcp_server_name"]
return raw["name"]
| BetaBuiltinFunctionTool |
python | realpython__materials | python-double-underscore/passenger.py | {
"start": 0,
"end": 165
} | class ____:
def __init__(self, name, class_, seat):
self.name = name
self.class_ = class_
self.seat = seat
# Implementation...
| Passenger |
python | python__mypy | mypy/test/testsemanal.py | {
"start": 1189,
"end": 2547
} | class ____(DataSuite):
files = semanal_files
native_sep = True
def run_case(self, testcase: DataDrivenTestCase) -> None:
test_semanal(testcase)
def test_semanal(testcase: DataDrivenTestCase) -> None:
"""Perform a semantic analysis test case.
The testcase argument contains a description of the test case
(inputs and output).
"""
try:
src = "\n".join(testcase.input)
options = get_semanal_options(src, testcase)
options.python_version = testfile_pyversion(testcase.file)
result = build.build(
sources=[BuildSource("main", None, src)], options=options, alt_lib_path=test_temp_dir
)
a = result.errors
if a:
raise CompileError(a)
# Include string representations of the source files in the actual
# output.
for module in sorted(result.files.keys()):
if module in testcase.test_modules:
a += result.files[module].str_with_options(options).split("\n")
except CompileError as e:
a = e.messages
if testcase.normalize_output:
a = normalize_error_messages(a)
assert_string_arrays_equal(
testcase.output,
a,
f"Invalid semantic analyzer output ({testcase.file}, line {testcase.line})",
)
# Semantic analyzer error test cases
| SemAnalSuite |
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 11237,
"end": 11300
} | class ____(HTTPServerError):
status_code = 502
| HTTPBadGateway |
python | sanic-org__sanic | sanic/application/constants.py | {
"start": 503,
"end": 594
} | class ____(StrEnum):
"""Server modes."""
PRODUCTION = auto()
DEBUG = auto()
| Mode |
python | ray-project__ray | python/ray/tests/test_failure_4.py | {
"start": 5808,
"end": 24317
} | class ____:
def __init__(self):
self.ref = None
def invoke(self):
self.ref = foo.remote(0)
# Wait for the task to finish before exiting the driver.
ray.get(self.ref)
def get(self):
print("get", self.ref)
return self.ref
if __name__ == "__main__":
ray.init(address="{}", namespace="default")
a = Actor.options(name="holder", lifetime="detached").remote()
# Wait for the task to finish before exiting the driver.
ray.get(a.invoke.remote())
print("success")
""".format(
address
)
out = run_string_as_driver(driver_script)
assert "success" in out
import time
time.sleep(5)
# connect to the cluster
ray.init(address=address, namespace="default")
actor = ray.get_actor("holder")
x = actor.get.remote()
while isinstance(x, ray.ObjectRef):
x = ray.get(x)
assert x == 42
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
@pytest.mark.parametrize("debug_enabled", [False, True])
def test_object_lost_error(ray_start_cluster, debug_enabled):
cluster = ray_start_cluster
system_config = {
"health_check_failure_threshold": 3,
"health_check_period_ms": 1000,
"health_check_initial_delay_ms": 0,
}
if debug_enabled:
system_config["record_ref_creation_sites"] = True
cluster.add_node(num_cpus=0, _system_config=system_config)
ray.init(address=cluster.address)
worker_node = cluster.add_node(num_cpus=1)
@ray.remote(num_cpus=1)
class Actor:
def __init__(self):
return
def foo(self):
return "x" * 1000_000
def done(self):
return
@ray.remote
def borrower(ref):
ray.get(ref[0])
@ray.remote
def task_arg(ref):
return
a = Actor.remote()
x = a.foo.remote()
ray.get(a.done.remote())
cluster.remove_node(worker_node, allow_graceful=False)
cluster.add_node(num_cpus=1)
y = borrower.remote([x])
try:
ray.get(x)
assert False
except ray.exceptions.ObjectLostError as e:
error = str(e)
print(error)
assert ("actor call" in error) == debug_enabled
assert ("test_object_lost_error" in error) == debug_enabled
try:
ray.get(y)
assert False
except ray.exceptions.RayTaskError as e:
error = str(e)
print(error)
assert ("actor call" in error) == debug_enabled
assert ("test_object_lost_error" in error) == debug_enabled
try:
ray.get(task_arg.remote(x))
except ray.exceptions.RayTaskError as e:
error = str(e)
print(error)
assert ("actor call" in error) == debug_enabled
assert ("test_object_lost_error" in error) == debug_enabled
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
@pytest.mark.parametrize(
"ray_start_cluster_head",
[
{
"num_cpus": 0,
"_system_config": {
"health_check_initial_delay_ms": 0,
"health_check_period_ms": 100,
"health_check_failure_threshold": 10,
},
},
],
indirect=True,
)
def test_raylet_graceful_shutdown_through_rpc(ray_start_cluster_head, error_pubsub):
"""
Prepare the cluster.
"""
cluster = ray_start_cluster_head
head_node_port = None
for n in ray.nodes():
head_node_port = int(n["NodeManagerPort"])
worker = cluster.add_node(num_cpus=1)
cluster.wait_for_nodes()
worker_node_port = None
for n in ray.nodes():
port = int(n["NodeManagerPort"])
if port != head_node_port and n["alive"]:
worker_node_port = port
"""
warm up the cluster
"""
@ray.remote
def f():
pass
ray.get(f.remote())
# Kill a raylet gracefully.
def kill_raylet(ip, port, graceful=True):
raylet_address = build_address(ip, port)
channel = grpc.insecure_channel(raylet_address)
stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)
print(f"Sending a shutdown request to {build_address(ip, port)}")
try:
stub.ShutdownRaylet(
node_manager_pb2.ShutdownRayletRequest(graceful=graceful)
)
except _InactiveRpcError:
assert not graceful
"""
Kill the first worker ungracefully.
"""
ip = worker.node_ip_address
kill_raylet(ip, worker_node_port, graceful=False)
p = error_pubsub
errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR, timeout=10)
# Should print the heartbeat messages.
assert "has missed too many heartbeats from it" in errors[0]["error_message"]
# NOTE the killed raylet is a zombie since the
# parent process (the pytest script) hasn't called wait syscall.
# For normal scenarios where raylet is created by
# ray start, this issue won't exist.
try:
wait_for_condition(lambda: len(search_raylet(cluster)) == 1)
except Exception:
print("More than one raylets are detected.")
print(search_raylet(cluster))
"""
Kill the second worker gracefully.
"""
worker = cluster.add_node(num_cpus=0)
worker_node_port = None
for n in ray.nodes():
port = int(n["NodeManagerPort"])
if port != head_node_port and n["alive"]:
worker_node_port = port
# Kill the second worker gracefully.
ip = worker.node_ip_address
kill_raylet(ip, worker_node_port, graceful=True)
p = error_pubsub
# Error shouldn't be printed to the driver.
errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR, timeout=5)
# Error messages shouldn't be published.
assert len(errors) == 0
try:
wait_for_condition(lambda: len(search_raylet(cluster)) == 1)
except Exception:
print("More than one raylets are detected.")
print(search_raylet(cluster))
"""
Make sure head node is not dead.
"""
ray.get(f.options(num_cpus=0).remote())
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
@pytest.mark.parametrize(
"ray_start_cluster_head",
[
{
"num_cpus": 0,
"_system_config": {
"health_check_failure_threshold": 10,
"health_check_period_ms": 100,
"health_check_initial_delay_ms": 0,
},
}
],
indirect=True,
)
def test_gcs_drain(ray_start_cluster_head, error_pubsub):
"""
Prepare the cluster.
"""
cluster = ray_start_cluster_head
head_node_id = ray.nodes()[0]["NodeID"]
NUM_NODES = 2
for _ in range(NUM_NODES):
cluster.add_node(num_cpus=1)
worker_node_ids = []
for n in ray.nodes():
if n["NodeID"] != head_node_id:
worker_node_ids.append(n["NodeID"])
"""
Warm up the cluster.
"""
@ray.remote(num_cpus=1)
class A:
def ready(self):
pass
actors = [A.remote() for _ in range(NUM_NODES)]
ray.get([actor.ready.remote() for actor in actors])
"""
Test batch drain.
"""
# Prepare requests.
gcs_server_addr = cluster.gcs_address
options = ray_constants.GLOBAL_GRPC_OPTIONS
channel = grpc.insecure_channel(gcs_server_addr, options)
stub = gcs_service_pb2_grpc.NodeInfoGcsServiceStub(channel)
r = gcs_service_pb2.DrainNodeRequest()
for worker_id in worker_node_ids:
data = r.drain_node_data.add()
data.node_id = NodeID.from_hex(worker_id).binary()
stub.DrainNode(r)
p = error_pubsub
# Error shouldn't be printed to the driver.
errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR, timeout=5)
assert len(errors) == 0
# There should be only a head node since we drained worker nodes.
# NOTE: In the current implementation we kill nodes when draining them.
# This check should be removed once we implement
# the proper drain behavior.
try:
wait_for_condition(lambda: len(search_raylet(cluster)) == 1)
except Exception:
print("More than one raylets are detected.")
print(search_raylet(cluster))
"""
Make sure the API is idempotent.
"""
for _ in range(10):
stub.DrainNode(r)
p = error_pubsub
# Error shouldn't be printed to the driver.
errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR, timeout=5)
assert len(errors) == 0
"""
Make sure the GCS states are updated properly.
"""
for n in ray.nodes():
node_id = n["NodeID"]
is_alive = n["Alive"]
if node_id == head_node_id:
assert is_alive
if node_id in worker_node_ids:
assert not is_alive
"""
Make sure head node is not dead and functional.
"""
a = A.options(num_cpus=0).remote()
ray.get(a.ready.remote())
def test_worker_start_timeout(monkeypatch, ray_start_cluster):
# This test is to make sure
# 1. when worker failed to register, raylet will print useful log
# 2. raylet will kill hanging worker
with monkeypatch.context() as m:
# this delay will make worker start slow
m.setenv(
"RAY_testing_asio_delay_us",
"InternalKVGcsService.grpc_server.InternalKVGet=2000000:2000000",
)
m.setenv("RAY_worker_register_timeout_seconds", "1")
m.setenv("RAY_prestart_worker_first_driver", "false")
cluster = ray_start_cluster
cluster.add_node(num_cpus=4, object_store_memory=1e9)
script = """
import ray
ray.init(address='auto')
@ray.remote
def task():
return None
ray.get(task.remote(), timeout=3)
"""
with pytest.raises(subprocess.CalledProcessError) as e:
run_string_as_driver(script)
# make sure log is correct
assert (
"The process is still alive, probably it's hanging during start"
) in e.value.output.decode()
# worker will be killed so it won't try to register to raylet
assert (
"Received a register request from an unknown worker shim process"
) not in e.value.output.decode()
def test_task_failure_when_driver_local_raylet_dies(ray_start_cluster):
cluster = ray_start_cluster
system_configs = {
"health_check_initial_delay_ms": 0,
"health_check_timeout_ms": 1000,
"health_check_failure_threshold": 1,
}
head = cluster.add_node(
num_cpus=4,
resources={"foo": 1},
_system_config=system_configs,
)
cluster.wait_for_nodes()
ray.init(address=cluster.address, include_dashboard=True)
signal = SignalActor.remote()
@ray.remote(resources={"foo": 1})
def func():
ray.get(signal.send.remote())
while True:
time.sleep(1)
func.remote()
ray.get(signal.wait.remote())
# The lease request should wait inside raylet
# since there is no available resources.
ret = func.options(name="task-local-raylet-dead").remote()
# Waiting for the lease request to reach raylet.
def task_running():
tasks = list_tasks(filters=[("name", "=", "task-local-raylet-dead")])
assert len(tasks) == 1
assert tasks[0]["state"] == "PENDING_NODE_ASSIGNMENT"
return True
wait_for_condition(task_running)
head.kill_raylet()
with pytest.raises(LocalRayletDiedError):
ray.get(ret)
# Check the task failure states for observability.
wait_for_condition(
verify_failed_task,
name="task-local-raylet-dead",
error_type="LOCAL_RAYLET_DIED",
error_message="The worker failed to receive a response from the local raylet",
)
def test_locality_aware_scheduling_for_dead_nodes(shutdown_only):
"""Test that locality-ware scheduling can handle dead nodes."""
# Create a cluster with 4 nodes.
config = {
"health_check_failure_threshold": 5,
"health_check_period_ms": 50,
"health_check_initial_delay_ms": 0,
}
cluster = Cluster()
cluster.add_node(num_cpus=4, resources={"node1": 1}, _system_config=config)
cluster.wait_for_nodes()
ray.init(address=cluster.address)
node2 = cluster.add_node(num_cpus=4, resources={"node2": 1})
node3 = cluster.add_node(num_cpus=4, resources={"node3": 1})
node4 = cluster.add_node(num_cpus=4, resources={"node4": 1})
cluster.wait_for_nodes()
# Create 2 objects on node 2.
@ray.remote(resources={"node2": 0.1})
def create_object():
return np.zeros(10 * 1024 * 1024, dtype=np.uint8)
obj1 = create_object.remote()
obj2 = create_object.remote()
# Push these 2 objects to other nodes.
# node2 will have obj1 and obj2.
# node3 will have obj1.
# node4 will have obj2.
@ray.remote
class MyActor:
def __init__(self, obj_refs):
# Note, we need to keep obj_refs to prevent the objects from
# being garbage collected.
self.obj_refs = obj_refs
self.obj = ray.get(obj_refs)
def ready(self):
return True
actors = [
MyActor.options(resources={"node2": 0.1}).remote([obj1, obj2]),
MyActor.options(resources={"node3": 0.1}).remote([obj1]),
MyActor.options(resources={"node4": 0.1}).remote([obj2]),
]
assert all(ray.get(actor.ready.remote()) is True for actor in actors)
# This function requires obj1 and obj2.
@ray.remote
def func(obj1, obj2):
return ray._private.worker.global_worker.node.unique_id
# This function should be scheduled to node2. As node2 has both objects.
assert ray.get(func.remote(obj1, obj2)) == node2.unique_id
# Kill node2, and re-schedule the function.
# It should be scheduled to either node3 or node4.
node2.kill_raylet()
# Waits for the driver to receive the NodeRemoved notification.
time.sleep(1)
target_node = ray.get(func.remote(obj1, obj2))
assert target_node == node3.unique_id or target_node == node4.unique_id
def test_actor_task_fast_fail(ray_start_cluster):
# Explicitly set `max_task_retries=0` here to show the test scenario.
@ray.remote(max_restarts=1, max_task_retries=0)
class SlowActor:
def __init__(self, signal_actor):
if ray.get_runtime_context().was_current_actor_reconstructed:
ray.get(signal_actor.wait.remote())
def ping(self):
return "pong"
signal = SignalActor.remote()
actor = SlowActor.remote(signal)
ray.get(actor.ping.remote())
ray.kill(actor, no_restart=False)
# Wait for a while so that now the driver knows the actor is in
# RESTARTING state.
time.sleep(1)
# An actor task should fail quickly until the actor is restarted if
# `max_task_retries` is 0.
with pytest.raises(ray.exceptions.RayActorError):
ray.get(actor.ping.remote())
signal.send.remote()
# Wait for a while so that now the driver knows the actor is in
# ALIVE state.
time.sleep(1)
# An actor task should succeed.
ray.get(actor.ping.remote())
def test_task_crash_after_raylet_dead_throws_node_died_error():
@ray.remote(max_retries=0)
def sleeper():
import os
time.sleep(3)
os.kill(os.getpid(), 9)
with ray.init(include_dashboard=True):
ref = sleeper.remote()
raylet = ray.nodes()[0]
kill_raylet(raylet)
with pytest.raises(ray.exceptions.NodeDiedError) as error:
ray.get(ref)
message = str(error)
assert raylet["NodeManagerAddress"] in message
def test_accessing_actor_after_cluster_crashed(shutdown_only):
ray.init()
@ray.remote
class A:
def f(self):
return
a = A.remote()
ray.get(a.f.remote())
ray.shutdown()
ray.init()
with pytest.raises(Exception) as exc_info:
ray.get(a.f.remote())
assert "It might be dead or it's from a different cluster" in exc_info.value.args[0]
def test_internal_error_as_instance_of_cause_correct(shutdown_only):
"""Verify as_instance_of_cause returns the correct exception
even when an exception is ray internal error.
"""
ray.init()
@ray.remote
class A:
def kill(self):
import os
os._exit(1)
def get(self):
return 1
a = A.remote()
@ray.remote
class B:
def get_result(self, a):
return ray.get(a.get.remote())
b = B.remote()
a.kill.remote()
try:
ray.get(b.get_result.remote(a))
except Exception as e:
assert isinstance(e.as_instanceof_cause(), ray.exceptions.RayActorError)
assert isinstance(e.cause, ray.exceptions.RayActorError)
assert isinstance(e, ray.exceptions.RayActorError)
@pytest.mark.parametrize(
"ray_start_cluster",
[
{
"num_cpus": 1,
"num_nodes": 1,
}
],
indirect=True,
)
def test_shows_both_user_exception_system_error_same_time(ray_start_cluster):
@ray.remote(max_calls=1)
def f():
raise Exception("this is an exception")
with pytest.raises(Exception):
ray.get(f.remote())
# Wait for the task info to be propagated.
import time
time.sleep(1)
tasks = list_tasks(filters=[("name", "=", "f")], detail=True)
assert len(tasks) == 1, tasks
task = tasks[0]
assert task["state"] == "FAILED"
assert task["error_type"] == "TASK_EXECUTION_EXCEPTION"
# The error message should look like below (modulo line breaks), and we compare
# without the stacktrace:
#
# User exception:
# ray::f() (pid=70293, ip=127.0.0.1)
# File "<YOUR_RAY_DIR>/python/ray/tests/test_exit_observability.py", line 465,
# in f
# raise Exception("this is an exception")
# Exception: this is an exception
#
# System error:
# IntentionalSystemExit: Worker exits with an exit code 0. Exited because worker
# reached max_calls=1 for this method.
error_message = task["error_message"]
assert error_message.startswith("User exception:\nray::f()"), error_message
assert error_message.endswith(
"Exception: this is an exception\n\nSystem error:\n"
"IntentionalSystemExit: Worker exits with an exit "
"code 0. Exited because worker reached max_calls=1 for this method."
), task
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| Actor |
python | coleifer__peewee | playhouse/reflection.py | {
"start": 12052,
"end": 13754
} | class ____(Metadata):
if FIELD_TYPE is None:
column_map = {}
else:
column_map = {
FIELD_TYPE.BLOB: TextField,
FIELD_TYPE.CHAR: CharField,
FIELD_TYPE.DATE: DateField,
FIELD_TYPE.DATETIME: DateTimeField,
FIELD_TYPE.DECIMAL: DecimalField,
FIELD_TYPE.DOUBLE: FloatField,
FIELD_TYPE.FLOAT: FloatField,
FIELD_TYPE.INT24: IntegerField,
FIELD_TYPE.LONG_BLOB: TextField,
FIELD_TYPE.LONG: IntegerField,
FIELD_TYPE.LONGLONG: BigIntegerField,
FIELD_TYPE.MEDIUM_BLOB: TextField,
FIELD_TYPE.NEWDECIMAL: DecimalField,
FIELD_TYPE.SHORT: IntegerField,
FIELD_TYPE.STRING: CharField,
FIELD_TYPE.TIMESTAMP: DateTimeField,
FIELD_TYPE.TIME: TimeField,
FIELD_TYPE.TINY_BLOB: TextField,
FIELD_TYPE.TINY: IntegerField,
FIELD_TYPE.VAR_STRING: CharField,
}
def __init__(self, database, **kwargs):
if 'password' in kwargs:
kwargs['passwd'] = kwargs.pop('password')
super(MySQLMetadata, self).__init__(database, **kwargs)
def get_column_types(self, table, schema=None):
column_types = {}
# Look up the actual column type for each column.
cursor = self.execute('SELECT * FROM `%s` LIMIT 1' % table)
# Store column metadata in dictionary keyed by column name.
for column_description in cursor.description:
name, type_code = column_description[:2]
column_types[name] = self.column_map.get(type_code, UnknownField)
return column_types, {}
| MySQLMetadata |
python | openai__openai-python | src/openai/types/beta/realtime/realtime_response_usage.py | {
"start": 566,
"end": 800
} | class ____(BaseModel):
audio_tokens: Optional[int] = None
"""The number of audio tokens used in the Response."""
text_tokens: Optional[int] = None
"""The number of text tokens used in the Response."""
| OutputTokenDetails |
python | pyca__cryptography | tests/hazmat/primitives/decrepit/test_3des.py | {
"start": 741,
"end": 1773
} | class ____:
test_kat = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "3DES", "CBC"),
[
"TCBCinvperm.rsp",
"TCBCpermop.rsp",
"TCBCsubtab.rsp",
"TCBCvarkey.rsp",
"TCBCvartext.rsp",
],
lambda keys, **kwargs: algorithms.TripleDES(binascii.unhexlify(keys)),
lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),
)
test_mmt = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "3DES", "CBC"),
["TCBCMMT1.rsp", "TCBCMMT2.rsp", "TCBCMMT3.rsp"],
lambda key1, key2, key3, **kwargs: algorithms.TripleDES(
binascii.unhexlify(key1 + key2 + key3)
),
lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.TripleDES(b"\x00" * 8), OFB(b"\x00" * 8)
),
skip_message="Does not support TripleDES OFB",
)
| TestTripleDESModeCBC |
python | sqlalchemy__sqlalchemy | test/dialect/oracle/test_reflection.py | {
"start": 19674,
"end": 21137
} | class ____(fixtures.TestBase):
"""test that index overflow tables aren't included in
table_names."""
__only_on__ = "oracle"
__sparse_driver_backend__ = True
def setup_test(self):
with testing.db.begin() as conn:
conn.exec_driver_sql(
"""
CREATE TABLE admin_docindex(
token char(20),
doc_id NUMBER,
token_frequency NUMBER,
token_offsets VARCHAR2(2000),
CONSTRAINT pk_admin_docindex PRIMARY KEY (token, doc_id))
ORGANIZATION INDEX
TABLESPACE users
PCTTHRESHOLD 20
OVERFLOW TABLESPACE users
""",
)
def teardown_test(self):
with testing.db.begin() as conn:
conn.exec_driver_sql("drop table admin_docindex")
def test_reflect_all(self, connection):
m = MetaData()
m.reflect(connection)
eq_({t.name for t in m.tables.values()}, {"admin_docindex"})
def enterprise_edition_or_version(version):
def check():
if testing.db.dialect.server_version_info < (version,):
with testing.db.connect() as conn:
return (
"Enterprise Edition"
in conn.exec_driver_sql("select * from v$version").scalar()
)
else:
return True
return check
| DontReflectIOTTest |
python | bokeh__bokeh | tests/unit/bokeh/application/test_application.py | {
"start": 7462,
"end": 7679
} | class ____:
# Public methods ----------------------------------------------------------
def test_abstract(self) -> None:
with pytest.raises(TypeError):
baa.ServerContext()
| Test_ServerContext |
python | scipy__scipy | scipy/stats/_multivariate.py | {
"start": 137882,
"end": 139856
} | class ____(multi_rv_frozen):
r"""Create a frozen Multinomial distribution.
Parameters
----------
n : int
number of trials
p: array_like
probability of a trial falling into each category; should sum to 1
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def __init__(self, n, p, seed=None):
self._dist = multinomial_gen(seed)
self.n, self.p, self.npcond = self._dist._process_parameters(n, p)
# monkey patch self._dist
def _process_parameters(n, p):
return self.n, self.p, self.npcond
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.n, self.p)
def pmf(self, x):
return self._dist.pmf(x, self.n, self.p)
def mean(self):
return self._dist.mean(self.n, self.p)
def cov(self):
return self._dist.cov(self.n, self.p)
def entropy(self):
return self._dist.entropy(self.n, self.p)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.n, self.p, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multinomial and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']:
method = multinomial_gen.__dict__[name]
method_frozen = multinomial_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, multinomial_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
multinomial_docdict_params)
| multinomial_frozen |
python | tensorflow__tensorflow | tensorflow/python/distribute/experimental/dtensor_util_test.py | {
"start": 1356,
"end": 3371
} | class ____(test_util.DTensorBaseTest):
def setUp(self):
super().setUp()
global_ids = test_util.create_device_ids_array((2,))
local_ids = np.ravel(global_ids).tolist()
mesh_dict = {
device: layout.Mesh(['batch'], global_ids, local_ids,
test_util.create_device_list((2,), device))
for device in ['TPU', 'GPU', 'CPU']
}
self.mesh = self.configTestMesh(mesh_dict)
tensor_1 = constant_op.constant([1.0])
tensor_2 = constant_op.constant([2.0])
self.batch_layout = layout.Layout.batch_sharded(
self.mesh, batch_dim='batch', rank=1)
self.dtensor = d_api.pack([tensor_1, tensor_2], self.batch_layout)
@parameterized.named_parameters([
('py_floats', [1.0, 2.0]),
('np_floats', np.array([1.0, 2.0])),
('tf_const', lambda: constant_op.constant([1.0, 2.0])),
('distribute_value', values_lib.PerReplica([1.0, 2.0])),
])
def test_input_validation(self, inputs):
if callable(inputs):
inputs = inputs()
with self.assertRaisesRegex(ValueError, 'can only be built with DTensor'):
dtensor_util.DTensorDistributedValue(inputs)
def test_unpack(self):
v = dtensor_util.DTensorDistributedValue(self.dtensor)
self.assertIs(self.dtensor, v.get_dtensor())
per_replica_result = v.values
self.assertLen(per_replica_result, 2)
self.assertAllClose(per_replica_result[0], constant_op.constant([1.0]))
self.assertAllClose(per_replica_result[1], constant_op.constant([2.0]))
def test_graph_behavior(self):
@def_function.function
def run_fn(input_dtensor):
return dtensor_util.DTensorDistributedValue(input_dtensor)
result = run_fn(self.dtensor)
# When it cross the boundary of tf.function, it will be unwrapped and
# return a dtensor instance directly.
self.assertTrue(d_api.is_dtensor(result))
self.assertDTensorEqual(constant_op.constant([1.0, 2.0]),
self.batch_layout, result)
| DTensorDistributedValueTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_reflection.py | {
"start": 117740,
"end": 119821
} | class ____(fixtures.TablesTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
tb1 = Table(
"tb1",
metadata,
Column("id", Integer),
Column("attr", Integer),
Column("name", sql_types.VARCHAR(20)),
sa.PrimaryKeyConstraint("name", "id", "attr", name="pk_tb1"),
schema=None,
test_needs_fk=True,
)
Table(
"tb2",
metadata,
Column("id", Integer, primary_key=True),
Column("pid", Integer),
Column("pattr", Integer),
Column("pname", sql_types.VARCHAR(20)),
sa.ForeignKeyConstraint(
["pname", "pid", "pattr"],
[tb1.c.name, tb1.c.id, tb1.c.attr],
name="fk_tb1_name_id_attr",
),
schema=None,
test_needs_fk=True,
)
@testing.requires.primary_key_constraint_reflection
def test_pk_column_order(self, connection):
# test for issue #5661
insp = inspect(connection)
primary_key = insp.get_pk_constraint(self.tables.tb1.name)
eq_(primary_key.get("constrained_columns"), ["name", "id", "attr"])
@testing.requires.foreign_key_constraint_reflection
def test_fk_column_order(self, connection):
# test for issue #5661
insp = inspect(connection)
foreign_keys = insp.get_foreign_keys(self.tables.tb2.name)
eq_(len(foreign_keys), 1)
fkey1 = foreign_keys[0]
eq_(fkey1.get("referred_columns"), ["name", "id", "attr"])
eq_(fkey1.get("constrained_columns"), ["pname", "pid", "pattr"])
__all__ = (
"ComponentReflectionTest",
"ComponentReflectionTestExtra",
"TableNoColumnsTest",
"QuotedNameArgumentTest",
"BizarroCharacterTest",
"HasTableTest",
"HasIndexTest",
"NormalizedNameTest",
"ComputedReflectionTest",
"IdentityReflectionTest",
"CompositeKeyReflectionTest",
"TempTableElementsTest",
)
| CompositeKeyReflectionTest |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 4698,
"end": 4787
} | class ____:
f: int = 0
def object_target(x):
a = A10()
a.f = x
| A10 |
python | numba__numba | numba/tests/test_extending.py | {
"start": 53063,
"end": 56373
} | class ____(TestCase):
"""Test caching of the use of overload implementations that use
`with objmode`
"""
_numba_parallel_test_ = False
def setUp(self):
warnings.simplefilter("error", errors.NumbaWarning)
def tearDown(self):
warnings.resetwarnings()
def test_caching_overload_objmode(self):
cache_dir = temp_directory(self.__class__.__name__)
with override_config("CACHE_DIR", cache_dir):
def realwork(x):
# uses numpy code
arr = np.arange(x) / x
return np.linalg.norm(arr)
def python_code(x):
# create indirections
return realwork(x)
@overload(with_objmode_cache_ov_example)
def _ov_with_objmode_cache_ov_example(x):
def impl(x):
with objmode(y="float64"):
y = python_code(x)
return y
return impl
@njit(cache=True)
def testcase(x):
return with_objmode_cache_ov_example(x)
expect = realwork(123)
got = testcase(123)
self.assertEqual(got, expect)
testcase_cached = njit(cache=True)(testcase.py_func)
got = testcase_cached(123)
self.assertEqual(got, expect)
@classmethod
def check_objmode_cache_ndarray(cls):
def do_this(a, b):
return np.sum(a + b)
def do_something(a, b):
return np.sum(a + b)
@overload(do_something)
def overload_do_something(a, b):
def _do_something_impl(a, b):
with objmode(y='float64'):
y = do_this(a, b)
return y
return _do_something_impl
@njit(cache=True)
def test_caching():
a = np.arange(20)
b = np.arange(20)
return do_something(a, b)
got = test_caching()
expect = test_caching.py_func()
# Check result
if got != expect:
raise AssertionError("incorrect result")
return test_caching
@classmethod
def populate_objmode_cache_ndarray_check_cache(cls):
cls.check_objmode_cache_ndarray()
@classmethod
def check_objmode_cache_ndarray_check_cache(cls):
disp = cls.check_objmode_cache_ndarray()
if len(disp.stats.cache_misses) != 0:
raise AssertionError('unexpected cache miss')
if len(disp.stats.cache_hits) <= 0:
raise AssertionError("unexpected missing cache hit")
def test_check_objmode_cache_ndarray(self):
# See issue #6130.
# Env is missing after cache load.
cache_dir = temp_directory(self.__class__.__name__)
with override_config("CACHE_DIR", cache_dir):
# Run in new process to populate the cache
run_in_new_process_in_cache_dir(
self.populate_objmode_cache_ndarray_check_cache, cache_dir
)
# Run in new process to use the cache in a fresh process.
res = run_in_new_process_in_cache_dir(
self.check_objmode_cache_ndarray_check_cache, cache_dir
)
self.assertEqual(res['exitcode'], 0)
| TestCachingOverloadObjmode |
python | TheAlgorithms__Python | neural_network/convolution_neural_network.py | {
"start": 546,
"end": 14293
} | class ____:
def __init__(
self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2
):
"""
:param conv1_get: [a,c,d], size, number, step of convolution kernel
:param size_p1: pooling size
:param bp_num1: units number of flatten layer
:param bp_num2: units number of hidden layer
:param bp_num3: units number of output layer
:param rate_w: rate of weight learning
:param rate_t: rate of threshold learning
"""
self.num_bp1 = bp_num1
self.num_bp2 = bp_num2
self.num_bp3 = bp_num3
self.conv1 = conv1_get[:2]
self.step_conv1 = conv1_get[2]
self.size_pooling1 = size_p1
self.rate_weight = rate_w
self.rate_thre = rate_t
rng = np.random.default_rng()
self.w_conv1 = [
np.asmatrix(-1 * rng.random((self.conv1[0], self.conv1[0])) + 0.5)
for i in range(self.conv1[1])
]
self.wkj = np.asmatrix(-1 * rng.random((self.num_bp3, self.num_bp2)) + 0.5)
self.vji = np.asmatrix(-1 * rng.random((self.num_bp2, self.num_bp1)) + 0.5)
self.thre_conv1 = -2 * rng.random(self.conv1[1]) + 1
self.thre_bp2 = -2 * rng.random(self.num_bp2) + 1
self.thre_bp3 = -2 * rng.random(self.num_bp3) + 1
def save_model(self, save_path):
# save model dict with pickle
model_dic = {
"num_bp1": self.num_bp1,
"num_bp2": self.num_bp2,
"num_bp3": self.num_bp3,
"conv1": self.conv1,
"step_conv1": self.step_conv1,
"size_pooling1": self.size_pooling1,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conv1,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conv1,
"thre_bp2": self.thre_bp2,
"thre_bp3": self.thre_bp3,
}
with open(save_path, "wb") as f:
pickle.dump(model_dic, f)
print(f"Model saved: {save_path}")
@classmethod
def read_model(cls, model_path):
# read saved model
with open(model_path, "rb") as f:
model_dic = pickle.load(f) # noqa: S301
conv_get = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
size_p1 = model_dic.get("size_pooling1")
bp1 = model_dic.get("num_bp1")
bp2 = model_dic.get("num_bp2")
bp3 = model_dic.get("num_bp3")
r_w = model_dic.get("rate_weight")
r_t = model_dic.get("rate_thre")
# create model instance
conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
# modify model parameter
conv_ins.w_conv1 = model_dic.get("w_conv1")
conv_ins.wkj = model_dic.get("wkj")
conv_ins.vji = model_dic.get("vji")
conv_ins.thre_conv1 = model_dic.get("thre_conv1")
conv_ins.thre_bp2 = model_dic.get("thre_bp2")
conv_ins.thre_bp3 = model_dic.get("thre_bp3")
return conv_ins
def sig(self, x):
return 1 / (1 + np.exp(-1 * x))
def do_round(self, x):
return round(x, 3)
def convolute(self, data, convs, w_convs, thre_convs, conv_step):
# convolution process
size_conv = convs[0]
num_conv = convs[1]
size_data = np.shape(data)[0]
# get the data slice of original image data, data_focus
data_focus = []
for i_focus in range(0, size_data - size_conv + 1, conv_step):
for j_focus in range(0, size_data - size_conv + 1, conv_step):
focus = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(focus)
# calculate the feature map of every single kernel, and saved as list of matrix
data_featuremap = []
size_feature_map = int((size_data - size_conv) / conv_step + 1)
for i_map in range(num_conv):
featuremap = []
for i_focus in range(len(data_focus)):
net_focus = (
np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(net_focus))
featuremap = np.asmatrix(featuremap).reshape(
size_feature_map, size_feature_map
)
data_featuremap.append(featuremap)
# expanding the data slice to one dimension
focus1_list = []
for each_focus in data_focus:
focus1_list.extend(self.Expand_Mat(each_focus))
focus_list = np.asarray(focus1_list)
return focus_list, data_featuremap
def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
# pooling process
size_map = len(featuremaps[0])
size_pooled = int(size_map / size_pooling)
featuremap_pooled = []
for i_map in range(len(featuremaps)):
feature_map = featuremaps[i_map]
map_pooled = []
for i_focus in range(0, size_map, size_pooling):
for j_focus in range(0, size_map, size_pooling):
focus = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(focus))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(focus))
map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
featuremap_pooled.append(map_pooled)
return featuremap_pooled
def _expand(self, data):
# expanding three dimension data to one dimension list
data_expanded = []
for i in range(len(data)):
shapes = np.shape(data[i])
data_listed = data[i].reshape(1, shapes[0] * shapes[1])
data_listed = data_listed.getA().tolist()[0]
data_expanded.extend(data_listed)
data_expanded = np.asarray(data_expanded)
return data_expanded
def _expand_mat(self, data_mat):
# expanding matrix to one dimension list
data_mat = np.asarray(data_mat)
shapes = np.shape(data_mat)
data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
return data_expanded
def _calculate_gradient_from_pool(
self, out_map, pd_pool, num_map, size_map, size_pooling
):
"""
calculate the gradient from the data slice of pool layer
pd_pool: list of matrix
out_map: the shape of data slice(size_map*size_map)
return: pd_all: list of matrix, [num, size_map, size_map]
"""
pd_all = []
i_pool = 0
for i_map in range(num_map):
pd_conv1 = np.ones((size_map, size_map))
for i in range(0, size_map, size_pooling):
for j in range(0, size_map, size_pooling):
pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
i_pool
]
i_pool = i_pool + 1
pd_conv2 = np.multiply(
pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
)
pd_all.append(pd_conv2)
return pd_all
def train(
self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool
):
# model training
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(datas_train)))
print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
rp = 0
all_mse = []
mse = 10000
while rp < n_repeat and mse >= error_accuracy:
error_count = 0
print(f"-------------Learning Time {rp}--------------")
for p in range(len(datas_train)):
# print('------------Learning Image: %d--------------'%p)
data_train = np.asmatrix(datas_train[p])
data_teach = np.asarray(datas_teach[p])
data_focus1, data_conved1 = self.convolute(
data_train,
self.conv1,
self.w_conv1,
self.thre_conv1,
conv_step=self.step_conv1,
)
data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
shape_featuremap1 = np.shape(data_conved1)
"""
print(' -----original shape ', np.shape(data_train))
print(' ---- after convolution ',np.shape(data_conv1))
print(' -----after pooling ',np.shape(data_pooled1))
"""
data_bp_input = self._expand(data_pooled1)
bp_out1 = data_bp_input
bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
bp_out2 = self.sig(bp_net_j)
bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
bp_out3 = self.sig(bp_net_k)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
pd_k_all = np.multiply(
(data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
)
pd_j_all = np.multiply(
np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
)
pd_i_all = np.dot(pd_j_all, self.vji)
pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
pd_conv1_all = self._calculate_gradient_from_pool(
data_conved1,
pd_conv1_pooled,
shape_featuremap1[0],
shape_featuremap1[1],
self.size_pooling1,
)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conv1[1]):
pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
(self.conv1[0], self.conv1[0])
)
self.thre_conv1[k_conv] = (
self.thre_conv1[k_conv]
- np.sum(pd_conv1_all[k_conv]) * self.rate_thre
)
# all connected layer
self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
# calculate the sum error of all single image
errors = np.sum(abs(data_teach - bp_out3))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
rp = rp + 1
mse = error_count / patterns
all_mse.append(mse)
def draw_error():
yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(all_mse, "+-")
plt.plot(yplot, "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(True, alpha=0.5)
plt.show()
print("------------------Training Complete---------------------")
print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def predict(self, datas_test):
# model predict
produce_out = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(datas_test)))
for p in range(len(datas_test)):
data_test = np.asmatrix(datas_test[p])
_data_focus1, data_conved1 = self.convolute(
data_test,
self.conv1,
self.w_conv1,
self.thre_conv1,
conv_step=self.step_conv1,
)
data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
data_bp_input = self._expand(data_pooled1)
bp_out1 = data_bp_input
bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
bp_out2 = self.sig(bp_net_j)
bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
bp_out3 = self.sig(bp_net_k)
produce_out.extend(bp_out3.getA().tolist())
res = [list(map(self.do_round, each)) for each in produce_out]
return np.asarray(res)
def convolution(self, data):
# return the data of image after convoluting process so we can check it out
data_test = np.asmatrix(data)
_data_focus1, data_conved1 = self.convolute(
data_test,
self.conv1,
self.w_conv1,
self.thre_conv1,
conv_step=self.step_conv1,
)
data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
return data_conved1, data_pooled1
if __name__ == "__main__":
"""
I will put the example in another file
"""
| CNN |
python | numpy__numpy | numpy/_core/tests/test_numeric.py | {
"start": 159681,
"end": 160404
} | class ____:
def test_astype(self):
data = [[1, 2], [3, 4]]
actual = np.astype(
np.array(data, dtype=np.int64), np.uint32
)
expected = np.array(data, dtype=np.uint32)
assert_array_equal(actual, expected)
assert_equal(actual.dtype, expected.dtype)
assert np.shares_memory(
actual, np.astype(actual, actual.dtype, copy=False)
)
actual = np.astype(np.int64(10), np.float64)
expected = np.float64(10)
assert_equal(actual, expected)
assert_equal(actual.dtype, expected.dtype)
with pytest.raises(TypeError, match="Input should be a NumPy array"):
np.astype(data, np.float64)
| TestAsType |
python | openai__openai-python | src/openai/_streaming.py | {
"start": 3923,
"end": 7415
} | class ____(Generic[_T]):
"""Provides the core interface to iterate over an asynchronous stream response."""
response: httpx.Response
_decoder: SSEDecoder | SSEBytesDecoder
def __init__(
self,
*,
cast_to: type[_T],
response: httpx.Response,
client: AsyncOpenAI,
) -> None:
self.response = response
self._cast_to = cast_to
self._client = client
self._decoder = client._make_sse_decoder()
self._iterator = self.__stream__()
async def __anext__(self) -> _T:
return await self._iterator.__anext__()
async def __aiter__(self) -> AsyncIterator[_T]:
async for item in self._iterator:
yield item
async def _iter_events(self) -> AsyncIterator[ServerSentEvent]:
async for sse in self._decoder.aiter_bytes(self.response.aiter_bytes()):
yield sse
async def __stream__(self) -> AsyncIterator[_T]:
cast_to = cast(Any, self._cast_to)
response = self.response
process_data = self._client._process_response_data
iterator = self._iter_events()
async for sse in iterator:
if sse.data.startswith("[DONE]"):
break
# we have to special case the Assistants `thread.` events since we won't have an "event" key in the data
if sse.event and sse.event.startswith("thread."):
data = sse.json()
if sse.event == "error" and is_mapping(data) and data.get("error"):
message = None
error = data.get("error")
if is_mapping(error):
message = error.get("message")
if not message or not isinstance(message, str):
message = "An error occurred during streaming"
raise APIError(
message=message,
request=self.response.request,
body=data["error"],
)
yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
else:
data = sse.json()
if is_mapping(data) and data.get("error"):
message = None
error = data.get("error")
if is_mapping(error):
message = error.get("message")
if not message or not isinstance(message, str):
message = "An error occurred during streaming"
raise APIError(
message=message,
request=self.response.request,
body=data["error"],
)
yield process_data(data=data, cast_to=cast_to, response=response)
# As we might not fully consume the response stream, we need to close it explicitly
await response.aclose()
async def __aenter__(self) -> Self:
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
await self.close()
async def close(self) -> None:
"""
Close the response and release the connection.
Automatically called if the response body is read to completion.
"""
await self.response.aclose()
| AsyncStream |
python | realpython__materials | python-serialize/http-payload/flask-rest-api/main.py | {
"start": 172,
"end": 788
} | class ____:
id: UUID
name: str
created_at: datetime
@classmethod
def create(cls, name):
return cls(uuid4(), name, datetime.now())
users = [
User.create("Alice"),
User.create("Bob"),
]
@app.route("/users", methods=["GET", "POST"])
def view_users():
if request.method == "GET":
return users
elif request.method == "POST":
if request.is_json:
payload = request.get_json()
user = User.create(payload["name"])
users.append(user)
return jsonify(user), 201
if __name__ == "__main__":
app.run(debug=True)
| User |
python | automl__auto-sklearn | test/test_evaluation/test_train_evaluator.py | {
"start": 2271,
"end": 2346
} | class ____(object):
def __init__(self):
self.name = "dummy"
| Dummy |
python | openai__openai-python | src/openai/resources/fine_tuning/jobs/checkpoints.py | {
"start": 6476,
"end": 6713
} | class ____:
def __init__(self, checkpoints: Checkpoints) -> None:
self._checkpoints = checkpoints
self.list = _legacy_response.to_raw_response_wrapper(
checkpoints.list,
)
| CheckpointsWithRawResponse |
python | pytorch__pytorch | test/inductor/test_debug_trace.py | {
"start": 3534,
"end": 4572
} | class ____:
var_ranges = {p0: 256}
index0 = p0
def body(self, ops):
get_index = self.get_index('index0')
load = ops.load('arg0_1', get_index)
constant = ops.constant(1.0, torch.float32)
add = ops.add(load, constant)
get_index_1 = self.get_index('index0')
store = ops.store('buf0', get_index_1, add, None)
return store
op1: SchedulerNode(ComputedBuffer)
op1.writes = [MemoryDep('buf1', c0, {c0: 256})]
op1.unmet_dependencies = [MemoryDep('buf0', c0, {c0: 256})]
op1.met_dependencies = []
op1.outputs = [
buf1: ComputedBuffer
buf1.layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
buf1.users = [NodeUser(node=ExternKernelSchedulerNode(name='op2'), can_inplace=False, is_weak=False)]
]
op1.group.device = cpu
op1.group.iteration = ((256,), ())
op1.sizes = ([256], [])
buf0_layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
buf1_layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
| op0_loop_body |
python | ray-project__ray | python/ray/train/v2/_internal/execution/context.py | {
"start": 3530,
"end": 17818
} | class ____:
train_run_context: TrainRunContext
distributed_context: DistributedContext
execution_context: ExecutionContext
storage_context: StorageContext
controller_actor: ActorHandle
dataset_shard_provider: "DatasetShardProvider"
# TODO: consolidate into CheckpointContext
checkpoint: Optional["Checkpoint"] = None
current_report_index: int = 0
report_call_index: int = 0
report_order_condition: threading.Condition = threading.Condition()
checkpoint_upload_threadpool: ThreadPoolExecutor = ThreadPoolExecutor(
max_workers=MAX_CHECKPOINT_UPLOAD_THREADS
)
def get_experiment_name(self) -> str:
return self.train_run_context.run_config.name
def get_world_size(self) -> int:
return self.distributed_context.world_size
def get_world_rank(self) -> int:
return self.distributed_context.world_rank
def get_local_rank(self) -> int:
return self.distributed_context.local_rank
def get_local_world_size(self) -> int:
return self.distributed_context.local_world_size
def get_node_rank(self) -> int:
return self.distributed_context.node_rank
def get_storage(self):
return self.storage_context
# TODO: Don't allow these private methods to be called from user code.
def get_result_queue(self):
return self.execution_context.result_queue
def get_synchronization_actor(self):
return self.execution_context.synchronization_actor
def get_checkpoint(self):
with self.report_order_condition:
return self.checkpoint
def get_all_reported_checkpoints(
self,
consistency_mode: CheckpointConsistencyMode = CheckpointConsistencyMode.VALIDATED,
) -> List["ReportedCheckpoint"]:
return ray.get(
self.controller_actor.get_all_reported_checkpoints.remote(
self.report_call_index,
consistency_mode,
)
)
def get_dataset_shard(self, dataset_info: "DatasetShardMetadata") -> DataIterator:
"""Returns the :class:`ray.data.DataIterator` shard for this worker.
Call :meth:`~ray.data.DataIterator.iter_torch_batches` or
:meth:`~ray.data.DataIterator.to_tf` on this shard to convert it to the
appropriate framework-specific data type.
Args:
dataset_info: The shard metadata, including the dataset name and worker rank.
Returns:
The ``DataIterator`` shard with the given name for this worker.
Raises:
KeyError: If the dataset shard with the given name is not found.
"""
return self.dataset_shard_provider.get_dataset_shard(dataset_info)
def get_context_callbacks(self) -> List["TrainContextCallback"]:
return self.execution_context.train_context_callbacks
def _sync_checkpoint_dir_name_across_ranks(
self, checkpoint_dir_name: Optional[str] = None
) -> str:
"""Sync the checkpoint dir name across ranks.
Args:
checkpoint_dir_name: The checkpoint dir name to sync.
Returns:
The synced checkpoint dir name.
"""
# If checkpoint_dir_name is not set, use default checkpoint_dir_name
# created by the storage context.
checkpoint_dir_name = (
checkpoint_dir_name
or self.storage_context.make_default_checkpoint_dir_name()
)
# Get a consensus across ranks on the remote storage path, so distributed
# checkpoints will be stored to the same place.
sync_actor = self.get_synchronization_actor()
return ray.get(
sync_actor.broadcast_from_rank_zero.remote(
world_rank=self.distributed_context.world_rank,
world_size=self.distributed_context.world_size,
data=checkpoint_dir_name,
caller_method_name="ray.train.report",
)
)
# TODO: make retry configurable
@retry(description="upload checkpoint", max_attempts=3, match=AWS_RETRYABLE_TOKENS)
def _upload_checkpoint(
self,
checkpoint_dir_name: str,
metrics: Dict[str, Any],
checkpoint: Optional["Checkpoint"] = None,
delete_local_checkpoint_after_upload: bool = False,
checkpoint_upload_fn: Optional[
Callable[["Checkpoint", str], "Checkpoint"]
] = None,
validation_spec: Optional[_ValidationSpec] = None,
) -> _TrainingReport:
"""Save the checkpoint to remote storage.
Args:
checkpoint_dir_name: The checkpoint dir to persist to.
metrics: The metrics to report.
checkpoint: The checkpoint to report.
delete_local_checkpoint_after_upload: Whether to delete the checkpoint after it is uploaded.
checkpoint_upload_fn: A user defined function that will be called with the
checkpoint to upload it. If not provided, defaults to using the `pyarrow.fs.copy_files`
utility for copying to the destination `storage_path`.
validation_spec: The validation specification.
Returns:
The training result object containing the persisted checkpoint.
"""
if not checkpoint:
return _TrainingReport(
checkpoint=None, metrics=metrics, validation_spec=None
)
# Persist the checkpoint to the remote storage path.
try:
if checkpoint_upload_fn:
persisted_checkpoint = checkpoint_upload_fn(
checkpoint, checkpoint_dir_name
)
if persisted_checkpoint is None or not isinstance(
persisted_checkpoint, ray.train.Checkpoint
):
raise ValueError(
"checkpoint_upload_fn must return a `ray.train.Checkpoint`."
)
else:
persisted_checkpoint = self.storage_context.persist_current_checkpoint(
checkpoint, checkpoint_dir_name
)
except FileNotFoundError:
logger.exception(
f"Failed to find local checkpoint {checkpoint} when attempting to upload it. "
"This could be caused by multiple workers on a node attempting to upload the "
"same directory, and then one of the workers deletes the directory before the "
"others finish."
)
raise
# TODO: consider deleting local checkpoint as async callback instead
if delete_local_checkpoint_after_upload:
try:
delete_fs_path(checkpoint.filesystem, checkpoint.path)
except Exception:
logger.exception(
f"Failed to delete the local checkpoint after a successful upload: {checkpoint}"
)
return _TrainingReport(
checkpoint=persisted_checkpoint,
metrics=metrics,
validation_spec=validation_spec,
)
def _wait_then_report(
self, training_report: _TrainingReport, report_call_index: int
):
"""Thread waits for its turn before reporting training result to result queue.
It does this in order to guarantee the FIFO processing of checkpoints.
The queue size is set to 1 to avoid accumulating unprocessed results.
If the queue is full, the put operation blocks until a result is consumed.
TODO: Add a metric to track the blocking time waiting for the
training result to be consumed by the controller.
"""
with self.report_order_condition:
self.report_order_condition.wait_for(
lambda: self.current_report_index == report_call_index - 1
)
logger.info(
f"Reporting training result {report_call_index}: {training_report}"
)
# Update latest checkpoint as the persisted checkpoint.
if training_report.checkpoint:
self.checkpoint = training_report.checkpoint
self.get_result_queue().put(training_report)
self.current_report_index += 1
self.report_order_condition.notify_all()
def report(
self,
metrics: Dict[str, Any],
checkpoint: Optional["Checkpoint"] = None,
checkpoint_dir_name: Optional[str] = None,
checkpoint_upload_mode: CheckpointUploadMode = CheckpointUploadMode.SYNC,
delete_local_checkpoint_after_upload: Optional[bool] = None,
checkpoint_upload_fn: Optional[
Callable[["Checkpoint", str], "Checkpoint"]
] = None,
validate_fn: Optional[Callable[["Checkpoint", Optional[Dict]], Dict]] = None,
validate_config: Optional[Dict] = None,
) -> None:
"""
Upload checkpoint to remote storage and put a training
result on the result queue of this worker process.
TODO: the report function should be implemented in the worker instead
of in the train context. The train context should only keep the train
related information and not the worker related actions. This refactor
would also require the `TrainContextCallback` to be updated as well.
"""
if "torch" in sys.modules:
from ray.air._internal.torch_utils import contains_tensor
if contains_tensor(metrics):
raise ValueError(
"Passing objects containg Torch tensors as metrics "
"is not supported as it will throw an exception on "
"deserialization. You can either convert the tensors "
"to Python objects (ex: `.numpy()`, `.item()`, etc.) "
"or save tensors as part of the checkpoint files instead."
)
with invoke_context_managers(
[
callback.on_report
for callback in self.execution_context.train_context_callbacks
]
):
if validate_fn:
validation_spec = _ValidationSpec(
validate_fn=validate_fn,
validate_config=validate_config,
)
else:
validation_spec = None
self.report_call_index += 1
report_call_index = self.report_call_index
# Sync the checkpoint dir name across ranks.
checkpoint_dir_name = self._sync_checkpoint_dir_name_across_ranks(
checkpoint_dir_name
)
# Upload checkpoint, wait for turn, and report.
if checkpoint_upload_mode == CheckpointUploadMode.SYNC:
training_report = self._upload_checkpoint(
checkpoint_dir_name,
metrics,
checkpoint,
delete_local_checkpoint_after_upload,
checkpoint_upload_fn,
validation_spec,
)
self._wait_then_report(training_report, report_call_index)
elif checkpoint_upload_mode == CheckpointUploadMode.NO_UPLOAD:
training_report = _TrainingReport(
checkpoint=checkpoint,
metrics=metrics,
validation_spec=validation_spec,
)
self._wait_then_report(training_report, report_call_index)
elif checkpoint_upload_mode == CheckpointUploadMode.ASYNC:
def _upload_checkpoint_and_report(
checkpoint_dir_name: str,
metrics: Dict[str, Any],
checkpoint: Optional["Checkpoint"],
report_call_index: int,
) -> None:
try:
training_report = self._upload_checkpoint(
checkpoint_dir_name,
metrics,
checkpoint,
delete_local_checkpoint_after_upload,
checkpoint_upload_fn,
validation_spec,
)
self._wait_then_report(training_report, report_call_index)
except Exception as e:
# TODO: env var to disable eager raising
logger.exception(
"Checkpoint upload failed in the background thread. Raising eagerly "
"to avoid training in a corrupted state with more potential progress "
"lost due to checkpointing failures."
)
self.execution_context.training_thread_runner.get_exception_queue().put(
construct_user_exception_with_traceback(e)
)
self.checkpoint_upload_threadpool.submit(
_upload_checkpoint_and_report,
checkpoint_dir_name,
metrics,
checkpoint,
report_call_index,
)
else:
raise ValueError(
f"Invalid checkpoint upload mode: {checkpoint_upload_mode}"
)
# The global variable holding the current TrainContext
_train_context: Optional[TrainContext] = None
# Thread lock to protect the global TrainContext
_context_lock = threading.Lock()
def get_train_context() -> TrainContext:
"""Get the internal train context.
Note:
This should not be used directly by user-facing APIs. User-facing APIs should
call :class:`~ray.train.v2._internal.execution.train_fn_utils.TrainFnUtils`
or use :class:`~ray.train.v2.api.context.TrainContext` instead.
Returns:
The internal TrainContext for this worker.
"""
with _context_lock:
if _train_context is None:
raise RuntimeError("TrainContext has not been initialized.")
return _train_context
def set_train_context(context) -> None:
global _train_context
with _context_lock:
_train_context = context
| TrainContext |
python | pandas-dev__pandas | pandas/tests/extension/base/dim2.py | {
"start": 10723,
"end": 12454
} | class ____(Dim2CompatTests):
# More specific tests for NDArrayBackedExtensionArray subclasses
def test_copy_order(self, data):
# We should be matching numpy semantics for the "order" keyword in 'copy'
arr2d = data.repeat(2).reshape(-1, 2)
assert arr2d._ndarray.flags["C_CONTIGUOUS"]
res = arr2d.copy()
assert res._ndarray.flags["C_CONTIGUOUS"]
res = arr2d[::2, ::2].copy()
assert res._ndarray.flags["C_CONTIGUOUS"]
res = arr2d.copy("F")
assert not res._ndarray.flags["C_CONTIGUOUS"]
assert res._ndarray.flags["F_CONTIGUOUS"]
res = arr2d.copy("K")
assert res._ndarray.flags["C_CONTIGUOUS"]
res = arr2d.T.copy("K")
assert not res._ndarray.flags["C_CONTIGUOUS"]
assert res._ndarray.flags["F_CONTIGUOUS"]
# order not accepted by numpy
msg = r"order must be one of 'C', 'F', 'A', or 'K' \(got 'Q'\)"
with pytest.raises(ValueError, match=msg):
arr2d.copy("Q")
# neither contiguity
arr_nc = arr2d[::2]
assert not arr_nc._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc._ndarray.flags["F_CONTIGUOUS"]
assert arr_nc.copy()._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc.copy()._ndarray.flags["F_CONTIGUOUS"]
assert arr_nc.copy("C")._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc.copy("C")._ndarray.flags["F_CONTIGUOUS"]
assert not arr_nc.copy("F")._ndarray.flags["C_CONTIGUOUS"]
assert arr_nc.copy("F")._ndarray.flags["F_CONTIGUOUS"]
assert arr_nc.copy("K")._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc.copy("K")._ndarray.flags["F_CONTIGUOUS"]
| NDArrayBacked2DTests |
python | django__django | tests/admin_changelist/models.py | {
"start": 3440,
"end": 3500
} | class ____(User):
class Meta:
proxy = True
| ProxyUser |
python | keras-team__keras | benchmarks/torch_ctl_benchmark/conv_model_benchmark.py | {
"start": 948,
"end": 2638
} | class ____(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 32, kernel_size=(3, 3))
self.activation = torch.nn.ReLU()
self.max_pool = torch.nn.MaxPool2d((2, 2))
self.flatten = torch.nn.Flatten()
self.dense = torch.nn.LazyLinear(num_classes)
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, x):
x = self.conv(x)
x = self.activation(x)
x = self.max_pool(x)
x = self.flatten(x)
x = self.dense(x)
x = self.softmax(x)
return x
def run_keras_custom_training_loop():
keras_model = keras.Sequential(
[
layers.Input(shape=input_shape),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dense(num_classes),
layers.Softmax(),
]
)
optimizer = optim.Adam(keras_model.parameters(), lr=0.001)
loss_fn = nn.CrossEntropyLoss()
train_loop(
keras_model,
train_loader,
num_epochs=num_epochs,
optimizer=optimizer,
loss_fn=loss_fn,
framework="keras",
)
def run_torch_custom_training_loop():
torch_model = TorchModel()
optimizer = optim.Adam(torch_model.parameters(), lr=0.001)
loss_fn = nn.CrossEntropyLoss()
train_loop(
torch_model,
train_loader,
num_epochs=num_epochs,
optimizer=optimizer,
loss_fn=loss_fn,
framework="torch",
)
if __name__ == "__main__":
run_keras_custom_training_loop()
run_torch_custom_training_loop()
| TorchModel |
python | allegroai__clearml | clearml/backend_api/services/v2_23/projects.py | {
"start": 107855,
"end": 111681
} | class ____(Request):
"""
Get user and system tags used for the models under the specified projects
:param include_system: If set to 'true' then the list of the system tags is
also returned. The default value is 'false'
:type include_system: bool
:param projects: The list of projects under which the tags are searched. If not
passed or empty then all the projects are searched
:type projects: Sequence[str]
:param filter: Filter on entities to collect tags from
:type filter: dict
"""
_service = "projects"
_action = "get_model_tags"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"filter": {
"description": "Filter on entities to collect tags from",
"properties": {
"system_tags": {
"description": "The list of system tag values to filter by. Use 'null' value to specify empty system tags. Use '__Snot' value to specify that the following value should be excluded",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "The list of tag values to filter by. Use 'null' value to specify empty tags. Use '__Snot' value to specify that the following value should be excluded",
"items": {"type": "string"},
"type": "array",
},
},
"type": ["object", "null"],
},
"include_system": {
"default": False,
"description": "If set to 'true' then the list of the system tags is also returned. The default value is 'false'",
"type": ["boolean", "null"],
},
"projects": {
"description": "The list of projects under which the tags are searched. If not passed or empty then all the projects are searched",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self,
include_system: Optional[bool] = False,
projects: Optional[List[str]] = None,
filter: Optional[dict] = None,
**kwargs: Any
) -> None:
super(GetModelTagsRequest, self).__init__(**kwargs)
self.include_system = include_system
self.projects = projects
self.filter = filter
@schema_property("include_system")
def include_system(self) -> Optional[bool]:
return self._property_include_system
@include_system.setter
def include_system(self, value: Optional[bool]) -> None:
if value is None:
self._property_include_system = None
return
self.assert_isinstance(value, "include_system", (bool,))
self._property_include_system = value
@schema_property("projects")
def projects(self) -> Optional[List[str]]:
return self._property_projects
@projects.setter
def projects(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_projects = None
return
self.assert_isinstance(value, "projects", (list, tuple))
self.assert_isinstance(value, "projects", six.string_types, is_array=True)
self._property_projects = value
@schema_property("filter")
def filter(self) -> Optional[dict]:
return self._property_filter
@filter.setter
def filter(self, value: Optional[dict]) -> None:
if value is None:
self._property_filter = None
return
self.assert_isinstance(value, "filter", (dict,))
self._property_filter = value
| GetModelTagsRequest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1161908,
"end": 1170336
} | class ____(Position2Def):
r"""
SecondaryFieldDef schema wrapper.
A field definition of a secondary channel that shares a scale with another primary channel.
For example, ``x2``, ``xError`` and ``xError2`` share the same scale with ``x``.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
"""
_schema = {"$ref": "#/definitions/SecondaryFieldDef"}
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
field=field,
timeUnit=timeUnit,
title=title,
**kwds,
)
| SecondaryFieldDef |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_stateful.py | {
"start": 5494,
"end": 7741
} | class ____(MyStatefulMachine.TestCase):
settings = Settings(derandomize=True, stateful_step_count=5)
def test_multiple_precondition_bug():
# See https://github.com/HypothesisWorks/hypothesis/issues/2861
class MultiplePreconditionMachine(RuleBasedStateMachine):
@rule(x=st.integers())
def good_method(self, x):
pass
@precondition(lambda self: True)
@precondition(lambda self: False)
@rule(x=st.integers())
def bad_method_a(self, x):
raise AssertionError("This rule runs, even though it shouldn't.")
@precondition(lambda self: False)
@precondition(lambda self: True)
@rule(x=st.integers())
def bad_method_b(self, x):
raise AssertionError("This rule might be skipped for the wrong reason.")
@precondition(lambda self: True)
@rule(x=st.integers())
@precondition(lambda self: False)
def bad_method_c(self, x):
raise AssertionError("This rule runs, even though it shouldn't.")
@rule(x=st.integers())
@precondition(lambda self: True)
@precondition(lambda self: False)
def bad_method_d(self, x):
raise AssertionError("This rule runs, even though it shouldn't.")
@precondition(lambda self: True)
@precondition(lambda self: False)
@invariant()
def bad_invariant_a(self):
raise AssertionError("This invariant runs, even though it shouldn't.")
@precondition(lambda self: False)
@precondition(lambda self: True)
@invariant()
def bad_invariant_b(self):
raise AssertionError("This invariant runs, even though it shouldn't.")
@precondition(lambda self: True)
@invariant()
@precondition(lambda self: False)
def bad_invariant_c(self):
raise AssertionError("This invariant runs, even though it shouldn't.")
@invariant()
@precondition(lambda self: True)
@precondition(lambda self: False)
def bad_invariant_d(self):
raise AssertionError("This invariant runs, even though it shouldn't.")
run_state_machine_as_test(MultiplePreconditionMachine)
| TestMyStatefulMachine |
python | sympy__sympy | sympy/polys/polyquinticconst.py | {
"start": 503,
"end": 96035
} | class ____:
"""Special functions for solvable quintics"""
def __init__(self, poly):
_, _, self.p, self.q, self.r, self.s = poly.all_coeffs()
self.zeta1 = Rational(-1, 4) + (sqrt(5)/4) + I*sqrt((sqrt(5)/8) + Rational(5, 8))
self.zeta2 = (-sqrt(5)/4) - Rational(1, 4) + I*sqrt((-sqrt(5)/8) + Rational(5, 8))
self.zeta3 = (-sqrt(5)/4) - Rational(1, 4) - I*sqrt((-sqrt(5)/8) + Rational(5, 8))
self.zeta4 = Rational(-1, 4) + (sqrt(5)/4) - I*sqrt((sqrt(5)/8) + Rational(5, 8))
@property
def f20(self):
p, q, r, s = self.p, self.q, self.r, self.s
f20 = q**8 - 13*p*q**6*r + p**5*q**2*r**2 + 65*p**2*q**4*r**2 - 4*p**6*r**3 - 128*p**3*q**2*r**3 + 17*q**4*r**3 + 48*p**4*r**4 - 16*p*q**2*r**4 - 192*p**2*r**5 + 256*r**6 - 4*p**5*q**3*s - 12*p**2*q**5*s + 18*p**6*q*r*s + 12*p**3*q**3*r*s - 124*q**5*r*s + 196*p**4*q*r**2*s + 590*p*q**3*r**2*s - 160*p**2*q*r**3*s - 1600*q*r**4*s - 27*p**7*s**2 - 150*p**4*q**2*s**2 - 125*p*q**4*s**2 - 99*p**5*r*s**2 - 725*p**2*q**2*r*s**2 + 1200*p**3*r**2*s**2 + 3250*q**2*r**2*s**2 - 2000*p*r**3*s**2 - 1250*p*q*r*s**3 + 3125*p**2*s**4 - 9375*r*s**4-(2*p*q**6 - 19*p**2*q**4*r + 51*p**3*q**2*r**2 - 3*q**4*r**2 - 32*p**4*r**3 - 76*p*q**2*r**3 + 256*p**2*r**4 - 512*r**5 + 31*p**3*q**3*s + 58*q**5*s - 117*p**4*q*r*s - 105*p*q**3*r*s - 260*p**2*q*r**2*s + 2400*q*r**3*s + 108*p**5*s**2 + 325*p**2*q**2*s**2 - 525*p**3*r*s**2 - 2750*q**2*r*s**2 + 500*p*r**2*s**2 - 625*p*q*s**3 + 3125*s**4)*x+(p**2*q**4 - 6*p**3*q**2*r - 8*q**4*r + 9*p**4*r**2 + 76*p*q**2*r**2 - 136*p**2*r**3 + 400*r**4 - 50*p*q**3*s + 90*p**2*q*r*s - 1400*q*r**2*s + 625*q**2*s**2 + 500*p*r*s**2)*x**2-(2*q**4 - 21*p*q**2*r + 40*p**2*r**2 - 160*r**3 + 15*p**2*q*s + 400*q*r*s - 125*p*s**2)*x**3+(2*p*q**2 - 6*p**2*r + 40*r**2 - 50*q*s)*x**4 + 8*r*x**5 + x**6
return Poly(f20, x)
@property
def b(self):
p, q, r, s = self.p, self.q, self.r, self.s
b = ( [], [0,0,0,0,0,0], [0,0,0,0,0,0], [0,0,0,0,0,0], [0,0,0,0,0,0],)
b[1][5] = 100*p**7*q**7 + 2175*p**4*q**9 + 10500*p*q**11 - 1100*p**8*q**5*r - 27975*p**5*q**7*r - 152950*p**2*q**9*r + 4125*p**9*q**3*r**2 + 128875*p**6*q**5*r**2 + 830525*p**3*q**7*r**2 - 59450*q**9*r**2 - 5400*p**10*q*r**3 - 243800*p**7*q**3*r**3 - 2082650*p**4*q**5*r**3 + 333925*p*q**7*r**3 + 139200*p**8*q*r**4 + 2406000*p**5*q**3*r**4 + 122600*p**2*q**5*r**4 - 1254400*p**6*q*r**5 - 3776000*p**3*q**3*r**5 - 1832000*q**5*r**5 + 4736000*p**4*q*r**6 + 6720000*p*q**3*r**6 - 6400000*p**2*q*r**7 + 900*p**9*q**4*s + 37400*p**6*q**6*s + 281625*p**3*q**8*s + 435000*q**10*s - 6750*p**10*q**2*r*s - 322300*p**7*q**4*r*s - 2718575*p**4*q**6*r*s - 4214250*p*q**8*r*s + 16200*p**11*r**2*s + 859275*p**8*q**2*r**2*s + 8925475*p**5*q**4*r**2*s + 14427875*p**2*q**6*r**2*s - 453600*p**9*r**3*s - 10038400*p**6*q**2*r**3*s - 17397500*p**3*q**4*r**3*s + 11333125*q**6*r**3*s + 4451200*p**7*r**4*s + 15850000*p**4*q**2*r**4*s - 34000000*p*q**4*r**4*s - 17984000*p**5*r**5*s + 10000000*p**2*q**2*r**5*s + 25600000*p**3*r**6*s + 8000000*q**2*r**6*s - 6075*p**11*q*s**2 + 83250*p**8*q**3*s**2 + 1282500*p**5*q**5*s**2 + 2862500*p**2*q**7*s**2 - 724275*p**9*q*r*s**2 - 9807250*p**6*q**3*r*s**2 - 28374375*p**3*q**5*r*s**2 - 22212500*q**7*r*s**2 + 8982000*p**7*q*r**2*s**2 + 39600000*p**4*q**3*r**2*s**2 + 61746875*p*q**5*r**2*s**2 + 1010000*p**5*q*r**3*s**2 + 1000000*p**2*q**3*r**3*s**2 - 78000000*p**3*q*r**4*s**2 - 30000000*q**3*r**4*s**2 - 80000000*p*q*r**5*s**2 + 759375*p**10*s**3 + 9787500*p**7*q**2*s**3 + 39062500*p**4*q**4*s**3 + 52343750*p*q**6*s**3 - 12301875*p**8*r*s**3 - 98175000*p**5*q**2*r*s**3 - 225078125*p**2*q**4*r*s**3 + 54900000*p**6*r**2*s**3 + 310000000*p**3*q**2*r**2*s**3 + 7890625*q**4*r**2*s**3 - 51250000*p**4*r**3*s**3 + 420000000*p*q**2*r**3*s**3 - 110000000*p**2*r**4*s**3 + 200000000*r**5*s**3 - 2109375*p**6*q*s**4 + 21093750*p**3*q**3*s**4 + 89843750*q**5*s**4 - 182343750*p**4*q*r*s**4 - 733203125*p*q**3*r*s**4 + 196875000*p**2*q*r**2*s**4 - 1125000000*q*r**3*s**4 + 
158203125*p**5*s**5 + 566406250*p**2*q**2*s**5 - 101562500*p**3*r*s**5 + 1669921875*q**2*r*s**5 - 1250000000*p*r**2*s**5 + 1220703125*p*q*s**6 - 6103515625*s**7
b[1][4] = -1000*p**5*q**7 - 7250*p**2*q**9 + 10800*p**6*q**5*r + 96900*p**3*q**7*r + 52500*q**9*r - 37400*p**7*q**3*r**2 - 470850*p**4*q**5*r**2 - 640600*p*q**7*r**2 + 39600*p**8*q*r**3 + 983600*p**5*q**3*r**3 + 2848100*p**2*q**5*r**3 - 814400*p**6*q*r**4 - 6076000*p**3*q**3*r**4 - 2308000*q**5*r**4 + 5024000*p**4*q*r**5 + 9680000*p*q**3*r**5 - 9600000*p**2*q*r**6 - 13800*p**7*q**4*s - 94650*p**4*q**6*s + 26500*p*q**8*s + 86400*p**8*q**2*r*s + 816500*p**5*q**4*r*s + 257500*p**2*q**6*r*s - 91800*p**9*r**2*s - 1853700*p**6*q**2*r**2*s - 630000*p**3*q**4*r**2*s + 8971250*q**6*r**2*s + 2071200*p**7*r**3*s + 7240000*p**4*q**2*r**3*s - 29375000*p*q**4*r**3*s - 14416000*p**5*r**4*s + 5200000*p**2*q**2*r**4*s + 30400000*p**3*r**5*s + 12000000*q**2*r**5*s - 64800*p**9*q*s**2 - 567000*p**6*q**3*s**2 - 1655000*p**3*q**5*s**2 - 6987500*q**7*s**2 - 337500*p**7*q*r*s**2 - 8462500*p**4*q**3*r*s**2 + 5812500*p*q**5*r*s**2 + 24930000*p**5*q*r**2*s**2 + 69125000*p**2*q**3*r**2*s**2 - 103500000*p**3*q*r**3*s**2 - 30000000*q**3*r**3*s**2 - 90000000*p*q*r**4*s**2 + 708750*p**8*s**3 + 5400000*p**5*q**2*s**3 - 8906250*p**2*q**4*s**3 - 18562500*p**6*r*s**3 + 625000*p**3*q**2*r*s**3 - 29687500*q**4*r*s**3 + 75000000*p**4*r**2*s**3 + 416250000*p*q**2*r**2*s**3 - 60000000*p**2*r**3*s**3 + 300000000*r**4*s**3 - 71718750*p**4*q*s**4 - 189062500*p*q**3*s**4 - 210937500*p**2*q*r*s**4 - 1187500000*q*r**2*s**4 + 187500000*p**3*s**5 + 800781250*q**2*s**5 + 390625000*p*r*s**5
b[1][3] = 500*p**6*q**5 + 6350*p**3*q**7 + 19800*q**9 - 3750*p**7*q**3*r - 65100*p**4*q**5*r - 264950*p*q**7*r + 6750*p**8*q*r**2 + 209050*p**5*q**3*r**2 + 1217250*p**2*q**5*r**2 - 219000*p**6*q*r**3 - 2510000*p**3*q**3*r**3 - 1098500*q**5*r**3 + 2068000*p**4*q*r**4 + 5060000*p*q**3*r**4 - 5200000*p**2*q*r**5 + 6750*p**8*q**2*s + 96350*p**5*q**4*s + 346000*p**2*q**6*s - 20250*p**9*r*s - 459900*p**6*q**2*r*s - 1828750*p**3*q**4*r*s + 2930000*q**6*r*s + 594000*p**7*r**2*s + 4301250*p**4*q**2*r**2*s - 10906250*p*q**4*r**2*s - 5252000*p**5*r**3*s + 1450000*p**2*q**2*r**3*s + 12800000*p**3*r**4*s + 6500000*q**2*r**4*s - 74250*p**7*q*s**2 - 1418750*p**4*q**3*s**2 - 5956250*p*q**5*s**2 + 4297500*p**5*q*r*s**2 + 29906250*p**2*q**3*r*s**2 - 31500000*p**3*q*r**2*s**2 - 12500000*q**3*r**2*s**2 - 35000000*p*q*r**3*s**2 - 1350000*p**6*s**3 - 6093750*p**3*q**2*s**3 - 17500000*q**4*s**3 + 7031250*p**4*r*s**3 + 127812500*p*q**2*r*s**3 - 18750000*p**2*r**2*s**3 + 162500000*r**3*s**3 - 107812500*p**2*q*s**4 - 460937500*q*r*s**4 + 214843750*p*s**5
b[1][2] = -1950*p**4*q**5 - 14100*p*q**7 + 14350*p**5*q**3*r + 125600*p**2*q**5*r - 27900*p**6*q*r**2 - 402250*p**3*q**3*r**2 - 288250*q**5*r**2 + 436000*p**4*q*r**3 + 1345000*p*q**3*r**3 - 1400000*p**2*q*r**4 - 9450*p**6*q**2*s + 1250*p**3*q**4*s + 465000*q**6*s + 49950*p**7*r*s + 302500*p**4*q**2*r*s - 1718750*p*q**4*r*s - 834000*p**5*r**2*s - 437500*p**2*q**2*r**2*s + 3100000*p**3*r**3*s + 1750000*q**2*r**3*s + 292500*p**5*q*s**2 + 1937500*p**2*q**3*s**2 - 3343750*p**3*q*r*s**2 - 1875000*q**3*r*s**2 - 8125000*p*q*r**2*s**2 + 1406250*p**4*s**3 + 12343750*p*q**2*s**3 - 5312500*p**2*r*s**3 + 43750000*r**2*s**3 - 74218750*q*s**4
b[1][1] = 300*p**5*q**3 + 2150*p**2*q**5 - 1350*p**6*q*r - 21500*p**3*q**3*r - 61500*q**5*r + 42000*p**4*q*r**2 + 290000*p*q**3*r**2 - 300000*p**2*q*r**3 + 4050*p**7*s + 45000*p**4*q**2*s + 125000*p*q**4*s - 108000*p**5*r*s - 643750*p**2*q**2*r*s + 700000*p**3*r**2*s + 375000*q**2*r**2*s + 93750*p**3*q*s**2 + 312500*q**3*s**2 - 1875000*p*q*r*s**2 + 1406250*p**2*s**3 + 9375000*r*s**3
b[1][0] = -1250*p**3*q**3 - 9000*q**5 + 4500*p**4*q*r + 46250*p*q**3*r - 50000*p**2*q*r**2 - 6750*p**5*s - 43750*p**2*q**2*s + 75000*p**3*r*s + 62500*q**2*r*s - 156250*p*q*s**2 + 1562500*s**3
b[2][5] = 200*p**6*q**11 - 250*p**3*q**13 - 10800*q**15 - 3900*p**7*q**9*r - 3325*p**4*q**11*r + 181800*p*q**13*r + 26950*p**8*q**7*r**2 + 69625*p**5*q**9*r**2 - 1214450*p**2*q**11*r**2 - 78725*p**9*q**5*r**3 - 368675*p**6*q**7*r**3 + 4166325*p**3*q**9*r**3 + 1131100*q**11*r**3 + 73400*p**10*q**3*r**4 + 661950*p**7*q**5*r**4 - 9151950*p**4*q**7*r**4 - 16633075*p*q**9*r**4 + 36000*p**11*q*r**5 + 135600*p**8*q**3*r**5 + 17321400*p**5*q**5*r**5 + 85338300*p**2*q**7*r**5 - 832000*p**9*q*r**6 - 21379200*p**6*q**3*r**6 - 176044000*p**3*q**5*r**6 - 1410000*q**7*r**6 + 6528000*p**7*q*r**7 + 129664000*p**4*q**3*r**7 + 47344000*p*q**5*r**7 - 21504000*p**5*q*r**8 - 115200000*p**2*q**3*r**8 + 25600000*p**3*q*r**9 + 64000000*q**3*r**9 + 15700*p**8*q**8*s + 120525*p**5*q**10*s + 113250*p**2*q**12*s - 196900*p**9*q**6*r*s - 1776925*p**6*q**8*r*s - 3062475*p**3*q**10*r*s - 4153500*q**12*r*s + 857925*p**10*q**4*r**2*s + 10562775*p**7*q**6*r**2*s + 34866250*p**4*q**8*r**2*s + 73486750*p*q**10*r**2*s - 1333800*p**11*q**2*r**3*s - 29212625*p**8*q**4*r**3*s - 168729675*p**5*q**6*r**3*s - 427230750*p**2*q**8*r**3*s + 108000*p**12*r**4*s + 30384200*p**9*q**2*r**4*s + 324535100*p**6*q**4*r**4*s + 952666750*p**3*q**6*r**4*s - 38076875*q**8*r**4*s - 4296000*p**10*r**5*s - 213606400*p**7*q**2*r**5*s - 842060000*p**4*q**4*r**5*s - 95285000*p*q**6*r**5*s + 61184000*p**8*r**6*s + 567520000*p**5*q**2*r**6*s + 547000000*p**2*q**4*r**6*s - 390912000*p**6*r**7*s - 812800000*p**3*q**2*r**7*s - 924000000*q**4*r**7*s + 1152000000*p**4*r**8*s + 800000000*p*q**2*r**8*s - 1280000000*p**2*r**9*s + 141750*p**10*q**5*s**2 - 31500*p**7*q**7*s**2 - 11325000*p**4*q**9*s**2 - 31687500*p*q**11*s**2 - 1293975*p**11*q**3*r*s**2 - 4803800*p**8*q**5*r*s**2 + 71398250*p**5*q**7*r*s**2 + 227625000*p**2*q**9*r*s**2 + 3256200*p**12*q*r**2*s**2 + 43870125*p**9*q**3*r**2*s**2 + 64581500*p**6*q**5*r**2*s**2 + 56090625*p**3*q**7*r**2*s**2 + 260218750*q**9*r**2*s**2 - 74610000*p**10*q*r**3*s**2 - 
662186500*p**7*q**3*r**3*s**2 - 1987747500*p**4*q**5*r**3*s**2 - 811928125*p*q**7*r**3*s**2 + 471286000*p**8*q*r**4*s**2 + 2106040000*p**5*q**3*r**4*s**2 + 792687500*p**2*q**5*r**4*s**2 - 135120000*p**6*q*r**5*s**2 + 2479000000*p**3*q**3*r**5*s**2 + 5242250000*q**5*r**5*s**2 - 6400000000*p**4*q*r**6*s**2 - 8620000000*p*q**3*r**6*s**2 + 13280000000*p**2*q*r**7*s**2 + 1600000000*q*r**8*s**2 + 273375*p**12*q**2*s**3 - 13612500*p**9*q**4*s**3 - 177250000*p**6*q**6*s**3 - 511015625*p**3*q**8*s**3 - 320937500*q**10*s**3 - 2770200*p**13*r*s**3 + 12595500*p**10*q**2*r*s**3 + 543950000*p**7*q**4*r*s**3 + 1612281250*p**4*q**6*r*s**3 + 968125000*p*q**8*r*s**3 + 77031000*p**11*r**2*s**3 + 373218750*p**8*q**2*r**2*s**3 + 1839765625*p**5*q**4*r**2*s**3 + 1818515625*p**2*q**6*r**2*s**3 - 776745000*p**9*r**3*s**3 - 6861075000*p**6*q**2*r**3*s**3 - 20014531250*p**3*q**4*r**3*s**3 - 13747812500*q**6*r**3*s**3 + 3768000000*p**7*r**4*s**3 + 35365000000*p**4*q**2*r**4*s**3 + 34441875000*p*q**4*r**4*s**3 - 9628000000*p**5*r**5*s**3 - 63230000000*p**2*q**2*r**5*s**3 + 13600000000*p**3*r**6*s**3 - 15000000000*q**2*r**6*s**3 - 10400000000*p*r**7*s**3 - 45562500*p**11*q*s**4 - 525937500*p**8*q**3*s**4 - 1364218750*p**5*q**5*s**4 - 1382812500*p**2*q**7*s**4 + 572062500*p**9*q*r*s**4 + 2473515625*p**6*q**3*r*s**4 + 13192187500*p**3*q**5*r*s**4 + 12703125000*q**7*r*s**4 - 451406250*p**7*q*r**2*s**4 - 18153906250*p**4*q**3*r**2*s**4 - 36908203125*p*q**5*r**2*s**4 - 9069375000*p**5*q*r**3*s**4 + 79957812500*p**2*q**3*r**3*s**4 + 5512500000*p**3*q*r**4*s**4 + 50656250000*q**3*r**4*s**4 + 74750000000*p*q*r**5*s**4 + 56953125*p**10*s**5 + 1381640625*p**7*q**2*s**5 - 781250000*p**4*q**4*s**5 + 878906250*p*q**6*s**5 - 2655703125*p**8*r*s**5 - 3223046875*p**5*q**2*r*s**5 - 35117187500*p**2*q**4*r*s**5 + 26573437500*p**6*r**2*s**5 + 14785156250*p**3*q**2*r**2*s**5 - 52050781250*q**4*r**2*s**5 - 103062500000*p**4*r**3*s**5 - 281796875000*p*q**2*r**3*s**5 + 146875000000*p**2*r**4*s**5 - 
37500000000*r**5*s**5 - 8789062500*p**6*q*s**6 - 3906250000*p**3*q**3*s**6 + 1464843750*q**5*s**6 + 102929687500*p**4*q*r*s**6 + 297119140625*p*q**3*r*s**6 - 217773437500*p**2*q*r**2*s**6 + 167968750000*q*r**3*s**6 + 10986328125*p**5*s**7 + 98876953125*p**2*q**2*s**7 - 188964843750*p**3*r*s**7 - 278320312500*q**2*r*s**7 + 517578125000*p*r**2*s**7 - 610351562500*p*q*s**8 + 762939453125*s**9
b[2][4] = -200*p**7*q**9 + 1850*p**4*q**11 + 21600*p*q**13 + 3200*p**8*q**7*r - 19200*p**5*q**9*r - 316350*p**2*q**11*r - 19050*p**9*q**5*r**2 + 37400*p**6*q**7*r**2 + 1759250*p**3*q**9*r**2 + 440100*q**11*r**2 + 48750*p**10*q**3*r**3 + 190200*p**7*q**5*r**3 - 4604200*p**4*q**7*r**3 - 6072800*p*q**9*r**3 - 43200*p**11*q*r**4 - 834500*p**8*q**3*r**4 + 4916000*p**5*q**5*r**4 + 27926850*p**2*q**7*r**4 + 969600*p**9*q*r**5 + 2467200*p**6*q**3*r**5 - 45393200*p**3*q**5*r**5 - 5399500*q**7*r**5 - 7283200*p**7*q*r**6 + 10536000*p**4*q**3*r**6 + 41656000*p*q**5*r**6 + 22784000*p**5*q*r**7 - 35200000*p**2*q**3*r**7 - 25600000*p**3*q*r**8 + 96000000*q**3*r**8 - 3000*p**9*q**6*s + 40400*p**6*q**8*s + 136550*p**3*q**10*s - 1647000*q**12*s + 40500*p**10*q**4*r*s - 173600*p**7*q**6*r*s - 126500*p**4*q**8*r*s + 23969250*p*q**10*r*s - 153900*p**11*q**2*r**2*s - 486150*p**8*q**4*r**2*s - 4115800*p**5*q**6*r**2*s - 112653250*p**2*q**8*r**2*s + 129600*p**12*r**3*s + 2683350*p**9*q**2*r**3*s + 10906650*p**6*q**4*r**3*s + 187289500*p**3*q**6*r**3*s + 44098750*q**8*r**3*s - 4384800*p**10*r**4*s - 35660800*p**7*q**2*r**4*s - 175420000*p**4*q**4*r**4*s - 426538750*p*q**6*r**4*s + 60857600*p**8*r**5*s + 349436000*p**5*q**2*r**5*s + 900600000*p**2*q**4*r**5*s - 429568000*p**6*r**6*s - 1511200000*p**3*q**2*r**6*s - 1286000000*q**4*r**6*s + 1472000000*p**4*r**7*s + 1440000000*p*q**2*r**7*s - 1920000000*p**2*r**8*s - 36450*p**11*q**3*s**2 - 188100*p**8*q**5*s**2 - 5504750*p**5*q**7*s**2 - 37968750*p**2*q**9*s**2 + 255150*p**12*q*r*s**2 + 2754000*p**9*q**3*r*s**2 + 49196500*p**6*q**5*r*s**2 + 323587500*p**3*q**7*r*s**2 - 83250000*q**9*r*s**2 - 465750*p**10*q*r**2*s**2 - 31881500*p**7*q**3*r**2*s**2 - 415585000*p**4*q**5*r**2*s**2 + 1054775000*p*q**7*r**2*s**2 - 96823500*p**8*q*r**3*s**2 - 701490000*p**5*q**3*r**3*s**2 - 2953531250*p**2*q**5*r**3*s**2 + 1454560000*p**6*q*r**4*s**2 + 7670500000*p**3*q**3*r**4*s**2 + 5661062500*q**5*r**4*s**2 - 7785000000*p**4*q*r**5*s**2 - 
9450000000*p*q**3*r**5*s**2 + 14000000000*p**2*q*r**6*s**2 + 2400000000*q*r**7*s**2 - 437400*p**13*s**3 - 10145250*p**10*q**2*s**3 - 121912500*p**7*q**4*s**3 - 576531250*p**4*q**6*s**3 - 528593750*p*q**8*s**3 + 12939750*p**11*r*s**3 + 313368750*p**8*q**2*r*s**3 + 2171812500*p**5*q**4*r*s**3 + 2381718750*p**2*q**6*r*s**3 - 124638750*p**9*r**2*s**3 - 3001575000*p**6*q**2*r**2*s**3 - 12259375000*p**3*q**4*r**2*s**3 - 9985312500*q**6*r**2*s**3 + 384000000*p**7*r**3*s**3 + 13997500000*p**4*q**2*r**3*s**3 + 20749531250*p*q**4*r**3*s**3 - 553500000*p**5*r**4*s**3 - 41835000000*p**2*q**2*r**4*s**3 + 5420000000*p**3*r**5*s**3 - 16300000000*q**2*r**5*s**3 - 17600000000*p*r**6*s**3 - 7593750*p**9*q*s**4 + 289218750*p**6*q**3*s**4 + 3591406250*p**3*q**5*s**4 + 5992187500*q**7*s**4 + 658125000*p**7*q*r*s**4 - 269531250*p**4*q**3*r*s**4 - 15882812500*p*q**5*r*s**4 - 4785000000*p**5*q*r**2*s**4 + 54375781250*p**2*q**3*r**2*s**4 - 5668750000*p**3*q*r**3*s**4 + 35867187500*q**3*r**3*s**4 + 113875000000*p*q*r**4*s**4 - 544218750*p**8*s**5 - 5407031250*p**5*q**2*s**5 - 14277343750*p**2*q**4*s**5 + 5421093750*p**6*r*s**5 - 24941406250*p**3*q**2*r*s**5 - 25488281250*q**4*r*s**5 - 11500000000*p**4*r**2*s**5 - 231894531250*p*q**2*r**2*s**5 - 6250000000*p**2*r**3*s**5 - 43750000000*r**4*s**5 + 35449218750*p**4*q*s**6 + 137695312500*p*q**3*s**6 + 34667968750*p**2*q*r*s**6 + 202148437500*q*r**2*s**6 - 33691406250*p**3*s**7 - 214843750000*q**2*s**7 - 31738281250*p*r*s**7
b[2][3] = -800*p**5*q**9 - 5400*p**2*q**11 + 5800*p**6*q**7*r + 48750*p**3*q**9*r + 16200*q**11*r - 3000*p**7*q**5*r**2 - 108350*p**4*q**7*r**2 - 263250*p*q**9*r**2 - 60700*p**8*q**3*r**3 - 386250*p**5*q**5*r**3 + 253100*p**2*q**7*r**3 + 127800*p**9*q*r**4 + 2326700*p**6*q**3*r**4 + 6565550*p**3*q**5*r**4 - 705750*q**7*r**4 - 2903200*p**7*q*r**5 - 21218000*p**4*q**3*r**5 + 1057000*p*q**5*r**5 + 20368000*p**5*q*r**6 + 33000000*p**2*q**3*r**6 - 43200000*p**3*q*r**7 + 52000000*q**3*r**7 + 6200*p**7*q**6*s + 188250*p**4*q**8*s + 931500*p*q**10*s - 73800*p**8*q**4*r*s - 1466850*p**5*q**6*r*s - 6894000*p**2*q**8*r*s + 315900*p**9*q**2*r**2*s + 4547000*p**6*q**4*r**2*s + 20362500*p**3*q**6*r**2*s + 15018750*q**8*r**2*s - 653400*p**10*r**3*s - 13897550*p**7*q**2*r**3*s - 76757500*p**4*q**4*r**3*s - 124207500*p*q**6*r**3*s + 18567600*p**8*r**4*s + 175911000*p**5*q**2*r**4*s + 253787500*p**2*q**4*r**4*s - 183816000*p**6*r**5*s - 706900000*p**3*q**2*r**5*s - 665750000*q**4*r**5*s + 740000000*p**4*r**6*s + 890000000*p*q**2*r**6*s - 1040000000*p**2*r**7*s - 763000*p**6*q**5*s**2 - 12375000*p**3*q**7*s**2 - 40500000*q**9*s**2 + 364500*p**10*q*r*s**2 + 15537000*p**7*q**3*r*s**2 + 154392500*p**4*q**5*r*s**2 + 372206250*p*q**7*r*s**2 - 25481250*p**8*q*r**2*s**2 - 386300000*p**5*q**3*r**2*s**2 - 996343750*p**2*q**5*r**2*s**2 + 459872500*p**6*q*r**3*s**2 + 2943937500*p**3*q**3*r**3*s**2 + 2437781250*q**5*r**3*s**2 - 2883750000*p**4*q*r**4*s**2 - 4343750000*p*q**3*r**4*s**2 + 5495000000*p**2*q*r**5*s**2 + 1300000000*q*r**6*s**2 - 364500*p**11*s**3 - 13668750*p**8*q**2*s**3 - 113406250*p**5*q**4*s**3 - 159062500*p**2*q**6*s**3 + 13972500*p**9*r*s**3 + 61537500*p**6*q**2*r*s**3 - 1622656250*p**3*q**4*r*s**3 - 2720625000*q**6*r*s**3 - 201656250*p**7*r**2*s**3 + 1949687500*p**4*q**2*r**2*s**3 + 4979687500*p*q**4*r**2*s**3 + 497125000*p**5*r**3*s**3 - 11150625000*p**2*q**2*r**3*s**3 + 2982500000*p**3*r**4*s**3 - 6612500000*q**2*r**4*s**3 - 10450000000*p*r**5*s**3 + 126562500*p**7*q*s**4 + 
1443750000*p**4*q**3*s**4 + 281250000*p*q**5*s**4 - 1648125000*p**5*q*r*s**4 + 11271093750*p**2*q**3*r*s**4 - 4785156250*p**3*q*r**2*s**4 + 8808593750*q**3*r**2*s**4 + 52390625000*p*q*r**3*s**4 - 611718750*p**6*s**5 - 13027343750*p**3*q**2*s**5 - 1464843750*q**4*s**5 + 6492187500*p**4*r*s**5 - 65351562500*p*q**2*r*s**5 - 13476562500*p**2*r**2*s**5 - 24218750000*r**3*s**5 + 41992187500*p**2*q*s**6 + 69824218750*q*r*s**6 - 34179687500*p*s**7
b[2][2] = -1000*p**6*q**7 - 5150*p**3*q**9 + 10800*q**11 + 11000*p**7*q**5*r + 66450*p**4*q**7*r - 127800*p*q**9*r - 41250*p**8*q**3*r**2 - 368400*p**5*q**5*r**2 + 204200*p**2*q**7*r**2 + 54000*p**9*q*r**3 + 1040950*p**6*q**3*r**3 + 2096500*p**3*q**5*r**3 + 200000*q**7*r**3 - 1140000*p**7*q*r**4 - 7691000*p**4*q**3*r**4 - 2281000*p*q**5*r**4 + 7296000*p**5*q*r**5 + 13300000*p**2*q**3*r**5 - 14400000*p**3*q*r**6 + 14000000*q**3*r**6 - 9000*p**8*q**4*s + 52100*p**5*q**6*s + 710250*p**2*q**8*s + 67500*p**9*q**2*r*s - 256100*p**6*q**4*r*s - 5753000*p**3*q**6*r*s + 292500*q**8*r*s - 162000*p**10*r**2*s - 1432350*p**7*q**2*r**2*s + 5410000*p**4*q**4*r**2*s - 7408750*p*q**6*r**2*s + 4401000*p**8*r**3*s + 24185000*p**5*q**2*r**3*s + 20781250*p**2*q**4*r**3*s - 43012000*p**6*r**4*s - 146300000*p**3*q**2*r**4*s - 165875000*q**4*r**4*s + 182000000*p**4*r**5*s + 250000000*p*q**2*r**5*s - 280000000*p**2*r**6*s + 60750*p**10*q*s**2 + 2414250*p**7*q**3*s**2 + 15770000*p**4*q**5*s**2 + 15825000*p*q**7*s**2 - 6021000*p**8*q*r*s**2 - 62252500*p**5*q**3*r*s**2 - 74718750*p**2*q**5*r*s**2 + 90888750*p**6*q*r**2*s**2 + 471312500*p**3*q**3*r**2*s**2 + 525875000*q**5*r**2*s**2 - 539375000*p**4*q*r**3*s**2 - 1030000000*p*q**3*r**3*s**2 + 1142500000*p**2*q*r**4*s**2 + 350000000*q*r**5*s**2 - 303750*p**9*s**3 - 35943750*p**6*q**2*s**3 - 331875000*p**3*q**4*s**3 - 505937500*q**6*s**3 + 8437500*p**7*r*s**3 + 530781250*p**4*q**2*r*s**3 + 1150312500*p*q**4*r*s**3 - 154500000*p**5*r**2*s**3 - 2059062500*p**2*q**2*r**2*s**3 + 1150000000*p**3*r**3*s**3 - 1343750000*q**2*r**3*s**3 - 2900000000*p*r**4*s**3 + 30937500*p**5*q*s**4 + 1166406250*p**2*q**3*s**4 - 1496875000*p**3*q*r*s**4 + 1296875000*q**3*r*s**4 + 10640625000*p*q*r**2*s**4 - 281250000*p**4*s**5 - 9746093750*p*q**2*s**5 + 1269531250*p**2*r*s**5 - 7421875000*r**2*s**5 + 15625000000*q*s**6
b[2][1] = -1600*p**4*q**7 - 10800*p*q**9 + 9800*p**5*q**5*r + 80550*p**2*q**7*r - 4600*p**6*q**3*r**2 - 112700*p**3*q**5*r**2 + 40500*q**7*r**2 - 34200*p**7*q*r**3 - 279500*p**4*q**3*r**3 - 665750*p*q**5*r**3 + 632000*p**5*q*r**4 + 3200000*p**2*q**3*r**4 - 2800000*p**3*q*r**5 + 3000000*q**3*r**5 - 18600*p**6*q**4*s - 51750*p**3*q**6*s + 405000*q**8*s + 21600*p**7*q**2*r*s - 122500*p**4*q**4*r*s - 2891250*p*q**6*r*s + 156600*p**8*r**2*s + 1569750*p**5*q**2*r**2*s + 6943750*p**2*q**4*r**2*s - 3774000*p**6*r**3*s - 27100000*p**3*q**2*r**3*s - 30187500*q**4*r**3*s + 28000000*p**4*r**4*s + 52500000*p*q**2*r**4*s - 60000000*p**2*r**5*s - 81000*p**8*q*s**2 - 240000*p**5*q**3*s**2 + 937500*p**2*q**5*s**2 + 3273750*p**6*q*r*s**2 + 30406250*p**3*q**3*r*s**2 + 55687500*q**5*r*s**2 - 42187500*p**4*q*r**2*s**2 - 112812500*p*q**3*r**2*s**2 + 152500000*p**2*q*r**3*s**2 + 75000000*q*r**4*s**2 - 4218750*p**4*q**2*s**3 + 15156250*p*q**4*s**3 + 5906250*p**5*r*s**3 - 206562500*p**2*q**2*r*s**3 + 107500000*p**3*r**2*s**3 - 159375000*q**2*r**2*s**3 - 612500000*p*r**3*s**3 + 135937500*p**3*q*s**4 + 46875000*q**3*s**4 + 1175781250*p*q*r*s**4 - 292968750*p**2*s**5 - 1367187500*r*s**5
b[2][0] = -800*p**5*q**5 - 5400*p**2*q**7 + 6000*p**6*q**3*r + 51700*p**3*q**5*r + 27000*q**7*r - 10800*p**7*q*r**2 - 163250*p**4*q**3*r**2 - 285750*p*q**5*r**2 + 192000*p**5*q*r**3 + 1000000*p**2*q**3*r**3 - 800000*p**3*q*r**4 + 500000*q**3*r**4 - 10800*p**7*q**2*s - 57500*p**4*q**4*s + 67500*p*q**6*s + 32400*p**8*r*s + 279000*p**5*q**2*r*s - 131250*p**2*q**4*r*s - 729000*p**6*r**2*s - 4100000*p**3*q**2*r**2*s - 5343750*q**4*r**2*s + 5000000*p**4*r**3*s + 10000000*p*q**2*r**3*s - 10000000*p**2*r**4*s + 641250*p**6*q*s**2 + 5812500*p**3*q**3*s**2 + 10125000*q**5*s**2 - 7031250*p**4*q*r*s**2 - 20625000*p*q**3*r*s**2 + 17500000*p**2*q*r**2*s**2 + 12500000*q*r**3*s**2 - 843750*p**5*s**3 - 19375000*p**2*q**2*s**3 + 30000000*p**3*r*s**3 - 20312500*q**2*r*s**3 - 112500000*p*r**2*s**3 + 183593750*p*q*s**4 - 292968750*s**5
b[3][5] = 500*p**11*q**6 + 9875*p**8*q**8 + 42625*p**5*q**10 - 35000*p**2*q**12 - 4500*p**12*q**4*r - 108375*p**9*q**6*r - 516750*p**6*q**8*r + 1110500*p**3*q**10*r + 2730000*q**12*r + 10125*p**13*q**2*r**2 + 358250*p**10*q**4*r**2 + 1908625*p**7*q**6*r**2 - 11744250*p**4*q**8*r**2 - 43383250*p*q**10*r**2 - 313875*p**11*q**2*r**3 - 2074875*p**8*q**4*r**3 + 52094750*p**5*q**6*r**3 + 264567500*p**2*q**8*r**3 + 796125*p**9*q**2*r**4 - 92486250*p**6*q**4*r**4 - 757957500*p**3*q**6*r**4 - 29354375*q**8*r**4 + 60970000*p**7*q**2*r**5 + 1112462500*p**4*q**4*r**5 + 571094375*p*q**6*r**5 - 685290000*p**5*q**2*r**6 - 2037800000*p**2*q**4*r**6 + 2279600000*p**3*q**2*r**7 + 849000000*q**4*r**7 - 1480000000*p*q**2*r**8 + 13500*p**13*q**3*s + 363000*p**10*q**5*s + 2861250*p**7*q**7*s + 8493750*p**4*q**9*s + 17031250*p*q**11*s - 60750*p**14*q*r*s - 2319750*p**11*q**3*r*s - 22674250*p**8*q**5*r*s - 74368750*p**5*q**7*r*s - 170578125*p**2*q**9*r*s + 2760750*p**12*q*r**2*s + 46719000*p**9*q**3*r**2*s + 163356375*p**6*q**5*r**2*s + 360295625*p**3*q**7*r**2*s - 195990625*q**9*r**2*s - 37341750*p**10*q*r**3*s - 194739375*p**7*q**3*r**3*s - 105463125*p**4*q**5*r**3*s - 415825000*p*q**7*r**3*s + 90180000*p**8*q*r**4*s - 990552500*p**5*q**3*r**4*s + 3519212500*p**2*q**5*r**4*s + 1112220000*p**6*q*r**5*s - 4508750000*p**3*q**3*r**5*s - 8159500000*q**5*r**5*s - 4356000000*p**4*q*r**6*s + 14615000000*p*q**3*r**6*s - 2160000000*p**2*q*r**7*s + 91125*p**15*s**2 + 3290625*p**12*q**2*s**2 + 35100000*p**9*q**4*s**2 + 175406250*p**6*q**6*s**2 + 629062500*p**3*q**8*s**2 + 910937500*q**10*s**2 - 5710500*p**13*r*s**2 - 100423125*p**10*q**2*r*s**2 - 604743750*p**7*q**4*r*s**2 - 2954843750*p**4*q**6*r*s**2 - 4587578125*p*q**8*r*s**2 + 116194500*p**11*r**2*s**2 + 1280716250*p**8*q**2*r**2*s**2 + 7401190625*p**5*q**4*r**2*s**2 + 11619937500*p**2*q**6*r**2*s**2 - 952173125*p**9*r**3*s**2 - 6519712500*p**6*q**2*r**3*s**2 - 10238593750*p**3*q**4*r**3*s**2 + 29984609375*q**6*r**3*s**2 + 
2558300000*p**7*r**4*s**2 + 16225000000*p**4*q**2*r**4*s**2 - 64994140625*p*q**4*r**4*s**2 + 4202250000*p**5*r**5*s**2 + 46925000000*p**2*q**2*r**5*s**2 - 28950000000*p**3*r**6*s**2 - 1000000000*q**2*r**6*s**2 + 37000000000*p*r**7*s**2 - 48093750*p**11*q*s**3 - 673359375*p**8*q**3*s**3 - 2170312500*p**5*q**5*s**3 - 2466796875*p**2*q**7*s**3 + 647578125*p**9*q*r*s**3 + 597031250*p**6*q**3*r*s**3 - 7542578125*p**3*q**5*r*s**3 - 41125000000*q**7*r*s**3 - 2175828125*p**7*q*r**2*s**3 - 7101562500*p**4*q**3*r**2*s**3 + 100596875000*p*q**5*r**2*s**3 - 8984687500*p**5*q*r**3*s**3 - 120070312500*p**2*q**3*r**3*s**3 + 57343750000*p**3*q*r**4*s**3 + 9500000000*q**3*r**4*s**3 - 342875000000*p*q*r**5*s**3 + 400781250*p**10*s**4 + 8531250000*p**7*q**2*s**4 + 34033203125*p**4*q**4*s**4 + 42724609375*p*q**6*s**4 - 6289453125*p**8*r*s**4 - 24037109375*p**5*q**2*r*s**4 - 62626953125*p**2*q**4*r*s**4 + 17299218750*p**6*r**2*s**4 + 108357421875*p**3*q**2*r**2*s**4 - 55380859375*q**4*r**2*s**4 + 105648437500*p**4*r**3*s**4 + 1204228515625*p*q**2*r**3*s**4 - 365000000000*p**2*r**4*s**4 + 184375000000*r**5*s**4 - 32080078125*p**6*q*s**5 - 98144531250*p**3*q**3*s**5 + 93994140625*q**5*s**5 - 178955078125*p**4*q*r*s**5 - 1299804687500*p*q**3*r*s**5 + 332421875000*p**2*q*r**2*s**5 - 1195312500000*q*r**3*s**5 + 72021484375*p**5*s**6 + 323486328125*p**2*q**2*s**6 + 682373046875*p**3*r*s**6 + 2447509765625*q**2*r*s**6 - 3011474609375*p*r**2*s**6 + 3051757812500*p*q*s**7 - 7629394531250*s**8
b[3][4] = 1500*p**9*q**6 + 69625*p**6*q**8 + 590375*p**3*q**10 + 1035000*q**12 - 13500*p**10*q**4*r - 760625*p**7*q**6*r - 7904500*p**4*q**8*r - 18169250*p*q**10*r + 30375*p**11*q**2*r**2 + 2628625*p**8*q**4*r**2 + 37879000*p**5*q**6*r**2 + 121367500*p**2*q**8*r**2 - 2699250*p**9*q**2*r**3 - 76776875*p**6*q**4*r**3 - 403583125*p**3*q**6*r**3 - 78865625*q**8*r**3 + 60907500*p**7*q**2*r**4 + 735291250*p**4*q**4*r**4 + 781142500*p*q**6*r**4 - 558270000*p**5*q**2*r**5 - 2150725000*p**2*q**4*r**5 + 2015400000*p**3*q**2*r**6 + 1181000000*q**4*r**6 - 2220000000*p*q**2*r**7 + 40500*p**11*q**3*s + 1376500*p**8*q**5*s + 9953125*p**5*q**7*s + 9765625*p**2*q**9*s - 182250*p**12*q*r*s - 8859000*p**9*q**3*r*s - 82854500*p**6*q**5*r*s - 71511250*p**3*q**7*r*s + 273631250*q**9*r*s + 10233000*p**10*q*r**2*s + 179627500*p**7*q**3*r**2*s + 25164375*p**4*q**5*r**2*s - 2927290625*p*q**7*r**2*s - 171305000*p**8*q*r**3*s - 544768750*p**5*q**3*r**3*s + 7583437500*p**2*q**5*r**3*s + 1139860000*p**6*q*r**4*s - 6489375000*p**3*q**3*r**4*s - 9625375000*q**5*r**4*s - 1838000000*p**4*q*r**5*s + 19835000000*p*q**3*r**5*s - 3240000000*p**2*q*r**6*s + 273375*p**13*s**2 + 9753750*p**10*q**2*s**2 + 82575000*p**7*q**4*s**2 + 202265625*p**4*q**6*s**2 + 556093750*p*q**8*s**2 - 11552625*p**11*r*s**2 - 115813125*p**8*q**2*r*s**2 + 630590625*p**5*q**4*r*s**2 + 1347015625*p**2*q**6*r*s**2 + 157578750*p**9*r**2*s**2 - 689206250*p**6*q**2*r**2*s**2 - 4299609375*p**3*q**4*r**2*s**2 + 23896171875*q**6*r**2*s**2 - 1022437500*p**7*r**3*s**2 + 6648125000*p**4*q**2*r**3*s**2 - 52895312500*p*q**4*r**3*s**2 + 4401750000*p**5*r**4*s**2 + 26500000000*p**2*q**2*r**4*s**2 - 22125000000*p**3*r**5*s**2 - 1500000000*q**2*r**5*s**2 + 55500000000*p*r**6*s**2 - 137109375*p**9*q*s**3 - 1955937500*p**6*q**3*s**3 - 6790234375*p**3*q**5*s**3 - 16996093750*q**7*s**3 + 2146218750*p**7*q*r*s**3 + 6570312500*p**4*q**3*r*s**3 + 39918750000*p*q**5*r*s**3 - 7673281250*p**5*q*r**2*s**3 - 52000000000*p**2*q**3*r**2*s**3 + 
50796875000*p**3*q*r**3*s**3 + 18750000000*q**3*r**3*s**3 - 399875000000*p*q*r**4*s**3 + 780468750*p**8*s**4 + 14455078125*p**5*q**2*s**4 + 10048828125*p**2*q**4*s**4 - 15113671875*p**6*r*s**4 + 39298828125*p**3*q**2*r*s**4 - 52138671875*q**4*r*s**4 + 45964843750*p**4*r**2*s**4 + 914414062500*p*q**2*r**2*s**4 + 1953125000*p**2*r**3*s**4 + 334375000000*r**4*s**4 - 149169921875*p**4*q*s**5 - 459716796875*p*q**3*s**5 - 325585937500*p**2*q*r*s**5 - 1462890625000*q*r**2*s**5 + 296630859375*p**3*s**6 + 1324462890625*q**2*s**6 + 307617187500*p*r*s**6
b[3][3] = -20750*p**7*q**6 - 290125*p**4*q**8 - 993000*p*q**10 + 146125*p**8*q**4*r + 2721500*p**5*q**6*r + 11833750*p**2*q**8*r - 237375*p**9*q**2*r**2 - 8167500*p**6*q**4*r**2 - 54605625*p**3*q**6*r**2 - 23802500*q**8*r**2 + 8927500*p**7*q**2*r**3 + 131184375*p**4*q**4*r**3 + 254695000*p*q**6*r**3 - 121561250*p**5*q**2*r**4 - 728003125*p**2*q**4*r**4 + 702550000*p**3*q**2*r**5 + 597312500*q**4*r**5 - 1202500000*p*q**2*r**6 - 194625*p**9*q**3*s - 1568875*p**6*q**5*s + 9685625*p**3*q**7*s + 74662500*q**9*s + 327375*p**10*q*r*s + 1280000*p**7*q**3*r*s - 123703750*p**4*q**5*r*s - 850121875*p*q**7*r*s - 7436250*p**8*q*r**2*s + 164820000*p**5*q**3*r**2*s + 2336659375*p**2*q**5*r**2*s + 32202500*p**6*q*r**3*s - 2429765625*p**3*q**3*r**3*s - 4318609375*q**5*r**3*s + 148000000*p**4*q*r**4*s + 9902812500*p*q**3*r**4*s - 1755000000*p**2*q*r**5*s + 1154250*p**11*s**2 + 36821250*p**8*q**2*s**2 + 372825000*p**5*q**4*s**2 + 1170921875*p**2*q**6*s**2 - 38913750*p**9*r*s**2 - 797071875*p**6*q**2*r*s**2 - 2848984375*p**3*q**4*r*s**2 + 7651406250*q**6*r*s**2 + 415068750*p**7*r**2*s**2 + 3151328125*p**4*q**2*r**2*s**2 - 17696875000*p*q**4*r**2*s**2 - 725968750*p**5*r**3*s**2 + 5295312500*p**2*q**2*r**3*s**2 - 8581250000*p**3*r**4*s**2 - 812500000*q**2*r**4*s**2 + 30062500000*p*r**5*s**2 - 110109375*p**7*q*s**3 - 1976562500*p**4*q**3*s**3 - 6329296875*p*q**5*s**3 + 2256328125*p**5*q*r*s**3 + 8554687500*p**2*q**3*r*s**3 + 12947265625*p**3*q*r**2*s**3 + 7984375000*q**3*r**2*s**3 - 167039062500*p*q*r**3*s**3 + 1181250000*p**6*s**4 + 17873046875*p**3*q**2*s**4 - 20449218750*q**4*s**4 - 16265625000*p**4*r*s**4 + 260869140625*p*q**2*r*s**4 + 21025390625*p**2*r**2*s**4 + 207617187500*r**3*s**4 - 207177734375*p**2*q*s**5 - 615478515625*q*r*s**5 + 301513671875*p*s**6
b[3][2] = 53125*p**5*q**6 + 425000*p**2*q**8 - 394375*p**6*q**4*r - 4301875*p**3*q**6*r - 3225000*q**8*r + 851250*p**7*q**2*r**2 + 16910625*p**4*q**4*r**2 + 44210000*p*q**6*r**2 - 20474375*p**5*q**2*r**3 - 147190625*p**2*q**4*r**3 + 163975000*p**3*q**2*r**4 + 156812500*q**4*r**4 - 323750000*p*q**2*r**5 - 99375*p**7*q**3*s - 6395000*p**4*q**5*s - 49243750*p*q**7*s - 1164375*p**8*q*r*s + 4465625*p**5*q**3*r*s + 205546875*p**2*q**5*r*s + 12163750*p**6*q*r**2*s - 315546875*p**3*q**3*r**2*s - 946453125*q**5*r**2*s - 23500000*p**4*q*r**3*s + 2313437500*p*q**3*r**3*s - 472500000*p**2*q*r**4*s + 1316250*p**9*s**2 + 22715625*p**6*q**2*s**2 + 206953125*p**3*q**4*s**2 + 1220000000*q**6*s**2 - 20953125*p**7*r*s**2 - 277656250*p**4*q**2*r*s**2 - 3317187500*p*q**4*r*s**2 + 293734375*p**5*r**2*s**2 + 1351562500*p**2*q**2*r**2*s**2 - 2278125000*p**3*r**3*s**2 - 218750000*q**2*r**3*s**2 + 8093750000*p*r**4*s**2 - 9609375*p**5*q*s**3 + 240234375*p**2*q**3*s**3 + 2310546875*p**3*q*r*s**3 + 1171875000*q**3*r*s**3 - 33460937500*p*q*r**2*s**3 + 2185546875*p**4*s**4 + 32578125000*p*q**2*s**4 - 8544921875*p**2*r*s**4 + 58398437500*r**2*s**4 - 114013671875*q*s**5
b[3][1] = -16250*p**6*q**4 - 191875*p**3*q**6 - 495000*q**8 + 73125*p**7*q**2*r + 1437500*p**4*q**4*r + 5866250*p*q**6*r - 2043125*p**5*q**2*r**2 - 17218750*p**2*q**4*r**2 + 19106250*p**3*q**2*r**3 + 34015625*q**4*r**3 - 69375000*p*q**2*r**4 - 219375*p**8*q*s - 2846250*p**5*q**3*s - 8021875*p**2*q**5*s + 3420000*p**6*q*r*s - 1640625*p**3*q**3*r*s - 152468750*q**5*r*s + 3062500*p**4*q*r**2*s + 381171875*p*q**3*r**2*s - 101250000*p**2*q*r**3*s + 2784375*p**7*s**2 + 43515625*p**4*q**2*s**2 + 115625000*p*q**4*s**2 - 48140625*p**5*r*s**2 - 307421875*p**2*q**2*r*s**2 - 25781250*p**3*r**2*s**2 - 46875000*q**2*r**2*s**2 + 1734375000*p*r**3*s**2 - 128906250*p**3*q*s**3 + 339843750*q**3*s**3 - 4583984375*p*q*r*s**3 + 2236328125*p**2*s**4 + 12255859375*r*s**4
b[3][0] = 31875*p**4*q**4 + 255000*p*q**6 - 82500*p**5*q**2*r - 1106250*p**2*q**4*r + 1653125*p**3*q**2*r**2 + 5187500*q**4*r**2 - 11562500*p*q**2*r**3 - 118125*p**6*q*s - 3593750*p**3*q**3*s - 23812500*q**5*s + 4656250*p**4*q*r*s + 67109375*p*q**3*r*s - 16875000*p**2*q*r**2*s - 984375*p**5*s**2 - 19531250*p**2*q**2*s**2 - 37890625*p**3*r*s**2 - 7812500*q**2*r*s**2 + 289062500*p*r**2*s**2 - 529296875*p*q*s**3 + 2343750000*s**4
b[4][5] = 600*p**10*q**10 + 13850*p**7*q**12 + 106150*p**4*q**14 + 270000*p*q**16 - 9300*p**11*q**8*r - 234075*p**8*q**10*r - 1942825*p**5*q**12*r - 5319900*p**2*q**14*r + 52050*p**12*q**6*r**2 + 1481025*p**9*q**8*r**2 + 13594450*p**6*q**10*r**2 + 40062750*p**3*q**12*r**2 - 3569400*q**14*r**2 - 122175*p**13*q**4*r**3 - 4260350*p**10*q**6*r**3 - 45052375*p**7*q**8*r**3 - 142634900*p**4*q**10*r**3 + 54186350*p*q**12*r**3 + 97200*p**14*q**2*r**4 + 5284225*p**11*q**4*r**4 + 70389525*p**8*q**6*r**4 + 232732850*p**5*q**8*r**4 - 318849400*p**2*q**10*r**4 - 2046000*p**12*q**2*r**5 - 43874125*p**9*q**4*r**5 - 107411850*p**6*q**6*r**5 + 948310700*p**3*q**8*r**5 - 34763575*q**10*r**5 + 5915600*p**10*q**2*r**6 - 115887800*p**7*q**4*r**6 - 1649542400*p**4*q**6*r**6 + 224468875*p*q**8*r**6 + 120252800*p**8*q**2*r**7 + 1779902000*p**5*q**4*r**7 - 288250000*p**2*q**6*r**7 - 915200000*p**6*q**2*r**8 - 1164000000*p**3*q**4*r**8 - 444200000*q**6*r**8 + 2502400000*p**4*q**2*r**9 + 1984000000*p*q**4*r**9 - 2880000000*p**2*q**2*r**10 + 20700*p**12*q**7*s + 551475*p**9*q**9*s + 5194875*p**6*q**11*s + 18985000*p**3*q**13*s + 16875000*q**15*s - 218700*p**13*q**5*r*s - 6606475*p**10*q**7*r*s - 69770850*p**7*q**9*r*s - 285325500*p**4*q**11*r*s - 292005000*p*q**13*r*s + 694575*p**14*q**3*r**2*s + 26187750*p**11*q**5*r**2*s + 328992825*p**8*q**7*r**2*s + 1573292400*p**5*q**9*r**2*s + 1930043875*p**2*q**11*r**2*s - 583200*p**15*q*r**3*s - 37263225*p**12*q**3*r**3*s - 638579425*p**9*q**5*r**3*s - 3920212225*p**6*q**7*r**3*s - 6327336875*p**3*q**9*r**3*s + 440969375*q**11*r**3*s + 13446000*p**13*q*r**4*s + 462330325*p**10*q**3*r**4*s + 4509088275*p**7*q**5*r**4*s + 11709795625*p**4*q**7*r**4*s - 3579565625*p*q**9*r**4*s - 85033600*p**11*q*r**5*s - 2136801600*p**8*q**3*r**5*s - 12221575800*p**5*q**5*r**5*s + 9431044375*p**2*q**7*r**5*s + 10643200*p**9*q*r**6*s + 4565594000*p**6*q**3*r**6*s - 1778590000*p**3*q**5*r**6*s + 4842175000*q**7*r**6*s + 712320000*p**7*q*r**7*s - 
16182000000*p**4*q**3*r**7*s - 21918000000*p*q**5*r**7*s - 742400000*p**5*q*r**8*s + 31040000000*p**2*q**3*r**8*s + 1280000000*p**3*q*r**9*s + 4800000000*q**3*r**9*s + 230850*p**14*q**4*s**2 + 7373250*p**11*q**6*s**2 + 85045625*p**8*q**8*s**2 + 399140625*p**5*q**10*s**2 + 565031250*p**2*q**12*s**2 - 1257525*p**15*q**2*r*s**2 - 52728975*p**12*q**4*r*s**2 - 743466375*p**9*q**6*r*s**2 - 4144915000*p**6*q**8*r*s**2 - 7102690625*p**3*q**10*r*s**2 - 1389937500*q**12*r*s**2 + 874800*p**16*r**2*s**2 + 89851275*p**13*q**2*r**2*s**2 + 1897236775*p**10*q**4*r**2*s**2 + 14144163000*p**7*q**6*r**2*s**2 + 31942921875*p**4*q**8*r**2*s**2 + 13305118750*p*q**10*r**2*s**2 - 23004000*p**14*r**3*s**2 - 1450715475*p**11*q**2*r**3*s**2 - 19427105000*p**8*q**4*r**3*s**2 - 70634028750*p**5*q**6*r**3*s**2 - 47854218750*p**2*q**8*r**3*s**2 + 204710400*p**12*r**4*s**2 + 10875135000*p**9*q**2*r**4*s**2 + 83618806250*p**6*q**4*r**4*s**2 + 62744500000*p**3*q**6*r**4*s**2 - 19806718750*q**8*r**4*s**2 - 757094800*p**10*r**5*s**2 - 37718030000*p**7*q**2*r**5*s**2 - 22479500000*p**4*q**4*r**5*s**2 + 91556093750*p*q**6*r**5*s**2 + 2306320000*p**8*r**6*s**2 + 55539600000*p**5*q**2*r**6*s**2 - 112851250000*p**2*q**4*r**6*s**2 - 10720000000*p**6*r**7*s**2 - 64720000000*p**3*q**2*r**7*s**2 - 59925000000*q**4*r**7*s**2 + 28000000000*p**4*r**8*s**2 + 28000000000*p*q**2*r**8*s**2 - 24000000000*p**2*r**9*s**2 + 820125*p**16*q*s**3 + 36804375*p**13*q**3*s**3 + 552225000*p**10*q**5*s**3 + 3357593750*p**7*q**7*s**3 + 7146562500*p**4*q**9*s**3 + 3851562500*p*q**11*s**3 - 92400750*p**14*q*r*s**3 - 2350175625*p**11*q**3*r*s**3 - 19470640625*p**8*q**5*r*s**3 - 52820593750*p**5*q**7*r*s**3 - 45447734375*p**2*q**9*r*s**3 + 1824363000*p**12*q*r**2*s**3 + 31435234375*p**9*q**3*r**2*s**3 + 141717537500*p**6*q**5*r**2*s**3 + 228370781250*p**3*q**7*r**2*s**3 + 34610078125*q**9*r**2*s**3 - 17591825625*p**10*q*r**3*s**3 - 188927187500*p**7*q**3*r**3*s**3 - 502088984375*p**4*q**5*r**3*s**3 - 187849296875*p*q**7*r**3*s**3 + 
75577750000*p**8*q*r**4*s**3 + 342800000000*p**5*q**3*r**4*s**3 + 295384296875*p**2*q**5*r**4*s**3 - 107681250000*p**6*q*r**5*s**3 + 53330000000*p**3*q**3*r**5*s**3 + 271586875000*q**5*r**5*s**3 - 26410000000*p**4*q*r**6*s**3 - 188200000000*p*q**3*r**6*s**3 + 92000000000*p**2*q*r**7*s**3 + 120000000000*q*r**8*s**3 + 47840625*p**15*s**4 + 1150453125*p**12*q**2*s**4 + 9229453125*p**9*q**4*s**4 + 24954687500*p**6*q**6*s**4 + 22978515625*p**3*q**8*s**4 + 1367187500*q**10*s**4 - 1193737500*p**13*r*s**4 - 20817843750*p**10*q**2*r*s**4 - 98640000000*p**7*q**4*r*s**4 - 225767187500*p**4*q**6*r*s**4 - 74707031250*p*q**8*r*s**4 + 13431318750*p**11*r**2*s**4 + 188709843750*p**8*q**2*r**2*s**4 + 875157656250*p**5*q**4*r**2*s**4 + 593812890625*p**2*q**6*r**2*s**4 - 69869296875*p**9*r**3*s**4 - 854811093750*p**6*q**2*r**3*s**4 - 1730658203125*p**3*q**4*r**3*s**4 - 570867187500*q**6*r**3*s**4 + 162075625000*p**7*r**4*s**4 + 1536375000000*p**4*q**2*r**4*s**4 + 765156250000*p*q**4*r**4*s**4 - 165988750000*p**5*r**5*s**4 - 728968750000*p**2*q**2*r**5*s**4 + 121500000000*p**3*r**6*s**4 - 1039375000000*q**2*r**6*s**4 - 100000000000*p*r**7*s**4 - 379687500*p**11*q*s**5 - 11607421875*p**8*q**3*s**5 - 20830078125*p**5*q**5*s**5 - 33691406250*p**2*q**7*s**5 - 41491406250*p**9*q*r*s**5 - 419054687500*p**6*q**3*r*s**5 - 129511718750*p**3*q**5*r*s**5 + 311767578125*q**7*r*s**5 + 620116015625*p**7*q*r**2*s**5 + 1154687500000*p**4*q**3*r**2*s**5 + 36455078125*p*q**5*r**2*s**5 - 2265953125000*p**5*q*r**3*s**5 - 1509521484375*p**2*q**3*r**3*s**5 + 2530468750000*p**3*q*r**4*s**5 + 3259765625000*q**3*r**4*s**5 + 93750000000*p*q*r**5*s**5 + 23730468750*p**10*s**6 + 243603515625*p**7*q**2*s**6 + 341552734375*p**4*q**4*s**6 - 12207031250*p*q**6*s**6 - 357099609375*p**8*r*s**6 - 298193359375*p**5*q**2*r*s**6 + 406738281250*p**2*q**4*r*s**6 + 1615683593750*p**6*r**2*s**6 + 558593750000*p**3*q**2*r**2*s**6 - 2811035156250*q**4*r**2*s**6 - 2960937500000*p**4*r**3*s**6 - 3802246093750*p*q**2*r**3*s**6 + 
2347656250000*p**2*r**4*s**6 - 671875000000*r**5*s**6 - 651855468750*p**6*q*s**7 - 1458740234375*p**3*q**3*s**7 - 152587890625*q**5*s**7 + 1628417968750*p**4*q*r*s**7 + 3948974609375*p*q**3*r*s**7 - 916748046875*p**2*q*r**2*s**7 + 1611328125000*q*r**3*s**7 + 640869140625*p**5*s**8 + 1068115234375*p**2*q**2*s**8 - 2044677734375*p**3*r*s**8 - 3204345703125*q**2*r*s**8 + 1739501953125*p*r**2*s**8
b[4][4] = -600*p**11*q**8 - 14050*p**8*q**10 - 109100*p**5*q**12 - 280800*p**2*q**14 + 7200*p**12*q**6*r + 188700*p**9*q**8*r + 1621725*p**6*q**10*r + 4577075*p**3*q**12*r + 5400*q**14*r - 28350*p**13*q**4*r**2 - 910600*p**10*q**6*r**2 - 9237975*p**7*q**8*r**2 - 30718900*p**4*q**10*r**2 - 5575950*p*q**12*r**2 + 36450*p**14*q**2*r**3 + 1848125*p**11*q**4*r**3 + 25137775*p**8*q**6*r**3 + 109591450*p**5*q**8*r**3 + 70627650*p**2*q**10*r**3 - 1317150*p**12*q**2*r**4 - 32857100*p**9*q**4*r**4 - 219125575*p**6*q**6*r**4 - 327565875*p**3*q**8*r**4 - 13011875*q**10*r**4 + 16484150*p**10*q**2*r**5 + 222242250*p**7*q**4*r**5 + 642173750*p**4*q**6*r**5 + 101263750*p*q**8*r**5 - 79345000*p**8*q**2*r**6 - 433180000*p**5*q**4*r**6 - 93731250*p**2*q**6*r**6 - 74300000*p**6*q**2*r**7 - 1057900000*p**3*q**4*r**7 - 591175000*q**6*r**7 + 1891600000*p**4*q**2*r**8 + 2796000000*p*q**4*r**8 - 4320000000*p**2*q**2*r**9 - 16200*p**13*q**5*s - 359500*p**10*q**7*s - 2603825*p**7*q**9*s - 4590375*p**4*q**11*s + 12352500*p*q**13*s + 121500*p**14*q**3*r*s + 3227400*p**11*q**5*r*s + 27301725*p**8*q**7*r*s + 59480975*p**5*q**9*r*s - 137308875*p**2*q**11*r*s - 218700*p**15*q*r**2*s - 8903925*p**12*q**3*r**2*s - 100918225*p**9*q**5*r**2*s - 325291300*p**6*q**7*r**2*s + 365705000*p**3*q**9*r**2*s + 94342500*q**11*r**2*s + 7632900*p**13*q*r**3*s + 162995400*p**10*q**3*r**3*s + 974558975*p**7*q**5*r**3*s + 930991250*p**4*q**7*r**3*s - 495368750*p*q**9*r**3*s - 97344900*p**11*q*r**4*s - 1406739250*p**8*q**3*r**4*s - 5572526250*p**5*q**5*r**4*s - 1903987500*p**2*q**7*r**4*s + 678550000*p**9*q*r**5*s + 8176215000*p**6*q**3*r**5*s + 18082050000*p**3*q**5*r**5*s + 5435843750*q**7*r**5*s - 2979800000*p**7*q*r**6*s - 29163500000*p**4*q**3*r**6*s - 27417500000*p*q**5*r**6*s + 6282400000*p**5*q*r**7*s + 48690000000*p**2*q**3*r**7*s - 2880000000*p**3*q*r**8*s + 7200000000*q**3*r**8*s - 109350*p**15*q**2*s**2 - 2405700*p**12*q**4*s**2 - 16125250*p**9*q**6*s**2 - 4930000*p**6*q**8*s**2 + 
201150000*p**3*q**10*s**2 - 243000000*q**12*s**2 + 328050*p**16*r*s**2 + 10552275*p**13*q**2*r*s**2 + 88019100*p**10*q**4*r*s**2 - 4208625*p**7*q**6*r*s**2 - 1920390625*p**4*q**8*r*s**2 + 1759537500*p*q**10*r*s**2 - 11955600*p**14*r**2*s**2 - 196375050*p**11*q**2*r**2*s**2 - 555196250*p**8*q**4*r**2*s**2 + 4213270000*p**5*q**6*r**2*s**2 - 157468750*p**2*q**8*r**2*s**2 + 162656100*p**12*r**3*s**2 + 1880870000*p**9*q**2*r**3*s**2 + 753684375*p**6*q**4*r**3*s**2 - 25423062500*p**3*q**6*r**3*s**2 - 14142031250*q**8*r**3*s**2 - 1251948750*p**10*r**4*s**2 - 12524475000*p**7*q**2*r**4*s**2 + 18067656250*p**4*q**4*r**4*s**2 + 60531875000*p*q**6*r**4*s**2 + 6827725000*p**8*r**5*s**2 + 57157000000*p**5*q**2*r**5*s**2 - 75844531250*p**2*q**4*r**5*s**2 - 24452500000*p**6*r**6*s**2 - 144950000000*p**3*q**2*r**6*s**2 - 82109375000*q**4*r**6*s**2 + 46950000000*p**4*r**7*s**2 + 60000000000*p*q**2*r**7*s**2 - 36000000000*p**2*r**8*s**2 + 1549125*p**14*q*s**3 + 51873750*p**11*q**3*s**3 + 599781250*p**8*q**5*s**3 + 2421156250*p**5*q**7*s**3 - 1693515625*p**2*q**9*s**3 - 104884875*p**12*q*r*s**3 - 1937437500*p**9*q**3*r*s**3 - 11461053125*p**6*q**5*r*s**3 + 10299375000*p**3*q**7*r*s**3 + 10551250000*q**9*r*s**3 + 1336263750*p**10*q*r**2*s**3 + 23737250000*p**7*q**3*r**2*s**3 + 57136718750*p**4*q**5*r**2*s**3 - 8288906250*p*q**7*r**2*s**3 - 10907218750*p**8*q*r**3*s**3 - 160615000000*p**5*q**3*r**3*s**3 - 111134687500*p**2*q**5*r**3*s**3 + 46743125000*p**6*q*r**4*s**3 + 570509375000*p**3*q**3*r**4*s**3 + 274839843750*q**5*r**4*s**3 - 73312500000*p**4*q*r**5*s**3 - 145437500000*p*q**3*r**5*s**3 + 8750000000*p**2*q*r**6*s**3 + 180000000000*q*r**7*s**3 + 15946875*p**13*s**4 + 1265625*p**10*q**2*s**4 - 3282343750*p**7*q**4*s**4 - 38241406250*p**4*q**6*s**4 - 40136718750*p*q**8*s**4 - 113146875*p**11*r*s**4 - 2302734375*p**8*q**2*r*s**4 + 68450156250*p**5*q**4*r*s**4 + 177376562500*p**2*q**6*r*s**4 + 3164062500*p**9*r**2*s**4 + 14392890625*p**6*q**2*r**2*s**4 - 
543781250000*p**3*q**4*r**2*s**4 - 319769531250*q**6*r**2*s**4 - 21048281250*p**7*r**3*s**4 - 240687500000*p**4*q**2*r**3*s**4 - 228164062500*p*q**4*r**3*s**4 + 23062500000*p**5*r**4*s**4 + 300410156250*p**2*q**2*r**4*s**4 + 93437500000*p**3*r**5*s**4 - 1141015625000*q**2*r**5*s**4 - 187500000000*p*r**6*s**4 + 1761328125*p**9*q*s**5 - 3177734375*p**6*q**3*s**5 + 60019531250*p**3*q**5*s**5 + 108398437500*q**7*s**5 + 24106640625*p**7*q*r*s**5 + 429589843750*p**4*q**3*r*s**5 + 410371093750*p*q**5*r*s**5 - 23582031250*p**5*q*r**2*s**5 + 202441406250*p**2*q**3*r**2*s**5 - 383203125000*p**3*q*r**3*s**5 + 2232910156250*q**3*r**3*s**5 + 1500000000000*p*q*r**4*s**5 - 13710937500*p**8*s**6 - 202832031250*p**5*q**2*s**6 - 531738281250*p**2*q**4*s**6 + 73330078125*p**6*r*s**6 - 3906250000*p**3*q**2*r*s**6 - 1275878906250*q**4*r*s**6 - 121093750000*p**4*r**2*s**6 - 3308593750000*p*q**2*r**2*s**6 + 18066406250*p**2*r**3*s**6 - 244140625000*r**4*s**6 + 327148437500*p**4*q*s**7 + 1672363281250*p*q**3*s**7 + 446777343750*p**2*q*r*s**7 + 1232910156250*q*r**2*s**7 - 274658203125*p**3*s**8 - 1068115234375*q**2*s**8 - 61035156250*p*r*s**8
b[4][3] = 200*p**9*q**8 + 7550*p**6*q**10 + 78650*p**3*q**12 + 248400*q**14 - 4800*p**10*q**6*r - 164300*p**7*q**8*r - 1709575*p**4*q**10*r - 5566500*p*q**12*r + 31050*p**11*q**4*r**2 + 1116175*p**8*q**6*r**2 + 12674650*p**5*q**8*r**2 + 45333850*p**2*q**10*r**2 - 60750*p**12*q**2*r**3 - 2872725*p**9*q**4*r**3 - 40403050*p**6*q**6*r**3 - 173564375*p**3*q**8*r**3 - 11242250*q**10*r**3 + 2174100*p**10*q**2*r**4 + 54010000*p**7*q**4*r**4 + 331074875*p**4*q**6*r**4 + 114173750*p*q**8*r**4 - 24858500*p**8*q**2*r**5 - 300875000*p**5*q**4*r**5 - 319430625*p**2*q**6*r**5 + 69810000*p**6*q**2*r**6 - 23900000*p**3*q**4*r**6 - 294662500*q**6*r**6 + 524200000*p**4*q**2*r**7 + 1432000000*p*q**4*r**7 - 2340000000*p**2*q**2*r**8 + 5400*p**11*q**5*s + 310400*p**8*q**7*s + 3591725*p**5*q**9*s + 11556750*p**2*q**11*s - 105300*p**12*q**3*r*s - 4234650*p**9*q**5*r*s - 49928875*p**6*q**7*r*s - 174078125*p**3*q**9*r*s + 18000000*q**11*r*s + 364500*p**13*q*r**2*s + 15763050*p**10*q**3*r**2*s + 220187400*p**7*q**5*r**2*s + 929609375*p**4*q**7*r**2*s - 43653125*p*q**9*r**2*s - 13427100*p**11*q*r**3*s - 346066250*p**8*q**3*r**3*s - 2287673375*p**5*q**5*r**3*s - 1403903125*p**2*q**7*r**3*s + 184586000*p**9*q*r**4*s + 2983460000*p**6*q**3*r**4*s + 8725818750*p**3*q**5*r**4*s + 2527734375*q**7*r**4*s - 1284480000*p**7*q*r**5*s - 13138250000*p**4*q**3*r**5*s - 14001625000*p*q**5*r**5*s + 4224800000*p**5*q*r**6*s + 27460000000*p**2*q**3*r**6*s - 3760000000*p**3*q*r**7*s + 3900000000*q**3*r**7*s + 36450*p**13*q**2*s**2 + 2765475*p**10*q**4*s**2 + 34027625*p**7*q**6*s**2 + 97375000*p**4*q**8*s**2 - 88275000*p*q**10*s**2 - 546750*p**14*r*s**2 - 21961125*p**11*q**2*r*s**2 - 273059375*p**8*q**4*r*s**2 - 761562500*p**5*q**6*r*s**2 + 1869656250*p**2*q**8*r*s**2 + 20545650*p**12*r**2*s**2 + 473934375*p**9*q**2*r**2*s**2 + 1758053125*p**6*q**4*r**2*s**2 - 8743359375*p**3*q**6*r**2*s**2 - 4154375000*q**8*r**2*s**2 - 296559000*p**10*r**3*s**2 - 4065056250*p**7*q**2*r**3*s**2 - 186328125*p**4*q**4*r**3*s**2 
+ 19419453125*p*q**6*r**3*s**2 + 2326262500*p**8*r**4*s**2 + 21189375000*p**5*q**2*r**4*s**2 - 26301953125*p**2*q**4*r**4*s**2 - 10513250000*p**6*r**5*s**2 - 69937500000*p**3*q**2*r**5*s**2 - 42257812500*q**4*r**5*s**2 + 23375000000*p**4*r**6*s**2 + 40750000000*p*q**2*r**6*s**2 - 19500000000*p**2*r**7*s**2 + 4009500*p**12*q*s**3 + 36140625*p**9*q**3*s**3 - 335459375*p**6*q**5*s**3 - 2695312500*p**3*q**7*s**3 - 1486250000*q**9*s**3 + 102515625*p**10*q*r*s**3 + 4006812500*p**7*q**3*r*s**3 + 27589609375*p**4*q**5*r*s**3 + 20195312500*p*q**7*r*s**3 - 2792812500*p**8*q*r**2*s**3 - 44115156250*p**5*q**3*r**2*s**3 - 72609453125*p**2*q**5*r**2*s**3 + 18752500000*p**6*q*r**3*s**3 + 218140625000*p**3*q**3*r**3*s**3 + 109940234375*q**5*r**3*s**3 - 21893750000*p**4*q*r**4*s**3 - 65187500000*p*q**3*r**4*s**3 - 31000000000*p**2*q*r**5*s**3 + 97500000000*q*r**6*s**3 - 86568750*p**11*s**4 - 1955390625*p**8*q**2*s**4 - 8960781250*p**5*q**4*s**4 - 1357812500*p**2*q**6*s**4 + 1657968750*p**9*r*s**4 + 10467187500*p**6*q**2*r*s**4 - 55292968750*p**3*q**4*r*s**4 - 60683593750*q**6*r*s**4 - 11473593750*p**7*r**2*s**4 - 123281250000*p**4*q**2*r**2*s**4 - 164912109375*p*q**4*r**2*s**4 + 13150000000*p**5*r**3*s**4 + 190751953125*p**2*q**2*r**3*s**4 + 61875000000*p**3*r**4*s**4 - 467773437500*q**2*r**4*s**4 - 118750000000*p*r**5*s**4 + 7583203125*p**7*q*s**5 + 54638671875*p**4*q**3*s**5 + 39423828125*p*q**5*s**5 + 32392578125*p**5*q*r*s**5 + 278515625000*p**2*q**3*r*s**5 - 298339843750*p**3*q*r**2*s**5 + 560791015625*q**3*r**2*s**5 + 720703125000*p*q*r**3*s**5 - 19687500000*p**6*s**6 - 159667968750*p**3*q**2*s**6 - 72265625000*q**4*s**6 + 116699218750*p**4*r*s**6 - 924072265625*p*q**2*r*s**6 - 156005859375*p**2*r**2*s**6 - 112304687500*r**3*s**6 + 349121093750*p**2*q*s**7 + 396728515625*q*r*s**7 - 213623046875*p*s**8
b[4][2] = -600*p**10*q**6 - 18450*p**7*q**8 - 174000*p**4*q**10 - 518400*p*q**12 + 5400*p**11*q**4*r + 197550*p**8*q**6*r + 2147775*p**5*q**8*r + 7219800*p**2*q**10*r - 12150*p**12*q**2*r**2 - 662200*p**9*q**4*r**2 - 9274775*p**6*q**6*r**2 - 38330625*p**3*q**8*r**2 - 5508000*q**10*r**2 + 656550*p**10*q**2*r**3 + 16233750*p**7*q**4*r**3 + 97335875*p**4*q**6*r**3 + 58271250*p*q**8*r**3 - 9845500*p**8*q**2*r**4 - 119464375*p**5*q**4*r**4 - 194431875*p**2*q**6*r**4 + 49465000*p**6*q**2*r**5 + 166000000*p**3*q**4*r**5 - 80793750*q**6*r**5 + 54400000*p**4*q**2*r**6 + 377750000*p*q**4*r**6 - 630000000*p**2*q**2*r**7 - 16200*p**12*q**3*s - 459300*p**9*q**5*s - 4207225*p**6*q**7*s - 10827500*p**3*q**9*s + 13635000*q**11*s + 72900*p**13*q*r*s + 2877300*p**10*q**3*r*s + 33239700*p**7*q**5*r*s + 107080625*p**4*q**7*r*s - 114975000*p*q**9*r*s - 3601800*p**11*q*r**2*s - 75214375*p**8*q**3*r**2*s - 387073250*p**5*q**5*r**2*s + 55540625*p**2*q**7*r**2*s + 53793000*p**9*q*r**3*s + 687176875*p**6*q**3*r**3*s + 1670018750*p**3*q**5*r**3*s + 665234375*q**7*r**3*s - 391570000*p**7*q*r**4*s - 3420125000*p**4*q**3*r**4*s - 3609625000*p*q**5*r**4*s + 1365600000*p**5*q*r**5*s + 7236250000*p**2*q**3*r**5*s - 1220000000*p**3*q*r**6*s + 1050000000*q**3*r**6*s - 109350*p**14*s**2 - 3065850*p**11*q**2*s**2 - 26908125*p**8*q**4*s**2 - 44606875*p**5*q**6*s**2 + 269812500*p**2*q**8*s**2 + 5200200*p**12*r*s**2 + 81826875*p**9*q**2*r*s**2 + 155378125*p**6*q**4*r*s**2 - 1936203125*p**3*q**6*r*s**2 - 998437500*q**8*r*s**2 - 77145750*p**10*r**2*s**2 - 745528125*p**7*q**2*r**2*s**2 + 683437500*p**4*q**4*r**2*s**2 + 4083359375*p*q**6*r**2*s**2 + 593287500*p**8*r**3*s**2 + 4799375000*p**5*q**2*r**3*s**2 - 4167578125*p**2*q**4*r**3*s**2 - 2731125000*p**6*r**4*s**2 - 18668750000*p**3*q**2*r**4*s**2 - 10480468750*q**4*r**4*s**2 + 6200000000*p**4*r**5*s**2 + 11750000000*p*q**2*r**5*s**2 - 5250000000*p**2*r**6*s**2 + 26527500*p**10*q*s**3 + 526031250*p**7*q**3*s**3 + 3160703125*p**4*q**5*s**3 + 
2650312500*p*q**7*s**3 - 448031250*p**8*q*r*s**3 - 6682968750*p**5*q**3*r*s**3 - 11642812500*p**2*q**5*r*s**3 + 2553203125*p**6*q*r**2*s**3 + 37234375000*p**3*q**3*r**2*s**3 + 21871484375*q**5*r**2*s**3 + 2803125000*p**4*q*r**3*s**3 - 10796875000*p*q**3*r**3*s**3 - 16656250000*p**2*q*r**4*s**3 + 26250000000*q*r**5*s**3 - 75937500*p**9*s**4 - 704062500*p**6*q**2*s**4 - 8363281250*p**3*q**4*s**4 - 10398437500*q**6*s**4 + 197578125*p**7*r*s**4 - 16441406250*p**4*q**2*r*s**4 - 24277343750*p*q**4*r*s**4 - 5716015625*p**5*r**2*s**4 + 31728515625*p**2*q**2*r**2*s**4 + 27031250000*p**3*r**3*s**4 - 92285156250*q**2*r**3*s**4 - 33593750000*p*r**4*s**4 + 10394531250*p**5*q*s**5 + 38037109375*p**2*q**3*s**5 - 48144531250*p**3*q*r*s**5 + 74462890625*q**3*r*s**5 + 121093750000*p*q*r**2*s**5 - 2197265625*p**4*s**6 - 92529296875*p*q**2*s**6 + 15380859375*p**2*r*s**6 - 31738281250*r**2*s**6 + 54931640625*q*s**7
b[4][1] = 200*p**8*q**6 + 2950*p**5*q**8 + 10800*p**2*q**10 - 1800*p**9*q**4*r - 49650*p**6*q**6*r - 403375*p**3*q**8*r - 999000*q**10*r + 4050*p**10*q**2*r**2 + 236625*p**7*q**4*r**2 + 3109500*p**4*q**6*r**2 + 11463750*p*q**8*r**2 - 331500*p**8*q**2*r**3 - 7818125*p**5*q**4*r**3 - 41411250*p**2*q**6*r**3 + 4782500*p**6*q**2*r**4 + 47475000*p**3*q**4*r**4 - 16728125*q**6*r**4 - 8700000*p**4*q**2*r**5 + 81750000*p*q**4*r**5 - 135000000*p**2*q**2*r**6 + 5400*p**10*q**3*s + 144200*p**7*q**5*s + 939375*p**4*q**7*s + 1012500*p*q**9*s - 24300*p**11*q*r*s - 1169250*p**8*q**3*r*s - 14027250*p**5*q**5*r*s - 44446875*p**2*q**7*r*s + 2011500*p**9*q*r**2*s + 49330625*p**6*q**3*r**2*s + 272009375*p**3*q**5*r**2*s + 104062500*q**7*r**2*s - 34660000*p**7*q*r**3*s - 455062500*p**4*q**3*r**3*s - 625906250*p*q**5*r**3*s + 210200000*p**5*q*r**4*s + 1298750000*p**2*q**3*r**4*s - 240000000*p**3*q*r**5*s + 225000000*q**3*r**5*s + 36450*p**12*s**2 + 1231875*p**9*q**2*s**2 + 10712500*p**6*q**4*s**2 + 21718750*p**3*q**6*s**2 + 16875000*q**8*s**2 - 2814750*p**10*r*s**2 - 67612500*p**7*q**2*r*s**2 - 345156250*p**4*q**4*r*s**2 - 283125000*p*q**6*r*s**2 + 51300000*p**8*r**2*s**2 + 734531250*p**5*q**2*r**2*s**2 + 1267187500*p**2*q**4*r**2*s**2 - 384312500*p**6*r**3*s**2 - 3912500000*p**3*q**2*r**3*s**2 - 1822265625*q**4*r**3*s**2 + 1112500000*p**4*r**4*s**2 + 2437500000*p*q**2*r**4*s**2 - 1125000000*p**2*r**5*s**2 - 72578125*p**5*q**3*s**3 - 189296875*p**2*q**5*s**3 + 127265625*p**6*q*r*s**3 + 1415625000*p**3*q**3*r*s**3 + 1229687500*q**5*r*s**3 + 1448437500*p**4*q*r**2*s**3 + 2218750000*p*q**3*r**2*s**3 - 4031250000*p**2*q*r**3*s**3 + 5625000000*q*r**4*s**3 - 132890625*p**7*s**4 - 529296875*p**4*q**2*s**4 - 175781250*p*q**4*s**4 - 401953125*p**5*r*s**4 - 4482421875*p**2*q**2*r*s**4 + 4140625000*p**3*r**2*s**4 - 10498046875*q**2*r**2*s**4 - 7031250000*p*r**3*s**4 + 1220703125*p**3*q*s**5 + 1953125000*q**3*s**5 + 14160156250*p*q*r*s**5 - 1708984375*p**2*s**6 - 3662109375*r*s**6
b[4][0] = -4600*p**6*q**6 - 67850*p**3*q**8 - 248400*q**10 + 38900*p**7*q**4*r + 679575*p**4*q**6*r + 2866500*p*q**8*r - 81900*p**8*q**2*r**2 - 2009750*p**5*q**4*r**2 - 10783750*p**2*q**6*r**2 + 1478750*p**6*q**2*r**3 + 14165625*p**3*q**4*r**3 - 2743750*q**6*r**3 - 5450000*p**4*q**2*r**4 + 12687500*p*q**4*r**4 - 22500000*p**2*q**2*r**5 - 101700*p**8*q**3*s - 1700975*p**5*q**5*s - 7061250*p**2*q**7*s + 423900*p**9*q*r*s + 9292375*p**6*q**3*r*s + 50438750*p**3*q**5*r*s + 20475000*q**7*r*s - 7852500*p**7*q*r**2*s - 87765625*p**4*q**3*r**2*s - 121609375*p*q**5*r**2*s + 47700000*p**5*q*r**3*s + 264687500*p**2*q**3*r**3*s - 65000000*p**3*q*r**4*s + 37500000*q**3*r**4*s - 534600*p**10*s**2 - 10344375*p**7*q**2*s**2 - 54859375*p**4*q**4*s**2 - 40312500*p*q**6*s**2 + 10158750*p**8*r*s**2 + 117778125*p**5*q**2*r*s**2 + 192421875*p**2*q**4*r*s**2 - 70593750*p**6*r**2*s**2 - 685312500*p**3*q**2*r**2*s**2 - 334375000*q**4*r**2*s**2 + 193750000*p**4*r**3*s**2 + 500000000*p*q**2*r**3*s**2 - 187500000*p**2*r**4*s**2 + 8437500*p**6*q*s**3 + 159218750*p**3*q**3*s**3 + 220625000*q**5*s**3 + 353828125*p**4*q*r*s**3 + 412500000*p*q**3*r*s**3 - 1023437500*p**2*q*r**2*s**3 + 937500000*q*r**3*s**3 - 206015625*p**5*s**4 - 701171875*p**2*q**2*s**4 + 998046875*p**3*r*s**4 - 1308593750*q**2*r*s**4 - 1367187500*p*r**2*s**4 + 1708984375*p*q*s**5 - 976562500*s**6
return b
@property
def o(self):
p, q, r, s = self.p, self.q, self.r, self.s
o = [0]*6
o[5] = -1600*p**10*q**10 - 23600*p**7*q**12 - 86400*p**4*q**14 + 24800*p**11*q**8*r + 419200*p**8*q**10*r + 1850450*p**5*q**12*r + 896400*p**2*q**14*r - 138800*p**12*q**6*r**2 - 2921900*p**9*q**8*r**2 - 17295200*p**6*q**10*r**2 - 27127750*p**3*q**12*r**2 - 26076600*q**14*r**2 + 325800*p**13*q**4*r**3 + 9993850*p**10*q**6*r**3 + 88010500*p**7*q**8*r**3 + 274047650*p**4*q**10*r**3 + 410171400*p*q**12*r**3 - 259200*p**14*q**2*r**4 - 17147100*p**11*q**4*r**4 - 254289150*p**8*q**6*r**4 - 1318548225*p**5*q**8*r**4 - 2633598475*p**2*q**10*r**4 + 12636000*p**12*q**2*r**5 + 388911000*p**9*q**4*r**5 + 3269704725*p**6*q**6*r**5 + 8791192300*p**3*q**8*r**5 + 93560575*q**10*r**5 - 228361600*p**10*q**2*r**6 - 3951199200*p**7*q**4*r**6 - 16276981100*p**4*q**6*r**6 - 1597227000*p*q**8*r**6 + 1947899200*p**8*q**2*r**7 + 17037648000*p**5*q**4*r**7 + 8919740000*p**2*q**6*r**7 - 7672160000*p**6*q**2*r**8 - 15496000000*p**3*q**4*r**8 + 4224000000*q**6*r**8 + 9968000000*p**4*q**2*r**9 - 8640000000*p*q**4*r**9 + 4800000000*p**2*q**2*r**10 - 55200*p**12*q**7*s - 685600*p**9*q**9*s + 1028250*p**6*q**11*s + 37650000*p**3*q**13*s + 111375000*q**15*s + 583200*p**13*q**5*r*s + 9075600*p**10*q**7*r*s - 883150*p**7*q**9*r*s - 506830750*p**4*q**11*r*s - 1793137500*p*q**13*r*s - 1852200*p**14*q**3*r**2*s - 41435250*p**11*q**5*r**2*s - 80566700*p**8*q**7*r**2*s + 2485673600*p**5*q**9*r**2*s + 11442286125*p**2*q**11*r**2*s + 1555200*p**15*q*r**3*s + 80846100*p**12*q**3*r**3*s + 564906800*p**9*q**5*r**3*s - 4493012400*p**6*q**7*r**3*s - 35492391250*p**3*q**9*r**3*s - 789931875*q**11*r**3*s - 71766000*p**13*q*r**4*s - 1551149200*p**10*q**3*r**4*s - 1773437900*p**7*q**5*r**4*s + 51957593125*p**4*q**7*r**4*s + 14964765625*p*q**9*r**4*s + 1231569600*p**11*q*r**5*s + 12042977600*p**8*q**3*r**5*s - 27151011200*p**5*q**5*r**5*s - 88080610000*p**2*q**7*r**5*s - 9912995200*p**9*q*r**6*s - 29448104000*p**6*q**3*r**6*s + 144954840000*p**3*q**5*r**6*s - 44601300000*q**7*r**6*s + 35453760000*p**7*q*r**7*s - 
63264000000*p**4*q**3*r**7*s + 60544000000*p*q**5*r**7*s - 30048000000*p**5*q*r**8*s + 37040000000*p**2*q**3*r**8*s - 60800000000*p**3*q*r**9*s - 48000000000*q**3*r**9*s - 615600*p**14*q**4*s**2 - 10524500*p**11*q**6*s**2 - 33831250*p**8*q**8*s**2 + 222806250*p**5*q**10*s**2 + 1099687500*p**2*q**12*s**2 + 3353400*p**15*q**2*r*s**2 + 74269350*p**12*q**4*r*s**2 + 276445750*p**9*q**6*r*s**2 - 2618600000*p**6*q**8*r*s**2 - 14473243750*p**3*q**10*r*s**2 + 1383750000*q**12*r*s**2 - 2332800*p**16*r**2*s**2 - 132750900*p**13*q**2*r**2*s**2 - 900775150*p**10*q**4*r**2*s**2 + 8249244500*p**7*q**6*r**2*s**2 + 59525796875*p**4*q**8*r**2*s**2 - 40292868750*p*q**10*r**2*s**2 + 128304000*p**14*r**3*s**2 + 3160232100*p**11*q**2*r**3*s**2 + 8329580000*p**8*q**4*r**3*s**2 - 45558458750*p**5*q**6*r**3*s**2 + 297252890625*p**2*q**8*r**3*s**2 - 2769854400*p**12*r**4*s**2 - 37065970000*p**9*q**2*r**4*s**2 - 90812546875*p**6*q**4*r**4*s**2 - 627902000000*p**3*q**6*r**4*s**2 + 181347421875*q**8*r**4*s**2 + 30946932800*p**10*r**5*s**2 + 249954680000*p**7*q**2*r**5*s**2 + 802954812500*p**4*q**4*r**5*s**2 - 80900000000*p*q**6*r**5*s**2 - 192137320000*p**8*r**6*s**2 - 932641600000*p**5*q**2*r**6*s**2 - 943242500000*p**2*q**4*r**6*s**2 + 658412000000*p**6*r**7*s**2 + 1930720000000*p**3*q**2*r**7*s**2 + 593800000000*q**4*r**7*s**2 - 1162800000000*p**4*r**8*s**2 - 280000000000*p*q**2*r**8*s**2 + 840000000000*p**2*r**9*s**2 - 2187000*p**16*q*s**3 - 47418750*p**13*q**3*s**3 - 180618750*p**10*q**5*s**3 + 2231250000*p**7*q**7*s**3 + 17857734375*p**4*q**9*s**3 + 29882812500*p*q**11*s**3 + 24664500*p**14*q*r*s**3 - 853368750*p**11*q**3*r*s**3 - 25939693750*p**8*q**5*r*s**3 - 177541562500*p**5*q**7*r*s**3 - 297978828125*p**2*q**9*r*s**3 - 153468000*p**12*q*r**2*s**3 + 30188125000*p**9*q**3*r**2*s**3 + 344049821875*p**6*q**5*r**2*s**3 + 534026875000*p**3*q**7*r**2*s**3 - 340726484375*q**9*r**2*s**3 - 9056190000*p**10*q*r**3*s**3 - 322314687500*p**7*q**3*r**3*s**3 - 769632109375*p**4*q**5*r**3*s**3 - 
83276875000*p*q**7*r**3*s**3 + 164061000000*p**8*q*r**4*s**3 + 1381358750000*p**5*q**3*r**4*s**3 + 3088020000000*p**2*q**5*r**4*s**3 - 1267655000000*p**6*q*r**5*s**3 - 7642630000000*p**3*q**3*r**5*s**3 - 2759877500000*q**5*r**5*s**3 + 4597760000000*p**4*q*r**6*s**3 + 1846200000000*p*q**3*r**6*s**3 - 7006000000000*p**2*q*r**7*s**3 - 1200000000000*q*r**8*s**3 + 18225000*p**15*s**4 + 1328906250*p**12*q**2*s**4 + 24729140625*p**9*q**4*s**4 + 169467187500*p**6*q**6*s**4 + 413281250000*p**3*q**8*s**4 + 223828125000*q**10*s**4 + 710775000*p**13*r*s**4 - 18611015625*p**10*q**2*r*s**4 - 314344375000*p**7*q**4*r*s**4 - 828439843750*p**4*q**6*r*s**4 + 460937500000*p*q**8*r*s**4 - 25674975000*p**11*r**2*s**4 - 52223515625*p**8*q**2*r**2*s**4 - 387160000000*p**5*q**4*r**2*s**4 - 4733680078125*p**2*q**6*r**2*s**4 + 343911875000*p**9*r**3*s**4 + 3328658359375*p**6*q**2*r**3*s**4 + 16532406250000*p**3*q**4*r**3*s**4 + 5980613281250*q**6*r**3*s**4 - 2295497500000*p**7*r**4*s**4 - 14809820312500*p**4*q**2*r**4*s**4 - 6491406250000*p*q**4*r**4*s**4 + 7768470000000*p**5*r**5*s**4 + 34192562500000*p**2*q**2*r**5*s**4 - 11859000000000*p**3*r**6*s**4 + 10530000000000*q**2*r**6*s**4 + 6000000000000*p*r**7*s**4 + 11453906250*p**11*q*s**5 + 149765625000*p**8*q**3*s**5 + 545537109375*p**5*q**5*s**5 + 527343750000*p**2*q**7*s**5 - 371313281250*p**9*q*r*s**5 - 3461455078125*p**6*q**3*r*s**5 - 7920878906250*p**3*q**5*r*s**5 - 4747314453125*q**7*r*s**5 + 2417815625000*p**7*q*r**2*s**5 + 5465576171875*p**4*q**3*r**2*s**5 + 5937128906250*p*q**5*r**2*s**5 - 10661156250000*p**5*q*r**3*s**5 - 63574218750000*p**2*q**3*r**3*s**5 + 24059375000000*p**3*q*r**4*s**5 - 33023437500000*q**3*r**4*s**5 - 43125000000000*p*q*r**5*s**5 + 94394531250*p**10*s**6 + 1097167968750*p**7*q**2*s**6 + 2829833984375*p**4*q**4*s**6 - 1525878906250*p*q**6*s**6 + 2724609375*p**8*r*s**6 + 13998535156250*p**5*q**2*r*s**6 + 57094482421875*p**2*q**4*r*s**6 - 8512509765625*p**6*r**2*s**6 - 37941406250000*p**3*q**2*r**2*s**6 + 
33191894531250*q**4*r**2*s**6 + 50534179687500*p**4*r**3*s**6 + 156656250000000*p*q**2*r**3*s**6 - 85023437500000*p**2*r**4*s**6 + 10125000000000*r**5*s**6 - 2717285156250*p**6*q*s**7 - 11352539062500*p**3*q**3*s**7 - 2593994140625*q**5*s**7 - 47154541015625*p**4*q*r*s**7 - 160644531250000*p*q**3*r*s**7 + 142500000000000*p**2*q*r**2*s**7 - 26757812500000*q*r**3*s**7 - 4364013671875*p**5*s**8 - 94604492187500*p**2*q**2*s**8 + 114379882812500*p**3*r*s**8 + 51116943359375*q**2*r*s**8 - 346435546875000*p*r**2*s**8 + 476837158203125*p*q*s**9 - 476837158203125*s**10
o[4] = 1600*p**11*q**8 + 20800*p**8*q**10 + 45100*p**5*q**12 - 151200*p**2*q**14 - 19200*p**12*q**6*r - 293200*p**9*q**8*r - 794600*p**6*q**10*r + 2634675*p**3*q**12*r + 2640600*q**14*r + 75600*p**13*q**4*r**2 + 1529100*p**10*q**6*r**2 + 6233350*p**7*q**8*r**2 - 12013350*p**4*q**10*r**2 - 29069550*p*q**12*r**2 - 97200*p**14*q**2*r**3 - 3562500*p**11*q**4*r**3 - 26984900*p**8*q**6*r**3 - 15900325*p**5*q**8*r**3 + 76267100*p**2*q**10*r**3 + 3272400*p**12*q**2*r**4 + 59486850*p**9*q**4*r**4 + 221270075*p**6*q**6*r**4 + 74065250*p**3*q**8*r**4 - 300564375*q**10*r**4 - 45569400*p**10*q**2*r**5 - 438666000*p**7*q**4*r**5 - 444821250*p**4*q**6*r**5 + 2448256250*p*q**8*r**5 + 290640000*p**8*q**2*r**6 + 855850000*p**5*q**4*r**6 - 5741875000*p**2*q**6*r**6 - 644000000*p**6*q**2*r**7 + 5574000000*p**3*q**4*r**7 + 4643000000*q**6*r**7 - 1696000000*p**4*q**2*r**8 - 12660000000*p*q**4*r**8 + 7200000000*p**2*q**2*r**9 + 43200*p**13*q**5*s + 572000*p**10*q**7*s - 59800*p**7*q**9*s - 24174625*p**4*q**11*s - 74587500*p*q**13*s - 324000*p**14*q**3*r*s - 5531400*p**11*q**5*r*s - 3712100*p**8*q**7*r*s + 293009275*p**5*q**9*r*s + 1115548875*p**2*q**11*r*s + 583200*p**15*q*r**2*s + 18343800*p**12*q**3*r**2*s + 77911100*p**9*q**5*r**2*s - 957488825*p**6*q**7*r**2*s - 5449661250*p**3*q**9*r**2*s + 960120000*q**11*r**2*s - 23684400*p**13*q*r**3*s - 373761900*p**10*q**3*r**3*s - 27944975*p**7*q**5*r**3*s + 10375740625*p**4*q**7*r**3*s - 4649093750*p*q**9*r**3*s + 395816400*p**11*q*r**4*s + 2910968000*p**8*q**3*r**4*s - 9126162500*p**5*q**5*r**4*s - 11696118750*p**2*q**7*r**4*s - 3028640000*p**9*q*r**5*s - 3251550000*p**6*q**3*r**5*s + 47914250000*p**3*q**5*r**5*s - 30255625000*q**7*r**5*s + 9304000000*p**7*q*r**6*s - 42970000000*p**4*q**3*r**6*s + 31475000000*p*q**5*r**6*s + 2176000000*p**5*q*r**7*s + 62100000000*p**2*q**3*r**7*s - 43200000000*p**3*q*r**8*s - 72000000000*q**3*r**8*s + 291600*p**15*q**2*s**2 + 2702700*p**12*q**4*s**2 - 38692250*p**9*q**6*s**2 - 538903125*p**6*q**8*s**2 - 
1613112500*p**3*q**10*s**2 + 320625000*q**12*s**2 - 874800*p**16*r*s**2 - 14166900*p**13*q**2*r*s**2 + 193284900*p**10*q**4*r*s**2 + 3688520500*p**7*q**6*r*s**2 + 11613390625*p**4*q**8*r*s**2 - 15609881250*p*q**10*r*s**2 + 44031600*p**14*r**2*s**2 + 482345550*p**11*q**2*r**2*s**2 - 2020881875*p**8*q**4*r**2*s**2 - 7407026250*p**5*q**6*r**2*s**2 + 136175750000*p**2*q**8*r**2*s**2 - 1000884600*p**12*r**3*s**2 - 8888950000*p**9*q**2*r**3*s**2 - 30101703125*p**6*q**4*r**3*s**2 - 319761000000*p**3*q**6*r**3*s**2 + 51519218750*q**8*r**3*s**2 + 12622395000*p**10*r**4*s**2 + 97032450000*p**7*q**2*r**4*s**2 + 469929218750*p**4*q**4*r**4*s**2 + 291342187500*p*q**6*r**4*s**2 - 96382000000*p**8*r**5*s**2 - 598070000000*p**5*q**2*r**5*s**2 - 1165021875000*p**2*q**4*r**5*s**2 + 446500000000*p**6*r**6*s**2 + 1651500000000*p**3*q**2*r**6*s**2 + 789375000000*q**4*r**6*s**2 - 1152000000000*p**4*r**7*s**2 - 600000000000*p*q**2*r**7*s**2 + 1260000000000*p**2*r**8*s**2 - 24786000*p**14*q*s**3 - 660487500*p**11*q**3*s**3 - 5886356250*p**8*q**5*s**3 - 18137187500*p**5*q**7*s**3 - 5120546875*p**2*q**9*s**3 + 827658000*p**12*q*r*s**3 + 13343062500*p**9*q**3*r*s**3 + 39782068750*p**6*q**5*r*s**3 - 111288437500*p**3*q**7*r*s**3 - 15438750000*q**9*r*s**3 - 14540782500*p**10*q*r**2*s**3 - 135889750000*p**7*q**3*r**2*s**3 - 176892578125*p**4*q**5*r**2*s**3 - 934462656250*p*q**7*r**2*s**3 + 171669250000*p**8*q*r**3*s**3 + 1164538125000*p**5*q**3*r**3*s**3 + 3192346406250*p**2*q**5*r**3*s**3 - 1295476250000*p**6*q*r**4*s**3 - 6540712500000*p**3*q**3*r**4*s**3 - 2957828125000*q**5*r**4*s**3 + 5366750000000*p**4*q*r**5*s**3 + 3165000000000*p*q**3*r**5*s**3 - 8862500000000*p**2*q*r**6*s**3 - 1800000000000*q*r**7*s**3 + 236925000*p**13*s**4 + 8895234375*p**10*q**2*s**4 + 106180781250*p**7*q**4*s**4 + 474221875000*p**4*q**6*s**4 + 616210937500*p*q**8*s**4 - 6995868750*p**11*r*s**4 - 184190625000*p**8*q**2*r*s**4 - 1299254453125*p**5*q**4*r*s**4 - 2475458593750*p**2*q**6*r*s**4 + 
63049218750*p**9*r**2*s**4 + 1646791484375*p**6*q**2*r**2*s**4 + 9086886718750*p**3*q**4*r**2*s**4 + 4673421875000*q**6*r**2*s**4 - 215665000000*p**7*r**3*s**4 - 7864589843750*p**4*q**2*r**3*s**4 - 5987890625000*p*q**4*r**3*s**4 + 594843750000*p**5*r**4*s**4 + 27791171875000*p**2*q**2*r**4*s**4 - 3881250000000*p**3*r**5*s**4 + 12203125000000*q**2*r**5*s**4 + 10312500000000*p*r**6*s**4 - 34720312500*p**9*q*s**5 - 545126953125*p**6*q**3*s**5 - 2176425781250*p**3*q**5*s**5 - 2792968750000*q**7*s**5 - 1395703125*p**7*q*r*s**5 - 1957568359375*p**4*q**3*r*s**5 + 5122636718750*p*q**5*r*s**5 + 858210937500*p**5*q*r**2*s**5 - 42050097656250*p**2*q**3*r**2*s**5 + 7088281250000*p**3*q*r**3*s**5 - 25974609375000*q**3*r**3*s**5 - 69296875000000*p*q*r**4*s**5 + 384697265625*p**8*s**6 + 6403320312500*p**5*q**2*s**6 + 16742675781250*p**2*q**4*s**6 - 3467080078125*p**6*r*s**6 + 11009765625000*p**3*q**2*r*s**6 + 16451660156250*q**4*r*s**6 + 6979003906250*p**4*r**2*s**6 + 145403320312500*p*q**2*r**2*s**6 + 4076171875000*p**2*r**3*s**6 + 22265625000000*r**4*s**6 - 21915283203125*p**4*q*s**7 - 86608886718750*p*q**3*s**7 - 22785644531250*p**2*q*r*s**7 - 103466796875000*q*r**2*s**7 + 18798828125000*p**3*s**8 + 106048583984375*q**2*s**8 + 17761230468750*p*r*s**8
o[3] = 2800*p**9*q**8 + 55700*p**6*q**10 + 363600*p**3*q**12 + 777600*q**14 - 27200*p**10*q**6*r - 700200*p**7*q**8*r - 5726550*p**4*q**10*r - 15066000*p*q**12*r + 74700*p**11*q**4*r**2 + 2859575*p**8*q**6*r**2 + 31175725*p**5*q**8*r**2 + 103147650*p**2*q**10*r**2 - 40500*p**12*q**2*r**3 - 4274400*p**9*q**4*r**3 - 76065825*p**6*q**6*r**3 - 365623750*p**3*q**8*r**3 - 132264000*q**10*r**3 + 2192400*p**10*q**2*r**4 + 92562500*p**7*q**4*r**4 + 799193875*p**4*q**6*r**4 + 1188193125*p*q**8*r**4 - 41231500*p**8*q**2*r**5 - 914210000*p**5*q**4*r**5 - 3318853125*p**2*q**6*r**5 + 398850000*p**6*q**2*r**6 + 3944000000*p**3*q**4*r**6 + 2211312500*q**6*r**6 - 1817000000*p**4*q**2*r**7 - 6720000000*p*q**4*r**7 + 3900000000*p**2*q**2*r**8 + 75600*p**11*q**5*s + 1823100*p**8*q**7*s + 14534150*p**5*q**9*s + 38265750*p**2*q**11*s - 394200*p**12*q**3*r*s - 11453850*p**9*q**5*r*s - 101213000*p**6*q**7*r*s - 223565625*p**3*q**9*r*s + 415125000*q**11*r*s + 243000*p**13*q*r**2*s + 13654575*p**10*q**3*r**2*s + 163811725*p**7*q**5*r**2*s + 173461250*p**4*q**7*r**2*s - 3008671875*p*q**9*r**2*s - 2016900*p**11*q*r**3*s - 86576250*p**8*q**3*r**3*s - 324146625*p**5*q**5*r**3*s + 3378506250*p**2*q**7*r**3*s - 89211000*p**9*q*r**4*s - 55207500*p**6*q**3*r**4*s + 1493950000*p**3*q**5*r**4*s - 12573609375*q**7*r**4*s + 1140100000*p**7*q*r**5*s + 42500000*p**4*q**3*r**5*s + 21511250000*p*q**5*r**5*s - 4058000000*p**5*q*r**6*s + 6725000000*p**2*q**3*r**6*s - 1400000000*p**3*q*r**7*s - 39000000000*q**3*r**7*s + 510300*p**13*q**2*s**2 + 4814775*p**10*q**4*s**2 - 70265125*p**7*q**6*s**2 - 1016484375*p**4*q**8*s**2 - 3221100000*p*q**10*s**2 - 364500*p**14*r*s**2 + 30314250*p**11*q**2*r*s**2 + 1106765625*p**8*q**4*r*s**2 + 10984203125*p**5*q**6*r*s**2 + 33905812500*p**2*q**8*r*s**2 - 37980900*p**12*r**2*s**2 - 2142905625*p**9*q**2*r**2*s**2 - 26896125000*p**6*q**4*r**2*s**2 - 95551328125*p**3*q**6*r**2*s**2 + 11320312500*q**8*r**2*s**2 + 1743781500*p**10*r**3*s**2 + 35432262500*p**7*q**2*r**3*s**2 + 
177855859375*p**4*q**4*r**3*s**2 + 121260546875*p*q**6*r**3*s**2 - 25943162500*p**8*r**4*s**2 - 249165500000*p**5*q**2*r**4*s**2 - 461739453125*p**2*q**4*r**4*s**2 + 177823750000*p**6*r**5*s**2 + 726225000000*p**3*q**2*r**5*s**2 + 404195312500*q**4*r**5*s**2 - 565875000000*p**4*r**6*s**2 - 407500000000*p*q**2*r**6*s**2 + 682500000000*p**2*r**7*s**2 - 59140125*p**12*q*s**3 - 1290515625*p**9*q**3*s**3 - 8785071875*p**6*q**5*s**3 - 15588281250*p**3*q**7*s**3 + 17505000000*q**9*s**3 + 896062500*p**10*q*r*s**3 + 2589750000*p**7*q**3*r*s**3 - 82700156250*p**4*q**5*r*s**3 - 347683593750*p*q**7*r*s**3 + 17022656250*p**8*q*r**2*s**3 + 320923593750*p**5*q**3*r**2*s**3 + 1042116875000*p**2*q**5*r**2*s**3 - 353262812500*p**6*q*r**3*s**3 - 2212664062500*p**3*q**3*r**3*s**3 - 1252408984375*q**5*r**3*s**3 + 1967362500000*p**4*q*r**4*s**3 + 1583343750000*p*q**3*r**4*s**3 - 3560625000000*p**2*q*r**5*s**3 - 975000000000*q*r**6*s**3 + 462459375*p**11*s**4 + 14210859375*p**8*q**2*s**4 + 99521718750*p**5*q**4*s**4 + 114955468750*p**2*q**6*s**4 - 17720859375*p**9*r*s**4 - 100320703125*p**6*q**2*r*s**4 + 1021943359375*p**3*q**4*r*s**4 + 1193203125000*q**6*r*s**4 + 171371250000*p**7*r**2*s**4 - 1113390625000*p**4*q**2*r**2*s**4 - 1211474609375*p*q**4*r**2*s**4 - 274056250000*p**5*r**3*s**4 + 8285166015625*p**2*q**2*r**3*s**4 - 2079375000000*p**3*r**4*s**4 + 5137304687500*q**2*r**4*s**4 + 6187500000000*p*r**5*s**4 - 135675000000*p**7*q*s**5 - 1275244140625*p**4*q**3*s**5 - 28388671875*p*q**5*s**5 + 1015166015625*p**5*q*r*s**5 - 10584423828125*p**2*q**3*r*s**5 + 3559570312500*p**3*q*r**2*s**5 - 6929931640625*q**3*r**2*s**5 - 32304687500000*p*q*r**3*s**5 + 430576171875*p**6*s**6 + 9397949218750*p**3*q**2*s**6 + 575195312500*q**4*s**6 - 4086425781250*p**4*r*s**6 + 42183837890625*p*q**2*r*s**6 + 8156494140625*p**2*r**2*s**6 + 12612304687500*r**3*s**6 - 25513916015625*p**2*q*s**7 - 37017822265625*q*r*s**7 + 18981933593750*p*s**8
o[2] = 1600*p**10*q**6 + 9200*p**7*q**8 - 126000*p**4*q**10 - 777600*p*q**12 - 14400*p**11*q**4*r - 119300*p**8*q**6*r + 1203225*p**5*q**8*r + 9412200*p**2*q**10*r + 32400*p**12*q**2*r**2 + 417950*p**9*q**4*r**2 - 4543725*p**6*q**6*r**2 - 49008125*p**3*q**8*r**2 - 24192000*q**10*r**2 - 292050*p**10*q**2*r**3 + 8760000*p**7*q**4*r**3 + 137506625*p**4*q**6*r**3 + 225438750*p*q**8*r**3 - 4213250*p**8*q**2*r**4 - 173595625*p**5*q**4*r**4 - 653003125*p**2*q**6*r**4 + 82575000*p**6*q**2*r**5 + 838125000*p**3*q**4*r**5 + 578562500*q**6*r**5 - 421500000*p**4*q**2*r**6 - 1796250000*p*q**4*r**6 + 1050000000*p**2*q**2*r**7 + 43200*p**12*q**3*s + 807300*p**9*q**5*s + 5328225*p**6*q**7*s + 16946250*p**3*q**9*s + 29565000*q**11*s - 194400*p**13*q*r*s - 5505300*p**10*q**3*r*s - 49886700*p**7*q**5*r*s - 178821875*p**4*q**7*r*s - 222750000*p*q**9*r*s + 6814800*p**11*q*r**2*s + 120525625*p**8*q**3*r**2*s + 526694500*p**5*q**5*r**2*s + 84065625*p**2*q**7*r**2*s - 123670500*p**9*q*r**3*s - 1106731875*p**6*q**3*r**3*s - 669556250*p**3*q**5*r**3*s - 2869265625*q**7*r**3*s + 1004350000*p**7*q*r**4*s + 3384375000*p**4*q**3*r**4*s + 5665625000*p*q**5*r**4*s - 3411000000*p**5*q*r**5*s - 418750000*p**2*q**3*r**5*s + 1700000000*p**3*q*r**6*s - 10500000000*q**3*r**6*s + 291600*p**14*s**2 + 9829350*p**11*q**2*s**2 + 114151875*p**8*q**4*s**2 + 522169375*p**5*q**6*s**2 + 716906250*p**2*q**8*s**2 - 18625950*p**12*r*s**2 - 387703125*p**9*q**2*r*s**2 - 2056109375*p**6*q**4*r*s**2 - 760203125*p**3*q**6*r*s**2 + 3071250000*q**8*r*s**2 + 512419500*p**10*r**2*s**2 + 5859053125*p**7*q**2*r**2*s**2 + 12154062500*p**4*q**4*r**2*s**2 + 15931640625*p*q**6*r**2*s**2 - 6598393750*p**8*r**3*s**2 - 43549625000*p**5*q**2*r**3*s**2 - 82011328125*p**2*q**4*r**3*s**2 + 43538125000*p**6*r**4*s**2 + 160831250000*p**3*q**2*r**4*s**2 + 99070312500*q**4*r**4*s**2 - 141812500000*p**4*r**5*s**2 - 117500000000*p*q**2*r**5*s**2 + 183750000000*p**2*r**6*s**2 - 154608750*p**10*q*s**3 - 3309468750*p**7*q**3*s**3 - 
20834140625*p**4*q**5*s**3 - 34731562500*p*q**7*s**3 + 5970375000*p**8*q*r*s**3 + 68533281250*p**5*q**3*r*s**3 + 142698281250*p**2*q**5*r*s**3 - 74509140625*p**6*q*r**2*s**3 - 389148437500*p**3*q**3*r**2*s**3 - 270937890625*q**5*r**2*s**3 + 366696875000*p**4*q*r**3*s**3 + 400031250000*p*q**3*r**3*s**3 - 735156250000*p**2*q*r**4*s**3 - 262500000000*q*r**5*s**3 + 371250000*p**9*s**4 + 21315000000*p**6*q**2*s**4 + 179515625000*p**3*q**4*s**4 + 238406250000*q**6*s**4 - 9071015625*p**7*r*s**4 - 268945312500*p**4*q**2*r*s**4 - 379785156250*p*q**4*r*s**4 + 140262890625*p**5*r**2*s**4 + 1486259765625*p**2*q**2*r**2*s**4 - 806484375000*p**3*r**3*s**4 + 1066210937500*q**2*r**3*s**4 + 1722656250000*p*r**4*s**4 - 125648437500*p**5*q*s**5 - 1236279296875*p**2*q**3*s**5 + 1267871093750*p**3*q*r*s**5 - 1044677734375*q**3*r*s**5 - 6630859375000*p*q*r**2*s**5 + 160888671875*p**4*s**6 + 6352294921875*p*q**2*s**6 - 708740234375*p**2*r*s**6 + 3901367187500*r**2*s**6 - 8050537109375*q*s**7
o[1] = 2800*p**8*q**6 + 41300*p**5*q**8 + 151200*p**2*q**10 - 25200*p**9*q**4*r - 542600*p**6*q**6*r - 3397875*p**3*q**8*r - 5751000*q**10*r + 56700*p**10*q**2*r**2 + 1972125*p**7*q**4*r**2 + 18624250*p**4*q**6*r**2 + 50253750*p*q**8*r**2 - 1701000*p**8*q**2*r**3 - 32630625*p**5*q**4*r**3 - 139868750*p**2*q**6*r**3 + 18162500*p**6*q**2*r**4 + 177125000*p**3*q**4*r**4 + 121734375*q**6*r**4 - 100500000*p**4*q**2*r**5 - 386250000*p*q**4*r**5 + 225000000*p**2*q**2*r**6 + 75600*p**10*q**3*s + 1708800*p**7*q**5*s + 12836875*p**4*q**7*s + 32062500*p*q**9*s - 340200*p**11*q*r*s - 10185750*p**8*q**3*r*s - 97502750*p**5*q**5*r*s - 301640625*p**2*q**7*r*s + 7168500*p**9*q*r**2*s + 135960625*p**6*q**3*r**2*s + 587471875*p**3*q**5*r**2*s - 384750000*q**7*r**2*s - 29325000*p**7*q*r**3*s - 320625000*p**4*q**3*r**3*s + 523437500*p*q**5*r**3*s - 42000000*p**5*q*r**4*s + 343750000*p**2*q**3*r**4*s + 150000000*p**3*q*r**5*s - 2250000000*q**3*r**5*s + 510300*p**12*s**2 + 12808125*p**9*q**2*s**2 + 107062500*p**6*q**4*s**2 + 270312500*p**3*q**6*s**2 - 168750000*q**8*s**2 - 2551500*p**10*r*s**2 - 5062500*p**7*q**2*r*s**2 + 712343750*p**4*q**4*r*s**2 + 4788281250*p*q**6*r*s**2 - 256837500*p**8*r**2*s**2 - 3574812500*p**5*q**2*r**2*s**2 - 14967968750*p**2*q**4*r**2*s**2 + 4040937500*p**6*r**3*s**2 + 26400000000*p**3*q**2*r**3*s**2 + 17083984375*q**4*r**3*s**2 - 21812500000*p**4*r**4*s**2 - 24375000000*p*q**2*r**4*s**2 + 39375000000*p**2*r**5*s**2 - 127265625*p**5*q**3*s**3 - 680234375*p**2*q**5*s**3 - 2048203125*p**6*q*r*s**3 - 18794531250*p**3*q**3*r*s**3 - 25050000000*q**5*r*s**3 + 26621875000*p**4*q*r**2*s**3 + 37007812500*p*q**3*r**2*s**3 - 105468750000*p**2*q*r**3*s**3 - 56250000000*q*r**4*s**3 + 1124296875*p**7*s**4 + 9251953125*p**4*q**2*s**4 - 8007812500*p*q**4*s**4 - 4004296875*p**5*r*s**4 + 179931640625*p**2*q**2*r*s**4 - 75703125000*p**3*r**2*s**4 + 133447265625*q**2*r**2*s**4 + 363281250000*p*r**3*s**4 - 91552734375*p**3*q*s**5 - 19531250000*q**3*s**5 - 751953125000*p*q*r*s**5 
+ 157958984375*p**2*s**6 + 748291015625*r*s**6
o[0] = -14400*p**6*q**6 - 212400*p**3*q**8 - 777600*q**10 + 92100*p**7*q**4*r + 1689675*p**4*q**6*r + 7371000*p*q**8*r - 122850*p**8*q**2*r**2 - 3735250*p**5*q**4*r**2 - 22432500*p**2*q**6*r**2 + 2298750*p**6*q**2*r**3 + 29390625*p**3*q**4*r**3 + 18000000*q**6*r**3 - 17750000*p**4*q**2*r**4 - 62812500*p*q**4*r**4 + 37500000*p**2*q**2*r**5 - 51300*p**8*q**3*s - 768025*p**5*q**5*s - 2801250*p**2*q**7*s - 275400*p**9*q*r*s - 5479875*p**6*q**3*r*s - 35538750*p**3*q**5*r*s - 68850000*q**7*r*s + 12757500*p**7*q*r**2*s + 133640625*p**4*q**3*r**2*s + 222609375*p*q**5*r**2*s - 108500000*p**5*q*r**3*s - 290312500*p**2*q**3*r**3*s + 275000000*p**3*q*r**4*s - 375000000*q**3*r**4*s + 1931850*p**10*s**2 + 40213125*p**7*q**2*s**2 + 253921875*p**4*q**4*s**2 + 464062500*p*q**6*s**2 - 71077500*p**8*r*s**2 - 818746875*p**5*q**2*r*s**2 - 1882265625*p**2*q**4*r*s**2 + 826031250*p**6*r**2*s**2 + 4369687500*p**3*q**2*r**2*s**2 + 3107812500*q**4*r**2*s**2 - 3943750000*p**4*r**3*s**2 - 5000000000*p*q**2*r**3*s**2 + 6562500000*p**2*r**4*s**2 - 295312500*p**6*q*s**3 - 2938906250*p**3*q**3*s**3 - 4848750000*q**5*s**3 + 3791484375*p**4*q*r*s**3 + 7556250000*p*q**3*r*s**3 - 11960937500*p**2*q*r**2*s**3 - 9375000000*q*r**3*s**3 + 1668515625*p**5*s**4 + 20447265625*p**2*q**2*s**4 - 21955078125*p**3*r*s**4 + 18984375000*q**2*r*s**4 + 67382812500*p*r**2*s**4 - 120849609375*p*q*s**5 + 157226562500*s**6
return o
@property
def a(self):
p, q, r, s = self.p, self.q, self.r, self.s
a = [0]*6
a[5] = -100*p**7*q**7 - 2175*p**4*q**9 - 10500*p*q**11 + 1100*p**8*q**5*r + 27975*p**5*q**7*r + 152950*p**2*q**9*r - 4125*p**9*q**3*r**2 - 128875*p**6*q**5*r**2 - 830525*p**3*q**7*r**2 + 59450*q**9*r**2 + 5400*p**10*q*r**3 + 243800*p**7*q**3*r**3 + 2082650*p**4*q**5*r**3 - 333925*p*q**7*r**3 - 139200*p**8*q*r**4 - 2406000*p**5*q**3*r**4 - 122600*p**2*q**5*r**4 + 1254400*p**6*q*r**5 + 3776000*p**3*q**3*r**5 + 1832000*q**5*r**5 - 4736000*p**4*q*r**6 - 6720000*p*q**3*r**6 + 6400000*p**2*q*r**7 - 900*p**9*q**4*s - 37400*p**6*q**6*s - 281625*p**3*q**8*s - 435000*q**10*s + 6750*p**10*q**2*r*s + 322300*p**7*q**4*r*s + 2718575*p**4*q**6*r*s + 4214250*p*q**8*r*s - 16200*p**11*r**2*s - 859275*p**8*q**2*r**2*s - 8925475*p**5*q**4*r**2*s - 14427875*p**2*q**6*r**2*s + 453600*p**9*r**3*s + 10038400*p**6*q**2*r**3*s + 17397500*p**3*q**4*r**3*s - 11333125*q**6*r**3*s - 4451200*p**7*r**4*s - 15850000*p**4*q**2*r**4*s + 34000000*p*q**4*r**4*s + 17984000*p**5*r**5*s - 10000000*p**2*q**2*r**5*s - 25600000*p**3*r**6*s - 8000000*q**2*r**6*s + 6075*p**11*q*s**2 - 83250*p**8*q**3*s**2 - 1282500*p**5*q**5*s**2 - 2862500*p**2*q**7*s**2 + 724275*p**9*q*r*s**2 + 9807250*p**6*q**3*r*s**2 + 28374375*p**3*q**5*r*s**2 + 22212500*q**7*r*s**2 - 8982000*p**7*q*r**2*s**2 - 39600000*p**4*q**3*r**2*s**2 - 61746875*p*q**5*r**2*s**2 - 1010000*p**5*q*r**3*s**2 - 1000000*p**2*q**3*r**3*s**2 + 78000000*p**3*q*r**4*s**2 + 30000000*q**3*r**4*s**2 + 80000000*p*q*r**5*s**2 - 759375*p**10*s**3 - 9787500*p**7*q**2*s**3 - 39062500*p**4*q**4*s**3 - 52343750*p*q**6*s**3 + 12301875*p**8*r*s**3 + 98175000*p**5*q**2*r*s**3 + 225078125*p**2*q**4*r*s**3 - 54900000*p**6*r**2*s**3 - 310000000*p**3*q**2*r**2*s**3 - 7890625*q**4*r**2*s**3 + 51250000*p**4*r**3*s**3 - 420000000*p*q**2*r**3*s**3 + 110000000*p**2*r**4*s**3 - 200000000*r**5*s**3 + 2109375*p**6*q*s**4 - 21093750*p**3*q**3*s**4 - 89843750*q**5*s**4 + 182343750*p**4*q*r*s**4 + 733203125*p*q**3*r*s**4 - 196875000*p**2*q*r**2*s**4 + 1125000000*q*r**3*s**4 - 
158203125*p**5*s**5 - 566406250*p**2*q**2*s**5 + 101562500*p**3*r*s**5 - 1669921875*q**2*r*s**5 + 1250000000*p*r**2*s**5 - 1220703125*p*q*s**6 + 6103515625*s**7
a[4] = 1000*p**5*q**7 + 7250*p**2*q**9 - 10800*p**6*q**5*r - 96900*p**3*q**7*r - 52500*q**9*r + 37400*p**7*q**3*r**2 + 470850*p**4*q**5*r**2 + 640600*p*q**7*r**2 - 39600*p**8*q*r**3 - 983600*p**5*q**3*r**3 - 2848100*p**2*q**5*r**3 + 814400*p**6*q*r**4 + 6076000*p**3*q**3*r**4 + 2308000*q**5*r**4 - 5024000*p**4*q*r**5 - 9680000*p*q**3*r**5 + 9600000*p**2*q*r**6 + 13800*p**7*q**4*s + 94650*p**4*q**6*s - 26500*p*q**8*s - 86400*p**8*q**2*r*s - 816500*p**5*q**4*r*s - 257500*p**2*q**6*r*s + 91800*p**9*r**2*s + 1853700*p**6*q**2*r**2*s + 630000*p**3*q**4*r**2*s - 8971250*q**6*r**2*s - 2071200*p**7*r**3*s - 7240000*p**4*q**2*r**3*s + 29375000*p*q**4*r**3*s + 14416000*p**5*r**4*s - 5200000*p**2*q**2*r**4*s - 30400000*p**3*r**5*s - 12000000*q**2*r**5*s + 64800*p**9*q*s**2 + 567000*p**6*q**3*s**2 + 1655000*p**3*q**5*s**2 + 6987500*q**7*s**2 + 337500*p**7*q*r*s**2 + 8462500*p**4*q**3*r*s**2 - 5812500*p*q**5*r*s**2 - 24930000*p**5*q*r**2*s**2 - 69125000*p**2*q**3*r**2*s**2 + 103500000*p**3*q*r**3*s**2 + 30000000*q**3*r**3*s**2 + 90000000*p*q*r**4*s**2 - 708750*p**8*s**3 - 5400000*p**5*q**2*s**3 + 8906250*p**2*q**4*s**3 + 18562500*p**6*r*s**3 - 625000*p**3*q**2*r*s**3 + 29687500*q**4*r*s**3 - 75000000*p**4*r**2*s**3 - 416250000*p*q**2*r**2*s**3 + 60000000*p**2*r**3*s**3 - 300000000*r**4*s**3 + 71718750*p**4*q*s**4 + 189062500*p*q**3*s**4 + 210937500*p**2*q*r*s**4 + 1187500000*q*r**2*s**4 - 187500000*p**3*s**5 - 800781250*q**2*s**5 - 390625000*p*r*s**5
a[3] = -500*p**6*q**5 - 6350*p**3*q**7 - 19800*q**9 + 3750*p**7*q**3*r + 65100*p**4*q**5*r + 264950*p*q**7*r - 6750*p**8*q*r**2 - 209050*p**5*q**3*r**2 - 1217250*p**2*q**5*r**2 + 219000*p**6*q*r**3 + 2510000*p**3*q**3*r**3 + 1098500*q**5*r**3 - 2068000*p**4*q*r**4 - 5060000*p*q**3*r**4 + 5200000*p**2*q*r**5 - 6750*p**8*q**2*s - 96350*p**5*q**4*s - 346000*p**2*q**6*s + 20250*p**9*r*s + 459900*p**6*q**2*r*s + 1828750*p**3*q**4*r*s - 2930000*q**6*r*s - 594000*p**7*r**2*s - 4301250*p**4*q**2*r**2*s + 10906250*p*q**4*r**2*s + 5252000*p**5*r**3*s - 1450000*p**2*q**2*r**3*s - 12800000*p**3*r**4*s - 6500000*q**2*r**4*s + 74250*p**7*q*s**2 + 1418750*p**4*q**3*s**2 + 5956250*p*q**5*s**2 - 4297500*p**5*q*r*s**2 - 29906250*p**2*q**3*r*s**2 + 31500000*p**3*q*r**2*s**2 + 12500000*q**3*r**2*s**2 + 35000000*p*q*r**3*s**2 + 1350000*p**6*s**3 + 6093750*p**3*q**2*s**3 + 17500000*q**4*s**3 - 7031250*p**4*r*s**3 - 127812500*p*q**2*r*s**3 + 18750000*p**2*r**2*s**3 - 162500000*r**3*s**3 + 107812500*p**2*q*s**4 + 460937500*q*r*s**4 - 214843750*p*s**5
a[2] = 1950*p**4*q**5 + 14100*p*q**7 - 14350*p**5*q**3*r - 125600*p**2*q**5*r + 27900*p**6*q*r**2 + 402250*p**3*q**3*r**2 + 288250*q**5*r**2 - 436000*p**4*q*r**3 - 1345000*p*q**3*r**3 + 1400000*p**2*q*r**4 + 9450*p**6*q**2*s - 1250*p**3*q**4*s - 465000*q**6*s - 49950*p**7*r*s - 302500*p**4*q**2*r*s + 1718750*p*q**4*r*s + 834000*p**5*r**2*s + 437500*p**2*q**2*r**2*s - 3100000*p**3*r**3*s - 1750000*q**2*r**3*s - 292500*p**5*q*s**2 - 1937500*p**2*q**3*s**2 + 3343750*p**3*q*r*s**2 + 1875000*q**3*r*s**2 + 8125000*p*q*r**2*s**2 - 1406250*p**4*s**3 - 12343750*p*q**2*s**3 + 5312500*p**2*r*s**3 - 43750000*r**2*s**3 + 74218750*q*s**4
a[1] = -300*p**5*q**3 - 2150*p**2*q**5 + 1350*p**6*q*r + 21500*p**3*q**3*r + 61500*q**5*r - 42000*p**4*q*r**2 - 290000*p*q**3*r**2 + 300000*p**2*q*r**3 - 4050*p**7*s - 45000*p**4*q**2*s - 125000*p*q**4*s + 108000*p**5*r*s + 643750*p**2*q**2*r*s - 700000*p**3*r**2*s - 375000*q**2*r**2*s - 93750*p**3*q*s**2 - 312500*q**3*s**2 + 1875000*p*q*r*s**2 - 1406250*p**2*s**3 - 9375000*r*s**3
a[0] = 1250*p**3*q**3 + 9000*q**5 - 4500*p**4*q*r - 46250*p*q**3*r + 50000*p**2*q*r**2 + 6750*p**5*s + 43750*p**2*q**2*s - 75000*p**3*r*s - 62500*q**2*r*s + 156250*p*q*s**2 - 1562500*s**3
return a
@property
def c(self):
p, q, r, s = self.p, self.q, self.r, self.s
c = [0]*6
c[5] = -40*p**5*q**11 - 270*p**2*q**13 + 700*p**6*q**9*r + 5165*p**3*q**11*r + 540*q**13*r - 4230*p**7*q**7*r**2 - 31845*p**4*q**9*r**2 + 20880*p*q**11*r**2 + 9645*p**8*q**5*r**3 + 57615*p**5*q**7*r**3 - 358255*p**2*q**9*r**3 - 1880*p**9*q**3*r**4 + 114020*p**6*q**5*r**4 + 2012190*p**3*q**7*r**4 - 26855*q**9*r**4 - 14400*p**10*q*r**5 - 470400*p**7*q**3*r**5 - 5088640*p**4*q**5*r**5 + 920*p*q**7*r**5 + 332800*p**8*q*r**6 + 5797120*p**5*q**3*r**6 + 1608000*p**2*q**5*r**6 - 2611200*p**6*q*r**7 - 7424000*p**3*q**3*r**7 - 2323200*q**5*r**7 + 8601600*p**4*q*r**8 + 9472000*p*q**3*r**8 - 10240000*p**2*q*r**9 - 3060*p**7*q**8*s - 39085*p**4*q**10*s - 132300*p*q**12*s + 36580*p**8*q**6*r*s + 520185*p**5*q**8*r*s + 1969860*p**2*q**10*r*s - 144045*p**9*q**4*r**2*s - 2438425*p**6*q**6*r**2*s - 10809475*p**3*q**8*r**2*s + 518850*q**10*r**2*s + 182520*p**10*q**2*r**3*s + 4533930*p**7*q**4*r**3*s + 26196770*p**4*q**6*r**3*s - 4542325*p*q**8*r**3*s + 21600*p**11*r**4*s - 2208080*p**8*q**2*r**4*s - 24787960*p**5*q**4*r**4*s + 10813900*p**2*q**6*r**4*s - 499200*p**9*r**5*s + 3827840*p**6*q**2*r**5*s + 9596000*p**3*q**4*r**5*s + 22662000*q**6*r**5*s + 3916800*p**7*r**6*s - 29952000*p**4*q**2*r**6*s - 90800000*p*q**4*r**6*s - 12902400*p**5*r**7*s + 87040000*p**2*q**2*r**7*s + 15360000*p**3*r**8*s + 12800000*q**2*r**8*s - 38070*p**9*q**5*s**2 - 566700*p**6*q**7*s**2 - 2574375*p**3*q**9*s**2 - 1822500*q**11*s**2 + 292815*p**10*q**3*r*s**2 + 5170280*p**7*q**5*r*s**2 + 27918125*p**4*q**7*r*s**2 + 21997500*p*q**9*r*s**2 - 573480*p**11*q*r**2*s**2 - 14566350*p**8*q**3*r**2*s**2 - 104851575*p**5*q**5*r**2*s**2 - 96448750*p**2*q**7*r**2*s**2 + 11001240*p**9*q*r**3*s**2 + 147798600*p**6*q**3*r**3*s**2 + 158632750*p**3*q**5*r**3*s**2 - 78222500*q**7*r**3*s**2 - 62819200*p**7*q*r**4*s**2 - 136160000*p**4*q**3*r**4*s**2 + 317555000*p*q**5*r**4*s**2 + 160224000*p**5*q*r**5*s**2 - 267600000*p**2*q**3*r**5*s**2 - 153600000*p**3*q*r**6*s**2 - 120000000*q**3*r**6*s**2 - 32000000*p*q*r**7*s**2 - 
127575*p**11*q**2*s**3 - 2148750*p**8*q**4*s**3 - 13652500*p**5*q**6*s**3 - 19531250*p**2*q**8*s**3 + 495720*p**12*r*s**3 + 11856375*p**9*q**2*r*s**3 + 107807500*p**6*q**4*r*s**3 + 222334375*p**3*q**6*r*s**3 + 105062500*q**8*r*s**3 - 11566800*p**10*r**2*s**3 - 216787500*p**7*q**2*r**2*s**3 - 633437500*p**4*q**4*r**2*s**3 - 504484375*p*q**6*r**2*s**3 + 90918000*p**8*r**3*s**3 + 567080000*p**5*q**2*r**3*s**3 + 692937500*p**2*q**4*r**3*s**3 - 326640000*p**6*r**4*s**3 - 339000000*p**3*q**2*r**4*s**3 + 369250000*q**4*r**4*s**3 + 560000000*p**4*r**5*s**3 + 508000000*p*q**2*r**5*s**3 - 480000000*p**2*r**6*s**3 + 320000000*r**7*s**3 - 455625*p**10*q*s**4 - 27562500*p**7*q**3*s**4 - 120593750*p**4*q**5*s**4 - 60312500*p*q**7*s**4 + 110615625*p**8*q*r*s**4 + 662984375*p**5*q**3*r*s**4 + 528515625*p**2*q**5*r*s**4 - 541687500*p**6*q*r**2*s**4 - 1262343750*p**3*q**3*r**2*s**4 - 466406250*q**5*r**2*s**4 + 633000000*p**4*q*r**3*s**4 - 1264375000*p*q**3*r**3*s**4 + 1085000000*p**2*q*r**4*s**4 - 2700000000*q*r**5*s**4 - 68343750*p**9*s**5 - 478828125*p**6*q**2*s**5 - 355468750*p**3*q**4*s**5 - 11718750*q**6*s**5 + 718031250*p**7*r*s**5 + 1658593750*p**4*q**2*r*s**5 + 2212890625*p*q**4*r*s**5 - 2855625000*p**5*r**2*s**5 - 4273437500*p**2*q**2*r**2*s**5 + 4537500000*p**3*r**3*s**5 + 8031250000*q**2*r**3*s**5 - 1750000000*p*r**4*s**5 + 1353515625*p**5*q*s**6 + 1562500000*p**2*q**3*s**6 - 3964843750*p**3*q*r*s**6 - 7226562500*q**3*r*s**6 + 1953125000*p*q*r**2*s**6 - 1757812500*p**4*s**7 - 3173828125*p*q**2*s**7 + 6445312500*p**2*r*s**7 - 3906250000*r**2*s**7 + 6103515625*q*s**8
c[4] = 40*p**6*q**9 + 110*p**3*q**11 - 1080*q**13 - 560*p**7*q**7*r - 1780*p**4*q**9*r + 17370*p*q**11*r + 2850*p**8*q**5*r**2 + 10520*p**5*q**7*r**2 - 115910*p**2*q**9*r**2 - 6090*p**9*q**3*r**3 - 25330*p**6*q**5*r**3 + 448740*p**3*q**7*r**3 + 128230*q**9*r**3 + 4320*p**10*q*r**4 + 16960*p**7*q**3*r**4 - 1143600*p**4*q**5*r**4 - 1410310*p*q**7*r**4 + 3840*p**8*q*r**5 + 1744480*p**5*q**3*r**5 + 5619520*p**2*q**5*r**5 - 1198080*p**6*q*r**6 - 10579200*p**3*q**3*r**6 - 2940800*q**5*r**6 + 8294400*p**4*q*r**7 + 13568000*p*q**3*r**7 - 15360000*p**2*q*r**8 + 840*p**8*q**6*s + 7580*p**5*q**8*s + 24420*p**2*q**10*s - 8100*p**9*q**4*r*s - 94100*p**6*q**6*r*s - 473000*p**3*q**8*r*s - 473400*q**10*r*s + 22680*p**10*q**2*r**2*s + 374370*p**7*q**4*r**2*s + 2888020*p**4*q**6*r**2*s + 5561050*p*q**8*r**2*s - 12960*p**11*r**3*s - 485820*p**8*q**2*r**3*s - 6723440*p**5*q**4*r**3*s - 23561400*p**2*q**6*r**3*s + 190080*p**9*r**4*s + 5894880*p**6*q**2*r**4*s + 50882000*p**3*q**4*r**4*s + 22411500*q**6*r**4*s - 258560*p**7*r**5*s - 46248000*p**4*q**2*r**5*s - 103800000*p*q**4*r**5*s - 3737600*p**5*r**6*s + 119680000*p**2*q**2*r**6*s + 10240000*p**3*r**7*s + 19200000*q**2*r**7*s + 7290*p**10*q**3*s**2 + 117360*p**7*q**5*s**2 + 691250*p**4*q**7*s**2 - 198750*p*q**9*s**2 - 36450*p**11*q*r*s**2 - 854550*p**8*q**3*r*s**2 - 7340700*p**5*q**5*r*s**2 - 2028750*p**2*q**7*r*s**2 + 995490*p**9*q*r**2*s**2 + 18896600*p**6*q**3*r**2*s**2 + 5026500*p**3*q**5*r**2*s**2 - 52272500*q**7*r**2*s**2 - 16636800*p**7*q*r**3*s**2 - 43200000*p**4*q**3*r**3*s**2 + 223426250*p*q**5*r**3*s**2 + 112068000*p**5*q*r**4*s**2 - 177000000*p**2*q**3*r**4*s**2 - 244000000*p**3*q*r**5*s**2 - 156000000*q**3*r**5*s**2 + 43740*p**12*s**3 + 1032750*p**9*q**2*s**3 + 8602500*p**6*q**4*s**3 + 15606250*p**3*q**6*s**3 + 39625000*q**8*s**3 - 1603800*p**10*r*s**3 - 26932500*p**7*q**2*r*s**3 - 19562500*p**4*q**4*r*s**3 - 152000000*p*q**6*r*s**3 + 25555500*p**8*r**2*s**3 + 16230000*p**5*q**2*r**2*s**3 + 42187500*p**2*q**4*r**2*s**3 - 
165660000*p**6*r**3*s**3 + 373500000*p**3*q**2*r**3*s**3 + 332937500*q**4*r**3*s**3 + 465000000*p**4*r**4*s**3 + 586000000*p*q**2*r**4*s**3 - 592000000*p**2*r**5*s**3 + 480000000*r**6*s**3 - 1518750*p**8*q*s**4 - 62531250*p**5*q**3*s**4 + 7656250*p**2*q**5*s**4 + 184781250*p**6*q*r*s**4 - 15781250*p**3*q**3*r*s**4 - 135156250*q**5*r*s**4 - 1148250000*p**4*q*r**2*s**4 - 2121406250*p*q**3*r**2*s**4 + 1990000000*p**2*q*r**3*s**4 - 3150000000*q*r**4*s**4 - 2531250*p**7*s**5 + 660937500*p**4*q**2*s**5 + 1339843750*p*q**4*s**5 - 33750000*p**5*r*s**5 - 679687500*p**2*q**2*r*s**5 + 6250000*p**3*r**2*s**5 + 6195312500*q**2*r**2*s**5 + 1125000000*p*r**3*s**5 - 996093750*p**3*q*s**6 - 3125000000*q**3*s**6 - 3222656250*p*q*r*s**6 + 1171875000*p**2*s**7 + 976562500*r*s**7
c[3] = 80*p**4*q**9 + 540*p*q**11 - 600*p**5*q**7*r - 4770*p**2*q**9*r + 1230*p**6*q**5*r**2 + 20900*p**3*q**7*r**2 + 47250*q**9*r**2 - 710*p**7*q**3*r**3 - 84950*p**4*q**5*r**3 - 526310*p*q**7*r**3 + 720*p**8*q*r**4 + 216280*p**5*q**3*r**4 + 2068020*p**2*q**5*r**4 - 198080*p**6*q*r**5 - 3703200*p**3*q**3*r**5 - 1423600*q**5*r**5 + 2860800*p**4*q*r**6 + 7056000*p*q**3*r**6 - 8320000*p**2*q*r**7 - 2720*p**6*q**6*s - 46350*p**3*q**8*s - 178200*q**10*s + 25740*p**7*q**4*r*s + 489490*p**4*q**6*r*s + 2152350*p*q**8*r*s - 61560*p**8*q**2*r**2*s - 1568150*p**5*q**4*r**2*s - 9060500*p**2*q**6*r**2*s + 24840*p**9*r**3*s + 1692380*p**6*q**2*r**3*s + 18098250*p**3*q**4*r**3*s + 9387750*q**6*r**3*s - 382560*p**7*r**4*s - 16818000*p**4*q**2*r**4*s - 49325000*p*q**4*r**4*s + 1212800*p**5*r**5*s + 64840000*p**2*q**2*r**5*s - 320000*p**3*r**6*s + 10400000*q**2*r**6*s - 36450*p**8*q**3*s**2 - 588350*p**5*q**5*s**2 - 2156250*p**2*q**7*s**2 + 123930*p**9*q*r*s**2 + 2879700*p**6*q**3*r*s**2 + 12548000*p**3*q**5*r*s**2 - 14445000*q**7*r*s**2 - 3233250*p**7*q*r**2*s**2 - 28485000*p**4*q**3*r**2*s**2 + 72231250*p*q**5*r**2*s**2 + 32093000*p**5*q*r**3*s**2 - 61275000*p**2*q**3*r**3*s**2 - 107500000*p**3*q*r**4*s**2 - 78500000*q**3*r**4*s**2 + 22000000*p*q*r**5*s**2 - 72900*p**10*s**3 - 1215000*p**7*q**2*s**3 - 2937500*p**4*q**4*s**3 + 9156250*p*q**6*s**3 + 2612250*p**8*r*s**3 + 16560000*p**5*q**2*r*s**3 - 75468750*p**2*q**4*r*s**3 - 32737500*p**6*r**2*s**3 + 169062500*p**3*q**2*r**2*s**3 + 121718750*q**4*r**2*s**3 + 160250000*p**4*r**3*s**3 + 219750000*p*q**2*r**3*s**3 - 317000000*p**2*r**4*s**3 + 260000000*r**5*s**3 + 2531250*p**6*q*s**4 + 22500000*p**3*q**3*s**4 + 39843750*q**5*s**4 - 266343750*p**4*q*r*s**4 - 776406250*p*q**3*r*s**4 + 789062500*p**2*q*r**2*s**4 - 1368750000*q*r**3*s**4 + 67500000*p**5*s**5 + 441406250*p**2*q**2*s**5 - 311718750*p**3*r*s**5 + 1785156250*q**2*r*s**5 + 546875000*p*r**2*s**5 - 1269531250*p*q*s**6 + 488281250*s**7
c[2] = 120*p**5*q**7 + 810*p**2*q**9 - 1280*p**6*q**5*r - 9160*p**3*q**7*r + 3780*q**9*r + 4530*p**7*q**3*r**2 + 36640*p**4*q**5*r**2 - 45270*p*q**7*r**2 - 5400*p**8*q*r**3 - 60920*p**5*q**3*r**3 + 200050*p**2*q**5*r**3 + 31200*p**6*q*r**4 - 476000*p**3*q**3*r**4 - 378200*q**5*r**4 + 521600*p**4*q*r**5 + 1872000*p*q**3*r**5 - 2240000*p**2*q*r**6 + 1440*p**7*q**4*s + 15310*p**4*q**6*s + 59400*p*q**8*s - 9180*p**8*q**2*r*s - 115240*p**5*q**4*r*s - 589650*p**2*q**6*r*s + 16200*p**9*r**2*s + 316710*p**6*q**2*r**2*s + 2547750*p**3*q**4*r**2*s + 2178000*q**6*r**2*s - 259200*p**7*r**3*s - 4123000*p**4*q**2*r**3*s - 11700000*p*q**4*r**3*s + 937600*p**5*r**4*s + 16340000*p**2*q**2*r**4*s - 640000*p**3*r**5*s + 2800000*q**2*r**5*s - 2430*p**9*q*s**2 - 54450*p**6*q**3*s**2 - 285500*p**3*q**5*s**2 - 2767500*q**7*s**2 + 43200*p**7*q*r*s**2 - 916250*p**4*q**3*r*s**2 + 14482500*p*q**5*r*s**2 + 4806000*p**5*q*r**2*s**2 - 13212500*p**2*q**3*r**2*s**2 - 25400000*p**3*q*r**3*s**2 - 18750000*q**3*r**3*s**2 + 8000000*p*q*r**4*s**2 + 121500*p**8*s**3 + 2058750*p**5*q**2*s**3 - 6656250*p**2*q**4*s**3 - 6716250*p**6*r*s**3 + 24125000*p**3*q**2*r*s**3 + 23875000*q**4*r*s**3 + 43125000*p**4*r**2*s**3 + 45750000*p*q**2*r**2*s**3 - 87500000*p**2*r**3*s**3 + 70000000*r**4*s**3 - 44437500*p**4*q*s**4 - 107968750*p*q**3*s**4 + 159531250*p**2*q*r*s**4 - 284375000*q*r**2*s**4 + 7031250*p**3*s**5 + 265625000*q**2*s**5 + 31250000*p*r*s**5
c[1] = 160*p**3*q**7 + 1080*q**9 - 1080*p**4*q**5*r - 8730*p*q**7*r + 1510*p**5*q**3*r**2 + 20420*p**2*q**5*r**2 + 720*p**6*q*r**3 - 23200*p**3*q**3*r**3 - 79900*q**5*r**3 + 35200*p**4*q*r**4 + 404000*p*q**3*r**4 - 480000*p**2*q*r**5 + 960*p**5*q**4*s + 2850*p**2*q**6*s + 540*p**6*q**2*r*s + 63500*p**3*q**4*r*s + 319500*q**6*r*s - 7560*p**7*r**2*s - 253500*p**4*q**2*r**2*s - 1806250*p*q**4*r**2*s + 91200*p**5*r**3*s + 2600000*p**2*q**2*r**3*s - 80000*p**3*r**4*s + 600000*q**2*r**4*s - 4050*p**7*q*s**2 - 120000*p**4*q**3*s**2 - 273750*p*q**5*s**2 + 425250*p**5*q*r*s**2 + 2325000*p**2*q**3*r*s**2 - 5400000*p**3*q*r**2*s**2 - 2875000*q**3*r**2*s**2 + 1500000*p*q*r**3*s**2 - 303750*p**6*s**3 - 843750*p**3*q**2*s**3 - 812500*q**4*s**3 + 5062500*p**4*r*s**3 + 13312500*p*q**2*r*s**3 - 14500000*p**2*r**2*s**3 + 15000000*r**3*s**3 - 3750000*p**2*q*s**4 - 35937500*q*r*s**4 + 11718750*p*s**5
c[0] = 80*p**4*q**5 + 540*p*q**7 - 600*p**5*q**3*r - 4770*p**2*q**5*r + 1080*p**6*q*r**2 + 11200*p**3*q**3*r**2 - 12150*q**5*r**2 - 4800*p**4*q*r**3 + 64000*p*q**3*r**3 - 80000*p**2*q*r**4 + 1080*p**6*q**2*s + 13250*p**3*q**4*s + 54000*q**6*s - 3240*p**7*r*s - 56250*p**4*q**2*r*s - 337500*p*q**4*r*s + 43200*p**5*r**2*s + 560000*p**2*q**2*r**2*s - 80000*p**3*r**3*s + 100000*q**2*r**3*s + 6750*p**5*q*s**2 + 225000*p**2*q**3*s**2 - 900000*p**3*q*r*s**2 - 562500*q**3*r*s**2 + 500000*p*q*r**2*s**2 + 843750*p**4*s**3 + 1937500*p*q**2*s**3 - 3000000*p**2*r*s**3 + 2500000*r**2*s**3 - 5468750*q*s**4
return c
@property
def F(self):
p, q, r, s = self.p, self.q, self.r, self.s
F = 4*p**6*q**6 + 59*p**3*q**8 + 216*q**10 - 36*p**7*q**4*r - 623*p**4*q**6*r - 2610*p*q**8*r + 81*p**8*q**2*r**2 + 2015*p**5*q**4*r**2 + 10825*p**2*q**6*r**2 - 1800*p**6*q**2*r**3 - 17500*p**3*q**4*r**3 + 625*q**6*r**3 + 10000*p**4*q**2*r**4 + 108*p**8*q**3*s + 1584*p**5*q**5*s + 5700*p**2*q**7*s - 486*p**9*q*r*s - 9720*p**6*q**3*r*s - 45050*p**3*q**5*r*s - 9000*q**7*r*s + 10800*p**7*q*r**2*s + 92500*p**4*q**3*r**2*s + 32500*p*q**5*r**2*s - 60000*p**5*q*r**3*s - 50000*p**2*q**3*r**3*s + 729*p**10*s**2 + 12150*p**7*q**2*s**2 + 60000*p**4*q**4*s**2 + 93750*p*q**6*s**2 - 18225*p**8*r*s**2 - 175500*p**5*q**2*r*s**2 - 478125*p**2*q**4*r*s**2 + 135000*p**6*r**2*s**2 + 850000*p**3*q**2*r**2*s**2 + 15625*q**4*r**2*s**2 - 250000*p**4*r**3*s**2 + 225000*p**3*q**3*s**3 + 175000*q**5*s**3 - 1012500*p**4*q*r*s**3 - 1187500*p*q**3*r*s**3 + 1250000*p**2*q*r**2*s**3 + 928125*p**5*s**4 + 1875000*p**2*q**2*s**4 - 2812500*p**3*r*s**4 - 390625*q**2*r*s**4 - 9765625*s**6
return F
def l0(self, theta):
F = self.F
a = self.a
l0 = Poly(a, x).eval(theta)/F
return l0
def T(self, theta, d):
F = self.F
T = [0]*5
b = self.b
# Note that the order of sublists of the b's has been reversed compared to the paper
T[1] = -Poly(b[1], x).eval(theta)/(2*F)
T[2] = Poly(b[2], x).eval(theta)/(2*d*F)
T[3] = Poly(b[3], x).eval(theta)/(2*F)
T[4] = Poly(b[4], x).eval(theta)/(2*d*F)
return T
def order(self, theta, d):
F = self.F
o = self.o
order = Poly(o, x).eval(theta)/(d*F)
return N(order)
def uv(self, theta, d):
c = self.c
u = self.q*Rational(-25, 2)
v = Poly(c, x).eval(theta)/(2*d*self.F)
return N(u), N(v)
@property
def zeta(self):
return [self.zeta1, self.zeta2, self.zeta3, self.zeta4]
| PolyQuintic |
python | ray-project__ray | doc/source/custom_directives.py | {
"start": 16598,
"end": 16962
} | class ____(ExampleEnum):
ML_APPLICATIONS = "ML Applications"
LLM_APPLICATIONS = "LLM Applications"
INTEGRATIONS = "Integrations"
AI_ACCELERATORS = "AI Accelerators"
@classmethod
def formatted_name(cls):
return "Related Technology"
@classmethod
def key(cls: type) -> str:
return "related_technology"
| RelatedTechnology |
python | takluyver__flit | flit_core/flit_core/vendor/tomli/_parser.py | {
"start": 1414,
"end": 4123
} | class ____(ValueError):
"""An error raised if a document is not valid TOML."""
def load(fp: BinaryIO, *, parse_float: ParseFloat = float) -> Dict[str, Any]:
"""Parse TOML from a binary file object."""
s_bytes = fp.read()
try:
s = s_bytes.decode()
except AttributeError:
warnings.warn(
"Text file object support is deprecated in favor of binary file objects."
' Use `open("foo.toml", "rb")` to open the file in binary mode.',
DeprecationWarning,
stacklevel=2,
)
s = s_bytes # type: ignore[assignment]
return loads(s, parse_float=parse_float)
def loads(s: str, *, parse_float: ParseFloat = float) -> Dict[str, Any]: # noqa: C901
"""Parse TOML from a string."""
# The spec allows converting "\r\n" to "\n", even in string
# literals. Let's do so to simplify parsing.
src = s.replace("\r\n", "\n")
pos = 0
out = Output(NestedDict(), Flags())
header: Key = ()
# Parse one statement at a time
# (typically means one line in TOML source)
while True:
# 1. Skip line leading whitespace
pos = skip_chars(src, pos, TOML_WS)
# 2. Parse rules. Expect one of the following:
# - end of file
# - end of line
# - comment
# - key/value pair
# - append dict to list (and move to its namespace)
# - create dict (and move to its namespace)
# Skip trailing whitespace when applicable.
try:
char = src[pos]
except IndexError:
break
if char == "\n":
pos += 1
continue
if char in KEY_INITIAL_CHARS:
pos = key_value_rule(src, pos, out, header, parse_float)
pos = skip_chars(src, pos, TOML_WS)
elif char == "[":
try:
second_char: Optional[str] = src[pos + 1]
except IndexError:
second_char = None
if second_char == "[":
pos, header = create_list_rule(src, pos, out)
else:
pos, header = create_dict_rule(src, pos, out)
pos = skip_chars(src, pos, TOML_WS)
elif char != "#":
raise suffixed_err(src, pos, "Invalid statement")
# 3. Skip comment
pos = skip_comment(src, pos)
# 4. Expect end of line or end of file
try:
char = src[pos]
except IndexError:
break
if char != "\n":
raise suffixed_err(
src, pos, "Expected newline or end of document after a statement"
)
pos += 1
return out.data.dict
| TOMLDecodeError |
python | plotly__plotly.py | plotly/graph_objs/layout/ternary/caxis/title/_font.py | {
"start": 235,
"end": 9931
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.ternary.caxis.title"
_path_str = "layout.ternary.caxis.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this axis' title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.ternary
.caxis.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.ternary.caxis.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.ternary.caxis.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | walkccc__LeetCode | solutions/2657. Find the Prefix Common Array of Two Arrays/2657.py | {
"start": 0,
"end": 385
} | class ____:
def findThePrefixCommonArray(self, A: list[int], B: list[int]) -> list[int]:
n = len(A)
prefixCommon = 0
ans = []
count = [0] * (n + 1)
for a, b in zip(A, B):
count[a] += 1
if count[a] == 2:
prefixCommon += 1
count[b] += 1
if count[b] == 2:
prefixCommon += 1
ans.append(prefixCommon)
return ans
| Solution |
python | keras-team__keras | keras/src/layers/preprocessing/hashing_test.py | {
"start": 268,
"end": 504
} | class ____:
def __init__(self, values):
self.values = values
def __array__(self):
return np.array(self.values)
@pytest.mark.skipif(
backend.backend() == "numpy", reason="Broken with NumPy backend."
)
| ArrayLike |
python | openai__openai-python | src/openai/types/responses/response_computer_tool_call_param.py | {
"start": 1631,
"end": 2078
} | class ____(TypedDict, total=False):
path: Required[Iterable[ActionDragPath]]
"""An array of coordinates representing the path of the drag action.
Coordinates will appear as an array of objects, eg
```
[
{ x: 100, y: 200 },
{ x: 200, y: 300 }
]
```
"""
type: Required[Literal["drag"]]
"""Specifies the event type.
For a drag action, this property is always set to `drag`.
"""
| ActionDrag |
python | Farama-Foundation__Gymnasium | gymnasium/envs/mujoco/inverted_pendulum_v4.py | {
"start": 201,
"end": 1693
} | class ____(MujocoEnv, utils.EzPickle):
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
"rgbd_tuple",
],
"render_fps": 25,
}
def __init__(self, **kwargs):
utils.EzPickle.__init__(self, **kwargs)
observation_space = Box(low=-np.inf, high=np.inf, shape=(4,), dtype=np.float64)
MujocoEnv.__init__(
self,
"inverted_pendulum.xml",
2,
observation_space=observation_space,
default_camera_config=DEFAULT_CAMERA_CONFIG,
**kwargs,
)
def step(self, a):
reward = 1.0
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
terminated = bool(not np.isfinite(ob).all() or (np.abs(ob[1]) > 0.2))
if self.render_mode == "human":
self.render()
# truncation=False as the time limit is handled by the `TimeLimit` wrapper added during `make`
return ob, reward, terminated, False, {}
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(
size=self.model.nq, low=-0.01, high=0.01
)
qvel = self.init_qvel + self.np_random.uniform(
size=self.model.nv, low=-0.01, high=0.01
)
self.set_state(qpos, qvel)
return self._get_obs()
def _get_obs(self):
return np.concatenate([self.data.qpos, self.data.qvel]).ravel()
| InvertedPendulumEnv |
python | numpy__numpy | numpy/_core/code_generators/genapi.py | {
"start": 10884,
"end": 12259
} | class ____:
def __init__(self, name, index, ptr_cast, api_name, internal_type=None):
self.index = index
self.name = name
self.ptr_cast = ptr_cast
self.api_name = api_name
# The type used internally, if None, same as exported (ptr_cast)
self.internal_type = internal_type
def define_from_array_api_string(self):
return "#define %s (*(%s *)%s[%d])" % (self.name,
self.ptr_cast,
self.api_name,
self.index)
def array_api_define(self):
return f" (void *) &{self.name}"
def internal_define(self):
if self.internal_type is None:
return f"extern NPY_NO_EXPORT {self.ptr_cast} {self.name};\n"
# If we are here, we need to define a larger struct internally, which
# the type can be cast safely. But we want to normally use the original
# type, so name mangle:
mangled_name = f"{self.name}Full"
astr = (
# Create the mangled name:
f"extern NPY_NO_EXPORT {self.internal_type} {mangled_name};\n"
# And define the name as: (*(type *)(&mangled_name))
f"#define {self.name} (*({self.ptr_cast} *)(&{mangled_name}))\n"
)
return astr
| TypeApi |
python | getsentry__sentry | src/sentry/integrations/jira/webhooks/installed.py | {
"start": 1159,
"end": 3780
} | class ____(JiraWebhookBase):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
"""
Webhook hit by Jira whenever someone installs the Sentry integration in their Jira instance.
"""
def post(self, request: Request, *args, **kwargs) -> Response:
with IntegrationPipelineViewEvent(
interaction_type=IntegrationPipelineViewType.VERIFY_INSTALLATION,
domain=IntegrationDomain.PROJECT_MANAGEMENT,
provider_key=self.provider,
).capture() as lifecycle:
token = self.get_token(request)
state = request.data
if not state:
lifecycle.record_failure(ProjectManagementFailuresReason.INSTALLATION_STATE_MISSING)
return self.respond(status=status.HTTP_400_BAD_REQUEST)
key_id = jwt.peek_header(token).get("kid")
lifecycle.add_extras(
{
"key_id": key_id,
"base_url": state.get("baseUrl", ""),
"description": state.get("description", ""),
"clientKey": state.get("clientKey", ""),
}
)
if key_id:
if key_id in INVALID_KEY_IDS:
lifecycle.record_halt(halt_reason="JWT contained invalid key_id (kid)")
return self.respond(
{"detail": "Invalid key id"}, status=status.HTTP_400_BAD_REQUEST
)
decoded_claims = authenticate_asymmetric_jwt(token, key_id)
verify_claims(decoded_claims, request.path, request.GET, method="POST")
data = JiraIntegrationProvider().build_integration(state)
integration = ensure_integration(self.provider, data)
# Note: Unlike in all other Jira webhooks, we don't call `bind_org_context_from_integration`
# here, because at this point the integration hasn't yet been bound to an organization. The
# best we can do at this point is to record the integration's id.
sentry_sdk.set_tag("integration_id", integration.id)
# Sync integration metadata from Jira. This must be executed *after*
# the integration has been installed on Jira as the access tokens will
# not work until then.
transaction.on_commit(
lambda: sync_metadata.delay(integration_id=integration.id),
using=router.db_for_write(integration.__class__),
)
return self.respond()
| JiraSentryInstalledWebhook |
python | numpy__numpy | numpy/_core/tests/test_numeric.py | {
"start": 3031,
"end": 23052
} | class ____:
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
[3, 4, 5],
[5, 6, 7]]
tgt = [5, 1, 5]
a = [2, 0, 1]
out = np.choose(a, choices)
assert_equal(out, tgt)
def test_clip(self):
arr = [-1, 5, 2, 3, 10, -4, -9]
out = np.clip(arr, 2, 7)
tgt = [2, 5, 2, 3, 7, 2, 2]
assert_equal(out, tgt)
def test_compress(self):
arr = [[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]]
tgt = [[5, 6, 7, 8, 9]]
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
def test_count_nonzero(self):
arr = [[0, 1, 7, 0, 0],
[3, 0, 0, 2, 19]]
tgt = np.array([2, 3])
out = np.count_nonzero(arr, axis=1)
assert_equal(out, tgt)
def test_diagonal(self):
a = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]
out = np.diagonal(a)
tgt = [0, 5, 10]
assert_equal(out, tgt)
def test_mean(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.mean(A) == 3.5)
assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.mean([])))
assert_(w[0].category is RuntimeWarning)
def test_ptp(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.ptp(a, axis=0), 15.0)
def test_prod(self):
arr = [[1, 2, 3, 4],
[5, 6, 7, 9],
[10, 3, 4, 5]]
tgt = [24, 1890, 600]
assert_equal(np.prod(arr, axis=-1), tgt)
def test_ravel(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
assert_equal(np.ravel(a), tgt)
def test_repeat(self):
a = [1, 2, 3]
tgt = [1, 1, 2, 2, 3, 3]
out = np.repeat(a, 2)
assert_equal(out, tgt)
def test_reshape(self):
arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(np.reshape(arr, (2, 6)), tgt)
def test_reshape_shape_arg(self):
arr = np.arange(12)
shape = (3, 4)
expected = arr.reshape(shape)
with pytest.raises(
TypeError,
match=r"reshape\(\) missing 1 required positional "
"argument: 'shape'"
):
np.reshape(arr)
assert_equal(np.reshape(arr, shape), expected)
assert_equal(np.reshape(arr, shape, order="C"), expected)
assert_equal(np.reshape(arr, shape, "C"), expected)
assert_equal(np.reshape(arr, shape=shape), expected)
assert_equal(np.reshape(arr, shape=shape, order="C"), expected)
def test_reshape_copy_arg(self):
arr = np.arange(24).reshape(2, 3, 4)
arr_f_ord = np.array(arr, order="F")
shape = (12, 2)
assert np.shares_memory(np.reshape(arr, shape), arr)
assert np.shares_memory(np.reshape(arr, shape, order="C"), arr)
assert np.shares_memory(
np.reshape(arr_f_ord, shape, order="F"), arr_f_ord)
assert np.shares_memory(np.reshape(arr, shape, copy=None), arr)
assert np.shares_memory(np.reshape(arr, shape, copy=False), arr)
assert np.shares_memory(arr.reshape(shape, copy=False), arr)
assert not np.shares_memory(np.reshape(arr, shape, copy=True), arr)
assert not np.shares_memory(
np.reshape(arr, shape, order="C", copy=True), arr)
assert not np.shares_memory(
np.reshape(arr, shape, order="F", copy=True), arr)
assert not np.shares_memory(
np.reshape(arr, shape, order="F", copy=None), arr)
err_msg = "Unable to avoid creating a copy while reshaping."
with pytest.raises(ValueError, match=err_msg):
np.reshape(arr, shape, order="F", copy=False)
with pytest.raises(ValueError, match=err_msg):
np.reshape(arr_f_ord, shape, order="C", copy=False)
def test_round(self):
arr = [1.56, 72.54, 6.35, 3.25]
tgt = [1.6, 72.5, 6.4, 3.2]
assert_equal(np.around(arr, decimals=1), tgt)
s = np.float64(1.)
assert_(isinstance(s.round(), np.float64))
assert_equal(s.round(), 1.)
@pytest.mark.parametrize('dtype', [
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64,
])
def test_dunder_round(self, dtype):
s = dtype(1)
assert_(isinstance(round(s), int))
assert_(isinstance(round(s, None), int))
assert_(isinstance(round(s, ndigits=None), int))
assert_equal(round(s), 1)
assert_equal(round(s, None), 1)
assert_equal(round(s, ndigits=None), 1)
@pytest.mark.parametrize('val, ndigits', [
pytest.param(2**31 - 1, -1,
marks=pytest.mark.skip(reason="Out of range of int32")
),
(2**31 - 1, 1 - math.ceil(math.log10(2**31 - 1))),
(2**31 - 1, -math.ceil(math.log10(2**31 - 1)))
])
def test_dunder_round_edgecases(self, val, ndigits):
assert_equal(round(val, ndigits), round(np.int32(val), ndigits))
def test_dunder_round_accuracy(self):
f = np.float64(5.1 * 10**73)
assert_(isinstance(round(f, -73), np.float64))
assert_array_max_ulp(round(f, -73), 5.0 * 10**73)
assert_(isinstance(round(f, ndigits=-73), np.float64))
assert_array_max_ulp(round(f, ndigits=-73), 5.0 * 10**73)
i = np.int64(501)
assert_(isinstance(round(i, -2), np.int64))
assert_array_max_ulp(round(i, -2), 500)
assert_(isinstance(round(i, ndigits=-2), np.int64))
assert_array_max_ulp(round(i, ndigits=-2), 500)
@pytest.mark.xfail(raises=AssertionError, reason="gh-15896")
def test_round_py_consistency(self):
f = 5.1 * 10**73
assert_equal(round(np.float64(f), -73), round(f, -73))
def test_searchsorted(self):
arr = [-8, -5, -1, 3, 6, 10]
out = np.searchsorted(arr, 0)
assert_equal(out, 3)
def test_size(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.size(A) == 6)
assert_(np.size(A, 0) == 2)
assert_(np.size(A, 1) == 3)
assert_(np.size(A, ()) == 1)
assert_(np.size(A, (0,)) == 2)
assert_(np.size(A, (1,)) == 3)
assert_(np.size(A, (0, 1)) == 6)
def test_squeeze(self):
A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
assert_equal(np.squeeze(A).shape, (3, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))
def test_std(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.std(A), 1.707825127659933)
assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.std([])))
assert_(w[0].category is RuntimeWarning)
def test_swapaxes(self):
tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
out = np.swapaxes(a, 0, 2)
assert_equal(out, tgt)
def test_sum(self):
m = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
tgt = [[6], [15], [24]]
out = np.sum(m, axis=1, keepdims=True)
assert_equal(tgt, out)
def test_take(self):
tgt = [2, 3, 5]
indices = [1, 2, 4]
a = [1, 2, 3, 4, 5]
out = np.take(a, indices)
assert_equal(out, tgt)
pairs = [
(np.int32, np.int32), (np.int32, np.int64),
(np.int64, np.int32), (np.int64, np.int64)
]
for array_type, indices_type in pairs:
x = np.array([1, 2, 3, 4, 5], dtype=array_type)
ind = np.array([0, 2, 2, 3], dtype=indices_type)
tgt = np.array([1, 3, 3, 4], dtype=array_type)
out = np.take(x, ind)
assert_equal(out, tgt)
assert_equal(out.dtype, tgt.dtype)
def test_trace(self):
c = [[1, 2], [3, 4], [5, 6]]
assert_equal(np.trace(c), 5)
def test_transpose(self):
arr = [[1, 2], [3, 4], [5, 6]]
tgt = [[1, 3, 5], [2, 4, 6]]
assert_equal(np.transpose(arr, (1, 0)), tgt)
assert_equal(np.transpose(arr, (-1, -2)), tgt)
assert_equal(np.matrix_transpose(arr), tgt)
def test_var(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.var(A), 2.9166666666666665)
assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.var([])))
assert_(w[0].category is RuntimeWarning)
B = np.array([None, 0])
B[0] = 1j
assert_almost_equal(np.var(B), 0.25)
def test_std_with_mean_keyword(self):
# Setting the seed to make the test reproducible
rng = np.random.RandomState(1234)
A = rng.randn(10, 20, 5) + 0.5
mean_out = np.zeros((10, 1, 5))
std_out = np.zeros((10, 1, 5))
mean = np.mean(A,
out=mean_out,
axis=1,
keepdims=True)
# The returned object should be the object specified during calling
assert mean_out is mean
std = np.std(A,
out=std_out,
axis=1,
keepdims=True,
mean=mean)
# The returned object should be the object specified during calling
assert std_out is std
# Shape of returned mean and std should be same
assert std.shape == mean.shape
assert std.shape == (10, 1, 5)
# Output should be the same as from the individual algorithms
std_old = np.std(A, axis=1, keepdims=True)
assert std_old.shape == mean.shape
assert_almost_equal(std, std_old)
def test_var_with_mean_keyword(self):
# Setting the seed to make the test reproducible
rng = np.random.RandomState(1234)
A = rng.randn(10, 20, 5) + 0.5
mean_out = np.zeros((10, 1, 5))
var_out = np.zeros((10, 1, 5))
mean = np.mean(A,
out=mean_out,
axis=1,
keepdims=True)
# The returned object should be the object specified during calling
assert mean_out is mean
var = np.var(A,
out=var_out,
axis=1,
keepdims=True,
mean=mean)
# The returned object should be the object specified during calling
assert var_out is var
# Shape of returned mean and var should be same
assert var.shape == mean.shape
assert var.shape == (10, 1, 5)
# Output should be the same as from the individual algorithms
var_old = np.var(A, axis=1, keepdims=True)
assert var_old.shape == mean.shape
assert_almost_equal(var, var_old)
def test_std_with_mean_keyword_keepdims_false(self):
rng = np.random.RandomState(1234)
A = rng.randn(10, 20, 5) + 0.5
mean = np.mean(A,
axis=1,
keepdims=True)
std = np.std(A,
axis=1,
keepdims=False,
mean=mean)
# Shape of returned mean and std should be same
assert std.shape == (10, 5)
# Output should be the same as from the individual algorithms
std_old = np.std(A, axis=1, keepdims=False)
mean_old = np.mean(A, axis=1, keepdims=False)
assert std_old.shape == mean_old.shape
assert_equal(std, std_old)
def test_var_with_mean_keyword_keepdims_false(self):
rng = np.random.RandomState(1234)
A = rng.randn(10, 20, 5) + 0.5
mean = np.mean(A,
axis=1,
keepdims=True)
var = np.var(A,
axis=1,
keepdims=False,
mean=mean)
# Shape of returned mean and var should be same
assert var.shape == (10, 5)
# Output should be the same as from the individual algorithms
var_old = np.var(A, axis=1, keepdims=False)
mean_old = np.mean(A, axis=1, keepdims=False)
assert var_old.shape == mean_old.shape
assert_equal(var, var_old)
def test_std_with_mean_keyword_where_nontrivial(self):
rng = np.random.RandomState(1234)
A = rng.randn(10, 20, 5) + 0.5
where = A > 0.5
mean = np.mean(A,
axis=1,
keepdims=True,
where=where)
std = np.std(A,
axis=1,
keepdims=False,
mean=mean,
where=where)
# Shape of returned mean and std should be same
assert std.shape == (10, 5)
# Output should be the same as from the individual algorithms
std_old = np.std(A, axis=1, where=where)
mean_old = np.mean(A, axis=1, where=where)
assert std_old.shape == mean_old.shape
assert_equal(std, std_old)
def test_var_with_mean_keyword_where_nontrivial(self):
rng = np.random.RandomState(1234)
A = rng.randn(10, 20, 5) + 0.5
where = A > 0.5
mean = np.mean(A,
axis=1,
keepdims=True,
where=where)
var = np.var(A,
axis=1,
keepdims=False,
mean=mean,
where=where)
# Shape of returned mean and var should be same
assert var.shape == (10, 5)
# Output should be the same as from the individual algorithms
var_old = np.var(A, axis=1, where=where)
mean_old = np.mean(A, axis=1, where=where)
assert var_old.shape == mean_old.shape
assert_equal(var, var_old)
def test_std_with_mean_keyword_multiple_axis(self):
# Setting the seed to make the test reproducible
rng = np.random.RandomState(1234)
A = rng.randn(10, 20, 5) + 0.5
axis = (0, 2)
mean = np.mean(A,
out=None,
axis=axis,
keepdims=True)
std = np.std(A,
out=None,
axis=axis,
keepdims=False,
mean=mean)
# Shape of returned mean and std should be same
assert std.shape == (20,)
# Output should be the same as from the individual algorithms
std_old = np.std(A, axis=axis, keepdims=False)
assert_almost_equal(std, std_old)
def test_std_with_mean_keyword_axis_None(self):
# Setting the seed to make the test reproducible
rng = np.random.RandomState(1234)
A = rng.randn(10, 20, 5) + 0.5
axis = None
mean = np.mean(A,
out=None,
axis=axis,
keepdims=True)
std = np.std(A,
out=None,
axis=axis,
keepdims=False,
mean=mean)
# Shape of returned mean and std should be same
assert std.shape == ()
# Output should be the same as from the individual algorithms
std_old = np.std(A, axis=axis, keepdims=False)
assert_almost_equal(std, std_old)
def test_std_with_mean_keyword_keepdims_true_masked(self):
A = ma.array([[2., 3., 4., 5.],
[1., 2., 3., 4.]],
mask=[[True, False, True, False],
[True, False, True, False]])
B = ma.array([[100., 3., 104., 5.],
[101., 2., 103., 4.]],
mask=[[True, False, True, False],
[True, False, True, False]])
mean_out = ma.array([[0., 0., 0., 0.]],
mask=[[False, False, False, False]])
std_out = ma.array([[0., 0., 0., 0.]],
mask=[[False, False, False, False]])
axis = 0
mean = np.mean(A, out=mean_out,
axis=axis, keepdims=True)
std = np.std(A, out=std_out,
axis=axis, keepdims=True,
mean=mean)
# Shape of returned mean and std should be same
assert std.shape == mean.shape
assert std.shape == (1, 4)
# Output should be the same as from the individual algorithms
std_old = np.std(A, axis=axis, keepdims=True)
mean_old = np.mean(A, axis=axis, keepdims=True)
assert std_old.shape == mean_old.shape
assert_almost_equal(std, std_old)
assert_almost_equal(mean, mean_old)
assert mean_out is mean
assert std_out is std
# masked elements should be ignored
mean_b = np.mean(B, axis=axis, keepdims=True)
std_b = np.std(B, axis=axis, keepdims=True, mean=mean_b)
assert_almost_equal(std, std_b)
assert_almost_equal(mean, mean_b)
def test_var_with_mean_keyword_keepdims_true_masked(self):
A = ma.array([[2., 3., 4., 5.],
[1., 2., 3., 4.]],
mask=[[True, False, True, False],
[True, False, True, False]])
B = ma.array([[100., 3., 104., 5.],
[101., 2., 103., 4.]],
mask=[[True, False, True, False],
[True, False, True, False]])
mean_out = ma.array([[0., 0., 0., 0.]],
mask=[[False, False, False, False]])
var_out = ma.array([[0., 0., 0., 0.]],
mask=[[False, False, False, False]])
axis = 0
mean = np.mean(A, out=mean_out,
axis=axis, keepdims=True)
var = np.var(A, out=var_out,
axis=axis, keepdims=True,
mean=mean)
# Shape of returned mean and var should be same
assert var.shape == mean.shape
assert var.shape == (1, 4)
# Output should be the same as from the individual algorithms
var_old = np.var(A, axis=axis, keepdims=True)
mean_old = np.mean(A, axis=axis, keepdims=True)
assert var_old.shape == mean_old.shape
assert_almost_equal(var, var_old)
assert_almost_equal(mean, mean_old)
assert mean_out is mean
assert var_out is var
# masked elements should be ignored
mean_b = np.mean(B, axis=axis, keepdims=True)
var_b = np.var(B, axis=axis, keepdims=True, mean=mean_b)
assert_almost_equal(var, var_b)
assert_almost_equal(mean, mean_b)
| TestNonarrayArgs |
python | pytorch__pytorch | torch/autograd/graph.py | {
"start": 12076,
"end": 16608
} | class ____(saved_tensors_hooks):
"""Context manager under which tensors saved by the forward pass will be stored on cpu, then retrieved for backward.
When performing operations within this context manager, intermediary
results saved in the graph during the forward pass will be moved to CPU,
then copied back to the original device when needed for the backward pass.
If the graph was already on CPU, no tensor copy is performed.
Use this context-manager to trade compute for GPU memory usage (e.g.
when your model doesn't fit in GPU memory during training).
Args:
pin_memory (bool): If ``True`` tensors will be saved to CPU pinned memory
during packing and copied to GPU asynchronously during unpacking.
Defaults to ``False``.
Also see :ref:`cuda-memory-pinning`.
Example::
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
>>> a = torch.randn(5, requires_grad=True, device="cuda")
>>> b = torch.randn(5, requires_grad=True, device="cuda")
>>> c = torch.randn(5, requires_grad=True, device="cuda")
>>>
>>> def f(a, b, c):
... prod_1 = a * b # a and b are saved on GPU
... with torch.autograd.graph.save_on_cpu():
... prod_2 = prod_1 * c # prod_1 and c are saved on CPU
... y = prod_2 * a # prod_2 and a are saved on GPU
... return y
>>>
>>> y = f(a, b, c)
>>> del a, b, c # for illustration only
>>> # the content of a, b, and prod_2 are still alive on GPU
>>> # the content of prod_1 and c only live on CPU
>>> y.sum().backward() # all CPU tensors are moved back to GPU, for backward
>>> # all intermediary tensors are released (deleted) after the call to backward
"""
def __init__(self, pin_memory: bool = False, device_type: str = "cuda") -> None:
device_module = getattr(torch, device_type, torch.cuda)
def pack_to_cpu(tensor: torch.Tensor) -> tuple[torch.device, torch.Tensor]:
if not pin_memory:
return (tensor.device, tensor.cpu())
packed = torch.empty(
tensor.size(),
dtype=tensor.dtype,
layout=tensor.layout,
pin_memory=(device_module.is_available() and not tensor.is_sparse),
)
packed.copy_(tensor)
return (tensor.device, packed)
def unpack_from_cpu(packed: tuple[torch.device, torch.Tensor]) -> torch.Tensor:
device, tensor = packed
return tensor.to(device, non_blocking=pin_memory)
super().__init__(pack_to_cpu, unpack_from_cpu)
@contextlib.contextmanager
def disable_saved_tensors_hooks(error_message: str) -> Generator[None, None, None]:
"""Context-manager that disables the saved tensors default hooks feature.
Useful for if you are creating a feature that does not work with saved
tensors default hooks.
Args:
error_message (str): When saved tensors default hooks are used when they
have been are disabled, a RuntimeError with this
error message gets raised.
Example::
>>> # xdoctest: +SKIP(failing)
>>> message = "saved tensors default hooks are disabled"
>>> with torch.autograd.graph.disable_saved_tensors_hooks(message):
... # Raises RuntimeError: saved tensors default hooks are disabled
... with torch.autograd.graph.save_on_cpu():
... pass
"""
maybe_prev_message = None
try:
maybe_prev_message = (
torch._C._autograd._saved_tensors_hooks_get_disabled_error_message()
)
torch._C._autograd._saved_tensors_hooks_disable(error_message)
yield
finally:
# See NOTE: [disabled_error_message invariant]
if maybe_prev_message is None:
torch._C._autograd._saved_tensors_hooks_enable()
else:
torch._C._autograd._saved_tensors_hooks_disable(maybe_prev_message)
def set_warn_on_accumulate_grad_stream_mismatch(enabled: bool) -> None:
"""Whether to warn when the AccumulateGrad node's stream does not match the stream
of the node that produced the incoming gradient.
"""
return torch._C._set_warn_on_accumulate_grad_stream_mismatch(enabled)
| save_on_cpu |
python | pandas-dev__pandas | asv_bench/benchmarks/sparse.py | {
"start": 6083,
"end": 6817
} | class ____:
params = [True, False, np.nan]
param_names = ["fill_value"]
def setup(self, fill_value):
N = 1_000_000
d = 1e-5
arr = make_array(N, d, np.nan, np.float64)
self.sp_arr = SparseArray(arr)
b_arr = np.full(shape=N, fill_value=fill_value, dtype=np.bool_)
fv_inds = np.unique(
np.random.randint(low=0, high=N - 1, size=int(N * d), dtype=np.int32)
)
b_arr[fv_inds] = True if pd.isna(fill_value) else not fill_value
self.sp_b_arr = SparseArray(b_arr, dtype=np.bool_, fill_value=fill_value)
def time_mask(self, fill_value):
self.sp_arr[self.sp_b_arr]
from .pandas_vb_common import setup # noqa: F401 isort:skip
| GetItemMask |
python | automl__auto-sklearn | test/test_pipeline/components/regression/test_base.py | {
"start": 493,
"end": 15441
} | class ____(unittest.TestCase):
res = None
module = None
sk_module = None
# Hyperparameter which is increased by iterative_fit
step_hyperparameter = None
# Magic command to not run tests on base class
__test__ = False
def test_default_boston(self):
if self.__class__ == BaseRegressionComponentTest:
return
for _ in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, n_calls = _test_regressor(
dataset="boston", Regressor=self.module
)
score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
# Special treatment for Gaussian Process Regression
if "default_boston_le_ge" in self.res:
upper, lower = self.res["default_boston_le_ge"]
assert lower <= score <= upper
else:
fixture = self.res["default_boston"]
places = self.res.get("default_boston_places", 7)
if score < -1e10:
score = np.log(-score)
fixture = np.log(-fixture)
self.assertAlmostEqual(fixture, score, places)
if "boston_n_calls" in self.res:
expected = self.res["boston_n_calls"]
if isinstance(expected, Container):
assert n_calls in expected
else:
assert n_calls == expected
def test_default_boston_iterative_fit(self):
if self.__class__ == BaseRegressionComponentTest:
return
if not hasattr(self.module, "iterative_fit"):
return
for i in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, regressor = _test_regressor_iterative_fit(
dataset="boston", Regressor=self.module
)
score = sklearn.metrics.r2_score(targets, predictions)
fixture = self.res["default_boston_iterative"]
places = self.res.get("default_boston_iterative_places", 7)
if score < -1e10:
print(f"score = {score}, fixture = {fixture}")
score = np.log(-score)
fixture = np.log(-fixture)
self.assertAlmostEqual(fixture, score, places)
if self.step_hyperparameter is not None:
param_name = self.step_hyperparameter["name"]
default = self.step_hyperparameter["value"]
value = getattr(regressor.estimator, param_name)
expected = self.res.get("boston_iterative_n_iter", default)
# To currently allow for MLPRegressor which is indeterministic,
# we can have multiple values
if isinstance(expected, Container):
assert value in expected
else:
assert value == expected
def test_default_boston_iterative_sparse_fit(self):
if self.__class__ == BaseRegressionComponentTest:
return
if not hasattr(self.module, "iterative_fit"):
return
if SPARSE not in self.module.get_properties()["input"]:
return
for i in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, _ = _test_regressor_iterative_fit(
dataset="boston", Regressor=self.module, sparse=True
)
self.assertAlmostEqual(
self.res["default_boston_iterative_sparse"],
sklearn.metrics.r2_score(targets, predictions),
places=self.res.get("default_boston_iterative_sparse_places", 7),
)
def test_default_boston_sparse(self):
if self.__class__ == BaseRegressionComponentTest:
return
if SPARSE not in self.module.get_properties()["input"]:
return
for i in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, _ = _test_regressor(
dataset="boston", Regressor=self.module, sparse=True
)
self.assertAlmostEqual(
self.res["default_boston_sparse"],
sklearn.metrics.r2_score(targets, predictions),
places=self.res.get("default_boston_sparse_places", 7),
)
def test_default_diabetes(self):
if self.__class__ == BaseRegressionComponentTest:
return
for i in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, n_calls = _test_regressor(
dataset="diabetes", Regressor=self.module
)
self.assertAlmostEqual(
self.res["default_diabetes"],
sklearn.metrics.r2_score(targets, predictions),
places=self.res.get("default_diabetes_places", 7),
)
if self.res.get("diabetes_n_calls"):
self.assertEqual(self.res["diabetes_n_calls"], n_calls)
def test_default_diabetes_iterative_fit(self):
if self.__class__ == BaseRegressionComponentTest:
return
if not hasattr(self.module, "iterative_fit"):
return
for i in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, _ = _test_regressor_iterative_fit(
dataset="diabetes", Regressor=self.module
)
self.assertAlmostEqual(
self.res["default_diabetes_iterative"],
sklearn.metrics.r2_score(targets, predictions),
places=self.res.get("default_diabetes_iterative_places", 7),
)
def test_default_diabetes_iterative_sparse_fit(self):
if self.__class__ == BaseRegressionComponentTest:
return
if not hasattr(self.module, "iterative_fit"):
return
if SPARSE not in self.module.get_properties()["input"]:
return
for i in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, regressor = _test_regressor_iterative_fit(
dataset="diabetes", Regressor=self.module, sparse=True
)
self.assertAlmostEqual(
self.res["default_diabetes_iterative_sparse"],
sklearn.metrics.r2_score(targets, predictions),
places=self.res.get("default_diabetes_iterative_sparse_places", 7),
)
if self.step_hyperparameter is not None:
self.assertEqual(
getattr(regressor.estimator, self.step_hyperparameter["name"]),
self.res.get(
"diabetes_iterative_n_iter", self.step_hyperparameter["value"]
),
)
def test_default_diabetes_sparse(self):
if self.__class__ == BaseRegressionComponentTest:
return
if SPARSE not in self.module.get_properties()["input"]:
return
for i in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, _ = _test_regressor(
dataset="diabetes", Regressor=self.module, sparse=True
)
self.assertAlmostEqual(
self.res["default_diabetes_sparse"],
sklearn.metrics.r2_score(targets, predictions),
places=self.res.get("default_diabetes_sparse_places", 7),
)
def test_module_idempotent(self):
"""Fitting twice with the same config gives the same model params.
This is only valid when the random_state passed is an int. If a
RandomState object is passed then repeated calls to fit will have
different results. See the section on "Controlling Randomness" in the
sklearn docs.
https://scikit-learn.org/0.24/common_pitfalls.html#controlling-randomness
"""
if self.__class__ == BaseRegressionComponentTest:
return
regressor_cls = self.module
X = np.array(
[
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
]
)
y = np.array(
[
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
]
)
# We ignore certain keys when comparing
param_keys_ignored = ["base_estimator"]
# We use the default config + sampled ones
configuration_space = regressor_cls.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
sampled = [configuration_space.sample_configuration() for _ in range(2)]
for seed, config in enumerate([default] + sampled):
model_args = {"random_state": seed, **config}
regressor = regressor_cls(**model_args)
# Get the parameters on the first and second fit with config params
# Also compare their random state
with ignore_warnings(regressor_warnings):
params_first = regressor.fit(X.copy(), y.copy()).estimator.get_params()
if hasattr(regressor.estimator, "random_state"):
rs_1 = regressor.random_state
rs_estimator_1 = regressor.estimator.random_state
with ignore_warnings(regressor_warnings):
params_second = regressor.fit(X.copy(), y.copy()).estimator.get_params()
if hasattr(regressor.estimator, "random_state"):
rs_2 = regressor.random_state
rs_estimator_2 = regressor.estimator.random_state
# Remove keys we don't wish to include in the comparison
for params in [params_first, params_second]:
for key in param_keys_ignored:
if key in params:
del params[key]
# They should have equal parameters
self.assertEqual(
params_first, params_second, f"Failed with model args {model_args}"
)
if hasattr(regressor.estimator, "random_state") and not isinstance(
regressor, LibSVM_SVR
):
# sklearn.svm.SVR has it as an attribute but does not use it and
# defaults it to None, even if a value is passed in
assert all(
[
seed == random_state
for random_state in [rs_1, rs_estimator_1, rs_2, rs_estimator_2]
]
)
@pytest.mark.parametrize("regressor", _regressors.values())
@pytest.mark.parametrize("X", [np.array([[1, 2, 3]] * 20)])
@pytest.mark.parametrize("y", [np.array([1] * 20)])
def test_fit_and_predict_with_1d_targets_as_1d(
regressor: Type[RegressorChoice], X: np.ndarray, y: np.ndarray
) -> None:
"""Test that all pipelines work with 1d target types
Parameters
----------
regressor: RegressorChoice
The regressor to test
X: np.ndarray
The features
y: np.ndarray
The 1d targets
Expects
-------
* Should be able to fit with 1d targets
* Should be able to predict with 1d targest
* Should have predictions with the same shape as y
"""
assert len(X) == len(y)
assert y.ndim == 1
config_space = regressor.get_hyperparameter_search_space()
default_config = config_space.get_default_configuration()
model = regressor(random_state=0, **default_config)
with ignore_warnings(regressor_warnings):
model.fit(X, y)
predictions = model.predict(X)
assert predictions.shape == y.shape
@pytest.mark.parametrize("regressor", _regressors.values())
@pytest.mark.parametrize("X", [np.array([[1, 2, 3]] * 20)])
@pytest.mark.parametrize("y", [np.array([[1]] * 20)])
def test_fit_and_predict_with_1d_targets_as_2d(
regressor: Type[RegressorChoice], X: np.ndarray, y: np.ndarray
) -> None:
"""Test that all pipelines work with 1d target types when they are wrapped as 2d
Parameters
----------
regressor: RegressorChoice
The regressor to test
X: np.ndarray
The features
y: np.ndarray
The 1d targets wrapped as 2d
Expects
-------
* Should be able to fit with 1d targets wrapped in 2d
* Should be able to predict 1d targets wrapped in 2d
* Should return 1d predictions
* Should have predictions with the same length as the y
"""
assert len(X) == len(y)
assert y.ndim == 2 and y.shape[1] == 1
config_space = regressor.get_hyperparameter_search_space()
default_config = config_space.get_default_configuration()
model = regressor(random_state=0, **default_config)
with ignore_warnings(regressor_warnings):
model.fit(X, y)
predictions = model.predict(X)
assert predictions.ndim == 1
assert len(predictions) == len(y)
@pytest.mark.parametrize(
"regressor",
[
regressor
for regressor in _regressors.values()
if regressor.get_properties()["handles_multilabel"]
],
)
@pytest.mark.parametrize("X", [np.array([[1, 2, 3]] * 20)])
@pytest.mark.parametrize("y", [np.array([[1, 1, 1]] * 20)])
def test_fit_and_predict_with_2d_targets(
regressor: Type[RegressorChoice], X: np.ndarray, y: np.ndarray
) -> None:
"""Test that all pipelines work with 2d target types
Parameters
----------
regressor: RegressorChoice
The regressor to test
X: np.ndarray
The features
y: np.ndarray
The 2d targets
Expects
-------
* Should be able to fit with 2d targets
* Should be able to predict with 2d targets
* Should have predictions with the same shape as y
"""
assert len(X) == len(y)
assert y.ndim == 2 and y.shape[1] > 1
config_space = regressor.get_hyperparameter_search_space()
default_config = config_space.get_default_configuration()
model = regressor(random_state=0, **default_config)
with ignore_warnings(regressor_warnings):
model.fit(X, y)
predictions = model.predict(X)
assert predictions.shape == y.shape
| BaseRegressionComponentTest |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/sqlite_datasource.py | {
"start": 4243,
"end": 4898
} | class ____(SqlQueryAsset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# update the partitioner map with the Sqlite specific partitioner
self._partitioner_implementation_map[PartitionerConvertedDatetime] = (
SqlitePartitionerConvertedDateTime
)
type: Literal["query"] = "query"
@override
def validate_batch_definition(self, partitioner: ColumnPartitioner) -> None:
# TODO: Implement batch definition validation.
# sqlite stores datetimes as a string so we must override how we normally
# validate batch definitions.
pass
@public_api
| SqliteQueryAsset |
python | numba__numba | numba/tests/test_mixed_tuple_unroller.py | {
"start": 58255,
"end": 62041
} | class ____(TestCase):
def test_literal_unroll_not_invoked(self):
@njit(pipeline_class=CapturingCompiler)
def foo():
acc = 0
for i in (1, 2, 3):
acc += i
return acc
foo()
cres = foo.overloads[foo.signatures[0]]
self.assertFalse(cres.metadata['mutation_results'][LiteralUnroll])
def test_literal_unroll_is_invoked(self):
@njit(pipeline_class=CapturingCompiler)
def foo():
acc = 0
for i in literal_unroll((1, 2, 3)):
acc += i
return acc
foo()
cres = foo.overloads[foo.signatures[0]]
self.assertTrue(cres.metadata['mutation_results'][LiteralUnroll])
def test_literal_unroll_is_invoked_via_alias(self):
alias = literal_unroll
@njit(pipeline_class=CapturingCompiler)
def foo():
acc = 0
for i in alias((1, 2, 3)):
acc += i
return acc
foo()
cres = foo.overloads[foo.signatures[0]]
self.assertTrue(cres.metadata['mutation_results'][LiteralUnroll])
def test_literal_unroll_assess_empty_function(self):
@njit(pipeline_class=CapturingCompiler)
def foo():
pass
foo()
cres = foo.overloads[foo.signatures[0]]
self.assertFalse(cres.metadata['mutation_results'][LiteralUnroll])
def test_literal_unroll_not_in_globals(self):
f = """def foo():\n\tpass"""
l = {}
exec(f, {}, l)
foo = njit(pipeline_class=CapturingCompiler)(l['foo'])
foo()
cres = foo.overloads[foo.signatures[0]]
self.assertFalse(cres.metadata['mutation_results'][LiteralUnroll])
def test_literal_unroll_globals_and_locals(self):
f = """def foo():\n\tfor x in literal_unroll((1,)):\n\t\tpass"""
l = {}
exec(f, {}, l)
foo = njit(pipeline_class=CapturingCompiler)(l['foo'])
with self.assertRaises(errors.TypingError) as raises:
foo()
self.assertIn("Untyped global name 'literal_unroll'",
str(raises.exception))
# same as above but now add literal_unroll to globals
l = {}
exec(f, {'literal_unroll': literal_unroll}, l)
foo = njit(pipeline_class=CapturingCompiler)(l['foo'])
foo()
cres = foo.overloads[foo.signatures[0]]
self.assertTrue(cres.metadata['mutation_results'][LiteralUnroll])
# same as above, but now with import
from textwrap import dedent
f = """
def gen():
from numba import literal_unroll
def foo():
for x in literal_unroll((1,)):
pass
return foo
bar = gen()
"""
l = {}
exec(dedent(f), {}, l)
foo = njit(pipeline_class=CapturingCompiler)(l['bar'])
foo()
cres = foo.overloads[foo.signatures[0]]
self.assertTrue(cres.metadata['mutation_results'][LiteralUnroll])
# same as above, but now with import as something else
from textwrap import dedent
f = """
def gen():
from numba import literal_unroll as something_else
def foo():
for x in something_else((1,)):
pass
return foo
bar = gen()
"""
l = {}
exec(dedent(f), {}, l)
foo = njit(pipeline_class=CapturingCompiler)(l['bar'])
foo()
cres = foo.overloads[foo.signatures[0]]
self.assertTrue(cres.metadata['mutation_results'][LiteralUnroll])
if __name__ == '__main__':
unittest.main()
| TestLiteralUnrollPassTriggering |
python | pypa__pipenv | pipenv/vendor/importlib_metadata/__init__.py | {
"start": 29968,
"end": 35867
} | class ____(Distribution):
def __init__(self, path: SimplePath) -> None:
"""Construct a distribution.
:param path: SimplePath indicating the metadata directory.
"""
self._path = path
def read_text(self, filename: str | os.PathLike[str]) -> Optional[str]:
with suppress(
FileNotFoundError,
IsADirectoryError,
KeyError,
NotADirectoryError,
PermissionError,
):
return self._path.joinpath(filename).read_text(encoding='utf-8')
return None
read_text.__doc__ = Distribution.read_text.__doc__
def locate_file(self, path: str | os.PathLike[str]) -> SimplePath:
return self._path.parent / path
@property
def _normalized_name(self):
"""
Performance optimization: where possible, resolve the
normalized name from the file system path.
"""
stem = os.path.basename(str(self._path))
return (
pass_none(Prepared.normalize)(self._name_from_stem(stem))
or super()._normalized_name
)
@staticmethod
def _name_from_stem(stem):
"""
>>> PathDistribution._name_from_stem('foo-3.0.egg-info')
'foo'
>>> PathDistribution._name_from_stem('CherryPy-3.0.dist-info')
'CherryPy'
>>> PathDistribution._name_from_stem('face.egg-info')
'face'
>>> PathDistribution._name_from_stem('foo.bar')
"""
filename, ext = os.path.splitext(stem)
if ext not in ('.dist-info', '.egg-info'):
return
name, sep, rest = filename.partition('-')
return name
def distribution(distribution_name: str) -> Distribution:
"""Get the ``Distribution`` instance for the named package.
:param distribution_name: The name of the distribution package as a string.
:return: A ``Distribution`` instance (or subclass thereof).
"""
return Distribution.from_name(distribution_name)
def distributions(**kwargs) -> Iterable[Distribution]:
"""Get all ``Distribution`` instances in the current environment.
:return: An iterable of ``Distribution`` instances.
"""
return Distribution.discover(**kwargs)
def metadata(distribution_name: str) -> _meta.PackageMetadata:
"""Get the metadata for the named package.
:param distribution_name: The name of the distribution package to query.
:return: A PackageMetadata containing the parsed metadata.
"""
return Distribution.from_name(distribution_name).metadata
def version(distribution_name: str) -> str:
"""Get the version string for the named package.
:param distribution_name: The name of the distribution package to query.
:return: The version string for the package as defined in the package's
"Version" metadata key.
"""
return distribution(distribution_name).version
_unique = functools.partial(
unique_everseen,
key=py39.normalized_name,
)
"""
Wrapper for ``distributions`` to return unique distributions by name.
"""
def entry_points(**params) -> EntryPoints:
"""Return EntryPoint objects for all installed packages.
Pass selection parameters (group or name) to filter the
result to entry points matching those properties (see
EntryPoints.select()).
:return: EntryPoints for all installed packages.
"""
eps = itertools.chain.from_iterable(
dist.entry_points for dist in _unique(distributions())
)
return EntryPoints(eps).select(**params)
def files(distribution_name: str) -> Optional[List[PackagePath]]:
"""Return a list of files for the named package.
:param distribution_name: The name of the distribution package to query.
:return: List of files composing the distribution.
"""
return distribution(distribution_name).files
def requires(distribution_name: str) -> Optional[List[str]]:
"""
Return a list of requirements for the named package.
:return: An iterable of requirements, suitable for
packaging.requirement.Requirement.
"""
return distribution(distribution_name).requires
def packages_distributions() -> Mapping[str, List[str]]:
"""
Return a mapping of top-level packages to their
distributions.
>>> import collections.abc
>>> pkgs = packages_distributions()
>>> all(isinstance(dist, collections.abc.Sequence) for dist in pkgs.values())
True
"""
pkg_to_dist = collections.defaultdict(list)
for dist in distributions():
for pkg in _top_level_declared(dist) or _top_level_inferred(dist):
pkg_to_dist[pkg].append(dist.metadata['Name'])
return dict(pkg_to_dist)
def _top_level_declared(dist):
return (dist.read_text('top_level.txt') or '').split()
def _topmost(name: PackagePath) -> Optional[str]:
"""
Return the top-most parent as long as there is a parent.
"""
top, *rest = name.parts
return top if rest else None
def _get_toplevel_name(name: PackagePath) -> str:
"""
Infer a possibly importable module name from a name presumed on
sys.path.
>>> _get_toplevel_name(PackagePath('foo.py'))
'foo'
>>> _get_toplevel_name(PackagePath('foo'))
'foo'
>>> _get_toplevel_name(PackagePath('foo.pyc'))
'foo'
>>> _get_toplevel_name(PackagePath('foo/__init__.py'))
'foo'
>>> _get_toplevel_name(PackagePath('foo.pth'))
'foo.pth'
>>> _get_toplevel_name(PackagePath('foo.dist-info'))
'foo.dist-info'
"""
# Defer import of inspect for performance (python/cpython#118761)
import inspect
return _topmost(name) or inspect.getmodulename(name) or str(name)
def _top_level_inferred(dist):
opt_names = set(map(_get_toplevel_name, always_iterable(dist.files)))
def importable_name(name):
return '.' not in name
return filter(importable_name, opt_names)
| PathDistribution |
python | viewflow__viewflow | tests/test_middleware.py | {
"start": 203,
"end": 374
} | class ____(Viewset):
app_name = "nested"
page_path = path(
"page/", TemplateView.as_view(template_name="viewflow/base.html"), name="page"
)
| NestedViewset |
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 131636,
"end": 132856
} | class ____(Response):
"""
Response of events.get_task_metrics endpoint.
:param metrics: List of task with their metrics
:type metrics: Sequence[dict]
"""
_service = "events"
_action = "get_task_metrics"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"metrics": {
"description": "List of task with their metrics",
"items": {"type": "object"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, metrics: Optional[List[dict]] = None, **kwargs: Any) -> None:
super(GetTaskMetricsResponse, self).__init__(**kwargs)
self.metrics = metrics
@schema_property("metrics")
def metrics(self) -> Optional[List[dict]]:
return self._property_metrics
@metrics.setter
def metrics(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_metrics = None
return
self.assert_isinstance(value, "metrics", (list, tuple))
self.assert_isinstance(value, "metrics", (dict,), is_array=True)
self._property_metrics = value
| GetTaskMetricsResponse |
python | fsspec__filesystem_spec | fsspec/implementations/data.py | {
"start": 98,
"end": 1627
} | class ____(AbstractFileSystem):
"""A handy decoder for data-URLs
Example
-------
>>> with fsspec.open("data:,Hello%2C%20World%21") as f:
... print(f.read())
b"Hello, World!"
See https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs
"""
protocol = "data"
def __init__(self, **kwargs):
"""No parameters for this filesystem"""
super().__init__(**kwargs)
def cat_file(self, path, start=None, end=None, **kwargs):
pref, data = path.split(",", 1)
if pref.endswith("base64"):
return base64.b64decode(data)[start:end]
return unquote(data).encode()[start:end]
def info(self, path, **kwargs):
pref, name = path.split(",", 1)
data = self.cat_file(path)
mime = pref.split(":", 1)[1].split(";", 1)[0]
return {"name": name, "size": len(data), "type": "file", "mimetype": mime}
def _open(
self,
path,
mode="rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
if "r" not in mode:
raise ValueError("Read only filesystem")
return io.BytesIO(self.cat_file(path))
@staticmethod
def encode(data: bytes, mime: str | None = None):
"""Format the given data into data-URL syntax
This version always base64 encodes, even when the data is ascii/url-safe.
"""
return f"data:{mime or ''};base64,{base64.b64encode(data).decode()}"
| DataFileSystem |
python | dateutil__dateutil | src/dateutil/tz/_factories.py | {
"start": 115,
"end": 426
} | class ____(type):
def __init__(cls, *args, **kwargs):
cls.__instance = None
super(_TzSingleton, cls).__init__(*args, **kwargs)
def __call__(cls):
if cls.__instance is None:
cls.__instance = super(_TzSingleton, cls).__call__()
return cls.__instance
| _TzSingleton |
python | apache__airflow | airflow-core/src/airflow/task/priority_strategy.py | {
"start": 2850,
"end": 3134
} | class ____(PriorityWeightStrategy):
"""Priority weight strategy that uses the task's priority weight directly."""
def get_weight(self, ti: TaskInstance):
if TYPE_CHECKING:
assert ti.task
return ti.task.priority_weight
| _AbsolutePriorityWeightStrategy |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/tensor_format.py | {
"start": 1153,
"end": 20279
} | class ____(object):
"""Options for highlighting elements of a tensor."""
def __init__(self,
criterion,
description=None,
font_attr=DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR):
"""Constructor of HighlightOptions.
Args:
criterion: (callable) A callable of the following signature:
def to_highlight(X):
# Args:
# X: The tensor to highlight elements in.
#
# Returns:
# (boolean ndarray) A boolean ndarray of the same shape as X
# indicating which elements are to be highlighted (iff True).
This callable will be used as the argument of np.argwhere() to
determine which elements of the tensor are to be highlighted.
description: (str) Description of the highlight criterion embodied by
criterion.
font_attr: (str) Font attribute to be applied to the
highlighted elements.
"""
self.criterion = criterion
self.description = description
self.font_attr = font_attr
def format_tensor(tensor,
tensor_label,
include_metadata=False,
auxiliary_message=None,
include_numeric_summary=False,
np_printoptions=None,
highlight_options=None):
"""Generate a RichTextLines object showing a tensor in formatted style.
Args:
tensor: The tensor to be displayed, as a numpy ndarray or other
appropriate format (e.g., None representing uninitialized tensors).
tensor_label: A label for the tensor, as a string. If set to None, will
suppress the tensor name line in the return value.
include_metadata: Whether metadata such as dtype and shape are to be
included in the formatted text.
auxiliary_message: An auxiliary message to display under the tensor label,
dtype and shape information lines.
include_numeric_summary: Whether a text summary of the numeric values (if
applicable) will be included.
np_printoptions: A dictionary of keyword arguments that are passed to a
call of np.set_printoptions() to set the text format for display numpy
ndarrays.
highlight_options: (HighlightOptions) options for highlighting elements
of the tensor.
Returns:
A RichTextLines object. Its annotation field has line-by-line markups to
indicate which indices in the array the first element of each line
corresponds to.
"""
lines = []
font_attr_segs = {}
if tensor_label is not None:
lines.append("Tensor \"%s\":" % tensor_label)
suffix = tensor_label.split(":")[-1]
if suffix.isdigit():
# Suffix is a number. Assume it is the output slot index.
font_attr_segs[0] = [(8, 8 + len(tensor_label), "bold")]
else:
# Suffix is not a number. It is auxiliary information such as the debug
# op type. In this case, highlight the suffix with a different color.
debug_op_len = len(suffix)
proper_len = len(tensor_label) - debug_op_len - 1
font_attr_segs[0] = [
(8, 8 + proper_len, "bold"),
(8 + proper_len + 1, 8 + proper_len + 1 + debug_op_len, "yellow")
]
if isinstance(tensor, debug_data.InconvertibleTensorProto):
if lines:
lines.append("")
lines.extend(str(tensor).split("\n"))
return debugger_cli_common.RichTextLines(lines)
elif not isinstance(tensor, np.ndarray):
# If tensor is not a np.ndarray, return simple text-line representation of
# the object without annotations.
if lines:
lines.append("")
lines.extend(repr(tensor).split("\n"))
return debugger_cli_common.RichTextLines(lines)
if include_metadata:
lines.append(" dtype: %s" % str(tensor.dtype))
lines.append(" shape: %s" % str(tensor.shape).replace("L", ""))
if lines:
lines.append("")
formatted = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
if auxiliary_message:
formatted.extend(auxiliary_message)
if include_numeric_summary:
formatted.append("Numeric summary:")
formatted.extend(numeric_summary(tensor))
formatted.append("")
# Apply custom string formatting options for numpy ndarray.
if np_printoptions is not None:
np.set_printoptions(**np_printoptions)
array_lines = repr(tensor).split("\n")
if tensor.dtype.type is not np.bytes_:
# Parse array lines to get beginning indices for each line.
# TODO(cais): Currently, we do not annotate string-type tensors due to
# difficulty in escaping sequences. Address this issue.
annotations = _annotate_ndarray_lines(
array_lines, tensor, np_printoptions=np_printoptions)
else:
annotations = None
formatted_array = debugger_cli_common.RichTextLines(
array_lines, annotations=annotations)
formatted.extend(formatted_array)
# Perform optional highlighting.
if highlight_options is not None:
indices_list = list(np.argwhere(highlight_options.criterion(tensor)))
total_elements = np.size(tensor)
highlight_summary = "Highlighted%s: %d of %d element(s) (%.2f%%)" % (
"(%s)" % highlight_options.description if highlight_options.description
else "", len(indices_list), total_elements,
len(indices_list) / float(total_elements) * 100.0)
formatted.lines[0] += " " + highlight_summary
if indices_list:
indices_list = [list(indices) for indices in indices_list]
are_omitted, rows, start_cols, end_cols = locate_tensor_element(
formatted, indices_list)
for is_omitted, row, start_col, end_col in zip(are_omitted, rows,
start_cols, end_cols):
if is_omitted or start_col is None or end_col is None:
continue
if row in formatted.font_attr_segs:
formatted.font_attr_segs[row].append(
(start_col, end_col, highlight_options.font_attr))
else:
formatted.font_attr_segs[row] = [(start_col, end_col,
highlight_options.font_attr)]
return formatted
def _annotate_ndarray_lines(
array_lines, tensor, np_printoptions=None, offset=0):
"""Generate annotations for line-by-line begin indices of tensor text.
Parse the numpy-generated text representation of a numpy ndarray to
determine the indices of the first element of each text line (if any
element is present in the line).
For example, given the following multi-line ndarray text representation:
["array([[ 0. , 0.0625, 0.125 , 0.1875],",
" [ 0.25 , 0.3125, 0.375 , 0.4375],",
" [ 0.5 , 0.5625, 0.625 , 0.6875],",
" [ 0.75 , 0.8125, 0.875 , 0.9375]])"]
the generate annotation will be:
{0: {BEGIN_INDICES_KEY: [0, 0]},
1: {BEGIN_INDICES_KEY: [1, 0]},
2: {BEGIN_INDICES_KEY: [2, 0]},
3: {BEGIN_INDICES_KEY: [3, 0]}}
Args:
array_lines: Text lines representing the tensor, as a list of str.
tensor: The tensor being formatted as string.
np_printoptions: A dictionary of keyword arguments that are passed to a
call of np.set_printoptions().
offset: Line number offset applied to the line indices in the returned
annotation.
Returns:
An annotation as a dict.
"""
if np_printoptions and "edgeitems" in np_printoptions:
edge_items = np_printoptions["edgeitems"]
else:
edge_items = _NUMPY_DEFAULT_EDGE_ITEMS
annotations = {}
# Put metadata about the tensor in the annotations["tensor_metadata"].
annotations["tensor_metadata"] = {
"dtype": tensor.dtype, "shape": tensor.shape}
dims = np.shape(tensor)
ndims = len(dims)
if ndims == 0:
# No indices for a 0D tensor.
return annotations
curr_indices = [0] * len(dims)
curr_dim = 0
for i, raw_line in enumerate(array_lines):
line = raw_line.strip()
if not line:
# Skip empty lines, which can appear for >= 3D arrays.
continue
if line == _NUMPY_OMISSION:
annotations[offset + i] = {OMITTED_INDICES_KEY: copy.copy(curr_indices)}
curr_indices[curr_dim - 1] = dims[curr_dim - 1] - edge_items
else:
num_lbrackets = line.count("[") # TODO(cais): String array escaping.
num_rbrackets = line.count("]")
curr_dim += num_lbrackets - num_rbrackets
annotations[offset + i] = {BEGIN_INDICES_KEY: copy.copy(curr_indices)}
if num_rbrackets == 0:
line_content = line[line.rfind("[") + 1:]
num_elements = line_content.count(",")
curr_indices[curr_dim - 1] += num_elements
else:
if curr_dim > 0:
curr_indices[curr_dim - 1] += 1
for k in range(curr_dim, ndims):
curr_indices[k] = 0
return annotations
def locate_tensor_element(formatted, indices):
  """Locate a tensor element in formatted text lines, given element indices.

  Given a RichTextLines object representing a tensor and indices of the sought
  element, return the row number at which the element is located (if exists).

  Args:
    formatted: A RichTextLines object containing formatted text lines
      representing the tensor.
    indices: Indices of the sought element, as a list of int or a list of list
      of int. The former case is for a single set of indices to look up,
      whereas the latter case is for looking up a batch of indices sets at once.
      In the latter case, the indices must be in ascending order, or a
      ValueError will be raised.

  Returns:
    1) A boolean indicating whether the element falls into an omitted line.
    2) Row index.
    3) Column start index, i.e., the first column in which the representation
       of the specified tensor starts, if it can be determined. If it cannot
       be determined (e.g., due to ellipsis), None.
    4) Column end index, i.e., the column right after the last column that
       represents the specified tensor. Iff it cannot be determined, None.

  For return values described above are based on a single set of indices to
  look up. In the case of batch mode (multiple sets of indices), the return
  values will be lists of the types described above.

  Raises:
    AttributeError: If:
      Input argument "formatted" does not have the required annotations.
    ValueError: If:
      1) Indices do not match the dimensions of the tensor, or
      2) Indices exceed sizes of the tensor, or
      3) Indices contain negative value(s).
      4) If in batch mode, and if not all sets of indices are in ascending
         order.
  """
  # Normalize single-lookup input into batch form; remember which form the
  # caller used so the return shape can match it.
  if isinstance(indices[0], list):
    indices_list = indices
    input_batch = True
  else:
    indices_list = [indices]
    input_batch = False

  # Check that tensor_metadata is available.
  if "tensor_metadata" not in formatted.annotations:
    raise AttributeError("tensor_metadata is not available in annotations.")

  # Sanity check on input argument.
  _validate_indices_list(indices_list, formatted)

  dims = formatted.annotations["tensor_metadata"]["shape"]
  batch_size = len(indices_list)
  lines = formatted.lines
  annot = formatted.annotations
  # State for the previously-seen annotated row. A sought element is resolved
  # one row late: only when the NEXT annotated row's begin-indices are known
  # can we tell that the element lies on the previous row.
  prev_r = 0
  prev_line = ""
  prev_indices = [0] * len(dims)

  # Initialize return values
  are_omitted = [None] * batch_size
  row_indices = [None] * batch_size
  start_columns = [None] * batch_size
  end_columns = [None] * batch_size

  batch_pos = 0  # Current position in the batch.
  for r in range(len(lines)):
    # Skip rows without index annotations. (The "tensor_metadata" annotation
    # key is a string and never collides with the integer row keys.)
    if r not in annot:
      continue

    if BEGIN_INDICES_KEY in annot[r]:
      indices_key = BEGIN_INDICES_KEY
    elif OMITTED_INDICES_KEY in annot[r]:
      indices_key = OMITTED_INDICES_KEY

    # Sought index sets that fall between the previous row's first element and
    # this row's first element must live on the previous row. The comparison
    # relies on Python's lexicographic ordering of index lists.
    matching_indices_list = [
        ind for ind in indices_list[batch_pos:]
        if prev_indices <= ind < annot[r][indices_key]
    ]

    if matching_indices_list:
      num_matches = len(matching_indices_list)
      match_start_columns, match_end_columns = _locate_elements_in_line(
          prev_line, matching_indices_list, prev_indices)

      # Write this batch of results into the contiguous slice of the output
      # lists; validated ascending order guarantees the slice is correct.
      start_columns[batch_pos:batch_pos + num_matches] = match_start_columns
      end_columns[batch_pos:batch_pos + num_matches] = match_end_columns
      are_omitted[batch_pos:batch_pos + num_matches] = [
          OMITTED_INDICES_KEY in annot[prev_r]
      ] * num_matches
      row_indices[batch_pos:batch_pos + num_matches] = [prev_r] * num_matches

      batch_pos += num_matches
      if batch_pos >= batch_size:
        break

    prev_r = r
    prev_line = lines[r]
    prev_indices = annot[r][indices_key]

  # Any index sets still unresolved after the scan must lie on the last
  # annotated row.
  if batch_pos < batch_size:
    matching_indices_list = indices_list[batch_pos:]
    num_matches = len(matching_indices_list)

    match_start_columns, match_end_columns = _locate_elements_in_line(
        prev_line, matching_indices_list, prev_indices)

    start_columns[batch_pos:batch_pos + num_matches] = match_start_columns
    end_columns[batch_pos:batch_pos + num_matches] = match_end_columns
    are_omitted[batch_pos:batch_pos + num_matches] = [
        OMITTED_INDICES_KEY in annot[prev_r]
    ] * num_matches
    row_indices[batch_pos:batch_pos + num_matches] = [prev_r] * num_matches

  if input_batch:
    return are_omitted, row_indices, start_columns, end_columns
  else:
    return are_omitted[0], row_indices[0], start_columns[0], end_columns[0]
def _validate_indices_list(indices_list, formatted):
prev_ind = None
for ind in indices_list:
# Check indices match tensor dimensions.
dims = formatted.annotations["tensor_metadata"]["shape"]
if len(ind) != len(dims):
raise ValueError("Dimensions mismatch: requested: %d; actual: %d" %
(len(ind), len(dims)))
# Check indices is within size limits.
for req_idx, siz in zip(ind, dims):
if req_idx >= siz:
raise ValueError("Indices exceed tensor dimensions.")
if req_idx < 0:
raise ValueError("Indices contain negative value(s).")
# Check indices are in ascending order.
if prev_ind and ind < prev_ind:
raise ValueError("Input indices sets are not in ascending order.")
prev_ind = ind
def _locate_elements_in_line(line, indices_list, ref_indices):
  """Find the column spans of sought elements within one formatted line.

  Args:
    line: (str) the line in which the elements are to be sought.
    indices_list: (list of list of int) index sets of the elements to search
      for. The sets are assumed unique and sorted in ascending order.
    ref_indices: (list of int) indices of the first element represented on
      the line, used as the reference point for offsets.

  Returns:
    start_columns: (list of int) start column index per element; None where
      the element could not be located.
    end_columns: (list of int) end column index per element; None where the
      element could not be located.
    A located element occupies the left-closed-right-open interval
    [start_column, end_column).
  """
  num_sought = len(indices_list)
  # How many elements into the line each sought element sits, relative to the
  # line's first element (only the innermost index differs within one line).
  element_offsets = [ind[-1] - ref_indices[-1] for ind in indices_list]
  starts = [None] * num_sought
  ends = [None] * num_sought

  # Elements at or beyond an ellipsis ("...") cannot be located.
  omission_at = line.find(_NUMPY_OMISSION)
  search_limit = omission_at if omission_at != -1 else len(line)

  pos = 0  # Index of the next sought element to resolve.
  for element_count, match in enumerate(re.finditer(_NUMBER_REGEX, line)):
    # Do not attempt to search beyond ellipsis.
    if match.start() > search_limit:
      break
    if element_count == element_offsets[pos]:
      starts[pos] = match.start()
      # Drop the trailing comma, right bracket, or whitespace.
      ends[pos] = match.end() - 1
      pos += 1
      if pos >= num_sought:
        break

  return starts, ends
def _pad_string_to_length(string, length):
return " " * (length - len(string)) + string
def numeric_summary(tensor):
  """Get a text summary of a numeric tensor.

  This summary is only available for numeric (int*, float*, complex*) and
  Boolean tensors.

  Args:
    tensor: (`numpy.ndarray`) the tensor value object to be summarized.

  Returns:
    The summary text as a `RichTextLines` object. If the type of `tensor` is not
    numeric or Boolean, a single-line `RichTextLines` object containing a
    warning message will reflect that.
  """

  def _counts_summary(counts, skip_zeros=True, total_count=None):
    """Format values as a two-row table.

    Args:
      counts: (list of (str, value) tuples) label/value pairs, one column per
        pair.
      skip_zeros: (bool) drop columns whose value is falsy (e.g. zero counts).
      total_count: (optional) if given, append a trailing "total" column.

    Returns:
      A two-line RichTextLines table: labels on the first row, values on the
      second.
    """
    if skip_zeros:
      counts = [(count_key, count_val) for count_key, count_val in counts
                if count_val]
    # First pass: find the widest cell so both rows use one common column
    # width (+1 leaves at least one leading space).
    max_common_len = 0
    for count_key, count_val in counts:
      count_val_str = str(count_val)
      common_len = max(len(count_key) + 1, len(count_val_str) + 1)
      max_common_len = max(common_len, max_common_len)

    # NOTE(review): relies on RichLine supporting += with a plain str —
    # presumably appends text; confirm against debugger_cli_common.
    key_line = debugger_cli_common.RichLine("|")
    val_line = debugger_cli_common.RichLine("|")
    for count_key, count_val in counts:
      count_val_str = str(count_val)
      key_line += _pad_string_to_length(count_key, max_common_len)
      val_line += _pad_string_to_length(count_val_str, max_common_len)
    key_line += " |"
    val_line += " |"

    if total_count is not None:
      total_key_str = "total"
      total_val_str = str(total_count)
      max_common_len = max(len(total_key_str) + 1, len(total_val_str))
      total_key_str = _pad_string_to_length(total_key_str, max_common_len)
      total_val_str = _pad_string_to_length(total_val_str, max_common_len)
      key_line += total_key_str + " |"
      val_line += total_val_str + " |"

    return debugger_cli_common.rich_text_lines_from_rich_line_list(
        [key_line, val_line])

  if not isinstance(tensor, np.ndarray) or not np.size(tensor):
    return debugger_cli_common.RichTextLines([
        "No numeric summary available due to empty tensor."])
  elif (np.issubdtype(tensor.dtype, np.floating) or
        np.issubdtype(tensor.dtype, np.complexfloating) or
        np.issubdtype(tensor.dtype, np.integer)):
    # The six buckets below partition all elements: "-"/"+" explicitly
    # exclude the infinities, and NaN comparisons are always False, so NaNs
    # only land in the "nan" bucket.
    counts = [
        ("nan", np.sum(np.isnan(tensor))),
        ("-inf", np.sum(np.isneginf(tensor))),
        ("-", np.sum(np.logical_and(
            tensor < 0.0, np.logical_not(np.isneginf(tensor))))),
        ("0", np.sum(tensor == 0.0)),
        ("+", np.sum(np.logical_and(
            tensor > 0.0, np.logical_not(np.isposinf(tensor))))),
        ("+inf", np.sum(np.isposinf(tensor)))]
    output = _counts_summary(counts, total_count=np.size(tensor))

    # Compute min/max/mean/std over the finite elements only, so that a few
    # NaN/inf entries do not poison the statistics.
    valid_array = tensor[
        np.logical_not(np.logical_or(np.isinf(tensor), np.isnan(tensor)))]
    if np.size(valid_array):
      stats = [
          ("min", np.min(valid_array)),
          ("max", np.max(valid_array)),
          ("mean", np.mean(valid_array)),
          ("std", np.std(valid_array))]
      output.extend(_counts_summary(stats, skip_zeros=False))
    return output
  elif tensor.dtype == np.bool_:
    counts = [
        ("False", np.sum(tensor == 0)),
        ("True", np.sum(tensor > 0)),]
    return _counts_summary(counts, total_count=np.size(tensor))
  else:
    return debugger_cli_common.RichTextLines([
        "No numeric summary available due to tensor dtype: %s." % tensor.dtype])
| HighlightOptions |
python | huggingface__transformers | src/transformers/models/timesformer/configuration_timesformer.py | {
"start": 788,
"end": 5568
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`TimesformerModel`]. It is used to instantiate a
TimeSformer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the TimeSformer
[facebook/timesformer-base-finetuned-k600](https://huggingface.co/facebook/timesformer-base-finetuned-k600)
architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_frames (`int`, *optional*, defaults to 8):
The number of frames in each video.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
attention_type (`str`, *optional*, defaults to `"divided_space_time"`):
The attention type to use. Must be one of `"divided_space_time"`, `"space_only"`, `"joint_space_time"`.
drop_path_rate (`float`, *optional*, defaults to 0):
The dropout ratio for stochastic depth.
Example:
```python
>>> from transformers import TimesformerConfig, TimesformerModel
>>> # Initializing a TimeSformer timesformer-base style configuration
>>> configuration = TimesformerConfig()
>>> # Initializing a model from the configuration
>>> model = TimesformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "timesformer"
def __init__(
self,
image_size=224,
patch_size=16,
num_channels=3,
num_frames=8,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-6,
qkv_bias=True,
attention_type="divided_space_time",
drop_path_rate=0,
**kwargs,
):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_frames = num_frames
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
self.attention_type = attention_type
self.drop_path_rate = drop_path_rate
__all__ = ["TimesformerConfig"]
| TimesformerConfig |
python | getsentry__sentry | src/sentry/notifications/notification_action/types.py | {
"start": 20071,
"end": 20781
} | class ____(Protocol):
"""Protocol for notification action forms since they have various inheritance layers and but all have the same __init__ signature"""
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
def is_valid(self) -> bool: ...
@property
def cleaned_data(self) -> dict[str, Any]: ...
@property
def errors(self) -> dict[str, Any]: ...
def _get_integrations(organization: Organization, provider: str) -> list[RpcIntegration]:
return integration_service.get_integrations(
organization_id=organization.id,
status=ObjectStatus.ACTIVE,
org_integration_status=ObjectStatus.ACTIVE,
providers=[provider],
)
| NotificationActionForm |
python | streamlit__streamlit | lib/tests/streamlit/watcher/util_test.py | {
"start": 777,
"end": 2650
} | class ____(unittest.TestCase):
def test_md5_calculation_succeeds_with_bytes_input(self):
with patch("streamlit.watcher.util.open", mock_open(read_data=b"hello")):
md5 = util.calc_md5_with_blocking_retries("foo")
assert md5 == "5d41402abc4b2a76b9719d911017c592"
@patch("os.path.isdir", MagicMock(return_value=True))
@patch("streamlit.watcher.util._stable_dir_identifier")
def test_md5_calculation_succeeds_with_dir_input(self, mock_stable_dir_identifier):
mock_stable_dir_identifier.return_value = "hello"
md5 = util.calc_md5_with_blocking_retries("foo")
assert md5 == "5d41402abc4b2a76b9719d911017c592"
mock_stable_dir_identifier.assert_called_once_with("foo", "*")
@patch("os.path.isdir", MagicMock(return_value=True))
@patch("streamlit.watcher.util._stable_dir_identifier")
def test_md5_calculation_can_pass_glob(self, mock_stable_dir_identifier):
mock_stable_dir_identifier.return_value = "hello"
util.calc_md5_with_blocking_retries("foo", glob_pattern="*.py")
mock_stable_dir_identifier.assert_called_once_with("foo", "*.py")
@patch("os.path.exists", MagicMock(return_value=False))
def test_md5_calculation_allow_nonexistent(self):
md5 = util.calc_md5_with_blocking_retries("hello", allow_nonexistent=True)
assert md5 == "5d41402abc4b2a76b9719d911017c592"
def test_md5_calculation_opens_file_with_rb(self):
# This tests implementation :( . But since the issue this is addressing
# could easily come back to bite us if a distracted coder tweaks the
# implementation, I'm putting this here anyway.
with patch("streamlit.watcher.util.open", mock_open(read_data=b"hello")) as m:
util.calc_md5_with_blocking_retries("foo")
m.assert_called_once_with("foo", "rb")
| UtilTest |
python | buildout__buildout | src/zc/buildout/testing.py | {
"start": 12213,
"end": 12528
} | class ____(HTTPServer):
def __init__(self, tree, *args):
HTTPServer.__init__(self, *args)
self.tree = os.path.abspath(tree)
__run = True
def serve_forever(self):
while self.__run:
self.handle_request()
def handle_error(self, *_):
self.__run = False
| Server |
python | numba__numba | numba/cuda/stubs.py | {
"start": 4105,
"end": 4267
} | class ____(Stub):
'''
syncwarp(mask=0xFFFFFFFF)
Synchronizes a masked subset of threads in a warp.
'''
_description_ = '<warp_sync()>'
| syncwarp |
python | huggingface__transformers | src/transformers/models/starcoder2/modular_starcoder2.py | {
"start": 5630,
"end": 6102
} | class ____(MistralDecoderLayer):
def __init__(self, config: Starcoder2Config, layer_idx: int):
super().__init__(config, layer_idx)
self.self_attn = Starcoder2Attention(config=config, layer_idx=layer_idx)
self.mlp = Starcoder2MLP(config)
self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
| Starcoder2DecoderLayer |
python | ray-project__ray | python/ray/serve/tests/unit/test_application_state.py | {
"start": 1563,
"end": 1874
} | class ____:
def __init__(self):
self.endpoints = dict()
def update_endpoint(self, endpoint, endpoint_info):
self.endpoints[endpoint] = endpoint_info
def delete_endpoint(self, endpoint):
if endpoint in self.endpoints:
del self.endpoints[endpoint]
| MockEndpointState |
python | gevent__gevent | src/gevent/tests/test__signal.py | {
"start": 102,
"end": 285
} | class ____(Exception):
pass
def raise_Expected():
raise Expected('TestSignal')
@greentest.skipUnless(hasattr(signal, 'SIGALRM'),
"Uses SIGALRM")
| Expected |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 193086,
"end": 194033
} | class ____(Operation):
def __init__(self, axis1, axis2, *, name=None):
super().__init__(name=name)
self.axis1 = axis1
self.axis2 = axis2
def call(self, x):
return backend.numpy.swapaxes(x, self.axis1, self.axis2)
def compute_output_spec(self, x):
x_shape = list(x.shape)
tmp = x_shape[self.axis1]
x_shape[self.axis1] = x_shape[self.axis2]
x_shape[self.axis2] = tmp
return KerasTensor(x_shape, dtype=x.dtype)
@keras_export(["keras.ops.swapaxes", "keras.ops.numpy.swapaxes"])
def swapaxes(x, axis1, axis2):
"""Interchange two axes of a tensor.
Args:
x: Input tensor.
axis1: First axis.
axis2: Second axis.
Returns:
A tensor with the axes swapped.
"""
if any_symbolic_tensors((x,)):
return Swapaxes(axis1, axis2).symbolic_call(x)
return backend.numpy.swapaxes(x, axis1=axis1, axis2=axis2)
| Swapaxes |
python | ansible__ansible | lib/ansible/galaxy/collection/gpg.py | {
"start": 4498,
"end": 4667
} | class ____(GpgBaseError):
"""The signature with the keyid has not been verified okay."""
keyid: str
username: str
@dataclass(frozen=True, slots=True)
| GpgBadSig |
python | pytorch__pytorch | benchmarks/functional_autograd_benchmark/torchvision_models.py | {
"start": 16348,
"end": 21856
} | class ____(nn.Module):
"""
Demo DETR implementation.
Demo implementation of DETR in minimal number of lines, with the
following differences wrt DETR in the paper:
* learned positional encoding (instead of sine)
* positional encoding is passed at input (instead of attention)
* fc bbox predictor (instead of MLP)
The model achieves ~40 AP on COCO val5k and runs at ~28 FPS on Tesla V100.
Only batch size 1 supported.
"""
def __init__(
self,
num_classes,
hidden_dim=256,
nheads=8,
num_encoder_layers=6,
num_decoder_layers=6,
):
super().__init__()
# create ResNet-50 backbone
self.backbone = resnet50()
del self.backbone.fc
# create conversion layer
self.conv = nn.Conv2d(2048, hidden_dim, 1)
# create a default PyTorch transformer
self.transformer = nn.Transformer(
hidden_dim, nheads, num_encoder_layers, num_decoder_layers
)
# prediction heads, one extra class for predicting non-empty slots
# note that in baseline DETR linear_bbox layer is 3-layer MLP
self.linear_class = nn.Linear(hidden_dim, num_classes + 1)
self.linear_bbox = nn.Linear(hidden_dim, 4)
# output positional encodings (object queries)
self.query_pos = nn.Parameter(torch.rand(100, hidden_dim))
# spatial positional encodings
# note that in baseline DETR we use sine positional encodings
self.row_embed = nn.Parameter(torch.rand(50, hidden_dim // 2))
self.col_embed = nn.Parameter(torch.rand(50, hidden_dim // 2))
def forward(self, inputs):
# propagate inputs through ResNet-50 up to avg-pool layer
x = self.backbone.conv1(inputs)
x = self.backbone.bn1(x)
x = self.backbone.relu(x)
x = self.backbone.maxpool(x)
x = self.backbone.layer1(x)
x = self.backbone.layer2(x)
x = self.backbone.layer3(x)
x = self.backbone.layer4(x)
# convert from 2048 to 256 feature planes for the transformer
h = self.conv(x)
# construct positional encodings
H, W = h.shape[-2:]
pos = (
torch.cat(
[
self.col_embed[:W].unsqueeze(0).repeat(H, 1, 1),
self.row_embed[:H].unsqueeze(1).repeat(1, W, 1),
],
dim=-1,
)
.flatten(0, 1)
.unsqueeze(1)
)
# propagate through the transformer
# TODO (alband) Why this is not automatically broadcasted? (had to add the repeat)
f = pos + 0.1 * h.flatten(2).permute(2, 0, 1)
s = self.query_pos.unsqueeze(1)
s = s.expand(s.size(0), inputs.size(0), s.size(2))
h = self.transformer(f, s).transpose(0, 1)
# finally project transformer outputs to class labels and bounding boxes
return {
"pred_logits": self.linear_class(h),
"pred_boxes": self.linear_bbox(h).sigmoid(),
}
def generalized_box_iou(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/
The boxes should be in [x0, y0, x1, y1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
# degenerate boxes gives inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
return iou - (area - union) / area
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_area(boxes):
"""
Computes the area of a set of bounding boxes, which are specified by its
(x1, y1, x2, y2) coordinates.
Args:
boxes (Tensor[N, 4]): boxes for which the area will be computed. They
are expected to be in (x1, y1, x2, y2) format
Returns:
area (Tensor[N]): area for each box
"""
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
wh = (rb - lt).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
iou = inter / union
return iou, union
def is_dist_avail_and_initialized():
return False
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
| DETR |
python | openai__openai-python | src/openai/types/vector_store_deleted.py | {
"start": 194,
"end": 307
} | class ____(BaseModel):
id: str
deleted: bool
object: Literal["vector_store.deleted"]
| VectorStoreDeleted |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/pipes/clients/lambda_.py | {
"start": 758,
"end": 4270
} | class ____(PipesClient, TreatAsResourceParam):
"""A pipes client for invoking AWS lambda.
By default context is injected via the lambda input event and messages are parsed out of the
4k tail of logs.
Args:
client (boto3.client): The boto lambda client used to call invoke.
context_injector (Optional[PipesContextInjector]): A context injector to use to inject
context into the lambda function. Defaults to :py:class:`PipesLambdaEventContextInjector`.
message_reader (Optional[PipesMessageReader]): A message reader to use to read messages
from the lambda function. Defaults to :py:class:`PipesLambdaLogsMessageReader`.
"""
def __init__(
self,
client: Optional[boto3.client] = None, # pyright: ignore (reportGeneralTypeIssues)
context_injector: Optional[PipesContextInjector] = None,
message_reader: Optional[PipesMessageReader] = None,
):
self._client = client or boto3.client("lambda")
self._message_reader = message_reader or PipesLambdaLogsMessageReader()
self._context_injector = context_injector or PipesLambdaEventContextInjector()
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
@public
def run( # pyright: ignore[reportIncompatibleMethodOverride]
self,
*,
function_name: str,
event: Mapping[str, Any],
context: Union[OpExecutionContext, AssetExecutionContext],
) -> PipesClientCompletedInvocation:
"""Synchronously invoke a lambda function, enriched with the pipes protocol.
Args:
function_name (str): The name of the function to use.
event (Mapping[str, Any]): A JSON serializable object to pass as input to the lambda.
context (Union[OpExecutionContext, AssetExecutionContext]): The context of the currently executing Dagster op or asset.
Returns:
PipesClientCompletedInvocation: Wrapper containing results reported by the external
process.
"""
with open_pipes_session(
context=context,
message_reader=self._message_reader,
context_injector=self._context_injector,
) as session:
other_kwargs = {}
if isinstance(self._message_reader, PipesLambdaLogsMessageReader):
other_kwargs["LogType"] = "Tail"
if isinstance(self._context_injector, PipesLambdaEventContextInjector):
payload_data = {
**event,
**session.get_bootstrap_env_vars(),
}
else:
payload_data = event
response = self._client.invoke(
FunctionName=function_name,
InvocationType="RequestResponse",
Payload=json.dumps(payload_data),
**other_kwargs,
)
if isinstance(self._message_reader, PipesLambdaLogsMessageReader):
self._message_reader.consume_lambda_logs(response)
if "FunctionError" in response:
err_payload = json.loads(response["Payload"].read().decode("utf-8"))
raise Exception(
f"Lambda Function Error ({response['FunctionError']}):\n{json.dumps(err_payload, indent=2)}"
)
# should probably have a way to return the lambda result payload
return PipesClientCompletedInvocation(session)
| PipesLambdaClient |
python | google__pytype | pytype/abstract/mixin.py | {
"start": 8640,
"end": 10394
} | class ____(metaclass=MixinMeta):
"""Use lazy loading for the attributes of the represented value.
A class that mixes in LazyMembers must:
* pass init_mixin a dict of the raw attribute values. This will be stored as
the `_member_map` attribute.
* Define a `members` attribute to be a name->attribute dictionary.
* Implement a `_convert_member` method that processes a raw attribute into
an abstract value to store in `members`.
When accessing an attribute on a lazy value, the caller must first call
`load_lazy_attribute(name)` to ensure the attribute is loaded. Calling
`_convert_member` directly should be avoided! Doing so will create multiple
copies of the same attribute, leading to subtle bugs.
"""
members: dict[str, cfg.Variable]
def init_mixin(self, member_map: dict[str, cfg.Variable]) -> None:
self._member_map = member_map
def _convert_member(self, name: str, member, subst=None) -> cfg.Variable:
raise NotImplementedError()
def load_lazy_attribute(self, name, subst=None, store=True):
"""Load the named attribute into self.members."""
if name in self.members or name not in self._member_map:
return self.members.get(name)
member = self._member_map[name]
variable = self._convert_member(name, member, subst)
assert isinstance(variable, cfg.Variable)
# 'subst' can vary between attribute accesses, so it's not safe to store the
# attribute value in 'members' if it uses any of the subst keys.
if store and not (
isinstance(member, pytd.Node)
and subst
and any(
t.full_name in subst for t in pytd_utils.GetTypeParameters(member)
)
):
self.members[name] = variable
return variable
| LazyMembers |
python | huggingface__transformers | src/transformers/models/udop/modeling_udop.py | {
"start": 33198,
"end": 37059
} | class ____(GradientCheckpointingLayer):
def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None):
super().__init__()
self.is_decoder = config.is_decoder
self.layer = nn.ModuleList()
self.layer.append(
UdopLayerSelfAttention(
config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx
)
)
if self.is_decoder:
self.layer.append(UdopLayerCrossAttention(config, layer_idx=layer_idx))
self.layer.append(UdopLayerFF(config))
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
past_key_values=None,
use_cache=False,
output_attentions=False,
return_dict=True,
cache_position=None,
):
self_attention_outputs = self.layer[0](
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = self_attention_outputs[0]
attention_outputs = self_attention_outputs[1:] # Keep self-attention outputs and relative position weights
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(
torch.isinf(hidden_states).any(),
torch.finfo(hidden_states.dtype).max - 1000,
torch.finfo(hidden_states.dtype).max,
)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
do_cross_attention = self.is_decoder and encoder_hidden_states is not None
if do_cross_attention:
cross_attention_outputs = self.layer[1](
hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
past_key_values=past_key_values,
query_length=cache_position[-1] + 1,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = cross_attention_outputs[0]
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(
torch.isinf(hidden_states).any(),
torch.finfo(hidden_states.dtype).max - 1000,
torch.finfo(hidden_states.dtype).max,
)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
# Keep cross-attention outputs and relative position weights
attention_outputs = attention_outputs + cross_attention_outputs[1:]
# Apply Feed Forward layer
hidden_states = self.layer[-1](hidden_states)
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(
torch.isinf(hidden_states).any(),
torch.finfo(hidden_states.dtype).max - 1000,
torch.finfo(hidden_states.dtype).max,
)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
return (
outputs + attention_outputs
) # hidden-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
| UdopBlock |
python | pennersr__django-allauth | allauth/headless/socialaccount/inputs.py | {
"start": 1503,
"end": 4727
} | class ____(inputs.Input):
provider = inputs.CharField()
process = inputs.ChoiceField(
choices=[
(AuthProcess.LOGIN, AuthProcess.LOGIN),
(AuthProcess.CONNECT, AuthProcess.CONNECT),
]
)
token = inputs.Field()
def clean(self):
cleaned_data = super().clean()
token = self.data.get("token")
adapter = get_adapter()
if not isinstance(token, dict):
self.add_error("token", adapter.validation_error("invalid_token"))
token = None
provider_id = cleaned_data.get("provider")
provider = None
if provider_id and token:
provider_class = registry.get_class(provider_id)
# If `provider_id` is a sub provider ID we won't find it by class.
client_id_required = provider_class is None or provider_class.uses_apps
client_id = token.get("client_id")
if client_id_required and not isinstance(client_id, str):
self.add_error("token", adapter.validation_error("client_id_required"))
else:
try:
provider = get_socialaccount_adapter().get_provider(
context.request, provider_id, client_id=client_id
)
except SocialApp.DoesNotExist:
self.add_error("token", adapter.validation_error("invalid_token"))
else:
if not provider.supports_token_authentication:
self.add_error(
"provider",
adapter.validation_error(
"token_authentication_not_supported"
),
)
elif (
provider.uses_apps
and client_id
and provider.app.client_id != client_id
):
self.add_error(
"token", adapter.validation_error("client_id_mismatch")
)
else:
id_token = token.get("id_token")
access_token = token.get("access_token")
if (
(id_token is not None and not isinstance(id_token, str))
or (
access_token is not None
and not isinstance(access_token, str)
)
or (not id_token and not access_token)
):
self.add_error(
"token", adapter.validation_error("token_required")
)
if not self.errors:
cleaned_data["provider"] = provider
try:
login = provider.verify_token(context.request, token)
login.state["process"] = cleaned_data["process"]
cleaned_data["sociallogin"] = login
except ValidationError as e:
self.add_error("token", e)
return cleaned_data
| ProviderTokenInput |
python | Pylons__pyramid | tests/test_session.py | {
"start": 17963,
"end": 19396
} | class ____(unittest.TestCase):
def _makeOne(self, wrapped):
from pyramid.session import manage_accessed
return manage_accessed(wrapped)
def test_accessed_set(self):
request = testing.DummyRequest()
session = DummySessionFactory(request)
session.renewed = 0
wrapper = self._makeOne(session.__class__.get)
wrapper(session, 'a')
self.assertNotEqual(session.accessed, None)
self.assertTrue(session._dirty)
def test_accessed_without_renew(self):
import time
request = testing.DummyRequest()
session = DummySessionFactory(request)
session._reissue_time = 5
session.renewed = time.time()
wrapper = self._makeOne(session.__class__.get)
wrapper(session, 'a')
self.assertNotEqual(session.accessed, None)
self.assertFalse(session._dirty)
def test_already_dirty(self):
request = testing.DummyRequest()
session = DummySessionFactory(request)
session.renewed = 0
session._dirty = True
session['a'] = 1
wrapper = self._makeOne(session.__class__.get)
self.assertEqual(wrapper.__doc__, session.get.__doc__)
result = wrapper(session, 'a')
self.assertEqual(result, 1)
callbacks = request.response_callbacks
if callbacks is not None:
self.assertEqual(len(callbacks), 0)
| Test_manage_accessed |
python | huggingface__transformers | src/transformers/modeling_gguf_pytorch_utils.py | {
"start": 1939,
"end": 3270
} | class ____(TensorProcessor):
def __init__(self, config=None):
super().__init__(config=config)
def process(self, weights, name, **kwargs):
if ".attn_k." in name or ".attn_q." in name:
num_heads = self.config.get("num_attention_heads")
num_kv_heads = self.config.get("num_key_value_heads")
if None in (num_heads, num_kv_heads):
return GGUFTensor(weights, name, {})
if ".attn_q." in name:
weights = self._reverse_permute_weights(weights, num_heads, num_heads)
elif ".attn_k." in name:
weights = self._reverse_permute_weights(weights, num_heads, num_kv_heads)
return GGUFTensor(weights, name, {})
def _reverse_permute_weights(
self, weights: np.ndarray, n_head: int, num_kv_heads: Optional[int] = None
) -> np.ndarray:
# Original permutation implementation
# https://github.com/ggerganov/llama.cpp/blob/a38b884c6c4b0c256583acfaaabdf556c62fabea/convert_hf_to_gguf.py#L1402-L1408
if num_kv_heads is not None and n_head != num_kv_heads:
n_head = num_kv_heads
dim = weights.shape[0] // n_head // 2
w = weights.reshape(n_head, dim, 2, *weights.shape[1:])
return w.swapaxes(2, 1).reshape(weights.shape)
| LlamaTensorProcessor |
python | pytorch__pytorch | torch/_dynamo/variables/ctx_manager.py | {
"start": 12911,
"end": 14280
} | class ____(ContextWrappingVariable):
"""represents torch.autograd.forward_ad._set_fwd_grad_enabled() to enable/disable fwd grad"""
@staticmethod
def create(
tx: "InstructionTranslator", target_values: Any, **kwargs: Any
) -> "SetFwdGradEnabledContextManager":
return SetFwdGradEnabledContextManager(
target_values=target_values,
initial_values=None,
**kwargs,
)
def enter(self, tx: "InstructionTranslator") -> VariableTracker:
[mode] = self.target_values
self.prev_state = torch._C._is_fwd_grad_enabled()
torch._C._set_fwd_grad_enabled(mode)
self.set_cleanup_hook(
tx,
lambda: torch._C._set_fwd_grad_enabled(self.prev_state),
)
self.proxy = tx.output.create_node(
"call_function",
torch._C._set_fwd_grad_enabled,
(mode,),
{},
)
return variables.ConstantVariable.create(None)
def exit(
self, tx: "InstructionTranslator", *args: VariableTracker
) -> VariableTracker:
self.cleanup()
tx.output.create_node(
"call_function",
torch._C._set_fwd_grad_enabled,
(self.prev_state,),
{},
)
return variables.ConstantVariable.create(None)
| SetFwdGradEnabledContextManager |
python | scikit-learn__scikit-learn | examples/cluster/plot_inductive_clustering.py | {
"start": 1765,
"end": 3953
} | class ____(BaseEstimator):
def __init__(self, clusterer, classifier):
self.clusterer = clusterer
self.classifier = classifier
def fit(self, X, y=None):
self.clusterer_ = clone(self.clusterer)
self.classifier_ = clone(self.classifier)
y = self.clusterer_.fit_predict(X)
self.classifier_.fit(X, y)
return self
@available_if(_classifier_has("predict"))
def predict(self, X):
check_is_fitted(self)
return self.classifier_.predict(X)
@available_if(_classifier_has("decision_function"))
def decision_function(self, X):
check_is_fitted(self)
return self.classifier_.decision_function(X)
def plot_scatter(X, color, alpha=0.5):
return plt.scatter(X[:, 0], X[:, 1], c=color, alpha=alpha, edgecolor="k")
# Generate some training data from clustering
X, y = make_blobs(
n_samples=N_SAMPLES,
cluster_std=[1.0, 1.0, 0.5],
centers=[(-5, -5), (0, 0), (5, 5)],
random_state=RANDOM_STATE,
)
# Train a clustering algorithm on the training data and get the cluster labels
clusterer = AgglomerativeClustering(n_clusters=3)
cluster_labels = clusterer.fit_predict(X)
plt.figure(figsize=(12, 4))
plt.subplot(131)
plot_scatter(X, cluster_labels)
plt.title("Ward Linkage")
# Generate new samples and plot them along with the original dataset
X_new, y_new = make_blobs(
n_samples=10, centers=[(-7, -1), (-2, 4), (3, 6)], random_state=RANDOM_STATE
)
plt.subplot(132)
plot_scatter(X, cluster_labels)
plot_scatter(X_new, "black", 1)
plt.title("Unknown instances")
# Declare the inductive learning model that it will be used to
# predict cluster membership for unknown instances
classifier = RandomForestClassifier(random_state=RANDOM_STATE)
inductive_learner = InductiveClusterer(clusterer, classifier).fit(X)
probable_clusters = inductive_learner.predict(X_new)
ax = plt.subplot(133)
plot_scatter(X, cluster_labels)
plot_scatter(X_new, probable_clusters)
# Plotting decision regions
DecisionBoundaryDisplay.from_estimator(
inductive_learner, X, response_method="predict", alpha=0.4, ax=ax
)
plt.title("Classify unknown instances")
plt.show()
| InductiveClusterer |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper.py | {
"start": 14698,
"end": 15064
} | class ____(WrapperLine):
wrapper: PythonWrapperCodegen
node: ir.Conditional
def codegen(self, code: IndentedBuffer) -> None:
raise NotImplementedError("Only supports FX codegen")
@staticmethod
def codegen_fx(converter: FxConverter) -> FxConversionFunc:
return converter._generate_conditional
@dataclasses.dataclass
| ConditionalLine |
python | walkccc__LeetCode | solutions/3410. Maximize Subarray Sum After Removing All Occurrences of One Element/3410-2.py | {
"start": 0,
"end": 873
} | class ____:
def maxSubarraySum(self, nums: list[int]) -> int:
ans = max(nums)
prefix = 0
minPrefix = 0
# the minimum prefix sum that can have a negative number removed
modifiedMinPrefix = 0
count = collections.Counter()
# minPrefixPlusRemoval[num] := the minimum prefix sum plus removed `num`
minPrefixPlusRemoval = {}
for num in nums:
prefix += num
ans = max(ans, prefix - modifiedMinPrefix)
if num < 0:
count[num] += 1
minPrefixPlusRemoval[num] = (
min(minPrefixPlusRemoval.get(num, 0), minPrefix) + num)
modifiedMinPrefix = min(modifiedMinPrefix,
count[num] * num,
minPrefixPlusRemoval[num])
minPrefix = min(minPrefix, prefix)
modifiedMinPrefix = min(modifiedMinPrefix, minPrefix)
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/groupvit/modeling_groupvit.py | {
"start": 28913,
"end": 30910
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: GroupViTConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = GroupViTAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = GroupViTMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
causal_attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
`(config.encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
causal_attention_mask=causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
@auto_docstring
| GroupViTEncoderLayer |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 30514,
"end": 31026
} | class ____(Structure):
_fields_ = (
("module_name", p_uint32),
("iextdefsym", p_uint32),
("nextdefsym", p_uint32),
("irefsym", p_uint32),
("nrefsym", p_uint32),
("ilocalsym", p_uint32),
("nlocalsym", p_uint32),
("iextrel", p_uint32),
("nextrel", p_uint32),
("iinit_iterm", p_uint32),
("ninit_nterm", p_uint32),
("objc_module_info_size", p_uint32),
("objc_module_info_addr", p_uint64),
)
| dylib_module_64 |
python | conda__conda | conda/models/records.py | {
"start": 18809,
"end": 20958
} | class ____(PackageRecord):
"""Representation of a package that has been downloaded or unpacked in the local package cache.
Specialization of :class:`PackageRecord` that adds information for packages that exist in the
local package cache, either as the downloaded package file, or unpacked in its own package dir,
or both.
Note that this class does not add new fields to the :attr:`PackageRecord._pkey` so that a pure
:class:`PackageRecord` object that has the same ``_pkey`` fields as a different
:class:`PackageCacheRecord` object (or, indeed, a :class:`PrefixRecord` object) will be considered
equal and will produce the same hash.
"""
#: str: Full path to the local package file.
package_tarball_full_path = StringField()
#: str: Full path to the local extracted package.
extracted_package_dir = StringField()
#: str: The md5 checksum of the package.
#:
#: If the package file exists locally, this class can calculate a missing checksum on-the-fly.
md5 = Md5Field()
@property
def is_fetched(self):
"""bool: Whether the package file exists locally."""
from ..gateways.disk.read import isfile
return isfile(self.package_tarball_full_path)
@property
def is_extracted(self):
"""bool: Whether the package has been extracted locally."""
from ..gateways.disk.read import isdir, isfile
epd = self.extracted_package_dir
return isdir(epd) and isfile(join(epd, "info", "index.json"))
@property
def tarball_basename(self):
"""str: The basename of the local package file."""
return basename(self.package_tarball_full_path)
def _calculate_md5sum(self):
memoized_md5 = getattr(self, "_memoized_md5", None)
if memoized_md5:
return memoized_md5
from os.path import isfile
if isfile(self.package_tarball_full_path):
from ..gateways.disk.read import compute_sum
md5sum = compute_sum(self.package_tarball_full_path, "md5")
setattr(self, "_memoized_md5", md5sum)
return md5sum
| PackageCacheRecord |
python | pytorch__pytorch | test/dynamo/test_dicts.py | {
"start": 814,
"end": 856
} | class ____(UserDict):
pass
| DummyUserDict |
python | wandb__wandb | wandb/integration/openai/resolver.py | {
"start": 466,
"end": 654
} | class ____:
usage: UsageMetrics = None
stats: wandb.Table = None
trace: trace_tree.WBTraceTree = None
usage_metric_keys = {f"usage/{k}" for k in asdict(UsageMetrics())}
| Metrics |
python | kamyu104__LeetCode-Solutions | Python/maximal-score-after-applying-k-operations.py | {
"start": 731,
"end": 1230
} | class ____(object):
def maxKelements(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def ceil_divide(a, b):
return (a+b-1)//b
result = 0
for i, x in enumerate(nums):
nums[i] = -x
heapq.heapify(nums)
for _ in xrange(k):
x = -heapq.heappop(nums)
result += x
heapq.heappush(nums, -ceil_divide(x, 3))
return result
| Solution2 |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_function_base.py | {
"start": 82413,
"end": 85756
} | class ____(TestCase):
def test_simple(self):
# Condition is single bool list
x = piecewise([0, 0], [True, False], [1])
assert_array_equal(x, [1, 0])
# List of conditions: single bool list
x = piecewise([0, 0], [[True, False]], [1])
assert_array_equal(x, [1, 0])
# Conditions is single bool array
x = piecewise([0, 0], np.array([True, False]), [1])
assert_array_equal(x, [1, 0])
# Condition is single int array
x = piecewise([0, 0], np.array([1, 0]), [1])
assert_array_equal(x, [1, 0])
# List of conditions: int array
x = piecewise([0, 0], [np.array([1, 0])], [1])
assert_array_equal(x, [1, 0])
x = piecewise([0, 0], [[False, True]], [lambda x: -1])
assert_array_equal(x, [0, -1])
assert_raises_regex(
ValueError,
"1 or 2 functions are expected",
piecewise,
[0, 0],
[[False, True]],
[],
)
assert_raises_regex(
ValueError,
"1 or 2 functions are expected",
piecewise,
[0, 0],
[[False, True]],
[1, 2, 3],
)
def test_two_conditions(self):
x = piecewise([1, 2], [[True, False], [False, True]], [3, 4])
assert_array_equal(x, [3, 4])
def test_scalar_domains_three_conditions(self):
x = piecewise(3, [True, False, False], [4, 2, 0])
assert_equal(x, 4)
def test_default(self):
# No value specified for x[1], should be 0
x = piecewise([1, 2], [True, False], [2])
assert_array_equal(x, [2, 0])
# Should set x[1] to 3
x = piecewise([1, 2], [True, False], [2, 3])
assert_array_equal(x, [2, 3])
def test_0d(self):
x = np.array(3)
y = piecewise(x, x > 3, [4, 0])
assert_(y.ndim == 0)
assert_(y == 0)
x = 5
y = piecewise(x, [True, False], [1, 0])
assert_(y.ndim == 0)
assert_(y == 1)
# With 3 ranges (It was failing, before)
y = piecewise(x, [False, False, True], [1, 2, 3])
assert_array_equal(y, 3)
def test_0d_comparison(self):
x = 3
y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed.
assert_equal(y, 4)
# With 3 ranges (It was failing, before)
x = 4
y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3])
assert_array_equal(y, 2)
assert_raises_regex(
ValueError,
"2 or 3 functions are expected",
piecewise,
x,
[x <= 3, x > 3],
[1],
)
assert_raises_regex(
ValueError,
"2 or 3 functions are expected",
piecewise,
x,
[x <= 3, x > 3],
[1, 1, 1, 1],
)
def test_0d_0d_condition(self):
x = np.array(3)
c = np.array(x > 3)
y = piecewise(x, [c], [1, 2])
assert_equal(y, 2)
def test_multidimensional_extrafunc(self):
x = np.array([[-2.5, -1.5, -0.5], [0.5, 1.5, 2.5]])
y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3])
assert_array_equal(y, np.array([[-1.0, -1.0, -1.0], [3.0, 3.0, 1.0]]))
@instantiate_parametrized_tests
| TestPiecewise |
python | PrefectHQ__prefect | src/prefect/_experimental/sla/client.py | {
"start": 1936,
"end": 3611
} | class ____(BaseAsyncClient):
async def apply_slas_for_deployment(
self, deployment_id: "UUID", slas: "list[SlaTypes]"
) -> "UUID":
"""
Applies service level agreements for a deployment. Performs matching by SLA name. If a SLA with the same name already exists, it will be updated. If a SLA with the same name does not exist, it will be created. Existing SLAs that are not in the list will be deleted.
Args:
deployment_id: The ID of the deployment to update SLAs for
slas: List of SLAs to associate with the deployment
Raises:
httpx.RequestError: if the SLAs were not updated for any reason
Returns:
SlaMergeResponse: The response from the backend, containing the names of the created, updated, and deleted SLAs
"""
resource_id = f"prefect.deployment.{deployment_id}"
for sla in slas:
sla.set_deployment_id(deployment_id)
slas_spec_list = [
sla.model_dump(mode="json", exclude_unset=True) for sla in slas
]
response = await self.request(
"POST",
f"/slas/apply-resource-slas/{resource_id}",
json=slas_spec_list,
)
response.raise_for_status()
response_json = response.json()
from prefect._experimental.sla.objects import SlaMergeResponse
return SlaMergeResponse(
created=[sla.get("name") for sla in response_json.get("created")],
updated=[sla.get("name") for sla in response_json.get("updated")],
deleted=[sla.get("name") for sla in response_json.get("deleted")],
)
| SlaAsyncClient |
python | has2k1__plotnine | tests/test_geom_smooth.py | {
"start": 3551,
"end": 6025
} | class ____:
p = ggplot(linear_data, aes("x")) + geom_point(aes(y="y_noisy"))
def test_wls(self):
p = self.p + geom_smooth(aes(y="y_noisy"), method="wls")
p.draw_test()
def test_rlm(self):
p = self.p + geom_smooth(aes(y="y_noisy"), method="rlm")
with pytest.warns(PlotnineWarning):
p.draw_test()
def test_glm(self):
p = self.p + geom_smooth(
aes(y="y_noisy"), method="glm", method_args={"family": "gaussian"}
)
p.draw_test()
def test_gls(self):
p = self.p + geom_smooth(aes(y="y_noisy"), method="gls")
p.draw_test()
def test_lowess(self):
p = self.p + geom_smooth(aes(y="y_noisy"), method="lowess")
with pytest.warns(PlotnineWarning):
p.draw_test()
def test_mavg(self):
p = self.p + geom_smooth(
aes(y="y_noisy"), method="mavg", method_args={"window": 10}
)
p.draw_test()
def test_gpr(self):
try:
from sklearn import gaussian_process # noqa: F401
except ImportError:
return
p = self.p + geom_smooth(aes(y="y_noisy"), method="gpr")
with pytest.warns(UserWarning):
p.draw_test()
def test_sorts_by_x():
data = pd.DataFrame({"x": [5, 0, 1, 2, 3, 4], "y": range(6)})
p = ggplot(data, aes("x", "y")) + geom_smooth(stat="identity")
assert p == "sorts_by_x"
def test_legend_fill_ratio():
p = (
ggplot(linear_data, aes("x", color="x<0.5"))
+ geom_point(aes(y="y_noisy"))
+ geom_smooth(aes(y="y_noisy"), method="lm", size=0.5, span=0.3)
)
assert p == "legend_fill_ratio"
def test_init_and_fit_kwargs():
data = pd.DataFrame(
{
"x": np.arange(11),
"y": [0, 0, 0, 0.05, 0.25, 0.5, 0.75, 0.95, 1, 1, 1],
}
)
p = (
ggplot(data, aes("x", "y"))
+ geom_point()
+ geom_smooth(
method="glm",
method_args={
"family": sm.families.Binomial(), # init parameter
"method": "minimize", # fit parameter
},
se=False,
)
)
assert p == "init_and_fit_kwargs"
n = 100
random_state = np.random.RandomState(123)
mu = 0
sigma = 0.065
noise = random_state.randn(n) * sigma + mu
x = np.linspace(-2 * np.pi, 2 * np.pi, n)
data = pd.DataFrame(
{
"x": x,
"y": np.sin(x) + noise,
}
)
| TestOther |
python | kamyu104__LeetCode-Solutions | Python/can-i-win.py | {
"start": 30,
"end": 1004
} | class ____(object):
def canIWin(self, maxChoosableInteger, desiredTotal):
"""
:type maxChoosableInteger: int
:type desiredTotal: int
:rtype: bool
"""
def canIWinHelper(maxChoosableInteger, desiredTotal, visited, lookup):
if visited in lookup:
return lookup[visited]
mask = 1
for i in xrange(maxChoosableInteger):
if visited & mask == 0:
if i + 1 >= desiredTotal or \
not canIWinHelper(maxChoosableInteger, desiredTotal - (i + 1), visited | mask, lookup):
lookup[visited] = True
return True
mask <<= 1
lookup[visited] = False
return False
if (1 + maxChoosableInteger) * (maxChoosableInteger / 2) < desiredTotal:
return False
return canIWinHelper(maxChoosableInteger, desiredTotal, 0, {})
| Solution |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_config_command.py | {
"start": 10129,
"end": 11375
} | class ____:
@classmethod
def setup_class(cls):
cls.parser = cli_parser.get_parser()
@conf_vars({("core", "test_key"): "test_value"})
def test_should_display_value(self, stdout_capture):
with stdout_capture as temp_stdout:
config_command.get_value(self.parser.parse_args(["config", "get-value", "core", "test_key"]))
assert temp_stdout.getvalue().strip() == "test_value"
@mock.patch("airflow.cli.commands.config_command.conf")
def test_should_not_raise_exception_when_section_for_config_with_value_defined_elsewhere_is_missing(
self, mock_conf
):
# no section in config
mock_conf.has_section.return_value = False
# pretend that the option is defined by other means
mock_conf.has_option.return_value = True
config_command.get_value(self.parser.parse_args(["config", "get-value", "some_section", "value"]))
def test_should_raise_exception_when_option_is_missing(self, caplog):
config_command.get_value(
self.parser.parse_args(["config", "get-value", "missing-section", "dags_folder"])
)
assert "section/key [missing-section/dags_folder] not found in config" in caplog.text
| TestCliConfigGetValue |
python | davidhalter__parso | test/test_diff_parser.py | {
"start": 19421,
"end": 34468
} | class ____:
def f():
return node
Some'random text: yeah
for push in plan.dfa_pushes:
def g():
try:
1
except KeyError:
2
''')
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
differ.parse(code1, parsers=2, copies=1)
def test_many_nested_ifs(differ):
code1 = dedent('''\
class C:
def f(self):
def iterate():
if 1:
yield t
else:
yield
return
def g():
3
''')
code2 = dedent('''\
def f(self):
def iterate():
if 1:
yield t
hahahaha
if 2:
else:
yield
return
def g():
3
''')
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
differ.parse(code1, parsers=1, copies=1)
@pytest.mark.parametrize('prefix', ['', 'async '])
def test_with_and_funcdef_in_call(differ, prefix):
code1 = prefix + dedent('''\
with x:
la = C(
a=1,
b=2,
c=3,
)
''')
code2 = insert_line_into_code(code1, 3, 'def y(self, args):\n')
differ.initialize(code1)
differ.parse(code2, parsers=1, expect_error_leaves=True)
differ.parse(code1, parsers=1)
def test_wrong_backslash(differ):
code1 = dedent('''\
def y():
1
for x in y:
continue
''')
code2 = insert_line_into_code(code1, 3, '\\.whl$\n')
differ.initialize(code1)
differ.parse(code2, parsers=3, copies=1, expect_error_leaves=True)
differ.parse(code1, parsers=1, copies=1)
def test_random_unicode_characters(differ):
"""
Those issues were all found with the fuzzer.
"""
differ.initialize('')
differ.parse('\x1dĔBϞɛˁşʑ˳˻ȣſéÎ\x90̕ȟòwʘ\x1dĔBϞɛˁşʑ˳˻ȣſéÎ', parsers=1,
expect_error_leaves=True)
differ.parse('\r\r', parsers=1)
differ.parse("˟Ę\x05À\r rúƣ@\x8a\x15r()\n", parsers=1, expect_error_leaves=True)
differ.parse('a\ntaǁ\rGĒōns__\n\nb', parsers=1)
s = ' if not (self, "_fi\x02\x0e\x08\n\nle"):'
differ.parse(s, parsers=1, expect_error_leaves=True)
differ.parse('')
differ.parse(s + '\n', parsers=1, expect_error_leaves=True)
differ.parse(' result = (\r\f\x17\t\x11res)', parsers=1, expect_error_leaves=True)
differ.parse('')
differ.parse(' a( # xx\ndef', parsers=1, expect_error_leaves=True)
def test_dedent_end_positions(differ):
code1 = dedent('''\
if 1:
if b:
2
c = {
5}
''')
code2 = dedent('''\
if 1:
if ⌟ഒᜈྡྷṭb:
2
'l': ''}
c = {
5}
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, expect_error_leaves=True)
differ.parse(code1, parsers=1)
def test_special_no_newline_ending(differ):
code1 = dedent('''\
1
''')
code2 = dedent('''\
1
is ''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=0)
def test_random_character_insertion(differ):
code1 = dedent('''\
def create(self):
1
if self.path is not None:
return
# 3
# 4
''')
code2 = dedent('''\
def create(self):
1
if 2:
x return
# 3
# 4
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=1)
def test_import_opening_bracket(differ):
code1 = dedent('''\
1
2
from bubu import (X,
''')
code2 = dedent('''\
11
2
from bubu import (X,
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=2, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=2, expect_error_leaves=True)
def test_opening_bracket_at_end(differ):
code1 = dedent('''\
class C:
1
[
''')
code2 = dedent('''\
3
class C:
1
[
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=2, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True)
def test_all_sorts_of_indentation(differ):
code1 = dedent('''\
class C:
1
def f():
'same'
if foo:
a = b
end
''')
code2 = dedent('''\
class C:
1
def f(yield await %|(
'same'
\x02\x06\x0f\x1c\x11
if foo:
a = b
end
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True)
code3 = dedent('''\
if 1:
a
b
c
d
\x00
''')
differ.parse(code3, parsers=1, expect_error_leaves=True)
differ.parse('')
def test_dont_copy_dedents_in_beginning(differ):
code1 = dedent('''\
a
4
''')
code2 = dedent('''\
1
2
3
4
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
differ.parse(code1, parsers=1, copies=1)
def test_dont_copy_error_leaves(differ):
code1 = dedent('''\
def f(n):
x
if 2:
3
''')
code2 = dedent('''\
def f(n):
def if 1:
indent
x
if 2:
3
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, expect_error_leaves=True)
differ.parse(code1, parsers=1)
def test_error_dedent_in_between(differ):
code1 = dedent('''\
class C:
def f():
a
if something:
x
z
''')
code2 = dedent('''\
class C:
def f():
a
dedent
if other_thing:
b
if something:
x
z
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=2, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=2)
def test_some_other_indentation_issues(differ):
code1 = dedent('''\
class C:
x
def f():
""
copied
a
''')
code2 = dedent('''\
try:
de
a
b
c
d
def f():
""
copied
a
''')
differ.initialize(code1)
differ.parse(code2, copies=0, parsers=1, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=1)
def test_open_bracket_case1(differ):
code1 = dedent('''\
class C:
1
2 # ha
''')
code2 = insert_line_into_code(code1, 2, ' [str\n')
code3 = insert_line_into_code(code2, 4, ' str\n')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
differ.parse(code3, copies=1, parsers=1, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=1)
def test_open_bracket_case2(differ):
code1 = dedent('''\
class C:
def f(self):
(
b
c
def g(self):
d
''')
code2 = dedent('''\
class C:
def f(self):
(
b
c
self.
def g(self):
d
''')
differ.initialize(code1)
differ.parse(code2, copies=0, parsers=1, expect_error_leaves=True)
differ.parse(code1, copies=0, parsers=1, expect_error_leaves=True)
def test_some_weird_removals(differ):
code1 = dedent('''\
class C:
1
''')
code2 = dedent('''\
class C:
1
@property
A
return
# x
omega
''')
code3 = dedent('''\
class C:
1
;
omega
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
differ.parse(code3, copies=1, parsers=3, expect_error_leaves=True)
differ.parse(code1, copies=1)
def test_async_copy(differ):
code1 = dedent('''\
async def main():
x = 3
print(
''')
code2 = dedent('''\
async def main():
x = 3
print()
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=1)
differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True)
def test_parent_on_decorator(differ):
code1 = dedent('''\
class AClass:
@decorator()
def b_test(self):
print("Hello")
print("world")
def a_test(self):
pass''')
code2 = dedent('''\
class AClass:
@decorator()
def b_test(self):
print("Hello")
print("world")
def a_test(self):
pass''')
differ.initialize(code1)
module_node = differ.parse(code2, parsers=1)
cls = module_node.children[0]
cls_suite = cls.children[-1]
assert len(cls_suite.children) == 3
def test_wrong_indent_in_def(differ):
code1 = dedent('''\
def x():
a
b
''')
code2 = dedent('''\
def x():
//
b
c
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, expect_error_leaves=True)
differ.parse(code1, parsers=1)
def test_backslash_issue(differ):
code1 = dedent('''
pre = (
'')
after = 'instead'
''')
code2 = dedent('''
pre = (
'')
\\if
''') # noqa
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
differ.parse(code1, parsers=1, copies=1)
def test_paren_with_indentation(differ):
code1 = dedent('''
class C:
def f(self, fullname, path=None):
x
def load_module(self, fullname):
a
for prefix in self.search_path:
try:
b
except ImportError:
c
else:
raise
def x():
pass
''')
code2 = dedent('''
class C:
def f(self, fullname, path=None):
x
(
a
for prefix in self.search_path:
try:
b
except ImportError:
c
else:
raise
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
differ.parse(code1, parsers=3, copies=1)
def test_error_dedent_in_function(differ):
code1 = dedent('''\
def x():
a
b
c
d
''')
code2 = dedent('''\
def x():
a
b
c
d
e
''')
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
def test_with_formfeed(differ):
code1 = dedent('''\
@bla
async def foo():
1
yield from []
return
return ''
''')
code2 = dedent('''\
@bla
async def foo():
1
\x0cimport
return
return ''
''') # noqa
differ.initialize(code1)
differ.parse(code2, parsers=ANY, copies=ANY, expect_error_leaves=True)
def test_repeating_invalid_indent(differ):
code1 = dedent('''\
def foo():
return
@bla
a
def foo():
a
b
c
''')
code2 = dedent('''\
def foo():
return
@bla
a
b
c
''')
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
def test_another_random_indent(differ):
code1 = dedent('''\
def foo():
a
b
c
return
def foo():
d
''')
code2 = dedent('''\
def foo():
a
c
return
def foo():
d
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=3)
def test_invalid_function(differ):
code1 = dedent('''\
a
def foo():
def foo():
b
''')
code2 = dedent('''\
a
def foo():
def foo():
b
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
def test_async_func2(differ):
code1 = dedent('''\
async def foo():
return ''
@bla
async def foo():
x
''')
code2 = dedent('''\
async def foo():
return ''
{
@bla
async def foo():
x
y
''')
differ.initialize(code1)
differ.parse(code2, parsers=ANY, copies=ANY, expect_error_leaves=True)
def test_weird_ending(differ):
code1 = dedent('''\
def foo():
a
return
''')
code2 = dedent('''\
def foo():
a
nonlocal xF"""
y"""''')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
def test_nested_class(differ):
code1 = dedent('''\
def c():
a = 3
class X:
b
''')
code2 = dedent('''\
def c():
a = 3
class X:
elif
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
def test_class_with_paren_breaker(differ):
code1 = dedent('''\
| C |
python | euske__pdfminer | pdfminer/converter.py | {
"start": 4270,
"end": 4658
} | class ____(PDFLayoutAnalyzer):
def __init__(self, rsrcmgr, pageno=1, laparams=None):
PDFLayoutAnalyzer.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams)
self.result = None
return
def receive_layout(self, ltpage):
self.result = ltpage
return
def get_result(self):
return self.result
## PDFConverter
##
| PDFPageAggregator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-fauna/source_fauna/source.py | {
"start": 812,
"end": 1273
} | class ____:
def __init__(self, conf):
# Domain of Fauna connection (localhost, db.fauna.com).
self.domain = conf["domain"]
# Port of Fauna connection (8443, 443).
self.port = conf["port"]
# Scheme of Fauna connection (https, http).
self.scheme = conf["scheme"]
# Secret of a Fauna DB (my-secret).
self.secret = conf["secret"]
self.collection = CollectionConfig(conf["collection"])
| Config |
python | realpython__materials | qt-designer-python/sample_editor/app.py | {
"start": 913,
"end": 1181
} | class ____(QDialog):
def __init__(self, parent=None):
super().__init__(parent)
loadUi("ui/find_replace.ui", self)
if __name__ == "__main__":
app = QApplication(sys.argv)
win = Window()
win.show()
sys.exit(app.exec())
| FindReplaceDialog |
python | numpy__numpy | numpy/lib/_polynomial_impl.py | {
"start": 33516,
"end": 44125
} | class ____:
"""
A one-dimensional polynomial class.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
>>> import numpy as np
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> import numpy as np
>>> p = np.poly1d([1, 2, 3])
>>> print(np.poly1d(p))
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1., -3., 2.])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
__hash__ = None
@property
def coeffs(self):
""" The polynomial coefficients """
return self._coeffs
@coeffs.setter
def coeffs(self, value):
# allowing this makes p.coeffs *= 2 legal
if value is not self._coeffs:
raise AttributeError("Cannot set attribute")
@property
def variable(self):
""" The name of the polynomial variable """
return self._variable
# calculated attributes
@property
def order(self):
""" The order or degree of the polynomial """
return len(self._coeffs) - 1
@property
def roots(self):
""" The roots of the polynomial, where self(x) == 0 """
return roots(self._coeffs)
# our internal _coeffs property need to be backed by __dict__['coeffs'] for
# scipy to work correctly.
@property
def _coeffs(self):
return self.__dict__['coeffs']
@_coeffs.setter
def _coeffs(self, coeffs):
self.__dict__['coeffs'] = coeffs
# alias attributes
r = roots
c = coef = coefficients = coeffs
o = order
def __init__(self, c_or_r, r=False, variable=None):
if isinstance(c_or_r, poly1d):
self._variable = c_or_r._variable
self._coeffs = c_or_r._coeffs
if set(c_or_r.__dict__) - set(self.__dict__):
msg = ("In the future extra properties will not be copied "
"across when constructing one poly1d from another")
warnings.warn(msg, FutureWarning, stacklevel=2)
self.__dict__.update(c_or_r.__dict__)
if variable is not None:
self._variable = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if c_or_r.ndim > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0], dtype=c_or_r.dtype)
self._coeffs = c_or_r
if variable is None:
variable = 'x'
self._variable = variable
def __array__(self, t=None, copy=None):
if t:
return NX.asarray(self.coeffs, t, copy=copy)
else:
return NX.asarray(self.coeffs, copy=copy)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return f"poly1d({vals})"
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs) - 1
def fmt_float(q):
s = f'{q:.4g}'
s = s.removesuffix('.0000')
return s
for k, coeff in enumerate(coeffs):
if not iscomplex(coeff):
coefstr = fmt_float(real(coeff))
elif real(coeff) == 0:
coefstr = f'{fmt_float(imag(coeff))}j'
else:
coefstr = f'({fmt_float(real(coeff))} + {fmt_float(imag(coeff))}j)'
power = (N - k)
if power == 0:
if coefstr != '0':
newstr = f'{coefstr}'
elif k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = f'{coefstr} {var}'
elif coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = f"{thestr} - {newstr[1:]}"
else:
thestr = f"{thestr} + {newstr}"
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __truediv__(self, other):
if isscalar(other):
return poly1d(self.coeffs / other)
else:
other = poly1d(other)
return polydiv(self, other)
def __rtruediv__(self, other):
if isscalar(other):
return poly1d(other / self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
def __eq__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
return not self.__eq__(other)
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return self.coeffs.dtype.type(0)
if val < 0:
return self.coeffs.dtype.type(0)
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key - self.order, self.coeffs.dtype)
self._coeffs = NX.concatenate((zr, self.coeffs))
ind = 0
self._coeffs[ind] = val
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
| poly1d |
python | gevent__gevent | src/greentest/3.14/test_urllib2.py | {
"start": 12665,
"end": 13002
} | class ____(io.IOBase):
def __init__(self, fp, msg, status, reason):
self.fp = fp
self.msg = msg
self.status = status
self.reason = reason
self.code = 200
def read(self):
return ''
def info(self):
return {}
def geturl(self):
return self.url
| MockHTTPResponse |
python | huggingface__transformers | tests/models/d_fine/test_modeling_d_fine.py | {
"start": 1512,
"end": 10980
} | class ____:
def __init__(
self,
parent,
batch_size=3,
is_training=True,
use_labels=True,
n_targets=3,
num_labels=10,
initializer_range=0.02,
layer_norm_eps=1e-5,
batch_norm_eps=1e-5,
# backbone
backbone_config=None,
# encoder HybridEncoder
encoder_hidden_dim=32,
encoder_in_channels=[128, 256, 512],
feat_strides=[8, 16, 32],
encoder_layers=1,
encoder_ffn_dim=64,
encoder_attention_heads=2,
dropout=0.0,
activation_dropout=0.0,
encode_proj_layers=[2],
positional_encoding_temperature=10000,
encoder_activation_function="gelu",
activation_function="silu",
eval_size=None,
normalize_before=False,
# decoder DFineTransformer
d_model=32,
num_queries=30,
decoder_in_channels=[32, 32, 32],
decoder_ffn_dim=64,
num_feature_levels=3,
decoder_n_points=[3, 6, 3],
decoder_n_levels=3,
decoder_layers=2,
decoder_attention_heads=2,
decoder_activation_function="relu",
attention_dropout=0.0,
num_denoising=0,
label_noise_ratio=0.5,
box_noise_scale=1.0,
learn_initial_query=False,
anchor_image_size=None,
image_size=64,
disable_custom_kernels=True,
with_box_refine=True,
decoder_offset_scale=0.5,
eval_idx=-1,
layer_scale=1,
reg_max=32,
reg_scale=4.0,
depth_mult=0.34,
hidden_expansion=0.5,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = 3
self.is_training = is_training
self.use_labels = use_labels
self.n_targets = n_targets
self.num_labels = num_labels
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.batch_norm_eps = batch_norm_eps
self.backbone_config = backbone_config
self.encoder_hidden_dim = encoder_hidden_dim
self.encoder_in_channels = encoder_in_channels
self.feat_strides = feat_strides
self.encoder_layers = encoder_layers
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.dropout = dropout
self.activation_dropout = activation_dropout
self.encode_proj_layers = encode_proj_layers
self.positional_encoding_temperature = positional_encoding_temperature
self.encoder_activation_function = encoder_activation_function
self.activation_function = activation_function
self.eval_size = eval_size
self.normalize_before = normalize_before
self.d_model = d_model
self.num_queries = num_queries
self.decoder_in_channels = decoder_in_channels
self.decoder_ffn_dim = decoder_ffn_dim
self.num_feature_levels = num_feature_levels
self.decoder_n_points = decoder_n_points
self.decoder_n_levels = decoder_n_levels
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.decoder_activation_function = decoder_activation_function
self.attention_dropout = attention_dropout
self.decoder_offset_scale = decoder_offset_scale
self.eval_idx = eval_idx
self.layer_scale = layer_scale
self.reg_max = reg_max
self.reg_scale = reg_scale
self.depth_mult = depth_mult
self.num_denoising = num_denoising
self.label_noise_ratio = label_noise_ratio
self.box_noise_scale = box_noise_scale
self.learn_initial_query = learn_initial_query
self.anchor_image_size = anchor_image_size
self.image_size = image_size
self.disable_custom_kernels = disable_custom_kernels
self.with_box_refine = with_box_refine
self.hidden_expansion = hidden_expansion
self.encoder_seq_length = math.ceil(self.image_size / 32) * math.ceil(self.image_size / 32)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device)
labels = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
labels = []
for i in range(self.batch_size):
target = {}
target["class_labels"] = torch.randint(
high=self.num_labels, size=(self.n_targets,), device=torch_device
)
target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
labels.append(target)
config = self.get_config()
config.num_labels = self.num_labels
return config, pixel_values, pixel_mask, labels
def get_config(self):
hidden_sizes = [64, 128, 256, 512]
backbone_config = HGNetV2Config(
stage_in_channels=[16, 64, 128, 256],
stage_mid_channels=[16, 32, 64, 128],
stage_out_channels=[64, 128, 256, 512],
stage_num_blocks=[1, 1, 2, 1],
stage_downsample=[False, True, True, True],
stage_light_block=[False, False, True, True],
stage_kernel_size=[3, 3, 5, 5],
stage_numb_of_layers=[3, 3, 3, 3],
embeddings_size=10,
hidden_sizes=hidden_sizes,
depths=[1, 1, 2, 1],
out_features=["stage2", "stage3", "stage4"],
out_indices=[2, 3, 4],
stem_channels=[3, 16, 16],
use_lab=True,
)
return DFineConfig(
backbone_config=backbone_config,
encoder_hidden_dim=self.encoder_hidden_dim,
encoder_in_channels=self.encoder_in_channels,
feat_strides=self.feat_strides,
encoder_layers=self.encoder_layers,
encoder_ffn_dim=self.encoder_ffn_dim,
encoder_attention_heads=self.encoder_attention_heads,
dropout=self.dropout,
activation_dropout=self.activation_dropout,
encode_proj_layers=self.encode_proj_layers,
positional_encoding_temperature=self.positional_encoding_temperature,
encoder_activation_function=self.encoder_activation_function,
activation_function=self.activation_function,
eval_size=self.eval_size,
normalize_before=self.normalize_before,
d_model=self.d_model,
num_queries=self.num_queries,
decoder_in_channels=self.decoder_in_channels,
decoder_ffn_dim=self.decoder_ffn_dim,
num_feature_levels=self.num_feature_levels,
decoder_n_points=self.decoder_n_points,
decoder_n_levels=self.decoder_n_levels,
decoder_layers=self.decoder_layers,
decoder_attention_heads=self.decoder_attention_heads,
decoder_activation_function=self.decoder_activation_function,
decoder_offset_scale=self.decoder_offset_scale,
eval_idx=self.eval_idx,
layer_scale=self.layer_scale,
reg_max=self.reg_max,
reg_scale=self.reg_scale,
depth_mult=self.depth_mult,
attention_dropout=self.attention_dropout,
num_denoising=self.num_denoising,
label_noise_ratio=self.label_noise_ratio,
box_noise_scale=self.box_noise_scale,
learn_initial_query=self.learn_initial_query,
anchor_image_size=self.anchor_image_size,
image_size=self.image_size,
disable_custom_kernels=self.disable_custom_kernels,
with_box_refine=self.with_box_refine,
)
def prepare_config_and_inputs_for_common(self):
config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
def create_and_check_d_fine_model(self, config, pixel_values, pixel_mask, labels):
model = DFineModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.d_model))
def create_and_check_d_fine_object_detection_head_model(self, config, pixel_values, pixel_mask, labels):
model = DFineForObjectDetection(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
@require_torch
| DFineModelTester |
python | tornadoweb__tornado | tornado/test/tcpserver_test.py | {
"start": 363,
"end": 3814
} | class ____(AsyncTestCase):
@gen_test
def test_handle_stream_coroutine_logging(self):
# handle_stream may be a coroutine and any exception in its
# Future will be logged.
class TestServer(TCPServer):
@gen.coroutine
def handle_stream(self, stream, address):
yield stream.read_bytes(len(b"hello"))
stream.close()
1 / 0
server = client = None
try:
sock, port = bind_unused_port()
server = TestServer()
server.add_socket(sock)
client = IOStream(socket.socket())
with ExpectLog(app_log, "Exception in callback"):
yield client.connect(("localhost", port))
yield client.write(b"hello")
yield client.read_until_close()
yield gen.moment
finally:
if server is not None:
server.stop()
if client is not None:
client.close()
@gen_test
def test_handle_stream_native_coroutine(self):
# handle_stream may be a native coroutine.
class TestServer(TCPServer):
async def handle_stream(self, stream, address):
stream.write(b"data")
stream.close()
sock, port = bind_unused_port()
server = TestServer()
server.add_socket(sock)
client = IOStream(socket.socket())
yield client.connect(("localhost", port))
result = yield client.read_until_close()
self.assertEqual(result, b"data")
server.stop()
client.close()
def test_stop_twice(self):
sock, port = bind_unused_port()
server = TCPServer()
server.add_socket(sock)
server.stop()
server.stop()
@gen_test
def test_stop_in_callback(self):
# Issue #2069: calling server.stop() in a loop callback should not
# raise EBADF when the loop handles other server connection
# requests in the same loop iteration
class TestServer(TCPServer):
@gen.coroutine
def handle_stream(self, stream, address):
server.stop() # type: ignore
yield stream.read_until_close()
sock, port = bind_unused_port()
server = TestServer()
server.add_socket(sock)
server_addr = ("localhost", port)
N = 40
clients = [IOStream(socket.socket()) for i in range(N)]
connected_clients = []
@gen.coroutine
def connect(c):
try:
yield c.connect(server_addr)
except OSError:
pass
else:
connected_clients.append(c)
yield [connect(c) for c in clients]
self.assertGreater(len(connected_clients), 0, "all clients failed connecting")
try:
if len(connected_clients) == N:
# Ideally we'd make the test deterministic, but we're testing
# for a race condition in combination with the system's TCP stack...
self.skipTest(
"at least one client should fail connecting "
"for the test to be meaningful"
)
finally:
for c in connected_clients:
c.close()
# Here tearDown() would re-raise the EBADF encountered in the IO loop
@skipIfNonUnix
| TCPServerTest |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/properties/snippets.py | {
"start": 2583,
"end": 2701
} | class ____(messages.Message):
text = messages.StringField(1, required=True)
when = messages.IntegerField(2)
| Note |
python | getsentry__sentry | src/sentry/deletions/defaults/uptime_subscription.py | {
"start": 221,
"end": 1324
} | class ____(ModelDeletionTask[UptimeSubscription]):
def delete_instance(self, instance: UptimeSubscription) -> None:
detector = get_detector(instance)
# XXX: Typically quota updates would be handled by the
# delete_uptime_detector function exposed in the
# uptime.subscriptions.subscriptions module. However if a Detector is
# deleted without using this, we need to still ensure the billing east
# is revoked. This should never happen.
#
# Since the delete_uptime_detector function is already scheduling the
# detector for deletion, you may think we could remove the quota
# remove_seat call there, since it will happen here. But this would
# mean the customers quota is not freed up _immediately_ when the
# detector is deleted using that method.
remove_uptime_seat(detector)
# Ensure the remote subscription is removed if it wasn't already (again
# it should have been as part of delete_uptime_detector)
delete_uptime_subscription(instance)
| UptimeSubscriptionDeletionTask |
python | doocs__leetcode | solution/2600-2699/2653.Sliding Subarray Beauty/Solution.py | {
"start": 0,
"end": 547
} | class ____:
def getSubarrayBeauty(self, nums: List[int], k: int, x: int) -> List[int]:
def f(x: int) -> int:
s = 0
for i in range(50):
s += cnt[i]
if s >= x:
return i - 50
return 0
cnt = [0] * 101
for v in nums[:k]:
cnt[v + 50] += 1
ans = [f(x)]
for i in range(k, len(nums)):
cnt[nums[i] + 50] += 1
cnt[nums[i - k] + 50] -= 1
ans.append(f(x))
return ans
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.