language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | redis__redis-py | redis/commands/bf/info.py | {
"start": 716,
"end": 1644
} | class ____:
size = None
bucketNum = None
filterNum = None
insertedNum = None
deletedNum = None
bucketSize = None
expansionRate = None
maxIteration = None
def __init__(self, args):
response = dict(zip(map(nativestr, args[::2]), args[1::2]))
self.size = response["Size"]
self.bucketNum = response["Number of buckets"]
self.filterNum = response["Number of filters"]
self.insertedNum = response["Number of items inserted"]
self.deletedNum = response["Number of items deleted"]
self.bucketSize = response["Bucket size"]
self.expansionRate = response["Expansion rate"]
self.maxIteration = response["Max iterations"]
def get(self, item):
try:
return self.__getitem__(item)
except AttributeError:
return None
def __getitem__(self, item):
return getattr(self, item)
| CFInfo |
python | PrefectHQ__prefect | tests/events/client/instrumentation/test_events_workers_instrumentation.py | {
"start": 584,
"end": 12933
} | class ____(BaseWorker):
type = "events-test"
job_configuration = BaseJobConfiguration
async def run(self):
pass
async def test_worker_emits_submitted_event(
asserting_events_worker: EventsWorker,
reset_worker_events,
prefect_client: PrefectClient,
worker_deployment_wq1,
work_pool,
):
flow_run = await prefect_client.create_flow_run_from_deployment(
worker_deployment_wq1.id,
state=Scheduled(scheduled_time=now("UTC")),
tags=["flow-run-one"],
)
flow = await prefect_client.read_flow(flow_run.flow_id)
async with WorkerEventsTestImpl(work_pool_name=work_pool.name) as worker:
worker._work_pool = work_pool
worker.run = AsyncMock()
await worker.get_and_submit_flow_runs()
await asserting_events_worker.drain()
assert isinstance(asserting_events_worker._client, AssertingEventsClient)
# When a worker submits a flow-run, it monitors that flow run until it's complete.
# When it's complete, it fires a second 'monitored' event, which
# is covered by the test_worker_emits_monitored_event below.
assert len(asserting_events_worker._client.events) == 2
submit_events = list(
filter(
lambda e: e.event == "prefect.worker.submitted-flow-run",
asserting_events_worker._client.events,
)
)
assert len(submit_events) == 1
assert dict(submit_events[0].resource.items()) == {
"prefect.resource.id": f"prefect.worker.events-test.{worker.get_name_slug()}",
"prefect.resource.name": worker.name,
"prefect.version": str(__version__),
"prefect.worker-type": worker.type,
}
assert len(submit_events[0].related) == 6
related = [dict(r.items()) for r in submit_events[0].related]
assert related == [
{
"prefect.resource.id": f"prefect.deployment.{worker_deployment_wq1.id}",
"prefect.resource.role": "deployment",
"prefect.resource.name": worker_deployment_wq1.name,
},
{
"prefect.resource.id": f"prefect.flow.{flow.id}",
"prefect.resource.role": "flow",
"prefect.resource.name": flow.name,
},
{
"prefect.resource.id": f"prefect.flow-run.{flow_run.id}",
"prefect.resource.role": "flow-run",
"prefect.resource.name": flow_run.name,
},
{
"prefect.resource.id": "prefect.tag.flow-run-one",
"prefect.resource.role": "tag",
},
{
"prefect.resource.id": "prefect.tag.test",
"prefect.resource.role": "tag",
},
{
"prefect.resource.id": f"prefect.work-pool.{work_pool.id}",
"prefect.resource.role": "work-pool",
"prefect.resource.name": work_pool.name,
},
]
async def test_worker_emits_executed_event(
asserting_events_worker: EventsWorker,
reset_worker_events,
prefect_client: PrefectClient,
worker_deployment_wq1,
work_pool,
):
flow_run = await prefect_client.create_flow_run_from_deployment(
worker_deployment_wq1.id,
state=Scheduled(scheduled_time=now("UTC")),
tags=["flow-run-one"],
)
flow = await prefect_client.read_flow(flow_run.flow_id)
worker_result = BaseWorkerResult(status_code=1, identifier="process123")
run_flow_fn = AsyncMock(return_value=worker_result)
async with WorkerEventsTestImpl(work_pool_name=work_pool.name) as worker:
worker._work_pool = work_pool
worker.run = run_flow_fn
await worker.get_and_submit_flow_runs()
await asserting_events_worker.drain()
assert isinstance(asserting_events_worker._client, AssertingEventsClient)
# When a worker submits a flow-run, it monitors that flow run until it's complete.
# When it's complete, it fires a second 'submitted' event, which
# is covered by the test_worker_emits_submitted_event below.
assert len(asserting_events_worker._client.events) == 2
submitted_events = list(
filter(
lambda e: e.event == "prefect.worker.submitted-flow-run",
asserting_events_worker._client.events,
)
)
assert len(submitted_events) == 1
executed_events = list(
filter(
lambda e: e.event == "prefect.worker.executed-flow-run",
asserting_events_worker._client.events,
)
)
assert len(executed_events) == 1
assert executed_events[0].event == "prefect.worker.executed-flow-run"
assert dict(executed_events[0].resource.items()) == {
"prefect.resource.id": f"prefect.worker.events-test.{worker.get_name_slug()}",
"prefect.resource.name": worker.name,
"prefect.version": str(__version__),
"prefect.worker-type": worker.type,
}
assert len(executed_events[0].related) == 6
related = [dict(r.items()) for r in executed_events[0].related]
assert related == [
{
"prefect.resource.id": f"prefect.deployment.{worker_deployment_wq1.id}",
"prefect.resource.role": "deployment",
"prefect.resource.name": worker_deployment_wq1.name,
},
{
"prefect.resource.id": f"prefect.flow.{flow.id}",
"prefect.resource.role": "flow",
"prefect.resource.name": flow.name,
},
{
"prefect.resource.id": f"prefect.flow-run.{flow_run.id}",
"prefect.resource.role": "flow-run",
"prefect.resource.name": flow_run.name,
"prefect.infrastructure.status-code": "1",
"prefect.infrastructure.identifier": "process123",
},
{
"prefect.resource.id": "prefect.tag.flow-run-one",
"prefect.resource.role": "tag",
},
{
"prefect.resource.id": "prefect.tag.test",
"prefect.resource.role": "tag",
},
{
"prefect.resource.id": f"prefect.work-pool.{work_pool.id}",
"prefect.resource.role": "work-pool",
"prefect.resource.name": work_pool.name,
},
]
assert executed_events[0].follows == submitted_events[0].id
async def test_worker_event_includes_deployment_version(
asserting_events_worker: EventsWorker,
reset_worker_events,
prefect_client: PrefectClient,
worker_deployment_wq1: orm_models.Deployment,
work_pool: orm_models.WorkPool,
):
await prefect_client.create_flow_run_from_deployment(
worker_deployment_wq1.id,
state=Scheduled(scheduled_time=now("UTC")),
tags=["flow-run-one"],
)
worker_result = BaseWorkerResult(status_code=1, identifier="process123")
run_flow_fn = AsyncMock(return_value=worker_result)
# mock the client to return a DeploymentResponse with a version_id and version_info,
# which would be the case if the deployment was created Prefect Cloud experimental
# deployment versioning support.
server_deployment = await prefect_client.read_deployment(worker_deployment_wq1.id)
server_deployment.version_id = "aaaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
server_deployment.version_info = VersionInfo(
type="githubulous", version="1.2.3.4.5.6"
)
with mock.patch(
"prefect.client.orchestration.PrefectClient.read_deployment",
return_value=server_deployment,
):
async with WorkerEventsTestImpl(work_pool_name=work_pool.name) as worker:
worker._work_pool = work_pool
worker.run = run_flow_fn
await worker.get_and_submit_flow_runs()
await asserting_events_worker.drain()
assert isinstance(asserting_events_worker._client, AssertingEventsClient)
# When a worker submits a flow-run, it monitors that flow run until it's complete.
# When it's complete, it fires a second 'submitted' event, which
# is covered by the test_worker_emits_submitted_event below.
assert len(asserting_events_worker._client.events) == 2
events = list(
filter(
lambda e: (
e.event == "prefect.worker.submitted-flow-run"
or e.event == "prefect.worker.executed-flow-run"
),
asserting_events_worker._client.events,
)
)
# We can just spot-check one of the events here
event = events[0]
deployment = event.resource_in_role["deployment"]
assert (
deployment["prefect.resource.id"]
== f"prefect.deployment.{worker_deployment_wq1.id}"
)
assert deployment["prefect.resource.role"] == "deployment"
assert deployment["prefect.resource.name"] == worker_deployment_wq1.name
assert (
deployment["prefect.deployment.version-id"]
== "aaaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
)
assert deployment["prefect.deployment.version-type"] == "githubulous"
assert deployment["prefect.deployment.version"] == "1.2.3.4.5.6"
@pytest.mark.usefixtures("use_hosted_api_server")
def test_lifecycle_events(
asserting_events_worker: EventsWorker, reset_worker_events, work_pool
):
invoke_and_assert(
command=[
"worker",
"start",
"--run-once",
"-p",
work_pool.name,
"-n",
"test-worker",
"-t",
"process",
],
expected_code=0,
)
asserting_events_worker.drain()
assert isinstance(asserting_events_worker._client, AssertingEventsClient)
assert len(asserting_events_worker._client.events) == 2
# first event will always be `prefect.worker.started`
started_event = asserting_events_worker._client.events[0]
assert started_event.event == "prefect.worker.started"
assert dict(started_event.resource.items()) == {
"prefect.resource.id": "prefect.worker.process.test-worker",
"prefect.resource.name": "test-worker",
"prefect.version": str(__version__),
"prefect.worker-type": "process",
}
assert len(started_event.related) == 1
related = [dict(r.items()) for r in started_event.related]
assert related == [
{
"prefect.resource.id": f"prefect.work-pool.{work_pool.id}",
"prefect.resource.role": "work-pool",
"prefect.resource.name": work_pool.name,
},
]
# last event should be `prefect.worker.stopped`
stopped_event = asserting_events_worker._client.events[
len(asserting_events_worker._client.events) - 1
]
assert stopped_event.event == "prefect.worker.stopped"
assert dict(stopped_event.resource.items()) == {
"prefect.resource.id": "prefect.worker.process.test-worker",
"prefect.resource.name": "test-worker",
"prefect.version": str(__version__),
"prefect.worker-type": "process",
}
assert len(stopped_event.related) == 1
related = [dict(r.items()) for r in stopped_event.related]
assert related == [
{
"prefect.resource.id": f"prefect.work-pool.{work_pool.id}",
"prefect.resource.role": "work-pool",
"prefect.resource.name": work_pool.name,
},
]
def test_job_configuration_related_resources_no_objects():
config = BaseJobConfiguration()
config._related_objects = {
"deployment": None,
"flow": None,
"flow-run": None,
}
assert config._related_resources() == []
async def test_worker_can_include_itself_as_related(work_pool):
async with WorkerEventsTestImpl(work_pool_name=work_pool.name) as worker:
await worker.sync_with_backend()
related = [dict(r) for r in worker._event_related_resources(include_self=True)]
assert related == [
{
"prefect.resource.id": f"prefect.work-pool.{work_pool.id}",
"prefect.resource.role": "work-pool",
"prefect.resource.name": work_pool.name,
},
{
"prefect.resource.id": (
f"prefect.worker.events-test.{worker.get_name_slug()}"
),
"prefect.resource.role": "worker",
"prefect.resource.name": worker.name,
"prefect.version": str(__version__),
"prefect.worker-type": worker.type,
},
]
| WorkerEventsTestImpl |
python | keras-team__keras | keras/src/optimizers/schedules/learning_rate_schedule_test.py | {
"start": 6237,
"end": 8016
} | class ____(testing.TestCase):
def test_halfway(self):
step = 5
lr = 0.05
end_lr = 0.0
power = 0.5
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, power=power)
expected = lr * 0.5**power
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_end(self):
step = 10
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, power=power)
expected = end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_halfway_with_end(self):
step = 5
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, power=power)
expected = (lr - end_lr) * 0.5**power + end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_beyond_end(self):
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, power=power)
expected = end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_beyond_end_with_cycle(self):
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = schedules.PolynomialDecay(
lr, 10, end_lr, power=power, cycle=True
)
expected = (lr - end_lr) * 0.25**power + end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_begin_with_cycle(self):
lr = 0.001
decay_steps = 10
step = 0
decayed_lr = schedules.PolynomialDecay(lr, decay_steps, cycle=True)
expected = lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
| SqrtDecayTest |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 42751,
"end": 47052
} | class ____:
"""Test hy_AM address provider methods"""
def test_address(self, faker, num_samples):
for _ in range(num_samples):
address = faker.address()
assert isinstance(address, str)
def test_building_number(self, faker, num_samples):
for _ in range(num_samples):
building_number = faker.building_number()
assert isinstance(building_number, str)
assert 0 <= int(building_number) <= 999
def test_city(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
assert city in HyAmAddressProvider.cities
def test_city_prefix(self, faker, num_samples):
for _ in range(num_samples):
city_prefix = faker.city_prefix()
assert isinstance(city_prefix, str)
assert city_prefix in HyAmAddressProvider.city_prefixes
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in HyAmAddressProvider.countries
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert 200 <= int(postcode) <= 4299
def test_postcode_in_state(self, faker, num_samples):
for _ in range(num_samples):
for state_abbr in HyAmAddressProvider.states_abbr:
code = faker.postcode_in_state(state_abbr)
assert re.fullmatch(r"\d{4}", code)
assert int(code) >= HyAmAddressProvider.states_postcode[state_abbr][0]
assert int(code) <= HyAmAddressProvider.states_postcode[state_abbr][1]
with pytest.raises(Exception):
faker.postcode_in_state("XX")
def test_secondary_address(self, faker, num_samples):
for _ in range(num_samples):
secondary_address = faker.secondary_address()
assert isinstance(secondary_address, str)
assert re.fullmatch(r"բն\. \d{1,2}", secondary_address)
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in HyAmAddressProvider.states
def test_state_abbr(self, faker, num_samples):
for _ in range(num_samples):
state_abbr = faker.state_abbr()
assert isinstance(state_abbr, str)
assert state_abbr in HyAmAddressProvider.states_abbr
assert state_abbr.isupper()
def test_street(self, faker, num_samples):
for _ in range(num_samples):
street = faker.street()
assert isinstance(street, str)
assert street in HyAmAddressProvider.streets
def test_street_address(self, faker, num_samples):
for _ in range(num_samples):
street_address = faker.street_address()
assert isinstance(street_address, str)
def test_street_name(self, faker, num_samples):
for _ in range(num_samples):
street_name = faker.street_name()
assert isinstance(street_name, str)
def test_street_prefix(self, faker, num_samples):
for _ in range(num_samples):
street_prefix = faker.street_prefix()
assert isinstance(street_prefix, str)
assert street_prefix in HyAmAddressProvider.street_prefixes
def test_street_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.street_suffix()
assert isinstance(suffix, str)
assert suffix in HyAmAddressProvider.street_suffixes
def test_village(self, faker, num_samples):
for _ in range(num_samples):
village = faker.village()
assert isinstance(village, str)
assert village in HyAmAddressProvider.villages
def test_village_prefix(self, faker, num_samples):
for _ in range(num_samples):
village_prefix = faker.village_prefix()
assert isinstance(village_prefix, str)
assert village_prefix in HyAmAddressProvider.village_prefixes
| TestHyAm |
python | apache__airflow | providers/docker/src/airflow/providers/docker/decorators/docker.py | {
"start": 3240,
"end": 8540
} | class ____(DecoratedOperator, DockerOperator):
"""
Wraps a Python callable and captures args/kwargs when called for execution.
:param python_callable: A reference to an object that is callable
:param python: Python binary name to use
:param expect_airflow: whether to expect airflow to be installed in the docker environment. if this
one is specified, the script to run callable will attempt to load Airflow macros.
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. Dict will unroll to xcom values with keys as keys.
Defaults to False.
:param serializer: Which serializer use to serialize the args and result. It can be one of the following:
- ``"pickle"``: (default) Use pickle for serialization. Included in the Python Standard Library.
- ``"cloudpickle"``: Use cloudpickle for serialize more complex types,
this requires to include cloudpickle in your requirements.
- ``"dill"``: Use dill for serialize more complex types,
this requires to include dill in your requirements.
"""
custom_operator_name = "@task.docker"
template_fields: Sequence[str] = (*DockerOperator.template_fields, "op_args", "op_kwargs")
def __init__(
self,
python_command="python3",
expect_airflow: bool = True,
serializer: Serializer | None = None,
**kwargs,
) -> None:
serializer = serializer or "pickle"
if serializer not in _SERIALIZERS:
msg = (
f"Unsupported serializer {serializer!r}. Expected one of {', '.join(map(repr, _SERIALIZERS))}"
)
raise AirflowException(msg)
command = "placeholder command"
self.python_command = python_command
self.expect_airflow = expect_airflow
self.serializer: Serializer = serializer
super().__init__(
command=command, retrieve_output=True, retrieve_output_path="/tmp/script.out", **kwargs
)
def generate_command(self):
return (
f"""bash -cx '{
_generate_decode_command("__PYTHON_SCRIPT", "/tmp/script.py", self.python_command)
} &&"""
f"{_generate_decode_command('__PYTHON_INPUT', '/tmp/script.in', self.python_command)} &&"
f"{self.python_command} /tmp/script.py /tmp/script.in /tmp/script.out none /tmp/script.out'"
)
def execute(self, context: Context):
with TemporaryDirectory(prefix="venv") as tmp_dir:
input_filename = os.path.join(tmp_dir, "script.in")
script_filename = os.path.join(tmp_dir, "script.py")
with open(input_filename, "wb") as file:
if self.op_args or self.op_kwargs:
self.pickling_library.dump({"args": self.op_args, "kwargs": self.op_kwargs}, file)
py_source = self.get_python_source()
write_python_script(
jinja_context={
"op_args": self.op_args,
"op_kwargs": self.op_kwargs,
"pickling_library": self.pickling_library.__name__,
"python_callable": self.python_callable.__name__,
"python_callable_source": py_source,
"expect_airflow": self.expect_airflow,
"string_args_global": False,
},
filename=script_filename,
)
# Pass the python script to be executed, and the input args, via environment variables. This is
# more than slightly hacky, but it means it can work when Airflow itself is in the same Docker
# engine where this task is going to run (unlike say trying to mount a file in)
self.environment["__PYTHON_SCRIPT"] = _b64_encode_file(script_filename)
if self.op_args or self.op_kwargs:
self.environment["__PYTHON_INPUT"] = _b64_encode_file(input_filename)
else:
self.environment["__PYTHON_INPUT"] = ""
self.command = self.generate_command()
return super().execute(context)
@property
def pickling_library(self):
return _SERIALIZERS[self.serializer]
def docker_task(
python_callable: Callable | None = None,
multiple_outputs: bool | None = None,
**kwargs,
) -> TaskDecorator:
"""
Python operator decorator; wraps a function into an Airflow operator.
Also accepts any argument that DockerOperator will via ``kwargs``. Can be reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: If set, function return value will be unrolled to multiple XCom values.
Dict will unroll to XCom values with keys as XCom keys. Defaults to False.
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_DockerDecoratedOperator,
**kwargs,
)
| _DockerDecoratedOperator |
python | keras-team__keras | keras/src/metrics/accuracy_metrics_test.py | {
"start": 2855,
"end": 7143
} | class ____(testing.TestCase):
def test_config(self):
bin_acc_obj = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32"
)
self.assertEqual(bin_acc_obj.name, "binary_accuracy")
self.assertEqual(len(bin_acc_obj.variables), 2)
self.assertEqual(bin_acc_obj._dtype, "float32")
# Test get_config
bin_acc_obj_config = bin_acc_obj.get_config()
self.assertEqual(bin_acc_obj_config["name"], "binary_accuracy")
self.assertEqual(bin_acc_obj_config["dtype"], "float32")
# Check save and restore config
bin_acc_obj2 = accuracy_metrics.BinaryAccuracy.from_config(
bin_acc_obj_config
)
self.assertEqual(bin_acc_obj2.name, "binary_accuracy")
self.assertEqual(len(bin_acc_obj2.variables), 2)
self.assertEqual(bin_acc_obj2._dtype, "float32")
def test_unweighted(self):
bin_acc_obj = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32"
)
y_true = np.array([[1], [1], [0], [0]])
y_pred = np.array([[0.98], [1], [0], [0.6]])
bin_acc_obj.update_state(y_true, y_pred)
result = bin_acc_obj.result()
self.assertAllClose(result, 0.75, atol=1e-3)
# Test broadcasting case
bin_acc_obj = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32"
)
y_true = np.array([1, 1, 0, 0])
y_pred = np.array([[0.98], [1], [0], [0.6]])
bin_acc_obj.update_state(y_true, y_pred)
result = bin_acc_obj.result()
self.assertAllClose(result, 0.75, atol=1e-3)
def test_weighted(self):
bin_acc_obj = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32"
)
y_true = np.array([[1], [1], [0], [0]])
y_pred = np.array([[0.98], [1], [0], [0.6]])
sample_weight = np.array([1, 0, 0, 1])
bin_acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = bin_acc_obj.result()
self.assertAllClose(result, 0.5, atol=1e-3)
def test_weighted_rank_1(self):
bin_acc_obj = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32"
)
y_true = np.array([1, 1, 0, 0])
y_pred = np.array([0.98, 1, 0, 0.6])
sample_weight = np.array([1, 0, 0, 1])
bin_acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = bin_acc_obj.result()
self.assertAllClose(result, 0.5, atol=1e-3)
def test_weighted_nd_weights(self):
bin_acc_obj = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32"
)
y_true = np.array([[1, 1], [0, 0]])
y_pred = np.array([[0.98, 1], [0, 0.6]])
sample_weight = np.array([[1, 0], [0, 1]])
bin_acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = bin_acc_obj.result()
self.assertAllClose(result, 0.5, atol=1e-3)
def test_weighted_nd_broadcast_weights(self):
bin_acc_obj = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32"
)
y_true = np.array([[1, 1], [0, 0]])
y_pred = np.array([[0.98, 1], [0, 0.6]])
sample_weight = np.array([[1, 0]])
bin_acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = bin_acc_obj.result()
self.assertAllClose(result, 1.0, atol=1e-3)
def test_threshold(self):
bin_acc_obj_1 = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32", threshold=0.3
)
bin_acc_obj_2 = accuracy_metrics.BinaryAccuracy(
name="binary_accuracy", dtype="float32", threshold=0.9
)
y_true = np.array([[1], [1], [0], [0]])
y_pred = np.array([[0.98], [0.5], [0.1], [0.2]])
bin_acc_obj_1.update_state(y_true, y_pred)
bin_acc_obj_2.update_state(y_true, y_pred)
result_1 = bin_acc_obj_1.result()
result_2 = bin_acc_obj_2.result()
# Higher threshold must result in lower measured accuracy.
self.assertAllClose(result_1, 1.0)
self.assertAllClose(result_2, 0.75)
| BinaryAccuracyTest |
python | matplotlib__matplotlib | lib/matplotlib/ticker.py | {
"start": 9588,
"end": 9750
} | class ____(Formatter):
"""Always return the empty string."""
def __call__(self, x, pos=None):
# docstring inherited
return ''
| NullFormatter |
python | getsentry__sentry | src/sentry/services/base.py | {
"start": 0,
"end": 110
} | class ____:
name = ""
def __init__(self, debug: bool = False) -> None:
self.debug = debug
| Service |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/session_debug_testlib.py | {
"start": 3240,
"end": 60881
} | class ____(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
@classmethod
def setUpClass(cls):
if test.is_gpu_available():
cls._expected_partition_graph_count = 2
cls._expected_num_devices = 2
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._expected_partition_graph_count = 1
cls._expected_num_devices = 1
cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self._dump_root = tempfile.mkdtemp()
def tearDown(self):
ops.reset_default_graph()
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
file_io.delete_recursively(self._dump_root)
def _debug_urls(self, run_number=None):
raise NotImplementedError(
"_debug_urls() method is not implemented in the base test class.")
def _debug_dump_dir(self, run_number=None):
raise NotImplementedError(
"_debug_dump_dir() method is not implemented in the base test class.")
def _debug_run_and_get_dump(self,
sess,
fetches,
feed_dict=None,
debug_ops="DebugIdentity",
tolerate_debug_op_creation_failures=False,
global_step=-1,
validate=True,
expected_partition_graph_count=None):
"""Run fetches with debugging and obtain DebugDumpDir.
Args:
sess: the tf.compat.v1.Session to be used.
fetches: fetches of the Session.run().
feed_dict: feed dict for the Session.run().
debug_ops: name(s) of the debug ops to be used.
tolerate_debug_op_creation_failures: whether to tolerate debug op
creation failures.
global_step: Optional global step.
validate: whether to validate dumped tensors against graph.
expected_partition_graph_count: optional count of partition graphs to
assert on.
Returns:
1. Return values of the Session.run().
2. The DebugDumpDir object from the debugged run().
"""
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=debug_ops,
debug_urls=self._debug_urls(),
tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures,
global_step=global_step)
run_metadata = config_pb2.RunMetadata()
run_output = sess.run(fetches,
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata)
if expected_partition_graph_count is not None:
self.assertEqual(expected_partition_graph_count,
len(run_metadata.partition_graphs))
return run_output, debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs,
validate=validate)
def _generate_dump_from_simple_addition_graph(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
v_init_val = np.array([[2.0], [-1.0]])
# Use node names with overlapping namespace (i.e., parent directory) to
# test concurrent, non-racing directory creation.
u_name = "u"
v_name = "v"
w_name = "w"
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variable_v1.VariableV1(u_init, name=u_name)
v_init = constant_op.constant(v_init_val, shape=[2, 1])
v = variable_v1.VariableV1(v_init, name=v_name)
w = math_ops.matmul(u, v, name=w_name)
u.initializer.run()
v.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = "file://%s" % self._dump_root
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Invoke Session.run().
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
simple_add_results = collections.namedtuple("SimpleAddResults", [
"u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
"dump"
])
return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
w_name, dump)
def testCopyNodesHaveCorrectDebugOpsAndURLsAttributeValues(self):
with session.Session() as sess:
u = variable_v1.VariableV1(2.1, name="u")
v = variable_v1.VariableV1(20.0, name="v")
w = math_ops.multiply(u, v, name="w")
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
debug_utils.add_debug_tensor_watch(
run_options,
"u",
0, ["DebugNumericSummary(gated_grpc=True)", "DebugIdentity"],
debug_urls=debug_urls)
debug_utils.add_debug_tensor_watch(
run_options, "v", 0, ["DebugNumericSummary"], debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertAllClose(42.0, r)
u_copy_node_def = None
v_copy_node_def = None
for partition_graph in run_metadata.partition_graphs:
for node_def in partition_graph.node:
if debug_graphs.is_copy_node(node_def.name):
if node_def.name == "__copy_u_0":
u_copy_node_def = node_def
elif node_def.name == "__copy_v_0":
v_copy_node_def = node_def
self.assertIsNotNone(u_copy_node_def)
debug_ops_spec = u_copy_node_def.attr["debug_ops_spec"].list.s
self.assertEqual(2, len(debug_ops_spec))
self.assertEqual("DebugNumericSummary;%s;1" % debug_urls[0],
debug_ops_spec[0].decode("utf-8"))
self.assertEqual("DebugIdentity;%s;0" % debug_urls[0],
debug_ops_spec[1].decode("utf-8"))
self.assertIsNotNone(v_copy_node_def)
debug_ops_spec = v_copy_node_def.attr["debug_ops_spec"].list.s
self.assertEqual(1, len(debug_ops_spec))
self.assertEqual("DebugNumericSummary;%s;0" % debug_urls[0],
debug_ops_spec[0].decode("utf-8"))
def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertTrue(results.dump.loaded_partition_graphs())
# Since global_step is not explicitly specified, it should take its default
# value: -1.
self.assertEqual(-1, results.dump.core_metadata.global_step)
self.assertGreaterEqual(results.dump.core_metadata.session_run_index, 0)
self.assertGreaterEqual(results.dump.core_metadata.executor_step_index, 0)
self.assertEqual([], results.dump.core_metadata.input_names)
self.assertEqual([results.w.name], results.dump.core_metadata.output_names)
self.assertEqual([], results.dump.core_metadata.target_nodes)
# Verify the dumped tensor values for u and v.
self.assertEqual(2, results.dump.size)
self.assertAllClose([results.u_init_val],
results.dump.get_tensors("%s/read" % results.u_name, 0,
"DebugIdentity"))
self.assertAllClose([results.v_init_val],
results.dump.get_tensors("%s/read" % results.v_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
def testGetOpTypeWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertEqual(results.u.op.type,
results.dump.node_op_type(results.u_name))
self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))
self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))
with self.assertRaisesRegex(
ValueError, r"None of the .* device\(s\) has a node named "
):
results.dump.node_op_type("foo_bar")
  def testDumpStringTensorsWorks(self):
    """String-dtype tensors can be debug-dumped and read back intact."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      str1_init_val = np.array(b"abc")
      str2_init_val = np.array(b"def")

      str1_init = constant_op.constant(str1_init_val)
      str2_init = constant_op.constant(str2_init_val)

      str1_name = "str1"
      str2_name = "str2"
      str1 = variable_v1.VariableV1(str1_init, name=str1_name)
      str2 = variable_v1.VariableV1(str2_init, name=str2_name)
      # Concatenate str1 and str2
      str_concat = math_ops.add(str1, str2, name="str_concat")

      str1.initializer.run()
      str2.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()

      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()
      sess.run(str_concat, options=run_options, run_metadata=run_metadata)

      # String ops are located on CPU.
      self.assertEqual(1, len(run_metadata.partition_graphs))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      self.assertIn(str1_name, dump.nodes())
      self.assertIn(str2_name, dump.nodes())

      # Exactly the two watched reads should have been dumped.
      self.assertEqual(2, dump.size)

      self.assertEqual([str1_init_val],
                       dump.get_tensors("%s/read" % str1_name, 0,
                                        "DebugIdentity"))
      self.assertEqual([str2_init_val],
                       dump.get_tensors("%s/read" % str2_name, 0,
                                        "DebugIdentity"))

      # Sanity-check timestamps and on-disk dump sizes.
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
          0)
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
          0)

      self.assertGreater(
          dump.get_dump_sizes_bytes("%s/read" % str1_name, 0,
                                    "DebugIdentity")[0], 0)
      self.assertGreater(
          dump.get_dump_sizes_bytes("%s/read" % str2_name, 0,
                                    "DebugIdentity")[0], 0)
  def testDumpUninitializedVariable(self):
    """Watching uninitialized variables yields InconvertibleTensorProto dumps."""
    op_namespace = "testDumpUninitializedVariable"
    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      s_init_val = b"str1"

      u_name = "%s/u" % op_namespace
      s_name = "%s/s" % op_namespace

      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variable_v1.VariableV1(u_init, name=u_name)
      s_init = constant_op.constant(s_init_val)
      s = variable_v1.VariableV1(s_init, name=s_name)

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()

      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=debug_urls)
      debug_utils.add_debug_tensor_watch(
          run_options, s_name, 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()

      # Initialize u and s.
      # The watches fire during this initializer run, i.e. before u and s
      # hold values, so the dumps capture the uninitialized state.
      sess.run(variables.global_variables_initializer(),
               options=run_options,
               run_metadata=run_metadata)

      # Verify the dump file for the uninitialized value of u.
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      self.assertEqual(2, dump.size)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      # Verify that the variable is properly initialized by the run() call.
      u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
      s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
      self.assertEqual(1, len(u_vals))
      self.assertIsInstance(u_vals[0], debug_data.InconvertibleTensorProto)
      self.assertFalse(u_vals[0].initialized)
      self.assertEqual(1, len(s_vals))
      self.assertIsInstance(s_vals[0], debug_data.InconvertibleTensorProto)
      self.assertFalse(s_vals[0].initialized)

      # Call run() again, to check that u is initialized properly.
      self.assertAllClose(u_init_val, sess.run(u))
      self.assertEqual(s_init_val, sess.run(s))
def testDebugWhileLoopGeneratesMultipleDumps(self):
with session.Session(config=no_rewrite_session_config()) as sess:
num_iter = 10
# "u" is the Variable being updated in the loop.
u_name = "testDumpToFileWhileLoop/u"
u_namespace = u_name.split("/")[0]
u_init_val = np.array(11.0)
u_init = constant_op.constant(u_init_val)
u = variable_v1.VariableV1(u_init, name=u_name)
# "v" is the increment.
v_name = "testDumpToFileWhileLoop/v"
v_namespace = v_name.split("/")[0]
v_init_val = np.array(2.0)
v_init = constant_op.constant(v_init_val)
v = variable_v1.VariableV1(v_init, name=v_name)
u.initializer.run()
v.initializer.run()
i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")
def cond(i):
return math_ops.less(i, num_iter)
def body(i):
new_u = state_ops.assign_add(u, v)
new_i = math_ops.add(i, 1)
op = control_flow_ops.group(new_u)
new_i = control_flow_ops.with_dependencies([op], new_i)
return [new_i]
loop = while_loop.while_loop(cond, body, [i], parallel_iterations=10)
# Create RunOptions for debug-watching tensors
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Identity.
debug_utils.add_debug_tensor_watch(
run_options, "while/Identity", 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Add/y.
debug_utils.add_debug_tensor_watch(
run_options, "while/Add/y", 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(loop, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
self.assertEqual(num_iter, r)
u_val_final = sess.run(u)
self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)
# Verify dump files
self.assertTrue(os.path.isdir(self._dump_root))
u_glob_out = glob.glob(os.path.join(self._dump_root, "*", u_namespace))
v_glob_out = glob.glob(os.path.join(
self._dump_root, "*", v_namespace, "v"))
self.assertTrue(os.path.isdir(u_glob_out[0]))
self.assertTrue(os.path.isdir(v_glob_out[0]))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
# and 10 iterations of while/Add/y.
self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)
# Verify tensor values.
self.assertAllClose([u_init_val],
dump.get_tensors(u_name, 0, "DebugIdentity"))
self.assertAllClose([v_init_val],
dump.get_tensors("%s/read" % v_name, 0,
"DebugIdentity"))
while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
self.assertEqual(10, len(while_id_tensors))
for k in range(len(while_id_tensors)):
self.assertAllClose(np.array(k), while_id_tensors[k])
# Verify ascending timestamps from the while loops.
while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
"DebugIdentity")
while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes("while/Identity", 0,
"DebugIdentity")
self.assertEqual(10, len(while_id_rel_timestamps))
prev_rel_time = 0
prev_dump_size_bytes = while_id_dump_sizes_bytes[0]
for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,
while_id_dump_sizes_bytes):
self.assertGreaterEqual(rel_time, prev_rel_time)
self.assertEqual(dump_size_bytes, prev_dump_size_bytes)
prev_rel_time = rel_time
prev_dump_size_bytes = dump_size_bytes
# Test querying debug watch keys from node name.
watch_keys = dump.debug_watch_keys("while/Identity")
self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)
# Test querying debug datum instances from debug watch key.
self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
self.assertEqual([], dump.watch_key_to_data("foo"))
def testDebugWhileLoopWatchingWholeGraphWorks(self):
with session.Session() as sess:
loop_body = lambda i: math_ops.add(i, 2)
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = while_loop.while_loop(loop_cond, loop_body, [i])
loop_result, dump = self._debug_run_and_get_dump(sess, loop)
self.assertEqual(16, loop_result)
self.assertEqual(
[[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
self.assertEqual(
[[12], [14], [16]],
dump.get_tensors("while/NextIteration", 0, "DebugIdentity"))
  def testDebugTrainingDynamicRNNWorks(self):
    """A dynamic-RNN training step can be debug-watched via a denylist."""
    with session.Session() as sess:
      input_size = 3
      state_size = 2
      time_steps = 4
      batch_size = 2

      input_values = np.random.randn(time_steps, batch_size, input_size)
      sequence_length = np.random.randint(0, time_steps, size=batch_size)
      concat_inputs = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))

      outputs_dynamic, _ = rnn.dynamic_rnn(
          _RNNCellForTest(input_size, state_size),
          inputs=concat_inputs,
          sequence_length=sequence_length,
          time_major=True,
          dtype=dtypes.float32)
      # Arbitrary scalar objective so a train_op can be constructed.
      toy_loss = math_ops.reduce_sum(outputs_dynamic * outputs_dynamic)
      train_op = gradient_descent.GradientDescentOptimizer(
          learning_rate=0.1).minimize(toy_loss, name="train_op")

      sess.run(variables.global_variables_initializer())

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph_with_denylists(
          run_options,
          sess.graph,
          node_name_regex_denylist="(.*rnn/while/.*|.*TensorArray.*)",
          debug_urls=self._debug_urls())
      # b/36870549: Nodes with these name patterns need to be excluded from
      # tfdbg in order to prevent MSAN warnings of uninitialized Tensors
      # under both file:// and grpc:// debug URL schemes.

      run_metadata = config_pb2.RunMetadata()
      sess.run(train_op, feed_dict={concat_inputs: input_values},
               options=run_options, run_metadata=run_metadata)

      # Loading the dump must not raise; no further value assertions here.
      debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testDebugCondWatchingWholeGraphWorks(self):
with session.Session() as sess:
x = variable_v1.VariableV1(10.0, name="x")
y = variable_v1.VariableV1(20.0, name="y")
cond = tf_cond.cond(
x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))
sess.run(variables.global_variables_initializer())
cond_result, dump = self._debug_run_and_get_dump(sess, cond)
self.assertEqual(21, cond_result)
self.assertAllClose(
[21.0], dump.get_tensors("cond/Merge", 0, "DebugIdentity"))
  def testFindNodesWithBadTensorValues(self):
    """dump.find() locates all tensors containing inf/nan values."""
    with session.Session() as sess:
      u_name = "testFindNodesWithBadTensorValues/u"
      v_name = "testFindNodesWithBadTensorValues/v"
      w_name = "testFindNodesWithBadTensorValues/w"
      x_name = "testFindNodesWithBadTensorValues/x"
      y_name = "testFindNodesWithBadTensorValues/y"
      z_name = "testFindNodesWithBadTensorValues/z"

      u_init = constant_op.constant([2.0, 4.0])
      u = variable_v1.VariableV1(u_init, name=u_name)
      v_init = constant_op.constant([2.0, 1.0])
      v = variable_v1.VariableV1(v_init, name=v_name)

      # Expected output: [0.0, 3.0]
      w = math_ops.subtract(u, v, name=w_name)

      # Expected output: [inf, 1.3333]
      x = math_ops.div(u, w, name=x_name)

      # Expected output: [nan, 4.0]
      y = math_ops.multiply(w, x, name=y_name)

      z = math_ops.multiply(y, y, name=z_name)

      u.initializer.run()
      v.initializer.run()

      _, dump = self._debug_run_and_get_dump(
          sess, z,
          expected_partition_graph_count=self._expected_partition_graph_count)

      def has_bad_value(_, tensor):
        # Predicate passed to dump.find(): True if any element is nan or inf.
        return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))

      # Find all "offending tensors".
      bad_data = dump.find(has_bad_value)

      # Verify that the nodes with bad values are caught through running find
      # on the debug dump.
      self.assertLessEqual(3, len(bad_data))
      node_names = [datum.node_name for datum in bad_data]
      self.assertIn(x_name, node_names)
      self.assertIn(y_name, node_names)
      self.assertIn(z_name, node_names)

      # Test first_n kwarg of find(): Find the first offending tensor.
      first_bad_datum = dump.find(has_bad_value, first_n=1)
      self.assertEqual(1, len(first_bad_datum))
  def testFindInfOrNanWithOpNameExclusion(self):
    """dump.find()'s exclude_node_names regex filters out matching nodes."""
    with session.Session() as sess:
      u_name = "testFindInfOrNanWithOpNameExclusion/u"
      v_name = "testFindInfOrNanWithOpNameExclusion/v"
      w_name = "testFindInfOrNanWithOpNameExclusion/w"
      x_name = "testFindInfOrNanWithOpNameExclusion/x"
      y_name = "testFindInfOrNanWithOpNameExclusion/y"
      z_name = "testFindInfOrNanWithOpNameExclusion/z"

      u_init = constant_op.constant([2.0, 4.0])
      u = variable_v1.VariableV1(u_init, name=u_name)
      v_init = constant_op.constant([2.0, 1.0])
      v = variable_v1.VariableV1(v_init, name=v_name)

      # Expected output: [0.0, 3.0]
      w = math_ops.subtract(u, v, name=w_name)

      # Expected output: [inf, 1.3333]
      x = math_ops.div(u, w, name=x_name)

      # Expected output: [nan, 4.0]
      y = math_ops.multiply(w, x, name=y_name)

      z = math_ops.multiply(y, y, name=z_name)

      u.initializer.run()
      v.initializer.run()

      _, dump = self._debug_run_and_get_dump(
          sess, z,
          expected_partition_graph_count=self._expected_partition_graph_count)

      # Find all "offending tensors".
      # x produces inf but matches the exclusion regex, so it is skipped.
      bad_data = dump.find(debug_data.has_inf_or_nan,
                           exclude_node_names=".*/x$")

      # Verify that the nodes with bad values are caught through running find
      # on the debug dump.
      self.assertLessEqual(2, len(bad_data))
      # Assert that the node `x` should have been excluded.
      node_names = [datum.node_name for datum in bad_data]
      self.assertIn(y_name, node_names)
      self.assertIn(z_name, node_names)

      first_bad_datum = dump.find(
          debug_data.has_inf_or_nan, first_n=1, exclude_node_names=".*/x$")
      self.assertEqual(1, len(first_bad_datum))
def _session_run_for_graph_structure_lookup(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_name = "testDumpGraphStructureLookup/u"
v_name = "testDumpGraphStructureLookup/v"
w_name = "testDumpGraphStructureLookup/w"
u_init = constant_op.constant([2.0, 4.0])
u = variable_v1.VariableV1(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, w,
expected_partition_graph_count=self._expected_partition_graph_count)
return u_name, v_name, w_name, dump
  def testGraphStructureLookupGivesDevicesAndNodesInfo(self):
    """Device and node-existence lookups on the dump behave correctly."""
    u_name, _, _, dump = self._session_run_for_graph_structure_lookup()

    # Test num_devices().
    self.assertEqual(self._expected_num_devices, len(dump.devices()))

    # Test node_device().
    self.assertEqual(self._main_device, dump.node_device(u_name))

    # An unknown node name must raise.
    with self.assertRaisesRegex(
        ValueError, "does not exist in partition graphs"
    ):
      dump.node_device(u_name + "foo")

    # Test node_exists().
    self.assertTrue(dump.node_exists(u_name))
    self.assertTrue(dump.node_exists(u_name + "/read"))
    self.assertFalse(dump.node_exists(u_name + "/read" + "/foo"))
  def testGraphStructureLookupGivesNodesAndAttributes(self):
    """Node listing and attribute queries on the dump behave correctly."""
    u_name, _, _, dump = self._session_run_for_graph_structure_lookup()

    u_read_name = u_name + "/read"

    # Test node name list lookup of the DebugDumpDir object.
    if test_util.gpu_device_name():
      node_names = dump.nodes(
          device_name="/job:localhost/replica:0/task:0/device:GPU:0")
    else:
      node_names = dump.nodes()
    self.assertTrue(u_name in node_names)
    self.assertTrue(u_read_name in node_names)

    # Test querying node attributes.
    # u is a float32 variable of shape [2] (see the helper's constant).
    u_attr = dump.node_attributes(u_name)
    self.assertEqual(dtypes.float32, u_attr["dtype"].type)
    self.assertEqual(1, len(u_attr["shape"].shape.dim))
    self.assertEqual(2, u_attr["shape"].shape.dim[0].size)

    with self.assertRaisesRegex(
        ValueError, r"None of the .* device\(s\) has a node named "
    ):
      dump.node_attributes("foo")
  def testGraphStructureLookupGivesDebugWatchKeys(self):
    """Watch-key lookups map node names to node:slot:debug_op keys."""
    u_name, v_name, w_name, dump = (
        self._session_run_for_graph_structure_lookup())

    # Test querying the debug watch keys with node names.
    self.assertEqual(["%s:0:DebugIdentity" % u_name],
                     dump.debug_watch_keys(u_name))
    self.assertEqual(["%s:0:DebugIdentity" % v_name],
                     dump.debug_watch_keys(v_name))
    self.assertEqual(["%s:0:DebugIdentity" % w_name],
                     dump.debug_watch_keys(w_name))
    # Unknown node names yield an empty list, not an error.
    self.assertEqual([], dump.debug_watch_keys("foo"))

    # Test querying debug datum instances from debug watch.
    u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
    self.assertEqual(1, len(u_data))
    self.assertEqual(u_name, u_data[0].node_name)
    self.assertEqual(0, u_data[0].output_slot)
    self.assertEqual("DebugIdentity", u_data[0].debug_op)
    self.assertGreaterEqual(u_data[0].timestamp, 0)

    self.assertEqual([], dump.watch_key_to_data("foo"))
  def testGraphStructureLookupGivesNodeInputsAndRecipients(self):
    """Input/recipient/transitive-input lookups reflect the u -> v -> w graph."""
    u_name, v_name, w_name, dump = (
        self._session_run_for_graph_structure_lookup())

    u_read_name = u_name + "/read"

    # Test the inputs lookup of the DebugDumpDir object.
    self.assertEqual([], dump.node_inputs(u_name))
    self.assertEqual([u_name], dump.node_inputs(u_read_name))
    # v = u + u and w = v + v, hence the doubled input lists.
    self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
    self.assertEqual([v_name] * 2, dump.node_inputs(w_name))

    self.assertEqual([], dump.node_inputs(u_name, is_control=True))
    self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
    self.assertEqual([], dump.node_inputs(v_name, is_control=True))
    self.assertEqual([], dump.node_inputs(w_name, is_control=True))

    # Test the outputs recipient lookup of the DebugDumpDir object.
    self.assertTrue(u_read_name in dump.node_recipients(u_name))
    self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
    self.assertEqual(2, dump.node_recipients(v_name).count(w_name))

    self.assertEqual([], dump.node_recipients(u_name, is_control=True))
    self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
    self.assertEqual([], dump.node_recipients(v_name, is_control=True))
    self.assertEqual([], dump.node_recipients(w_name, is_control=True))

    # Test errors raised on invalid node names.
    with self.assertRaisesRegex(
        ValueError, r"None of the .* device\(s\) has a node named "
    ):
      dump.node_inputs(u_name + "foo")
    with self.assertRaisesRegex(
        ValueError, r"None of the .* device\(s\) has a node named "
    ):
      dump.node_recipients(u_name + "foo")

    # Test transitive_inputs().
    self.assertEqual([], dump.transitive_inputs(u_name))
    self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
    self.assertEqual(
        set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
    self.assertEqual(
        set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))

    with self.assertRaisesRegex(
        ValueError, r"None of the .* device\(s\) has a node named "
    ):
      dump.transitive_inputs(u_name + "foo")
def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):
_, _, _, dump = self._session_run_for_graph_structure_lookup()
# Now load the dump again, without the partition graphs, so we can check
# errors are not raised because the partition graphs are loaded from the
# dump directory.
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertTrue(dump.loaded_partition_graphs())
def testGraphPathFindingOnControlEdgesWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
v1 = variable_v1.VariableV1(1.0, name="v1")
v2 = variable_v1.VariableV1(2.0, name="v2")
v3 = variable_v1.VariableV1(3.0, name="v3")
a = math_ops.add(v1, v2, name="a")
with ops.control_dependencies([a]):
c = math_ops.subtract(v3, v3, name="c")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, c)
self.assertEqual(["v1", "v1/read", "a", "c"],
dump.find_some_path("v1", "c"))
self.assertIsNone(dump.find_some_path("v1", "c", include_control=False))
def testGraphPathFindingReverseRefEdgeWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
v = variable_v1.VariableV1(10.0, name="v")
delta = variable_v1.VariableV1(1.0, name="delta")
inc_v = state_ops.assign_add(v, delta, name="inc_v")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, inc_v)
self.assertEqual(
["delta", "delta/read", "inc_v", "v"],
dump.find_some_path("delta", "v", include_reversed_ref=True))
self.assertIsNone(dump.find_some_path("delta", "v"))
  def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):
    """Causality validation flags dump timestamps that violate graph order.

    Dump filenames encode a timestamp after the final underscore; the test
    renames files to fabricate an out-of-order pair and expects validation
    to fail, then verifies validate=False and equal timestamps both pass.
    """
    with session.Session(config=no_rewrite_session_config()) as sess:
      u_name = "testDumpCausalityCheck/u"
      v_name = "testDumpCausalityCheck/v"
      w_name = "testDumpCausalityCheck/w"

      u_init = constant_op.constant([2.0, 4.0])
      u = variable_v1.VariableV1(u_init, name=u_name)
      v = math_ops.add(u, u, name=v_name)
      w = math_ops.add(v, v, name=w_name)

      u.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())

      run_metadata = config_pb2.RunMetadata()
      sess.run(w, options=run_options, run_metadata=run_metadata)

      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      # First, loading the original dump without supplying the
      # partition_graphs should not cause a LookupError, validation occurs
      # only with partition_graphs loaded.
      debug_data.DebugDumpDir(self._dump_root)

      # Now, loading the original dump with partition graphs supplied should
      # succeed. The validation should pass quietly.
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      # Get the dump file names and compute their timestamps.
      self.assertEqual(
          1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
      v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]

      self.assertEqual(
          1, len(dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")))
      w_file_path = dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")[0]

      # The timestamp is the integer suffix after the last underscore.
      v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])
      w_timestamp = int(w_file_path[w_file_path.rindex("_") + 1:])

      # Swap and slightly shift the time stamps of the last two dumped tensors,
      # to simulate "causality violation", which can happen if the dump
      # directory contains incomplete data and/or mixes data from different
      # Session.run() calls.
      v_file_path_1 = v_file_path[:v_file_path.rindex(
          "_")] + "_%d" % w_timestamp
      w_file_path_1 = w_file_path[:w_file_path.rindex("_")] + "_%d" % (
          v_timestamp - 1)

      os.rename(v_file_path, v_file_path_1)
      os.rename(w_file_path, w_file_path_1)

      # Load the dump directory again. Now a ValueError is expected to be
      # raised due to the timestamp swap.
      with self.assertRaisesRegex(ValueError, "Causality violated"):
        dump = debug_data.DebugDumpDir(
            self._dump_root, partition_graphs=run_metadata.partition_graphs)

      # Loading the dump directory with kwarg "validate" set explicitly to
      # False should get rid of the error.
      dump = debug_data.DebugDumpDir(
          self._dump_root,
          partition_graphs=run_metadata.partition_graphs,
          validate=False)

      # Next, set the two times stamps to be the same, which should be fine.
      v_file_path_2 = v_file_path[:v_file_path.rindex(
          "_")] + "_%d" % w_timestamp
      w_file_path_2 = w_file_path[:w_file_path.rindex(
          "_")] + "_%d" % w_timestamp

      os.rename(v_file_path_1, v_file_path_2)
      os.rename(w_file_path_1, w_file_path_2)

      debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
  def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
    """Watching a subset of a multi-output node's slots validates cleanly."""
    with session.Session() as sess:
      x_name = "oneOfTwoSlots/x"
      u_name = "oneOfTwoSlots/u"
      v_name = "oneOfTwoSlots/v"
      w_name = "oneOfTwoSlots/w"
      y_name = "oneOfTwoSlots/y"

      x = variable_v1.VariableV1([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
      sess.run(x.initializer)

      # unique_with_counts produces multiple outputs; u has several slots.
      unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)

      v = math_ops.add(unique_x, unique_x, name=v_name)
      w = math_ops.add(indices, indices, name=w_name)
      y = math_ops.add(w, w, name=y_name)

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      # Watch only the first output slot of u, even though it has two output
      # slots.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=self._debug_urls())
      debug_utils.add_debug_tensor_watch(
          run_options, w_name, 0, debug_urls=self._debug_urls())
      debug_utils.add_debug_tensor_watch(
          run_options, y_name, 0, debug_urls=self._debug_urls())

      run_metadata = config_pb2.RunMetadata()
      sess.run([v, y], options=run_options, run_metadata=run_metadata)

      # validate=True: the partial watch must not trip the causality check.
      dump = debug_data.DebugDumpDir(
          self._dump_root,
          partition_graphs=run_metadata.partition_graphs,
          validate=True)

      self.assertAllClose([1, 3, 7],
                          dump.get_tensors(u_name, 0, "DebugIdentity")[0])
  def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
    """Test watching output slots not attached to any outgoing edges."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      u = constant_op.constant(u_init_val, shape=[2, 2], name="u")

      # Create a control edge from a node with an output: From u to z.
      # Node u will get executed only because of the control edge. The output
      # tensor u:0 is not attached to any outgoing edge in the graph. This test
      # checks that the debugger can watch such a tensor.
      with ops.control_dependencies([u]):
        z = control_flow_ops.no_op(name="z")

      _, dump = self._debug_run_and_get_dump(sess, z)

      # Assert that the DebugIdentity watch on u works properly.
      self.assertEqual(1, len(dump.dumped_tensor_data))
      datum = dump.dumped_tensor_data[0]
      self.assertEqual("u", datum.node_name)
      self.assertEqual(0, datum.output_slot)
      self.assertEqual("DebugIdentity", datum.debug_op)
      self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
  def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
    """Watch output slots on Variable-updating ops, with no emitted edges."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      u_init = constant_op.constant(10.0)
      u = variable_v1.VariableV1(u_init, name="gdo/u")
      v_init = constant_op.constant(20.0)
      v = variable_v1.VariableV1(v_init, name="gdo/v")

      w = math_ops.multiply(u, v, name="gdo/w")

      # gdo stands for GradientDescentOptimizer.
      train_op = gradient_descent.GradientDescentOptimizer(
          learning_rate=0.1).minimize(
              w, name="gdo/train")

      u.initializer.run()
      v.initializer.run()

      _, dump = self._debug_run_and_get_dump(sess, train_op)

      # The ApplyGradientDescent op's output slot carries the post-update
      # variable value even though nothing consumes it downstream.
      update_u_data = dump.watch_key_to_data(
          "gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
      self.assertEqual(1, len(update_u_data))

      # Gradient descent on u: w = u * v, so dw / du = v.
      # Updated value of u should be:
      #   10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
      self.assertAllClose(8.0, update_u_data[0].get_tensor())

      update_v_data = dump.watch_key_to_data(
          "gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
      self.assertEqual(1, len(update_v_data))

      # Gradient descent on u: w = u * v, so dw / dv = u.
      # Updated value of u should be:
      #   20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
      self.assertAllClose(19.0, update_v_data[0].get_tensor())

      # Verify that the Variables u and v are updated properly.
      self.assertAllClose(8.0, sess.run(u))
      self.assertAllClose(19.0, sess.run(v))
  def testAllowsWatchingUnconnectedOutputTensor(self):
    """Watch an output slot not emitting any edges.

    (Not even control edges from the node.)
    """
    with session.Session() as sess:
      x_init = constant_op.constant([2, 2, 3, 5, 5])
      x = variable_v1.VariableV1(x_init, name="unconnected/x")

      # The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
      # graph. Let the debugger watch the unused slot 1.
      unique_x, _ = array_ops.unique(x, name="unconnected/unique_x")
      y = math_ops.add(unique_x, [0, 1, 2], name="unconnected/y")

      x.initializer.run()

      # Verify that only slot 0 of unique_x has recipients, while slot 1 of the
      # same node does not have recipients.
      unique_x_slot_0_recipients = []
      unique_x_slot_1_recipients = []
      for op in sess.graph.get_operations():
        for inp in op.inputs:
          if inp.name == "unconnected/unique_x:0":
            unique_x_slot_0_recipients.append(op.name)
          elif inp.name == "unconnected/unique_x:1":
            unique_x_slot_1_recipients.append(op.name)

      self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
      self.assertEqual([], unique_x_slot_1_recipients)

      y_result, dump = self._debug_run_and_get_dump(sess, y)
      self.assertAllClose([2, 4, 7], y_result)

      # Assert that the connected slot (slot 0) is dumped properly.
      unique_x_slot_0_dumps = dump.watch_key_to_data(
          "unconnected/unique_x:0:DebugIdentity")
      self.assertEqual(1, len(unique_x_slot_0_dumps))
      self.assertEqual("unconnected/unique_x",
                       unique_x_slot_0_dumps[0].node_name)
      self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
      self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())

      # Assert that the unconnected slot (slot 1) is dumped properly.
      unique_x_slot_1_dumps = dump.watch_key_to_data(
          "unconnected/unique_x:1:DebugIdentity")
      self.assertEqual(1, len(unique_x_slot_1_dumps))
      self.assertEqual("unconnected/unique_x",
                       unique_x_slot_1_dumps[0].node_name)
      self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
      self.assertAllClose([0, 0, 1, 2, 2],
                          unique_x_slot_1_dumps[0].get_tensor())
  def testSuccessiveDebuggingRunsIncreasesCounters(self):
    """Test repeated Session.run() calls with debugger increments counters."""
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32, name="successive/ph")
      x = array_ops.transpose(ph, name="mismatch/x")
      y = array_ops.squeeze(ph, name="mismatch/y")

      _, dump1 = self._debug_run_and_get_dump(
          sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=1)
      self.assertEqual(1, dump1.core_metadata.global_step)
      self.assertGreaterEqual(dump1.core_metadata.session_run_index, 0)
      self.assertEqual(0, dump1.core_metadata.executor_step_index)
      self.assertEqual([ph.name], dump1.core_metadata.input_names)
      self.assertEqual([x.name], dump1.core_metadata.output_names)
      self.assertEqual([], dump1.core_metadata.target_nodes)
      # The dump root must be emptied between runs; each run re-creates it.
      file_io.delete_recursively(self._dump_root)

      # Calling run() with the same feed, same output and same debug watch
      # options should increment both session_run_index and
      # executor_step_index.
      _, dump2 = self._debug_run_and_get_dump(
          sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=2)
      self.assertEqual(2, dump2.core_metadata.global_step)
      self.assertEqual(dump1.core_metadata.session_run_index + 1,
                       dump2.core_metadata.session_run_index)
      self.assertEqual(dump1.core_metadata.executor_step_index + 1,
                       dump2.core_metadata.executor_step_index)
      self.assertEqual([ph.name], dump2.core_metadata.input_names)
      self.assertEqual([x.name], dump2.core_metadata.output_names)
      self.assertEqual([], dump2.core_metadata.target_nodes)
      file_io.delete_recursively(self._dump_root)

      # NOTE(review): run_options built here appears unused -- the subsequent
      # call goes through self._debug_run_and_get_dump(global_step=3), which
      # presumably constructs its own options. Confirm and consider removing.
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options, sess.graph, debug_urls=self._debug_urls(), global_step=3)

      # Calling run() with a different output should increment
      # session_run_index, but not executor_step_index.
      _, dump3 = self._debug_run_and_get_dump(
          sess, y, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=3)
      self.assertEqual(3, dump3.core_metadata.global_step)
      self.assertEqual(dump2.core_metadata.session_run_index + 1,
                       dump3.core_metadata.session_run_index)
      self.assertEqual(0, dump3.core_metadata.executor_step_index)
      self.assertEqual([ph.name], dump3.core_metadata.input_names)
      self.assertEqual([y.name], dump3.core_metadata.output_names)
      self.assertEqual([], dump3.core_metadata.target_nodes)
  def testDebuggingDuringOpError(self):
    """Test the debug tensor dumping when error occurs in graph runtime."""
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
      x = array_ops.transpose(ph, name="mismatch/x")
      m = constant_op.constant(
          np.array(
              [[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
      y = math_ops.matmul(m, x, name="mismatch/y")

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())

      # The fed shape makes the matmul dimensions incompatible, so the run
      # is expected to fail at graph-execution time.
      with self.assertRaises(errors.OpError):
        sess.run(y,
                 options=run_options,
                 feed_dict={ph: np.array([[-3.0], [0.0]])})

      dump = debug_data.DebugDumpDir(self._dump_root)

      self.assertGreaterEqual(dump.core_metadata.session_run_index, 0)
      self.assertGreaterEqual(dump.core_metadata.executor_step_index, 0)
      self.assertEqual([ph.name], dump.core_metadata.input_names)
      self.assertEqual([y.name], dump.core_metadata.output_names)
      self.assertEqual([], dump.core_metadata.target_nodes)

      # Despite the fact that the run() call errored out and partition_graphs
      # are not available via run_metadata, the partition graphs should still
      # have been loaded from the dump directory.
      self.assertTrue(dump.loaded_partition_graphs())

      # Tensors computed before the failure point should have been dumped.
      m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
      self.assertEqual(1, len(m_dumps))
      self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())

      x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
      self.assertEqual(1, len(x_dumps))
      self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
  def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
    """DebugNumericSummary on an initialized tensor yields correct stats."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      a = variable_v1.VariableV1([
          np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf, -np.inf,
          np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
      ],
                                 dtype=np.float32,
                                 name="numeric_summary/a")
      b = variable_v1.VariableV1(
          [0.0] * 18, dtype=np.float32, name="numeric_summary/b")
      c = math_ops.add(a, b, name="numeric_summary/c")
      sess.run(variables.global_variables_initializer())
      _, dump = self._debug_run_and_get_dump(
          sess, c, debug_ops=["DebugNumericSummary"])
      self.assertTrue(dump.loaded_partition_graphs())
      # Expected summary layout: [is_initialized, element count, #NaN, #-Inf,
      # #negative, #zero, #positive, #+Inf, min, max, mean, variance,
      # dtype enum, ndims, dim sizes...]. The counts and moments below match
      # the 18-element initializer of `a` (4 NaN, 2 -Inf, 2 negative, 3 zero,
      # 2 positive, 5 +Inf; finite values have min -3, max 7, mean 6/7).
      self.assertAllClose([[
          1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714286,
          8.97959184, 1.0, 1.0, 18.0
      ]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
  def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
    """DebugNumericSummary correctly reports an uninitialized variable."""
    with session.Session() as sess:
      a = variable_v1.VariableV1([42],
                                 dtype=np.float32,
                                 name="numeric_summary_uninit/a")
      # Run only the initializer, so the watched variable itself is read in
      # its uninitialized state.
      _, dump = self._debug_run_and_get_dump(
          sess, a.initializer, debug_ops=["DebugNumericSummary"])
      self.assertTrue(dump.loaded_partition_graphs())
      # DebugNumericSummary output should reflect the uninitialized state of
      # the watched tensor.
      numeric_summary = dump.get_tensors("numeric_summary_uninit/a", 0,
                                         "DebugNumericSummary")[0]
      # All element counts (indices 0-7) are zero for an empty summary.
      self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                          numeric_summary[0:8])
      # Check dtype (index 12), ndims (index 13) and dimension sizes (index
      # 14+).
      self.assertAllClose([1.0, 1.0, 1.0], numeric_summary[12:])
      # min/max/mean/variance over no elements come out as +Inf, -Inf, NaN,
      # NaN respectively (indices 8-11).
      self.assertTrue(np.isinf(numeric_summary[8]))
      self.assertGreater(numeric_summary[8], 0.0)
      self.assertTrue(np.isinf(numeric_summary[9]))
      self.assertLess(numeric_summary[9], 0.0)
      self.assertTrue(np.isnan(numeric_summary[10]))
      self.assertTrue(np.isnan(numeric_summary[11]))
  def testDebugNumericSummaryFailureIsToleratedWhenOrdered(self):
    """String tensors break DebugNumericSummary unless failures are tolerated."""
    with session.Session() as sess:
      a = variable_v1.VariableV1("1", name="a")
      b = variable_v1.VariableV1("3", name="b")
      c = variable_v1.VariableV1("2", name="c")
      d = math_ops.add(a, b, name="d")
      e = math_ops.add(d, c, name="e")
      n = parsing_ops.string_to_number(e, name="n")
      m = math_ops.add(n, n, name="m")
      sess.run(variables.global_variables_initializer())
      # Using DebugNumericSummary on sess.run(m) with the default
      # tolerate_debug_op_creation_failures=False should error out due to the
      # presence of string-dtype Tensors in the graph.
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary"],
          debug_urls=self._debug_urls())
      with self.assertRaises(errors.FailedPreconditionError):
        sess.run(m, options=run_options, run_metadata=run_metadata)
      # Using tolerate_debug_op_creation_failures=True should get rid of the
      # error.
      m_result, dump = self._debug_run_and_get_dump(
          sess, m, debug_ops=["DebugNumericSummary"],
          tolerate_debug_op_creation_failures=True)
      # String concat gives "1" + "3" + "2" == "132"; m = 132 + 132 = 264.
      self.assertEqual(264, m_result)
      # The integer-dtype Tensors in the graph should have been dumped
      # properly.
      self.assertIn("n:0:DebugNumericSummary", dump.debug_watch_keys("n"))
      self.assertIn("m:0:DebugNumericSummary", dump.debug_watch_keys("m"))
  def testDebugNumericSummaryInvalidAttributesStringAreCaught(self):
    """Invalid attribute keys in a debug-op spec raise FailedPreconditionError
    naming the offending key(s)."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      a = variable_v1.VariableV1(10.0, name="a")
      b = variable_v1.VariableV1(0.0, name="b")
      c = variable_v1.VariableV1(0.0, name="c")
      x = math_ops.divide(a, b, name="x")
      y = math_ops.multiply(x, c, name="y")
      sess.run(variables.global_variables_initializer())
      # Case 1: a single unknown attribute ("foo").
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary(foo=1.0)"],
          debug_urls=self._debug_urls())
      with self.assertRaisesRegex(
          errors.FailedPreconditionError,
          r"1 attribute key\(s\) were not valid for debug node "
          r"__dbg_.:0_0_DebugNumericSummary: foo",
      ):
        sess.run(y, options=run_options, run_metadata=run_metadata)
      # Case 2: two unknown attributes ("foo" and "bar").
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary(foo=1.0; bar=false)"],
          debug_urls=self._debug_urls())
      with self.assertRaisesRegex(
          errors.FailedPreconditionError,
          r"2 attribute key\(s\) were not valid for debug node "
          r"__dbg_.:0_0_DebugNumericSummary:",
      ):
        sess.run(y, options=run_options, run_metadata=run_metadata)
      # Case 3: a mix of an unknown attribute and a valid one
      # (mute_if_healthy); only the invalid key should be reported.
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary(foo=1.0; mute_if_healthy=true)"],
          debug_urls=self._debug_urls())
      with self.assertRaisesRegex(
          errors.FailedPreconditionError,
          r"1 attribute key\(s\) were not valid for debug node "
          r"__dbg_.:0_0_DebugNumericSummary: foo",
      ):
        sess.run(y, options=run_options, run_metadata=run_metadata)
  def testDebugNumericSummaryMuteOnHealthyMutesOnlyHealthyTensorDumps(self):
    """mute_if_healthy=true suppresses dumps only for healthy tensors."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      a = variable_v1.VariableV1(10.0, name="a")
      b = variable_v1.VariableV1(0.0, name="b")
      c = variable_v1.VariableV1(0.0, name="c")
      # x = 10/0 = +Inf and y = Inf*0 = NaN: both "unhealthy", so they are
      # dumped even with mute_if_healthy=true.
      x = math_ops.divide(a, b, name="x")
      y = math_ops.multiply(x, c, name="y")
      sess.run(variables.global_variables_initializer())
      # Here, validate=False is necessary to avoid causality check error.
      # TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
      # debug ops with mute_if_healthy=false attribute during validation.
      _, dump = self._debug_run_and_get_dump(
          sess, y, debug_ops=["DebugNumericSummary(mute_if_healthy=true)"],
          validate=False)
      self.assertLessEqual(2, dump.size)
      self.assertAllClose([[
          1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, np.inf, -np.inf, np.nan,
          np.nan, 1.0, 0.0
      ]], dump.get_tensors("x", 0, "DebugNumericSummary"))
      self.assertAllClose([[
          1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, np.inf, -np.inf, np.nan,
          np.nan, 1.0, 0.0
      ]], dump.get_tensors("y", 0, "DebugNumericSummary"))
      # Another run with the default mute_if_healthy (false) value should
      # dump all the tensors.
      file_io.delete_recursively(self._dump_root)
      _, dump = self._debug_run_and_get_dump(
          sess, y, debug_ops=["DebugNumericSummary()"])
      self.assertLessEqual(8, dump.size)
  def testDebugNumericSummaryMuteOnHealthyAndCustomBoundsWork(self):
    """Custom upper_bound interacts with mute_if_healthy as expected."""
    with session.Session() as sess:
      a = variable_v1.VariableV1([10.0, 10.0], name="a")
      b = variable_v1.VariableV1([10.0, 2.0], name="b")
      x = math_ops.add(a, b, name="x")  # [20.0, 12.0]
      y = math_ops.divide(x, b, name="y")  # [2.0, 6.0]
      sess.run(variables.global_variables_initializer())
      # Here, validate=False is necessary to avoid causality check error.
      # TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
      # debug ops with mute_if_healthy=false attribute during validation.
      _, dump = self._debug_run_and_get_dump(
          sess, y, debug_ops=[
              "DebugNumericSummary(mute_if_healthy=true; upper_bound=11.0)"],
          validate=False)
      # Only x ([20, 12]) violates upper_bound=11.0; y ([2, 6]) is healthy
      # and therefore muted, so exactly one dump is produced.
      self.assertEqual(1, dump.size)
      self.assertAllClose([[
          1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 12.0, 20.0, 16.0, 16.0, 1.0,
          1.0, 2.0]], dump.get_tensors("x", 0, "DebugNumericSummary"))
  def testDebugQueueOpsDoesNotoErrorOut(self):
    """Watching queue ops must not fail; queue resource dumps are opaque.

    NOTE(review): method name has a typo ("Noto" -> "Not"); kept as-is since
    renaming would change the test id.
    """
    with session.Session() as sess:
      q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
      q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")
      _, dump = self._debug_run_and_get_dump(sess, q_init)
      self.assertTrue(dump.loaded_partition_graphs())
      # The queue handle itself cannot be converted to a numpy array; it is
      # represented as an InconvertibleTensorProto instead.
      fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
      self.assertIsInstance(fifo_queue_tensor,
                            debug_data.InconvertibleTensorProto)
      self.assertTrue(fifo_queue_tensor.initialized)
      # The enqueued component values, by contrast, dump as regular tensors.
      self.assertAllClose(
          [101.0, 202.0, 303.0],
          dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
  def testLookUpNodePythonTracebackWorks(self):
    """node_traceback works after set_python_graph, for node and tensor names,
    and raises appropriately before the graph is set or for unknown nodes."""
    with session.Session() as sess:
      u_init = constant_op.constant(10.0)
      u = variable_v1.VariableV1(u_init, name="traceback/u")
      v_init = constant_op.constant(20.0)
      v = variable_v1.VariableV1(v_init, name="traceback/v")
      w = math_ops.multiply(u, v, name="traceback/w")
      sess.run(variables.global_variables_initializer())
      _, dump = self._debug_run_and_get_dump(sess, w)
      # Prior to setting the Python graph, attempts to do traceback lookup
      # should lead to exceptions.
      with self.assertRaisesRegex(
          LookupError, "Python graph is not available for traceback lookup"
      ):
        dump.node_traceback("traceback/w")
      dump.set_python_graph(sess.graph)
      # After setting the Python graph, attempts to look up nonexistent nodes
      # should lead to exceptions.
      with self.assertRaisesRegex(
          KeyError, r"Cannot find node \"foo\" in Python graph"
      ):
        dump.node_traceback("foo")
      # Lookup should work with node name input.
      traceback = dump.node_traceback("traceback/w")
      self.assertIsInstance(traceback, tuple)
      self.assertGreater(len(traceback), 0)
      for trace in traceback:
        self.assertIsInstance(trace, tuple)
      # Lookup should also work with tensor name input.
      traceback = dump.node_traceback("traceback/w:0")
      self.assertIsInstance(traceback, tuple)
      self.assertGreater(len(traceback), 0)
      for trace in traceback:
        self.assertIsInstance(trace, tuple)
| SessionDebugTestBase |
python | scipy__scipy | scipy/optimize/tests/test_linprog.py | {
"start": 11060,
"end": 68693
} | class ____:
"""
Base class for `linprog` tests. Generally, each test will be performed
once for every derived class of LinprogCommonTests, each of which will
typically change self.options and/or self.method. Effectively, these tests
are run for many combination of method (simplex, revised simplex, and
interior point) and options (such as pivoting rule or sparse treatment).
"""
##################
# Targeted Tests #
##################
    def test_callback(self):
        """Run the shared callback test with this class's method/options."""
        generic_callback_test(self)
    def test_disp(self):
        """The ``disp`` option must not change results or raise."""
        # test that display option does not break anything.
        A, b, c = lpgen_2d(20, 20)
        res = linprog(c, A_ub=A, b_ub=b, method=self.method,
                      options={"disp": True})
        _assert_success(res, desired_fun=-63.47967608020187)  # method='highs' solution
    def test_docstring_example(self):
        """The example from the linprog docstring solves to fun == -22."""
        # Example from linprog docstring.
        c = [-1, 4]
        A = [[-3, 1], [1, 2]]
        b = [6, 4]
        x0_bounds = (None, None)
        x1_bounds = (-3, None)
        res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds),
                      options=self.options, method=self.method)
        _assert_success(res, desired_fun=-22)
    def test_type_error(self):
        """A non-numeric b_eq must raise TypeError."""
        # (presumably) checks that linprog recognizes type errors
        # This is tested more carefully in test__linprog_clean_inputs.py
        c = [1]
        A_eq = [[1]]
        b_eq = "hello"
        assert_raises(TypeError, linprog,
                      c, A_eq=A_eq, b_eq=b_eq,
                      method=self.method, options=self.options)
    def test_aliasing_b_ub(self):
        """linprog must not mutate the caller's b_ub array."""
        # (presumably) checks that linprog does not modify b_ub
        # This is tested more carefully in test__linprog_clean_inputs.py
        c = np.array([1.0])
        A_ub = np.array([[1.0]])
        b_ub_orig = np.array([3.0])
        b_ub = b_ub_orig.copy()
        bounds = (-4.0, np.inf)
        # A_eq and b_eq are not defined locally here; they resolve to
        # module-level defaults defined elsewhere in this file.
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_fun=-4, desired_x=[-4])
        assert_allclose(b_ub_orig, b_ub)
    def test_aliasing_b_eq(self):
        """linprog must not mutate the caller's b_eq array."""
        # (presumably) checks that linprog does not modify b_eq
        # This is tested more carefully in test__linprog_clean_inputs.py
        c = np.array([1.0])
        A_eq = np.array([[1.0]])
        b_eq_orig = np.array([3.0])
        b_eq = b_eq_orig.copy()
        bounds = (-4.0, np.inf)
        # A_ub and b_ub resolve to module-level defaults (not defined locally).
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_fun=3, desired_x=[3])
        assert_allclose(b_eq_orig, b_eq)
    def test_non_ndarray_args(self):
        """Plain Python lists are accepted wherever arrays are expected."""
        # (presumably) checks that linprog accepts list in place of arrays
        # This is tested more carefully in test__linprog_clean_inputs.py
        c = [1.0]
        A_ub = [[1.0]]
        b_ub = [3.0]
        A_eq = [[1.0]]
        b_eq = [2.0]
        bounds = (-1.0, 10.0)
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_fun=2, desired_x=[2])
def test_unknown_options(self):
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
def f(c, A_ub=None, b_ub=None, A_eq=None,
b_eq=None, bounds=None, options=None):
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=options)
o = {key: self.options[key] for key in self.options}
o['spam'] = 42
with pytest.warns(OptimizeWarning):
f(c, A_ub=A_ub, b_ub=b_ub, options=o)
    def test_integrality_without_highs(self):
        """Non-HiGHS methods warn on ``integrality`` and solve the relaxation."""
        # ensure that using `integrality` parameter without `method='highs'`
        # raises warning and produces correct solution to relaxed problem
        # source: https://en.wikipedia.org/wiki/Integer_programming#Example
        A_ub = np.array([[-1, 1], [3, 2], [2, 3]])
        b_ub = np.array([1, 12, 12])
        c = -np.array([0, 1])
        bounds = [(0, np.inf)] * len(c)
        integrality = [1] * len(c)
        with pytest.warns(OptimizeWarning):
            res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
                          method=self.method, integrality=integrality)
        # Relaxed (non-integer) optimum of the wiki IP example.
        np.testing.assert_allclose(res.x, [1.8, 2.8])
        np.testing.assert_allclose(res.fun, -2.8)
    def test_invalid_inputs(self):
        """Malformed bounds and shape-mismatched constraints raise ValueError."""
        def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None):
            linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                    method=self.method, options=self.options)
        # Test ill-formatted bounds
        assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4)])
        with warnings.catch_warnings():
            # Ragged bounds lists trigger a deprecation warning on conversion;
            # silence it so only the ValueError is observed.
            warnings.filterwarnings(
                "ignore", "Creating an ndarray from ragged", VisibleDeprecationWarning)
            assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4), (3, 4, 5)])
        assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, -2), (1, 2)])
        # Test other invalid inputs
        assert_raises(ValueError, f, [1, 2], A_ub=[[1, 2]], b_ub=[1, 2])
        assert_raises(ValueError, f, [1, 2], A_ub=[[1]], b_ub=[1])
        assert_raises(ValueError, f, [1, 2], A_eq=[[1, 2]], b_eq=[1, 2])
        assert_raises(ValueError, f, [1, 2], A_eq=[[1]], b_eq=[1])
        assert_raises(ValueError, f, [1, 2], A_eq=[1], b_eq=1)
        # this last check doesn't make sense for sparse presolve
        if ("_sparse_presolve" in self.options and
                self.options["_sparse_presolve"]):
            return
        # there aren't 3-D sparse matrices
        assert_raises(ValueError, f, [1, 2], A_ub=np.zeros((1, 1, 3)), b_eq=1)
def test_sparse_constraints(self):
# gh-13559: improve error message for sparse inputs when unsupported
def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None):
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
rng = np.random.default_rng(9938284754882992)
m = 100
n = 150
A_eq = scipy.sparse.random_array((m, n), density=0.5, rng=rng)
x_valid = rng.standard_normal(n)
c = rng.standard_normal(n)
ub = x_valid + rng.random(n)
lb = x_valid - rng.random(n)
bounds = np.column_stack((lb, ub))
b_eq = A_eq @ x_valid
if self.method in {'simplex', 'revised simplex'}:
# simplex and revised simplex should raise error
with assert_raises(ValueError, match=f"Method '{self.method}' "
"does not support sparse constraint matrices."):
linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
method=self.method, options=self.options)
else:
# other methods should succeed
options = {**self.options}
if self.method in {'interior-point'}:
options['sparse'] = True
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
method=self.method, options=options)
assert res.success
def test_maxiter(self):
# test iteration limit w/ Enzo example
c = [4, 8, 3, 0, 0, 0]
A = [
[2, 5, 3, -1, 0, 0],
[3, 2.5, 8, 0, -1, 0],
[8, 10, 4, 0, 0, -1]]
b = [185, 155, 600]
maxiter = 3
res = linprog(c, A_eq=A, b_eq=b, method=self.method,
options={"maxiter": maxiter})
_assert_iteration_limit_reached(res, maxiter)
assert_equal(res.nit, maxiter)
    def test_bounds_fixed(self):
        """Variables with equal lower/upper bounds are fixed, ideally in
        presolve (zero iterations when presolve is enabled)."""
        # Test fixed bounds (upper equal to lower)
        # If presolve option True, test if solution found in presolve (i.e.
        # number of iterations is 0).
        do_presolve = self.options.get('presolve', True)
        res = linprog([1], bounds=(1, 1),
                      method=self.method, options=self.options)
        _assert_success(res, 1, 1)
        if do_presolve:
            assert_equal(res.nit, 0)
        res = linprog([1, 2, 3], bounds=[(5, 5), (-1, -1), (3, 3)],
                      method=self.method, options=self.options)
        _assert_success(res, 12, [5, -1, 3])
        if do_presolve:
            assert_equal(res.nit, 0)
        res = linprog([1, 1], bounds=[(1, 1), (1, 3)],
                      method=self.method, options=self.options)
        _assert_success(res, 2, [1, 1])
        if do_presolve:
            assert_equal(res.nit, 0)
        res = linprog([1, 1, 2], A_eq=[[1, 0, 0], [0, 1, 0]], b_eq=[1, 7],
                      bounds=[(-5, 5), (0, 10), (3.5, 3.5)],
                      method=self.method, options=self.options)
        _assert_success(res, 15, [1, 7, 3.5])
        if do_presolve:
            assert_equal(res.nit, 0)
    def test_bounds_infeasible(self):
        """Upper bound below lower bound yields an infeasible status,
        detected in presolve when presolve is enabled."""
        # Test ill-valued bounds (upper less than lower)
        # If presolve option True, test if solution found in presolve (i.e.
        # number of iterations is 0).
        do_presolve = self.options.get('presolve', True)
        res = linprog([1], bounds=(1, -2), method=self.method, options=self.options)
        _assert_infeasible(res)
        if do_presolve:
            assert_equal(res.nit, 0)
        res = linprog([1], bounds=[(1, -2)], method=self.method, options=self.options)
        _assert_infeasible(res)
        if do_presolve:
            assert_equal(res.nit, 0)
        res = linprog([1, 2, 3], bounds=[(5, 0), (1, 2), (3, 4)],
                      method=self.method, options=self.options)
        _assert_infeasible(res)
        if do_presolve:
            assert_equal(res.nit, 0)
    def test_bounds_infeasible_2(self):
        """Bounds of (inf, inf) or (-inf, -inf) are infeasible; simplex
        without presolve instead warns and raises (gh-11618)."""
        # Test ill-valued bounds (lower inf, upper -inf)
        # If presolve option True, test if solution found in presolve (i.e.
        # number of iterations is 0).
        # For the simplex method, the cases do not result in an
        # infeasible status, but in a RuntimeWarning. This is a
        # consequence of having _presolve() take care of feasibility
        # checks. See issue gh-11618.
        do_presolve = self.options.get('presolve', True)
        simplex_without_presolve = not do_presolve and self.method == 'simplex'
        c = [1, 2, 3]
        bounds_1 = [(1, 2), (np.inf, np.inf), (3, 4)]
        bounds_2 = [(1, 2), (-np.inf, -np.inf), (3, 4)]
        if simplex_without_presolve:
            def g(c, bounds):
                res = linprog(c, bounds=bounds,
                              method=self.method, options=self.options)
                return res
            with pytest.warns(RuntimeWarning):
                with pytest.raises(IndexError):
                    g(c, bounds=bounds_1)
            with pytest.warns(RuntimeWarning):
                with pytest.raises(IndexError):
                    g(c, bounds=bounds_2)
        else:
            res = linprog(c=c, bounds=bounds_1,
                          method=self.method, options=self.options)
            _assert_infeasible(res)
            if do_presolve:
                assert_equal(res.nit, 0)
            res = linprog(c=c, bounds=bounds_2,
                          method=self.method, options=self.options)
            _assert_infeasible(res)
            if do_presolve:
                assert_equal(res.nit, 0)
def test_empty_constraint_1(self):
c = [-1, -2]
res = linprog(c, method=self.method, options=self.options)
_assert_unbounded(res)
    def test_empty_constraint_2(self):
        """Unboundedness from bounds alone is detected, in presolve when
        presolve is enabled (zero iterations)."""
        c = [-1, 1, -1, 1]
        # x0 unbounded above with negative cost -> objective is unbounded.
        bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)]
        res = linprog(c, bounds=bounds,
                      method=self.method, options=self.options)
        _assert_unbounded(res)
        # Unboundedness detected in presolve requires no iterations
        if self.options.get('presolve', True):
            assert_equal(res.nit, 0)
    def test_empty_constraint_3(self):
        """No constraints, bounded problem: each variable goes to the bound
        favored by its cost sign."""
        c = [1, -1, 1, -1]
        bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)]
        res = linprog(c, bounds=bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_x=[0, 0, -1, 1], desired_fun=-2)
    def test_inequality_constraints(self):
        """Maximization (negated cost) with inequality constraints only."""
        # Minimize linear function subject to linear inequality constraints.
        # http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf
        c = np.array([3, 2]) * -1  # maximize
        A_ub = [[2, 1],
                [1, 1],
                [1, 0]]
        b_ub = [10, 8, 4]
        # A_eq, b_eq, bounds resolve to module-level defaults here.
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_fun=-18, desired_x=[2, 6])
    def test_inequality_constraints2(self):
        """Minimization with mixed-sign inequality constraints."""
        # Minimize linear function subject to linear inequality constraints.
        # http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf
        # (dead link)
        c = [6, 3]
        A_ub = [[0, 3],
                [-1, -1],
                [-2, 1]]
        b_ub = [2, -1, -1]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_fun=5, desired_x=[2 / 3, 1 / 3])
    def test_bounds_simple(self):
        """A single (lb, ub) pair broadcasts to all variables; an explicit
        per-variable list gives the same result."""
        c = [1, 2]
        bounds = (1, 2)
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_x=[1, 1])
        bounds = [(1, 2), (1, 2)]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_x=[1, 1])
    def test_bounded_below_only_1(self):
        """Scalar variable with only a lower bound (upper bound None)."""
        c = np.array([1.0])
        A_eq = np.array([[1.0]])
        b_eq = np.array([3.0])
        bounds = (1.0, None)
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_fun=3, desired_x=[3])
    def test_bounded_below_only_2(self):
        """Lower bound with np.inf as the (absent) upper bound."""
        c = np.ones(3)
        A_eq = np.eye(3)
        b_eq = np.array([1, 2, 3])
        bounds = (0.5, np.inf)
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
    def test_bounded_above_only_1(self):
        """Scalar variable with only an upper bound (lower bound None)."""
        c = np.array([1.0])
        A_eq = np.array([[1.0]])
        b_eq = np.array([3.0])
        bounds = (None, 10.0)
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_fun=3, desired_x=[3])
    def test_bounded_above_only_2(self):
        """Upper bound with -np.inf as the (absent) lower bound."""
        c = np.ones(3)
        A_eq = np.eye(3)
        b_eq = np.array([1, 2, 3])
        bounds = (-np.inf, 4)
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
    def test_bounds_infinity(self):
        """Fully unbounded variables ((-inf, inf)) with equality constraints."""
        c = np.ones(3)
        A_eq = np.eye(3)
        b_eq = np.array([1, 2, 3])
        bounds = (-np.inf, np.inf)
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
    def test_bounds_mixed(self):
        """One free variable plus one with a negative lower bound."""
        # Problem has one unbounded variable and
        # another with a negative lower bound.
        c = np.array([-1, 4]) * -1  # maximize
        A_ub = np.array([[-3, 1],
                         [1, 2]], dtype=np.float64)
        b_ub = [6, 4]
        x0_bounds = (-np.inf, np.inf)
        x1_bounds = (-3, np.inf)
        bounds = (x0_bounds, x1_bounds)
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_fun=-80 / 7, desired_x=[-8 / 7, 18 / 7])
    def test_bounds_equal_but_infeasible(self):
        """A variable fixed by equal bounds (x0 == 2) violating an inequality
        (2*x0 - 2*x1 <= 3 with x1 >= 0 forces x1 > 0, but row 2 forces
        x1 <= 0 and row 3 makes x0 = 2 infeasible) -> infeasible."""
        c = [-4, 1]
        A_ub = [[7, -2], [0, 1], [2, -2]]
        b_ub = [14, 0, 3]
        bounds = [(2, 2), (0, None)]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_infeasible(res)
    def test_bounds_equal_but_infeasible2(self):
        """Same data as test_bounds_equal_but_infeasible but as equality
        constraints -> also infeasible."""
        c = [-4, 1]
        A_eq = [[7, -2], [0, 1], [2, -2]]
        b_eq = [14, 0, 3]
        bounds = [(2, 2), (0, None)]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_infeasible(res)
    def test_bounds_equal_no_presolve(self):
        """Regression test: equal bounds without presolve must still be
        handled correctly in postprocessing."""
        # There was a bug when a lower and upper bound were equal but
        # presolve was not on to eliminate the variable. The bound
        # was being converted to an equality constraint, but the bound
        # was not eliminated, leading to issues in postprocessing.
        c = [1, 2]
        A_ub = [[1, 2], [1.1, 2.2]]
        b_ub = [4, 8]
        bounds = [(1, 2), (2, 2)]
        o = {key: self.options[key] for key in self.options}
        o["presolve"] = False
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=o)
        _assert_infeasible(res)
    def test_zero_column_1(self):
        """A variable appearing in no equality row (zero column of A_eq) is
        handled correctly by presolve/solvers."""
        m, n = 3, 4
        rng = np.random.default_rng(558329500002933)
        c = rng.random(n)
        c[1] = 1
        A_eq = rng.random((m, n))
        A_eq[:, 1] = 0  # variable 1 is absent from all equality constraints
        b_eq = rng.random(m)
        A_ub = [[1, 0, 1, 1]]
        b_ub = 3
        bounds = [(-10, 10), (-10, 10), (-10, None), (None, None)]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_fun=-9.485758655190649)  # method='highs' solution
    def test_zero_column_2(self):
        """A free variable with negative cost and a zero column in all
        constraints makes the problem unbounded."""
        if self.method in {'highs-ds', 'highs-ipm'}:
            # See upstream issue https://github.com/ERGO-Code/HiGHS/issues/648
            pytest.xfail()
        rng = np.random.default_rng(4492835845925983465)
        m, n = 2, 4
        c = rng.random(n)
        c[1] = -1
        A_eq = rng.random((m, n))
        A_eq[:, 1] = 0  # variable 1 is unconstrained...
        b_eq = rng.random(m)
        A_ub = rng.random((m, n))
        A_ub[:, 1] = 0  # ...in inequalities too, and has no bounds below.
        b_ub = rng.random(m)
        bounds = (None, None)
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_unbounded(res)
        # Unboundedness detected in presolve
        if self.options.get('presolve', True) and "highs" not in self.method:
            # HiGHS detects unboundedness or infeasibility in presolve
            # It needs an iteration of simplex to be sure of unboundedness
            # Other solvers report that the problem is unbounded if feasible
            assert_equal(res.nit, 0)
    def test_zero_row_1(self):
        """All-zero equality rows with zero RHS are consistent and removable."""
        c = [1, 2, 3]
        A_eq = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
        b_eq = [0, 3, 0]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_fun=3)
    def test_zero_row_2(self):
        """All-zero inequality rows with non-negative RHS are vacuous."""
        A_ub = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
        b_ub = [0, 3, 0]
        c = [1, 2, 3]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_fun=0)
    def test_zero_row_3(self):
        """An all-zero equality row with (almost surely) nonzero RHS is
        infeasible; detected in presolve when enabled."""
        m, n = 2, 4
        rng = np.random.default_rng(49949482723982545)
        c = rng.random(n)
        A_eq = rng.random((m, n))
        A_eq[0, :] = 0  # 0 @ x == b_eq[0] (random, nonzero) is unsatisfiable
        b_eq = rng.random(m)
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_infeasible(res)
        # Infeasibility detected in presolve
        if self.options.get('presolve', True):
            assert_equal(res.nit, 0)
    def test_zero_row_4(self):
        """An all-zero inequality row with negative RHS (0 <= negative) is
        infeasible; detected in presolve when enabled."""
        m, n = 2, 4
        rng = np.random.default_rng(1032934859282349)
        c = rng.random(n)
        A_ub = rng.random((m, n))
        A_ub[0, :] = 0
        b_ub = -rng.random(m)  # strictly negative RHS
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_infeasible(res)
        # Infeasibility detected in presolve
        if self.options.get('presolve', True):
            assert_equal(res.nit, 0)
    def test_singleton_row_eq_1(self):
        """Contradictory singleton equality rows (x0 == 1 and x0 == 2) are
        infeasible; detected in presolve when enabled."""
        c = [1, 1, 1, 2]
        A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]
        b_eq = [1, 2, 2, 4]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_infeasible(res)
        # Infeasibility detected in presolve
        if self.options.get('presolve', True):
            assert_equal(res.nit, 0)
    def test_singleton_row_eq_2(self):
        """Redundant but consistent singleton equality rows (x0 == 1 twice)
        solve normally."""
        c = [1, 1, 1, 2]
        A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]
        b_eq = [1, 2, 1, 4]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_fun=4)
    def test_singleton_row_ub_1(self):
        """Contradictory singleton inequality rows (x0 <= 1 and -x0 <= -2,
        i.e. x0 >= 2) are infeasible; detected in presolve when enabled."""
        c = [1, 1, 1, 2]
        A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]
        b_ub = [1, 2, -2, 4]
        bounds = [(None, None), (0, None), (0, None), (0, None)]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_infeasible(res)
        # Infeasibility detected in presolve
        if self.options.get('presolve', True):
            assert_equal(res.nit, 0)
    def test_singleton_row_ub_2(self):
        """Compatible singleton inequality rows (0.5 <= x0 <= 1) solve
        normally."""
        c = [1, 1, 1, 2]
        A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]
        b_ub = [1, 2, -0.5, 4]
        bounds = [(None, None), (0, None), (0, None), (0, None)]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_fun=0.5)
    def test_infeasible(self):
        """x0 <= 2, x1 <= 2 but x0 + x1 >= 5 -> infeasible."""
        # Test linprog response to an infeasible problem
        c = [-1, -1]
        A_ub = [[1, 0],
                [0, 1],
                [-1, -1]]
        b_ub = [2, 2, -5]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_infeasible(res)
    def test_infeasible_inequality_bounds(self):
        """Bounds (x >= 5) incompatible with an inequality (2x <= 4) are
        infeasible; detected in presolve when enabled."""
        c = [1]
        A_ub = [[2]]
        b_ub = 4
        bounds = (5, 6)
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_infeasible(res)
        # Infeasibility detected in presolve
        if self.options.get('presolve', True):
            assert_equal(res.nit, 0)
    def test_unbounded(self):
        """Maximizing x0 + x1 with constraints that leave the x0 direction
        open -> unbounded."""
        # Test linprog response to an unbounded problem
        c = np.array([1, 1]) * -1  # maximize
        A_ub = [[-1, 1],
                [-1, -1]]
        b_ub = [-1, -2]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_unbounded(res)
    def test_unbounded_below_no_presolve_corrected(self):
        """min x with x <= 1 and no lower bound is unbounded below, even with
        presolve disabled; revised simplex reports status 5 instead."""
        c = [1]
        bounds = [(None, 1)]
        o = {key: self.options[key] for key in self.options}
        o["presolve"] = False
        res = linprog(c=c, bounds=bounds,
                      method=self.method,
                      options=o)
        if self.method == "revised simplex":
            # Revised simplex has a special pathway for no constraints.
            assert_equal(res.status, 5)
        else:
            _assert_unbounded(res)
    def test_unbounded_no_nontrivial_constraints_1(self):
        """
        Test whether presolve pathway for detecting unboundedness after
        constraint elimination is working.
        """
        c = np.array([0, 0, 0, 1, -1, -1])
        A_ub = np.array([[1, 0, 0, 0, 0, 0],
                         [0, 1, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, -1]])
        b_ub = np.array([2, -2, 0])
        bounds = [(None, None), (None, None), (None, None),
                  (-1, 1), (-1, 1), (0, None)]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_unbounded(res)
        # Legacy methods report which variable diverges; HiGHS does not.
        if not self.method.lower().startswith("highs"):
            assert_equal(res.x[-1], np.inf)
            assert_equal(res.message[:36],
                         "The problem is (trivially) unbounded")
    def test_unbounded_no_nontrivial_constraints_2(self):
        """
        Test whether presolve pathway for detecting unboundedness after
        constraint elimination is working.
        """
        # Mirror of case 1: the free variable diverges to -inf here.
        c = np.array([0, 0, 0, 1, -1, 1])
        A_ub = np.array([[1, 0, 0, 0, 0, 0],
                         [0, 1, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 1]])
        b_ub = np.array([2, -2, 0])
        bounds = [(None, None), (None, None), (None, None),
                  (-1, 1), (-1, 1), (None, 0)]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_unbounded(res)
        if not self.method.lower().startswith("highs"):
            assert_equal(res.x[-1], -np.inf)
            assert_equal(res.message[:36],
                         "The problem is (trivially) unbounded")
    def test_cyclic_recovery(self):
        """Solvers must recover from degeneracy on the Klee-Minty cube."""
        # Test linprogs recovery from cycling using the Klee-Minty problem
        # Klee-Minty https://www.math.ubc.ca/~israel/m340/kleemin3.pdf
        c = np.array([100, 10, 1]) * -1  # maximize
        A_ub = [[1, 0, 0],
                [20, 1, 0],
                [200, 20, 1]]
        b_ub = [1, 100, 10000]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_x=[0, 0, 10000], atol=5e-6, rtol=1e-7)
    def test_cyclic_bland(self):
        """On a known cycling problem, plain simplex hits the iteration
        limit while Bland's rule (and all other methods) succeed."""
        # Test the effect of Bland's rule on a cycling problem
        c = np.array([-10, 57, 9, 24.])
        A_ub = np.array([[0.5, -5.5, -2.5, 9],
                         [0.5, -1.5, -0.5, 1],
                         [1, 0, 0, 0]])
        b_ub = [0, 0, 1]
        # copy the existing options dictionary but change maxiter
        maxiter = 100
        o = {key: val for key, val in self.options.items()}
        o['maxiter'] = maxiter
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=o)
        if self.method == 'simplex' and not self.options.get('bland'):
            # simplex cycles without Bland's rule
            _assert_iteration_limit_reached(res, o['maxiter'])
        else:
            # other methods, including simplex with Bland's rule, succeed
            _assert_success(res, desired_x=[1, 0, 1, 0])
        # note that revised simplex skips this test because it may or may not
        # cycle depending on the initial basis
    def test_remove_redundancy_infeasibility(self):
        """A linearly dependent equality row with an inconsistent RHS must be
        reported as infeasible, not silently dropped."""
        # mostly a test of redundancy removal, which is carefully tested in
        # test__remove_redundancy.py
        m, n = 10, 10
        rng = np.random.default_rng(253985716283940)
        c = rng.random(n)
        A_eq = rng.random((m, n))
        b_eq = rng.random(m)
        # Last row is 2x the previous row, but its RHS is negated instead of
        # doubled, so the system is (almost surely) inconsistent.
        A_eq[-1, :] = 2 * A_eq[-2, :]
        b_eq[-1] *= -1
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", "A_eq does not appear...", OptimizeWarning)
            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                          method=self.method, options=self.options)
        _assert_infeasible(res)
#################
# General Tests #
#################
    def test_nontrivial_problem(self):
        """Canonical mixed-constraint problem from the shared fixture."""
        # Problem involves all constraint types,
        # negative resource limits, and rounding issues.
        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                      method=self.method, options=self.options)
        _assert_success(res, desired_fun=f_star, desired_x=x_star)
    def test_lpgen_problem(self):
        """A generated 400-variable / 40-constraint problem solves to the
        known optimum; numerical warnings from some solvers are silenced."""
        # Test linprog with a rather large problem (400 variables,
        # 40 constraints) generated by https://gist.github.com/denis-bz/8647461
        A_ub, b_ub, c = lpgen_2d(20, 20)
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", "Solving system with option 'sym_pos'", OptimizeWarning)
            warnings.filterwarnings(
                "ignore", "invalid value encountered", RuntimeWarning)
            warnings.simplefilter("ignore", LinAlgWarning)
            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                          method=self.method, options=self.options)
        _assert_success(res, desired_fun=-63.47967608020187)  # method='highs' solution
def test_network_flow(self):
# A network flow problem with supply and demand at nodes
# and with costs along directed edges.
# https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf
c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18]
n, p = -1, 1
A_eq = [
[n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0],
[p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0],
[0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0],
[0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p],
[0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]]
b_eq = [0, 19, -16, 33, 0, 0, -36]
with warnings.catch_warnings():
warnings.simplefilter("ignore", LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=755, atol=1e-6, rtol=1e-7)
def test_network_flow_limited_capacity(self):
# A network flow problem with supply and demand at nodes
# and with costs and capacities along directed edges.
# http://blog.sommer-forst.de/2013/04/10/
c = [2, 2, 1, 3, 1]
bounds = [
[0, 4],
[0, 2],
[0, 2],
[0, 3],
[0, 5]]
n, p = -1, 1
A_eq = [
[n, n, 0, 0, 0],
[p, 0, n, n, 0],
[0, p, p, 0, n],
[0, 0, 0, p, p]]
b_eq = [-4, 0, 0, 4]
with warnings.catch_warnings():
# this is an UmfpackWarning but I had trouble importing it
if has_umfpack:
warnings.simplefilter("ignore", UmfpackWarning)
warnings.filterwarnings(
"ignore", "scipy.linalg.solve\nIll...", RuntimeWarning)
warnings.filterwarnings(
"ignore", "A_eq does not appear...", OptimizeWarning)
warnings.filterwarnings(
"ignore", "Solving system with option...", OptimizeWarning)
warnings.simplefilter("ignore", LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=14)
def test_simplex_algorithm_wikipedia_example(self):
# https://en.wikipedia.org/wiki/Simplex_algorithm#Example
c = [-2, -3, -4]
A_ub = [
[3, 2, 1],
[2, 5, 3]]
b_ub = [10, 15]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-20)
def test_enzo_example(self):
# https://github.com/scipy/scipy/issues/1779 lp2.py
#
# Translated from Octave code at:
# http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm
# and placed under MIT licence by Enzo Michelangeli
# with permission explicitly granted by the original author,
# Prof. Kazunobu Yoshida
c = [4, 8, 3, 0, 0, 0]
A_eq = [
[2, 5, 3, -1, 0, 0],
[3, 2.5, 8, 0, -1, 0],
[8, 10, 4, 0, 0, -1]]
b_eq = [185, 155, 600]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=317.5,
desired_x=[66.25, 0, 17.5, 0, 183.75, 0],
atol=6e-6, rtol=1e-7)
def test_enzo_example_b(self):
# rescued from https://github.com/scipy/scipy/pull/218
c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8]
A_eq = [[-1, -1, -1, 0, 0, 0],
[0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 1]]
b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3]
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "A_eq does not appear...", OptimizeWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-1.77,
desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3])
def test_enzo_example_c_with_degeneracy(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 20
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(1, m + 1) / (m + 1)
A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
b_eq = [0, 0]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0, desired_x=np.zeros(m))
def test_enzo_example_c_with_unboundedness(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 50
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(m) / (m + 1)
# This test relies on `cos(0) -1 == sin(0)`, so ensure that's true
# (SIMD code or -ffast-math may cause spurious failures otherwise)
row0 = np.cos(tmp) - 1
row0[0] = 0.0
row1 = np.sin(tmp)
row1[0] = 0.0
A_eq = np.vstack((row0, row1))
b_eq = [0, 0]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
def test_enzo_example_c_with_infeasibility(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 50
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(m) / (m + 1)
A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
b_eq = [1, 1]
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_infeasible(res)
def test_basic_artificial_vars(self):
# Problem is chosen to test two phase simplex methods when at the end
# of phase 1 some artificial variables remain in the basis.
# Also, for `method='simplex'`, the row in the tableau corresponding
# with the artificial variables is not all zero.
c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004])
A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0],
[0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0],
[1.0, 1.0, 0, 0, 0, 0]])
b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0])
A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]])
b_eq = np.array([0, 0])
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0, desired_x=np.zeros_like(c),
atol=2e-6)
def test_optimize_result(self):
# check all fields in OptimizeResult
c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(0)
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
bounds=bounds, method=self.method, options=self.options)
assert_(res.success)
assert_(res.nit)
assert_(not res.status)
if 'highs' not in self.method:
# HiGHS status/message tested separately
assert_(res.message == "Optimization terminated successfully.")
assert_allclose(c @ res.x, res.fun)
assert_allclose(b_eq - A_eq @ res.x, res.con, atol=1e-11)
assert_allclose(b_ub - A_ub @ res.x, res.slack, atol=1e-11)
for key in ['eqlin', 'ineqlin', 'lower', 'upper']:
if key in res.keys():
assert isinstance(res[key]['marginals'], np.ndarray)
assert isinstance(res[key]['residual'], np.ndarray)
#################
# Bug Fix Tests #
#################
def test_bug_5400(self):
# https://github.com/scipy/scipy/issues/5400
bounds = [
(0, None),
(0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100),
(0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900),
(0, None), (0, None), (0, None), (0, None), (0, None), (0, None)]
f = 1 / 9
g = -1e4
h = -3.1
A_ub = np.array([
[1, -2.99, 0, 0, -3, 0, 0, 0, -1, -1, 0, -1, -1, 1, 1, 0, 0, 0, 0],
[1, 0, -2.9, h, 0, -3, 0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, 0, 0],
[1, 0, 0, h, 0, 0, -3, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[0, 1.99, -1, -1, 0, 0, 0, -1, f, f, 0, 0, 0, g, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 2, -1, -1, 0, 0, 0, -1, f, f, 0, g, 0, 0, 0, 0],
[0, -1, 1.9, 2.1, 0, 0, 0, f, -1, -1, 0, 0, 0, 0, 0, g, 0, 0, 0],
[0, 0, 0, 0, -1, 2, -1, 0, 0, 0, f, -1, f, 0, 0, 0, g, 0, 0],
[0, -1, -1, 2.1, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, 0, 0, g, 0],
[0, 0, 0, 0, -1, -1, 2, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, g]])
b_ub = np.array([
0.0, 0, 0, 100, 100, 100, 100, 100, 100, 900, 900, 900, 900, 900,
900, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
c = np.array([-1.0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0, 0, 0])
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "Solving system with option 'sym_pos'", OptimizeWarning)
warnings.filterwarnings(
"ignore", "invalid value encountered", RuntimeWarning)
warnings.simplefilter("ignore", LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-106.63507541835018)
def test_bug_6139(self):
# linprog(method='simplex') fails to find a basic feasible solution
# if phase 1 pseudo-objective function is outside the provided tol.
# https://github.com/scipy/scipy/issues/6139
# Note: This is not strictly a bug as the default tolerance determines
# if a result is "close enough" to zero and should not be expected
# to work for all cases.
c = np.array([1, 1, 1])
A_eq = np.array([[1., 0., 0.], [-1000., 0., - 1000.]])
b_eq = np.array([5.00000000e+00, -1.00000000e+04])
A_ub = -np.array([[0., 1000000., 1010000.]])
b_ub = -np.array([10000000.])
bounds = (None, None)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=14.95,
desired_x=np.array([5, 4.95, 5]))
def test_bug_6690(self):
# linprog simplex used to violate bound constraint despite reporting
# success.
# https://github.com/scipy/scipy/issues/6690
A_eq = np.array([[0, 0, 0, 0.93, 0, 0.65, 0, 0, 0.83, 0]])
b_eq = np.array([0.9626])
A_ub = np.array([
[0, 0, 0, 1.18, 0, 0, 0, -0.2, 0, -0.22],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0.43, 0, 0, 0, 0, 0, 0],
[0, -1.22, -0.25, 0, 0, 0, -2.06, 0, 0, 1.37],
[0, 0, 0, 0, 0, 0, 0, -0.25, 0, 0]
])
b_ub = np.array([0.615, 0, 0.172, -0.869, -0.022])
bounds = np.array([
[-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73],
[0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15]
]).T
c = np.array([
-1.64, 0.7, 1.8, -1.06, -1.16, 0.26, 2.13, 1.53, 0.66, 0.28
])
with warnings.catch_warnings():
if has_umfpack:
warnings.simplefilter("ignore", UmfpackWarning)
warnings.filterwarnings(
"ignore", "Solving system with option 'cholesky'", OptimizeWarning)
warnings.filterwarnings(
"ignore", "Solving system with option 'sym_pos'", OptimizeWarning)
warnings.filterwarnings(
"ignore", "invalid value encountered", RuntimeWarning)
warnings.simplefilter("ignore", LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
desired_fun = -1.19099999999
desired_x = np.array([0.3700, -0.9700, 0.3400, 0.4000, 1.1800,
0.5000, 0.4700, 0.0900, 0.3200, -0.7300])
_assert_success(res, desired_fun=desired_fun, desired_x=desired_x)
# Add small tol value to ensure arrays are less than or equal.
atol = 1e-6
assert_array_less(bounds[:, 0] - atol, res.x)
assert_array_less(res.x, bounds[:, 1] + atol)
def test_bug_7044(self):
# linprog simplex failed to "identify correct constraints" (?)
# leading to a non-optimal solution if A is rank-deficient.
# https://github.com/scipy/scipy/issues/7044
A_eq, b_eq, c, _, _ = magic_square(3)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "A_eq does not appear...", OptimizeWarning)
warnings.filterwarnings(
"ignore", "invalid value encountered", RuntimeWarning)
warnings.simplefilter("ignore", LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
desired_fun = 1.7002011030086288 # `method='highs' solution
_assert_success(res, desired_fun=desired_fun)
assert_allclose(A_eq.dot(res.x), b_eq)
assert_array_less(np.zeros(res.x.size) - 1e-5, res.x)
def test_bug_7237(self):
# https://github.com/scipy/scipy/issues/7237
# linprog simplex "explodes" when the pivot value is very
# close to zero.
c = np.array([-1, 0, 0, 0, 0, 0, 0, 0, 0])
A_ub = np.array([
[1., -724., 911., -551., -555., -896., 478., -80., -293.],
[1., 566., 42., 937., 233., 883., 392., -909., 57.],
[1., -208., -894., 539., 321., 532., -924., 942., 55.],
[1., 857., -859., 83., 462., -265., -971., 826., 482.],
[1., 314., -424., 245., -424., 194., -443., -104., -429.],
[1., 540., 679., 361., 149., -827., 876., 633., 302.],
[0., -1., -0., -0., -0., -0., -0., -0., -0.],
[0., -0., -1., -0., -0., -0., -0., -0., -0.],
[0., -0., -0., -1., -0., -0., -0., -0., -0.],
[0., -0., -0., -0., -1., -0., -0., -0., -0.],
[0., -0., -0., -0., -0., -1., -0., -0., -0.],
[0., -0., -0., -0., -0., -0., -1., -0., -0.],
[0., -0., -0., -0., -0., -0., -0., -1., -0.],
[0., -0., -0., -0., -0., -0., -0., -0., -1.],
[0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1.]
])
b_ub = np.array([
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.])
A_eq = np.array([[0., 1., 1., 1., 1., 1., 1., 1., 1.]])
b_eq = np.array([[1.]])
bounds = [(None, None)] * 9
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=108.568535, atol=1e-6)
def test_bug_8174(self):
# https://github.com/scipy/scipy/issues/8174
# The simplex method sometimes "explodes" if the pivot value is very
# close to zero.
A_ub = np.array([
[22714, 1008, 13380, -2713.5, -1116],
[-4986, -1092, -31220, 17386.5, 684],
[-4986, 0, 0, -2713.5, 0],
[22714, 0, 0, 17386.5, 0]])
b_ub = np.zeros(A_ub.shape[0])
c = -np.ones(A_ub.shape[1])
bounds = [(0, 1)] * A_ub.shape[1]
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "invalid value encountered", RuntimeWarning)
warnings.simplefilter("ignore", LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
if self.options.get('tol', 1e-9) < 1e-10 and self.method == 'simplex':
_assert_unable_to_find_basic_feasible_sol(res)
else:
_assert_success(res, desired_fun=-2.0080717488789235, atol=1e-6)
def test_bug_8174_2(self):
# Test supplementary example from issue 8174.
# https://github.com/scipy/scipy/issues/8174
# https://stackoverflow.com/questions/47717012/linprog-in-scipy-optimize-checking-solution
c = np.array([1, 0, 0, 0, 0, 0, 0])
A_ub = -np.identity(7)
b_ub = np.array([[-2], [-2], [-2], [-2], [-2], [-2], [-2]])
A_eq = np.array([
[1, 1, 1, 1, 1, 1, 0],
[0.3, 1.3, 0.9, 0, 0, 0, -1],
[0.3, 0, 0, 0, 0, 0, -2/3],
[0, 0.65, 0, 0, 0, 0, -1/15],
[0, 0, 0.3, 0, 0, 0, -1/15]
])
b_eq = np.array([[100], [0], [0], [0], [0]])
with warnings.catch_warnings():
if has_umfpack:
warnings.simplefilter("ignore", UmfpackWarning)
warnings.filterwarnings(
"ignore", "A_eq does not appear...", OptimizeWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=43.3333333331385)
def test_bug_8561(self):
# Test that pivot row is chosen correctly when using Bland's rule
# This was originally written for the simplex method with
# Bland's rule only, but it doesn't hurt to test all methods/options
# https://github.com/scipy/scipy/issues/8561
c = np.array([7, 0, -4, 1.5, 1.5])
A_ub = np.array([
[4, 5.5, 1.5, 1.0, -3.5],
[1, -2.5, -2, 2.5, 0.5],
[3, -0.5, 4, -12.5, -7],
[-1, 4.5, 2, -3.5, -2],
[5.5, 2, -4.5, -1, 9.5]])
b_ub = np.array([0, 0, 0, 0, 1])
res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=self.options,
method=self.method)
_assert_success(res, desired_x=[0, 0, 19, 16/3, 29/3])
def test_bug_8662(self):
# linprog simplex used to report incorrect optimal results
# https://github.com/scipy/scipy/issues/8662
c = [-10, 10, 6, 3]
A_ub = [[8, -8, -4, 6],
[-8, 8, 4, -6],
[-4, 4, 8, -4],
[3, -3, -3, -10]]
b_ub = [9, -9, -9, -4]
bounds = [(0, None), (0, None), (0, None), (0, None)]
desired_fun = 36.0000000000
with warnings.catch_warnings():
if has_umfpack:
warnings.simplefilter("ignore", UmfpackWarning)
warnings.filterwarnings(
"ignore", "invalid value encountered", RuntimeWarning)
warnings.simplefilter("ignore", LinAlgWarning)
res1 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
# Set boundary condition as a constraint
A_ub.append([0, 0, -1, 0])
b_ub.append(0)
bounds[2] = (None, None)
with warnings.catch_warnings():
if has_umfpack:
warnings.simplefilter("ignore", UmfpackWarning)
warnings.filterwarnings(
"ignore", "invalid value encountered", RuntimeWarning)
warnings.simplefilter("ignore", LinAlgWarning)
res2 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
rtol = 1e-5
_assert_success(res1, desired_fun=desired_fun, rtol=rtol)
_assert_success(res2, desired_fun=desired_fun, rtol=rtol)
def test_bug_8663(self):
# exposed a bug in presolve
# https://github.com/scipy/scipy/issues/8663
c = [1, 5]
A_eq = [[0, -7]]
b_eq = [-6]
bounds = [(0, None), (None, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[0, 6./7], desired_fun=5*6./7)
def test_bug_8664(self):
# interior-point has trouble with this when presolve is off
# tested for interior-point with presolve off in TestLinprogIPSpecific
# https://github.com/scipy/scipy/issues/8664
c = [4]
A_ub = [[2], [5]]
b_ub = [4, 4]
A_eq = [[0], [-8], [9]]
b_eq = [3, 2, 10]
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
warnings.filterwarnings(
"ignore", "Solving system with option...", OptimizeWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_bug_8973(self):
"""
Test whether bug described at:
https://github.com/scipy/scipy/issues/8973
was fixed.
"""
c = np.array([0, 0, 0, 1, -1])
A_ub = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]])
b_ub = np.array([2, -2])
bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
# solution vector x is not unique
_assert_success(res, desired_fun=-2)
# HiGHS IPM had an issue where the following wasn't true!
assert_equal(c @ res.x, res.fun)
def test_bug_8973_2(self):
"""
Additional test for:
https://github.com/scipy/scipy/issues/8973
suggested in
https://github.com/scipy/scipy/pull/8985
review by @antonior92
"""
c = np.zeros(1)
A_ub = np.array([[1]])
b_ub = np.array([-2])
bounds = (None, None)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[-2], desired_fun=0)
def test_bug_10124(self):
"""
Test for linprog docstring problem
'disp'=True caused revised simplex failure
"""
c = np.zeros(1)
A_ub = np.array([[1]])
b_ub = np.array([-2])
bounds = (None, None)
c = [-1, 4]
A_ub = [[-3, 1], [1, 2]]
b_ub = [6, 4]
bounds = [(None, None), (-3, None)]
o = {"disp": True}
o.update(self.options)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_success(res, desired_x=[10, -3], desired_fun=-22)
def test_bug_10349(self):
"""
Test for redundancy removal tolerance issue
https://github.com/scipy/scipy/issues/10349
"""
A_eq = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1],
[1, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0],
[0, 1, 0, 0, 0, 1]])
b_eq = np.array([221, 210, 10, 141, 198, 102])
c = np.concatenate((0, 1, np.zeros(4)), axis=None)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "A_eq does not appear...", OptimizeWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[129, 92, 12, 198, 0, 10], desired_fun=92)
@pytest.mark.skipif(sys.platform == 'darwin',
reason=("Failing on some local macOS builds, "
"see gh-13846"))
def test_bug_10466(self):
"""
Test that autoscale fixes poorly-scaled problem
"""
c = [-8., -0., -8., -0., -8., -0., -0., -0., -0., -0., -0., -0., -0.]
A_eq = [[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 1., 0., 1., 0., -1., 0., 0., 0., 0., 0., 0.],
[1., 0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
[1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.]]
b_eq = [3.14572800e+08, 4.19430400e+08, 5.24288000e+08,
1.00663296e+09, 1.07374182e+09, 1.07374182e+09,
1.07374182e+09, 1.07374182e+09, 1.07374182e+09,
1.07374182e+09]
o = {}
# HiGHS methods don't use autoscale option
if not self.method.startswith("highs"):
o = {"autoscale": True}
o.update(self.options)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "Solving system with option...", OptimizeWarning)
if has_umfpack:
warnings.simplefilter("ignore", UmfpackWarning)
warnings.filterwarnings(
"ignore", "scipy.linalg.solve\nIll...", RuntimeWarning)
warnings.filterwarnings(
"ignore", "divide by zero encountered...", RuntimeWarning)
warnings.filterwarnings(
"ignore", "overflow encountered...", RuntimeWarning)
warnings.filterwarnings(
"ignore", "invalid value encountered...", RuntimeWarning)
warnings.filterwarnings(
"ignore", "Ill-conditioned matrix...", LinAlgWarning)
warnings.filterwarnings(
"ignore", "An ill-conditioned...", LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
assert_allclose(res.fun, -8589934560)
#########################
# Method-specific Tests #
#########################
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
| LinprogCommonTests |
python | fastapi__sqlmodel | docs_src/tutorial/update/tutorial004.py | {
"start": 100,
"end": 2392
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def update_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Spider-Boy") # (1)!
results = session.exec(statement) # (2)!
hero_1 = results.one() # (3)!
print("Hero 1:", hero_1) # (4)!
statement = select(Hero).where(Hero.name == "Captain North America") # (5)!
results = session.exec(statement) # (6)!
hero_2 = results.one() # (7)!
print("Hero 2:", hero_2) # (8)!
hero_1.age = 16 # (9)!
hero_1.name = "Spider-Youngster" # (10)!
session.add(hero_1) # (11)!
hero_2.name = "Captain North America Except Canada" # (12)!
hero_2.age = 110 # (13)!
session.add(hero_2) # (14)!
session.commit() # (15)!
session.refresh(hero_1) # (16)!
session.refresh(hero_2) # (17)!
print("Updated hero 1:", hero_1) # (18)!
print("Updated hero 2:", hero_2) # (19)!
# (20)!
def main():
create_db_and_tables()
create_heroes()
update_heroes()
if __name__ == "__main__":
main()
| Hero |
python | django__django | django/db/backends/sqlite3/introspection.py | {
"start": 2035,
"end": 18068
} | class ____(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_field_type(self, data_type, description):
field_type = super().get_field_type(data_type, description)
if description.pk and field_type in {
"BigIntegerField",
"IntegerField",
"SmallIntegerField",
}:
# No support for BigAutoField or SmallAutoField as SQLite treats
# all integer primary keys as signed 64-bit integers.
return "AutoField"
if description.has_json_constraint:
return "JSONField"
return field_type
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute(
"""
SELECT name, type FROM sqlite_master
WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
ORDER BY name"""
)
return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
cursor.execute(
"PRAGMA table_xinfo(%s)" % self.connection.ops.quote_name(table_name)
)
table_info = cursor.fetchall()
if not table_info:
raise DatabaseError(f"Table {table_name} does not exist (empty pragma).")
collations = self._get_column_collations(cursor, table_name)
json_columns = set()
if self.connection.features.can_introspect_json_field:
for line in table_info:
column = line[1]
json_constraint_sql = '%%json_valid("%s")%%' % column
has_json_constraint = cursor.execute(
"""
SELECT sql
FROM sqlite_master
WHERE
type = 'table' AND
name = %s AND
sql LIKE %s
""",
[table_name, json_constraint_sql],
).fetchone()
if has_json_constraint:
json_columns.add(column)
table_description = [
FieldInfo(
name,
data_type,
get_field_size(data_type),
None,
None,
None,
not notnull,
default,
collations.get(name),
bool(pk),
name in json_columns,
)
for cid, name, data_type, notnull, default, pk, hidden in table_info
if hidden
in [
0, # Normal column.
2, # Virtual generated column.
3, # Stored generated column.
]
]
# If the primary key is composed of multiple columns they should not
# be individually marked as pk.
primary_key = [
index for index, field_info in enumerate(table_description) if field_info.pk
]
if len(primary_key) > 1:
for index in primary_key:
table_description[index] = table_description[index]._replace(pk=False)
return table_description
def get_sequences(self, cursor, table_name, table_fields=()):
pk_col = self.get_primary_key_column(cursor, table_name)
return [{"table": table_name, "column": pk_col}]
def get_relations(self, cursor, table_name):
"""
Return a dictionary of
{column_name: (ref_column_name, ref_table_name, db_on_delete)}
representing all foreign keys in the given table.
"""
cursor.execute(
"PRAGMA foreign_key_list(%s)" % self.connection.ops.quote_name(table_name)
)
return {
column_name: (
ref_column_name,
ref_table_name,
self.on_delete_types.get(on_delete),
)
for (
_,
_,
ref_table_name,
column_name,
ref_column_name,
_,
on_delete,
*_,
) in cursor.fetchall()
}
def get_primary_key_columns(self, cursor, table_name):
cursor.execute(
"PRAGMA table_info(%s)" % self.connection.ops.quote_name(table_name)
)
return [name for _, name, *_, pk in cursor.fetchall() if pk]
def _parse_column_or_constraint_definition(self, tokens, columns):
token = None
is_constraint_definition = None
field_name = None
constraint_name = None
unique = False
unique_columns = []
check = False
check_columns = []
braces_deep = 0
for token in tokens:
if token.match(sqlparse.tokens.Punctuation, "("):
braces_deep += 1
elif token.match(sqlparse.tokens.Punctuation, ")"):
braces_deep -= 1
if braces_deep < 0:
# End of columns and constraints for table definition.
break
elif braces_deep == 0 and token.match(sqlparse.tokens.Punctuation, ","):
# End of current column or constraint definition.
break
# Detect column or constraint definition by first token.
if is_constraint_definition is None:
is_constraint_definition = token.match(
sqlparse.tokens.Keyword, "CONSTRAINT"
)
if is_constraint_definition:
continue
if is_constraint_definition:
# Detect constraint name by second token.
if constraint_name is None:
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
constraint_name = token.value
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
constraint_name = token.value[1:-1]
# Start constraint columns parsing after UNIQUE keyword.
if token.match(sqlparse.tokens.Keyword, "UNIQUE"):
unique = True
unique_braces_deep = braces_deep
elif unique:
if unique_braces_deep == braces_deep:
if unique_columns:
# Stop constraint parsing.
unique = False
continue
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
unique_columns.append(token.value)
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
unique_columns.append(token.value[1:-1])
else:
# Detect field name by first token.
if field_name is None:
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
field_name = token.value
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
field_name = token.value[1:-1]
if token.match(sqlparse.tokens.Keyword, "UNIQUE"):
unique_columns = [field_name]
# Start constraint columns parsing after CHECK keyword.
if token.match(sqlparse.tokens.Keyword, "CHECK"):
check = True
check_braces_deep = braces_deep
elif check:
if check_braces_deep == braces_deep:
if check_columns:
# Stop constraint parsing.
check = False
continue
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
if token.value in columns:
check_columns.append(token.value)
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
if token.value[1:-1] in columns:
check_columns.append(token.value[1:-1])
unique_constraint = (
{
"unique": True,
"columns": unique_columns,
"primary_key": False,
"foreign_key": None,
"check": False,
"index": False,
}
if unique_columns
else None
)
check_constraint = (
{
"check": True,
"columns": check_columns,
"primary_key": False,
"unique": False,
"foreign_key": None,
"index": False,
}
if check_columns
else None
)
return constraint_name, unique_constraint, check_constraint, token
def _parse_table_constraints(self, sql, columns):
# Check constraint parsing is based of SQLite syntax diagram.
# https://www.sqlite.org/syntaxdiagrams.html#table-constraint
statement = sqlparse.parse(sql)[0]
constraints = {}
unnamed_constrains_index = 0
tokens = (token for token in statement.flatten() if not token.is_whitespace)
# Go to columns and constraint definition
for token in tokens:
if token.match(sqlparse.tokens.Punctuation, "("):
break
# Parse columns and constraint definition
while True:
(
constraint_name,
unique,
check,
end_token,
) = self._parse_column_or_constraint_definition(tokens, columns)
if unique:
if constraint_name:
constraints[constraint_name] = unique
else:
unnamed_constrains_index += 1
constraints[
"__unnamed_constraint_%s__" % unnamed_constrains_index
] = unique
if check:
if constraint_name:
constraints[constraint_name] = check
else:
unnamed_constrains_index += 1
constraints[
"__unnamed_constraint_%s__" % unnamed_constrains_index
] = check
if end_token.match(sqlparse.tokens.Punctuation, ")"):
break
return constraints
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Find inline check constraints.
try:
table_schema = cursor.execute(
"SELECT sql FROM sqlite_master WHERE type='table' and name=%s",
[table_name],
).fetchone()[0]
except TypeError:
# table_name is a view.
pass
else:
columns = {
info.name for info in self.get_table_description(cursor, table_name)
}
constraints.update(self._parse_table_constraints(table_schema, columns))
# Get the index info
cursor.execute(
"PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name)
)
for row in cursor.fetchall():
# Discard last 2 columns.
number, index, unique = row[:3]
cursor.execute(
"SELECT sql FROM sqlite_master WHERE type='index' AND name=%s",
[index],
)
# There's at most one row.
(sql,) = cursor.fetchone() or (None,)
# Inline constraints are already detected in
# _parse_table_constraints(). The reasons to avoid fetching inline
# constraints from `PRAGMA index_list` are:
# - Inline constraints can have a different name and information
# than what `PRAGMA index_list` gives.
# - Not all inline constraints may appear in `PRAGMA index_list`.
if not sql:
# An inline constraint
continue
# Get the index info for that index
cursor.execute(
"PRAGMA index_info(%s)" % self.connection.ops.quote_name(index)
)
for index_rank, column_rank, column in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": [],
"primary_key": False,
"unique": bool(unique),
"foreign_key": None,
"check": False,
"index": True,
}
constraints[index]["columns"].append(column)
# Add type and column orders for indexes
if constraints[index]["index"]:
# SQLite doesn't support any index type other than b-tree
constraints[index]["type"] = Index.suffix
orders = self._get_index_columns_orders(sql)
if orders is not None:
constraints[index]["orders"] = orders
# Get the PK
pk_columns = self.get_primary_key_columns(cursor, table_name)
if pk_columns:
# SQLite doesn't actually give a name to the PK constraint,
# so we invent one. This is fine, as the SQLite backend never
# deletes PK constraints by name, as you can't delete constraints
# in SQLite; we remake the table with a new PK instead.
constraints["__primary__"] = {
"columns": pk_columns,
"primary_key": True,
"unique": False, # It's not actually a unique constraint.
"foreign_key": None,
"check": False,
"index": False,
}
relations = enumerate(self.get_relations(cursor, table_name).items())
constraints.update(
{
f"fk_{index}": {
"columns": [column_name],
"primary_key": False,
"unique": False,
"foreign_key": (ref_table_name, ref_column_name),
"check": False,
"index": False,
}
for index, (
column_name,
(ref_column_name, ref_table_name, _),
) in relations
}
)
return constraints
def _get_index_columns_orders(self, sql):
tokens = sqlparse.parse(sql)[0]
for token in tokens:
if isinstance(token, sqlparse.sql.Parenthesis):
columns = str(token).strip("()").split(", ")
return ["DESC" if info.endswith("DESC") else "ASC" for info in columns]
return None
def _get_column_collations(self, cursor, table_name):
row = cursor.execute(
"""
SELECT sql
FROM sqlite_master
WHERE type = 'table' AND name = %s
""",
[table_name],
).fetchone()
if not row:
return {}
sql = row[0]
columns = str(sqlparse.parse(sql)[0][-1]).strip("()").split(", ")
collations = {}
for column in columns:
tokens = column[1:].split()
column_name = tokens[0].strip('"')
for index, token in enumerate(tokens):
if token == "COLLATE":
collation = tokens[index + 1]
break
else:
collation = None
collations[column_name] = collation
return collations
| DatabaseIntrospection |
python | getsentry__sentry | src/sentry/core/endpoints/organization_environments.py | {
"start": 1025,
"end": 3213
} | class ____(OrganizationEndpoint):
owner = ApiOwner.UNOWNED
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
}
@extend_schema(
operation_id="List an Organization's Environments",
parameters=[GlobalParams.ORG_ID_OR_SLUG, EnvironmentParams.VISIBILITY],
responses={
200: inline_sentry_response_serializer(
"OrganizationEnvironmentResponse", list[EnvironmentSerializerResponse]
),
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
},
examples=EnvironmentExamples.GET_ORGANIZATION_ENVIRONMENTS,
)
def get(self, request: Request, organization: Organization) -> Response:
"""
Lists an organization's environments.
"""
visibility = request.GET.get("visibility", "visible")
if visibility not in environment_visibility_filter_options:
return Response(
{
"detail": "Invalid value for 'visibility', valid values are: {!r}".format(
sorted(environment_visibility_filter_options.keys())
)
},
status=400,
)
environment_projects = EnvironmentProject.objects.filter(
project__in=self.get_projects(request, organization)
)
add_visibility_filters = environment_visibility_filter_options[visibility]
environment_projects = add_visibility_filters(environment_projects).values("environment")
queryset = (
Environment.objects.filter(id__in=environment_projects)
.exclude(
# HACK(mattrobenolt): We don't want to surface the
# "No Environment" environment to the UI since it
# doesn't really exist. This might very likely change
# with new tagstore backend in the future, but until
# then, we're hiding it since it causes more problems
# than it's worth.
name=""
)
.order_by("name")
)
return Response(serialize(list(queryset), request.user))
| OrganizationEnvironmentsEndpoint |
python | jazzband__prettytable | tests/test_style.py | {
"start": 16692,
"end": 20891
} | class ____:
@pytest.mark.parametrize(
["pt", "expected_output", "test_type"],
[
(
lf("city_data"),
"""
+-----------+------+------------+-----------------+
| City name | Area | Population | Annual Rainfall |
+-----------+------+------------+-----------------+
| Adelaide | 1295 | 1158259 | 600.5 |
| Brisbane | 5905 | 1857594 | 1146.4 |
| Darwin | 112 | 120900 | 1714.7 |
| Hobart | 1357 | 205556 | 619.5 |
| Sydney | 2058 | 4336374 | 1214.8 |
| Melbourne | 1566 | 3806092 | 646.9 |
| Perth | 5386 | 1554769 | 869.4 |
+-----------+------+------------+-----------------+
""",
"English Table",
),
(
lf("japanese_pretty_table"),
"""
+--------+------------+----------+
| Kanji | Hiragana | English |
+--------+------------+----------+
| 神戸 | こうべ | Kobe |
| 京都 | きょうと | Kyoto |
| 長崎 | ながさき | Nagasaki |
| 名古屋 | なごや | Nagoya |
| 大阪 | おおさか | Osaka |
| 札幌 | さっぽろ | Sapporo |
| 東京 | とうきょう | Tokyo |
| 横浜 | よこはま | Yokohama |
+--------+------------+----------+
""",
"Japanese table",
),
(
lf("emoji_pretty_table"),
"""
+-----------------+-----------------+
| Thunderbolt | Lightning |
+-----------------+-----------------+
| \x1b[38;5;226m _`/""\x1b[38;5;250m.-. \x1b[0m | \x1b[38;5;240;1m .-. \x1b[0m |
| \x1b[38;5;226m ,\\_\x1b[38;5;250m( ). \x1b[0m | \x1b[38;5;240;1m ( ). \x1b[0m |
| \x1b[38;5;226m /\x1b[38;5;250m(___(__) \x1b[0m | \x1b[38;5;240;1m (___(__) \x1b[0m |
| \x1b[38;5;228;5m ⚡\x1b[38;5;111;25mʻ ʻ\x1b[38;5;228;5m⚡\x1b[38;5;111;25mʻ ʻ \x1b[0m | \x1b[38;5;21;1m ‚ʻ\x1b[38;5;228;5m⚡\x1b[38;5;21;25mʻ‚\x1b[38;5;228;5m⚡\x1b[38;5;21;25m‚ʻ \x1b[0m |
| \x1b[38;5;111m ʻ ʻ ʻ ʻ \x1b[0m | \x1b[38;5;21;1m ‚ʻ‚ʻ\x1b[38;5;228;5m⚡\x1b[38;5;21;25mʻ‚ʻ \x1b[0m |
+-----------------+-----------------+
""", # noqa: E501
"Emoji table",
),
],
)
def test_multi_pattern_outputs(
self, pt: PrettyTable, expected_output: str, test_type: str
) -> None:
assert (
pt.get_string().strip() == expected_output.strip()
), f"Error output for test output of type {test_type}"
def test_colored_table() -> None:
table = PrettyTable(field_names=["Namespace", "Count"])
table.title = "\x1b[34mHere be Table caption\x1b[39m"
assert (
table.get_string()
== """+-----------------------+
| \x1b[34mHere be Table caption\x1b[39m |
+-------------+---------+
| Namespace | Count |
+-------------+---------+
+-------------+---------+"""
)
def test_link_and_color() -> None:
table = PrettyTable(["Link", "Count"])
# Add link
text = "Click here"
table.add_row([f"\033]8;;https://example.com\033\\{text}\033]8;;\033\\", "1"])
table.add_row(["No link", "2"])
# Add link with colour
text = "Click \x1b[34mhere\x1b[39m"
table.add_row([f"\033]8;;https://example.com\033\\{text}\033]8;;\033\\", "3"])
assert (
table.get_string()
== """\
+------------+-------+
| Link | Count |
+------------+-------+
| \033]8;;https://example.com\033\\Click here\033]8;;\033\\ | 1 |
| No link | 2 |
| \033]8;;https://example.com\033\\Click \x1b[34mhere\x1b[39m\033]8;;\033\\ | 3 |
+------------+-------+"""
)
@pytest.mark.parametrize(
["test_input", "expected"],
[
("a", 1),
("abc", 3),
("abc def", 7),
("\x1b[34mblue\x1b[39m", 4),
("\033]8;;https://example.com\033\\link\033]8;;\033\\", 4),
# colour inside link
("\033]8;;https://example.com\033\\\x1b[34mblue link\x1b[39m\033]8;;\033\\", 9),
# link inside colour
("\x1b[34m\033]8;;https://example.com\033\\blue link\033]8;;\033\\\x1b[39m", 9),
],
)
def test__str_block_width(test_input: str, expected: int) -> None:
assert _str_block_width(test_input) == expected
| TestMultiPattern |
python | viewflow__viewflow | viewflow/fsm/base.py | {
"start": 9142,
"end": 10399
} | class ____:
do_not_call_in_templates = True
def __init__(self, state: State, func: TransitionFunction): # noqa D102
self._state = state
self._func = func
def __get__(
self, instance: object, owner: Optional[Type[object]] = None
) -> TransitionBoundMethod | TransitionMethod:
if instance:
return TransitionBoundMethod(
self._state,
self._func,
self.get_descriptor(instance.__class__),
instance,
)
else:
assert owner is not None # make mypy happy
return TransitionMethod(
self._state, self._func, self.get_descriptor(owner), owner
)
def get_descriptor(self, owner: Type[object]) -> TransitionDescriptor:
"""Lookup for the transition descriptor in the base classes."""
for cls in owner.__mro__[1:]:
if hasattr(cls, self._func.__name__):
super_method = getattr(cls, self._func.__name__)
if isinstance(super_method, TransitionMethod):
break
else:
raise ValueError("Base transition not found")
return super_method._descriptor
| SuperTransitionDescriptor |
python | pytorch__pytorch | torch/distributions/log_normal.py | {
"start": 343,
"end": 2274
} | class ____(TransformedDistribution):
r"""
Creates a log-normal distribution parameterized by
:attr:`loc` and :attr:`scale` where::
X ~ Normal(loc, scale)
Y = exp(X) ~ LogNormal(loc, scale)
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = LogNormal(torch.tensor([0.0]), torch.tensor([1.0]))
>>> m.sample() # log-normal distributed with mean=0 and stddev=1
tensor([ 0.1046])
Args:
loc (float or Tensor): mean of log of distribution
scale (float or Tensor): standard deviation of log of the distribution
"""
arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
# pyrefly: ignore [bad-override]
support = constraints.positive
has_rsample = True
# pyrefly: ignore [bad-override]
base_dist: Normal
def __init__(
self,
loc: Union[Tensor, float],
scale: Union[Tensor, float],
validate_args: Optional[bool] = None,
) -> None:
base_dist = Normal(loc, scale, validate_args=validate_args)
super().__init__(base_dist, ExpTransform(), validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(LogNormal, _instance)
return super().expand(batch_shape, _instance=new)
@property
def loc(self) -> Tensor:
return self.base_dist.loc
@property
def scale(self) -> Tensor:
return self.base_dist.scale
@property
def mean(self) -> Tensor:
return (self.loc + self.scale.pow(2) / 2).exp()
@property
def mode(self) -> Tensor:
return (self.loc - self.scale.square()).exp()
@property
def variance(self) -> Tensor:
scale_sq = self.scale.pow(2)
return scale_sq.expm1() * (2 * self.loc + scale_sq).exp()
def entropy(self):
return self.base_dist.entropy() + self.loc
| LogNormal |
python | scipy__scipy | benchmarks/benchmarks/optimize_zeros.py | {
"start": 295,
"end": 689
} | class ____(Benchmark):
params = [
fstrings,
mstrings
]
param_names = ['test function', 'solver']
def setup(self, func, meth):
self.a = .5
self.b = sqrt(3)
self.func = functions[fstrings.index(func)]
self.meth = methods[mstrings.index(meth)]
def time_zeros(self, func, meth):
self.meth(self.func, self.a, self.b)
| Zeros |
python | django-haystack__django-haystack | haystack/exceptions.py | {
"start": 0,
"end": 100
} | class ____(Exception):
"""A generic exception for all others to extend."""
pass
| HaystackError |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/backfill.py | {
"start": 28702,
"end": 29086
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneError,)
name = "BackfillNotFoundError"
backfill_id = graphene.NonNull(graphene.String)
def __init__(self, backfill_id: str):
super().__init__()
self.backfill_id = backfill_id
self.message = f"Backfill {backfill_id} could not be found."
| GrapheneBackfillNotFoundError |
python | getsentry__sentry | tests/sentry/workflow_engine/test_integration.py | {
"start": 1665,
"end": 3945
} | class ____(BaseWorkflowTest):
def setUp(self) -> None:
(
self.workflow,
self.detector,
self.detector_workflow,
self.workflow_triggers,
) = self.create_detector_and_workflow(
name_prefix="e2e-test",
detector_type="metric_issue",
)
detector_conditions = self.create_data_condition_group()
self.create_data_condition(
condition_group=detector_conditions,
type=Condition.EQUAL,
condition_result=DetectorPriorityLevel.HIGH,
comparison=1,
)
self.detector.workflow_condition_group = detector_conditions
self.detector.save()
_, _, self.data_source, self.data_packet = self.create_test_query_data_source(self.detector)
self.action_group, self.action = self.create_workflow_action(workflow=self.workflow)
self.event = self.store_event(data={}, project_id=self.project.id)
occurrence_data = self.build_occurrence_data(
event_id=self.event.event_id,
project_id=self.project.id,
fingerprint=[f"detector-{self.detector.id}"],
evidence_data={"detector_id": self.detector.id},
type=MetricIssue.type_id,
)
self.occurrence, group_info = save_issue_occurrence(occurrence_data, self.event)
assert group_info is not None
self.group = Group.objects.get(grouphash__hash=self.occurrence.fingerprint[0])
assert self.group.type == MetricIssue.type_id
def call_post_process_group(
self,
group_id,
is_new=False,
is_regression=False,
is_new_group_environment=True,
cache_key=None,
eventstream_type=EventStreamEventType.Generic.value,
include_occurrence=True,
):
post_process_group(
is_new=is_new,
is_regression=is_regression,
is_new_group_environment=is_new_group_environment,
cache_key=cache_key,
group_id=group_id,
occurrence_id=self.occurrence.id if include_occurrence else None,
project_id=self.project.id,
eventstream_type=eventstream_type,
)
return cache_key
| BaseWorkflowIntegrationTest |
python | sympy__sympy | sympy/polys/agca/homomorphisms.py | {
"start": 602,
"end": 13002
} | class ____:
"""
Abstract base class for module homomoprhisms. Do not instantiate.
Instead, use the ``homomorphism`` function:
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> homomorphism(F, F, [[1, 0], [0, 1]])
Matrix([
[1, 0], : QQ[x]**2 -> QQ[x]**2
[0, 1]])
Attributes:
- ring - the ring over which we are considering modules
- domain - the domain module
- codomain - the codomain module
- _ker - cached kernel
- _img - cached image
Non-implemented methods:
- _kernel
- _image
- _restrict_domain
- _restrict_codomain
- _quotient_domain
- _quotient_codomain
- _apply
- _mul_scalar
- _compose
- _add
"""
def __init__(self, domain, codomain):
if not isinstance(domain, Module):
raise TypeError('Source must be a module, got %s' % domain)
if not isinstance(codomain, Module):
raise TypeError('Target must be a module, got %s' % codomain)
if domain.ring != codomain.ring:
raise ValueError('Source and codomain must be over same ring, '
'got %s != %s' % (domain, codomain))
self.domain = domain
self.codomain = codomain
self.ring = domain.ring
self._ker = None
self._img = None
def kernel(self):
r"""
Compute the kernel of ``self``.
That is, if ``self`` is the homomorphism `\phi: M \to N`, then compute
`ker(\phi) = \{x \in M | \phi(x) = 0\}`. This is a submodule of `M`.
Examples
========
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> homomorphism(F, F, [[1, 0], [x, 0]]).kernel()
<[x, -1]>
"""
if self._ker is None:
self._ker = self._kernel()
return self._ker
def image(self):
r"""
Compute the image of ``self``.
That is, if ``self`` is the homomorphism `\phi: M \to N`, then compute
`im(\phi) = \{\phi(x) | x \in M \}`. This is a submodule of `N`.
Examples
========
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> homomorphism(F, F, [[1, 0], [x, 0]]).image() == F.submodule([1, 0])
True
"""
if self._img is None:
self._img = self._image()
return self._img
def _kernel(self):
"""Compute the kernel of ``self``."""
raise NotImplementedError
def _image(self):
"""Compute the image of ``self``."""
raise NotImplementedError
def _restrict_domain(self, sm):
"""Implementation of domain restriction."""
raise NotImplementedError
def _restrict_codomain(self, sm):
"""Implementation of codomain restriction."""
raise NotImplementedError
def _quotient_domain(self, sm):
"""Implementation of domain quotient."""
raise NotImplementedError
def _quotient_codomain(self, sm):
"""Implementation of codomain quotient."""
raise NotImplementedError
def restrict_domain(self, sm):
"""
Return ``self``, with the domain restricted to ``sm``.
Here ``sm`` has to be a submodule of ``self.domain``.
Examples
========
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h
Matrix([
[1, x], : QQ[x]**2 -> QQ[x]**2
[0, 0]])
>>> h.restrict_domain(F.submodule([1, 0]))
Matrix([
[1, x], : <[1, 0]> -> QQ[x]**2
[0, 0]])
This is the same as just composing on the right with the submodule
inclusion:
>>> h * F.submodule([1, 0]).inclusion_hom()
Matrix([
[1, x], : <[1, 0]> -> QQ[x]**2
[0, 0]])
"""
if not self.domain.is_submodule(sm):
raise ValueError('sm must be a submodule of %s, got %s'
% (self.domain, sm))
if sm == self.domain:
return self
return self._restrict_domain(sm)
def restrict_codomain(self, sm):
"""
Return ``self``, with codomain restricted to to ``sm``.
Here ``sm`` has to be a submodule of ``self.codomain`` containing the
image.
Examples
========
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h
Matrix([
[1, x], : QQ[x]**2 -> QQ[x]**2
[0, 0]])
>>> h.restrict_codomain(F.submodule([1, 0]))
Matrix([
[1, x], : QQ[x]**2 -> <[1, 0]>
[0, 0]])
"""
if not sm.is_submodule(self.image()):
raise ValueError('the image %s must contain sm, got %s'
% (self.image(), sm))
if sm == self.codomain:
return self
return self._restrict_codomain(sm)
def quotient_domain(self, sm):
"""
Return ``self`` with domain replaced by ``domain/sm``.
Here ``sm`` must be a submodule of ``self.kernel()``.
Examples
========
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h
Matrix([
[1, x], : QQ[x]**2 -> QQ[x]**2
[0, 0]])
>>> h.quotient_domain(F.submodule([-x, 1]))
Matrix([
[1, x], : QQ[x]**2/<[-x, 1]> -> QQ[x]**2
[0, 0]])
"""
if not self.kernel().is_submodule(sm):
raise ValueError('kernel %s must contain sm, got %s' %
(self.kernel(), sm))
if sm.is_zero():
return self
return self._quotient_domain(sm)
def quotient_codomain(self, sm):
"""
Return ``self`` with codomain replaced by ``codomain/sm``.
Here ``sm`` must be a submodule of ``self.codomain``.
Examples
========
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h
Matrix([
[1, x], : QQ[x]**2 -> QQ[x]**2
[0, 0]])
>>> h.quotient_codomain(F.submodule([1, 1]))
Matrix([
[1, x], : QQ[x]**2 -> QQ[x]**2/<[1, 1]>
[0, 0]])
This is the same as composing with the quotient map on the left:
>>> (F/[(1, 1)]).quotient_hom() * h
Matrix([
[1, x], : QQ[x]**2 -> QQ[x]**2/<[1, 1]>
[0, 0]])
"""
if not self.codomain.is_submodule(sm):
raise ValueError('sm must be a submodule of codomain %s, got %s'
% (self.codomain, sm))
if sm.is_zero():
return self
return self._quotient_codomain(sm)
def _apply(self, elem):
"""Apply ``self`` to ``elem``."""
raise NotImplementedError
def __call__(self, elem):
return self.codomain.convert(self._apply(self.domain.convert(elem)))
def _compose(self, oth):
"""
Compose ``self`` with ``oth``, that is, return the homomorphism
obtained by first applying then ``self``, then ``oth``.
(This method is private since in this syntax, it is non-obvious which
homomorphism is executed first.)
"""
raise NotImplementedError
def _mul_scalar(self, c):
"""Scalar multiplication. ``c`` is guaranteed in self.ring."""
raise NotImplementedError
def _add(self, oth):
"""
Homomorphism addition.
``oth`` is guaranteed to be a homomorphism with same domain/codomain.
"""
raise NotImplementedError
def _check_hom(self, oth):
"""Helper to check that oth is a homomorphism with same domain/codomain."""
if not isinstance(oth, ModuleHomomorphism):
return False
return oth.domain == self.domain and oth.codomain == self.codomain
def __mul__(self, oth):
if isinstance(oth, ModuleHomomorphism) and self.domain == oth.codomain:
return oth._compose(self)
try:
return self._mul_scalar(self.ring.convert(oth))
except CoercionFailed:
return NotImplemented
# NOTE: _compose will never be called from rmul
__rmul__ = __mul__
def __truediv__(self, oth):
try:
return self._mul_scalar(1/self.ring.convert(oth))
except CoercionFailed:
return NotImplemented
def __add__(self, oth):
if self._check_hom(oth):
return self._add(oth)
return NotImplemented
def __sub__(self, oth):
if self._check_hom(oth):
return self._add(oth._mul_scalar(self.ring.convert(-1)))
return NotImplemented
def is_injective(self):
"""
Return True if ``self`` is injective.
That is, check if the elements of the domain are mapped to the same
codomain element.
Examples
========
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h.is_injective()
False
>>> h.quotient_domain(h.kernel()).is_injective()
True
"""
return self.kernel().is_zero()
def is_surjective(self):
"""
Return True if ``self`` is surjective.
That is, check if every element of the codomain has at least one
preimage.
Examples
========
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h.is_surjective()
False
>>> h.restrict_codomain(h.image()).is_surjective()
True
"""
return self.image() == self.codomain
def is_isomorphism(self):
"""
Return True if ``self`` is an isomorphism.
That is, check if every element of the codomain has precisely one
preimage. Equivalently, ``self`` is both injective and surjective.
Examples
========
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h = h.restrict_codomain(h.image())
>>> h.is_isomorphism()
False
>>> h.quotient_domain(h.kernel()).is_isomorphism()
True
"""
return self.is_injective() and self.is_surjective()
def is_zero(self):
"""
Return True if ``self`` is a zero morphism.
That is, check if every element of the domain is mapped to zero
under self.
Examples
========
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h.is_zero()
False
>>> h.restrict_domain(F.submodule()).is_zero()
True
>>> h.quotient_codomain(h.image()).is_zero()
True
"""
return self.image().is_zero()
def __eq__(self, oth):
try:
return (self - oth).is_zero()
except TypeError:
return False
def __ne__(self, oth):
return not (self == oth)
| ModuleHomomorphism |
python | fluentpython__example-code | 14-it-generator/sentence.py | {
"start": 98,
"end": 445
} | class ____:
def __init__(self, text):
self.text = text
self.words = RE_WORD.findall(text) # <1>
def __getitem__(self, index):
return self.words[index] # <2>
def __len__(self): # <3>
return len(self.words)
def __repr__(self):
return 'Sentence(%s)' % reprlib.repr(self.text) # <4>
| Sentence |
python | numba__numba | numba/cuda/tests/cudapy/cache_with_cpu_usecases.py | {
"start": 159,
"end": 586
} | class ____(UseCase):
def _call(self, ret, *args):
self._func(ret, *args)
# Using the same function as a cached CPU and CUDA-jitted function
def target_shared_assign(r, x):
r[()] = x[()]
assign_cuda_kernel = cuda.jit(cache=True)(target_shared_assign)
assign_cuda = CUDAUseCase(assign_cuda_kernel)
assign_cpu_jitted = njit(cache=True)(target_shared_assign)
assign_cpu = CPUUseCase(assign_cpu_jitted)
| CPUUseCase |
python | readthedocs__readthedocs.org | readthedocs/builds/managers.py | {
"start": 3443,
"end": 3809
} | class ____(models.Manager):
"""
Build manager that only includes internal version builds.
It will exclude pull request/merge request version builds from the queries
and only include BRANCH, TAG, UNKNOWN type Version builds.
"""
def get_queryset(self):
return super().get_queryset().exclude(version__type=EXTERNAL)
| InternalBuildManager |
python | spyder-ide__spyder | spyder/plugins/run/api.py | {
"start": 7679,
"end": 12941
} | class ____(QObject):
"""
Interface used to retrieve inputs to run on a code executor.
This API needs to be implemented by any plugin that wants to provide
an input/file to a code runner, e.g. editor files to be executed in
the IPythonConsole. It also needs to be covariant with respect to
:class:`spyder.api.plugins.SpyderDockablePlugin`.
"""
def get_run_configuration(self, uuid: str) -> RunConfiguration:
"""
Return the run information for the specified identifier.
Arguments
---------
uuid: str
The unique identifier for the requested run configuration. Such
id should have been registered previously via
`register_run_configuration_metadata` on the Run plugin.
Returns
-------
configuration: RunConfiguration
A dictionary containing the information required by the run
executor.
"""
raise NotImplementedError(f'{type(self)} must implement '
'get_run_configuration')
def get_run_configuration_per_context(
self,
context: str,
extra_action_name: Optional[str] = None,
context_modificator: Optional[str] = None,
re_run: bool = False
) -> Optional[RunConfiguration]:
"""
Return the run information for the given context.
The run configuration requested must be returned based on the
currently focused file/object/etc.
Arguments
---------
context: str
The context identifier for which the run configuration
is requested.
extra_action_name: Optional[str]
If not None, the name of the action that the provider should take
after gathering the run configuration input. Else, no action needs
to take place.
context_modificator: Optional[str]
str describing how to alter the context, e.g. run selection
<from line>.
re_run: bool
If True, then the requested configuration should correspond to the
last executed configuration for the given context.
Returns
-------
configuration: Optional[RunConfiguration]
A dictionary containing the information required by the run
executor. If None, then the provider indicates to the Run plugin
that the input needs to be discarded.
"""
raise NotImplementedError(f'{type(self)} must implement '
'get_run_configuration_per_context')
def focus_run_configuration(self, uuid: str):
"""
Switch the focus of the run configuration provider to
the run configuration given by parameter.
Arguments
---------
uuid: str
The unique identifier for the run configuration that should be
focused on. Such id should have been registered previously via
`register_run_configuration_metadata` on the Run plugin.
"""
raise NotImplementedError(f'{type(self)} must implement '
'focus_run_configuration')
RunExecuteFunc = Callable[
[RunConfiguration, ExtendedRunExecutionParameters],
List[PossibleRunResult]]
def run_execute(
func: RunExecuteFunc = None,
extension: Optional[Union[str, List[str]]] = None,
context: Optional[Union[str, List[str]]] = None
) -> RunExecuteFunc:
"""
Method decorator used to mark a method as an executor for a given file
extension and context.
The methods that use this decorator must have the following signature:
.. code-block:: python
def execution_handler(
input: RunConfiguration,
conf: ExtendedRunExecutionParameters) -> List[PossibleRunResult]:
...
Arguments
---------
func: RunExecuteFunc
Method to mark as an executor handler. Given by default when applying
the decorator.
extension: Optional[Union[str, List[str]]]
The file extension or list of file extensions that the executor
should handle. If None then the method will handle all extensions.
context: Optional[Union[str, List[str]]]
The execution context or list of contexts that the executor should
handle. If None then the method will handle all contexts.
Returns
-------
func: RunExecuteFunc
The same method that was given as input.
Notes
-----
The method must not crash or raise an exception, instead the
`RunResult` must wrap a `RunResultError` structure.
"""
if func is None:
return functools.partial(
run_execute, extension=extension, context=context)
if extension is None:
extension = '__extension'
if context is None:
context = '__context'
if isinstance(extension, str):
extension = [extension]
if isinstance(context, str):
context = [context]
run_exec_list = []
for ext in extension:
for ctx in context:
run_exec_list.append((ext, ctx))
func._run_exec = run_exec_list
return func
| RunConfigurationProvider |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_blocks/test_captions.py | {
"start": 10861,
"end": 20446
} | class ____(util.MdCase):
"""Test Blocks caption cases with enabled `auto`."""
extension = ['pymdownx.blocks.caption', 'md_in_html']
extension_configs = {
'pymdownx.blocks.caption': {
'auto': False
}
}
def test_caption(self):
"""Test basic caption with `auto`."""
self.check_markdown(
R'''
A paragraph with a caption.
/// figure-caption | 1
This is the caption.
///
''',
R'''
<figure id="__figure-caption_1">
<p>A paragraph with a caption.</p>
<figcaption>
<p><span class="caption-prefix">Figure 1.</span> This is the caption.</p>
</figcaption>
</figure>
''',
True
)
def test_consecutive_captions(self):
"""Test consecutive captions with `auto`."""
self.check_markdown(
R'''
A paragraph with a caption.
/// figure-caption | 1
This is the caption.
///
A paragraph with a caption.
/// figure-caption | 2
This is the caption.
///
''',
R'''
<figure id="__figure-caption_1">
<p>A paragraph with a caption.</p>
<figcaption>
<p><span class="caption-prefix">Figure 1.</span> This is the caption.</p>
</figcaption>
</figure>
<figure id="__figure-caption_2">
<p>A paragraph with a caption.</p>
<figcaption>
<p><span class="caption-prefix">Figure 2.</span> This is the caption.</p>
</figcaption>
</figure>
''',
True
)
def test_nested_captions(self):
"""Test nested captions with `auto`."""
self.check_markdown(
R'''
A paragraph with a caption.
/// figure-caption | 1.1.1
Level 3 caption.
///
/// figure-caption | 1.1
Level 2 caption.
///
/// figure-caption | 1
Level 1 caption.
///
''',
R'''
<figure id="__figure-caption_1">
<figure id="__figure-caption_1_1">
<figure id="__figure-caption_1_1_1">
<p>A paragraph with a caption.</p>
<figcaption>
<p><span class="caption-prefix">Figure 1.1.1.</span> Level 3 caption.</p>
</figcaption>
</figure>
<figcaption>
<p><span class="caption-prefix">Figure 1.1.</span> Level 2 caption.</p>
</figcaption>
</figure>
<figcaption>
<p><span class="caption-prefix">Figure 1.</span> Level 1 caption.</p>
</figcaption>
</figure>
''',
True
)
def test_nested_consecutive_captions(self):
"""Test nested captions with `auto`."""
self.check_markdown(
R'''
A paragraph with a caption.
/// figure-caption | 1.1.1
Level 3 caption.
///
/// figure-caption | 1.1
Level 2 caption.
///
/// figure-caption | 1
Level 1 caption.
///
A paragraph with a caption.
/// figure-caption | 2.1
Level 2 caption.
///
/// figure-caption | 2
Level 1 caption.
///
''',
R'''
<figure id="__figure-caption_1">
<figure id="__figure-caption_1_1">
<figure id="__figure-caption_1_1_1">
<p>A paragraph with a caption.</p>
<figcaption>
<p><span class="caption-prefix">Figure 1.1.1.</span> Level 3 caption.</p>
</figcaption>
</figure>
<figcaption>
<p><span class="caption-prefix">Figure 1.1.</span> Level 2 caption.</p>
</figcaption>
</figure>
<figcaption>
<p><span class="caption-prefix">Figure 1.</span> Level 1 caption.</p>
</figcaption>
</figure>
<figure id="__figure-caption_2">
<figure id="__figure-caption_2_1">
<p>A paragraph with a caption.</p>
<figcaption>
<p><span class="caption-prefix">Figure 2.1.</span> Level 2 caption.</p>
</figcaption>
</figure>
<figcaption>
<p><span class="caption-prefix">Figure 2.</span> Level 1 caption.</p>
</figcaption>
</figure>
''',
True
)
def test_caption_inline_id(self):
"""Test caption with inline shorthand ID."""
self.check_markdown(
R'''
Paragraph
/// figure-caption | #custom-id
Caption text.
///
''',
R'''
<figure id="custom-id">
<p>Paragraph</p>
<figcaption>
<p>Caption text.</p>
</figcaption>
</figure>
''',
True
)
def test_caption_inline_id_prepend(self):
"""Test caption with inline shorthand ID and prepend marker."""
self.check_markdown(
R'''
Text
/// figure-caption | < #custom-prepend
Prepended caption.
///
''',
R'''
<figure id="custom-prepend">
<figcaption>
<p>Prepended caption.</p>
</figcaption>
<p>Text</p>
</figure>
''',
True
)
def test_inject_new_p_in_caption(self):
"""Test `auto` cases that require the prefix to be injected in a new paragraph."""
self.check_markdown(
R"""
Test
/// figure-caption | 1
///
Test
/// figure-caption | 2
> blockquote
///
""",
R"""
<figure id="__figure-caption_1">
<p>Test</p>
<figcaption>
<p><span class="caption-prefix">Figure 1.</span></p>
</figcaption>
</figure>
<figure id="__figure-caption_2">
<p>Test</p>
<figcaption>
<p><span class="caption-prefix">Figure 2.</span></p>
<blockquote>
<p>blockquote</p>
</blockquote>
</figcaption>
</figure>
""",
True
)
def test_empty_paragraph(self):
"""Test `auto` cases that require prefix to inject a new paragraph."""
self.check_markdown(
R"""
Test
/// figure-caption | 1
<p markdown></p>
///
""",
R"""
<figure id="__figure-caption_1">
<p>Test</p>
<figcaption>
<p><span class="caption-prefix">Figure 1.</span></p>
</figcaption>
</figure>
""",
True
)
def test_manual_prepend(self):
"""Test manual prepend."""
self.check_markdown(
R"""
Text
/// figure-caption | < 2
Prepended
///
Text
/// figure-caption | > 5
Appended
///
""",
R"""
<figure id="__figure-caption_2">
<figcaption>
<p><span class="caption-prefix">Figure 2.</span> Prepended</p>
</figcaption>
<p>Text</p>
</figure>
<figure id="__figure-caption_5">
<p>Text</p>
<figcaption>
<p><span class="caption-prefix">Figure 5.</span> Appended</p>
</figcaption>
</figure>
""",
True
)
def test_depth(self):
"""Depth is not really supported in manual, so a generic response is expected."""
self.check_markdown(
R"""
Paragraph
/// figure-caption
Caption 1
///
Paragraph
/// figure-caption | ^1
Caption 1.1
///
Paragraph
/// figure-caption | ^2
Caption 1.1.1
///
Paragraph
/// figure-caption | ^3
Caption 1.1.1.1
///
""",
"""
<figure>
<p>Paragraph</p>
<figcaption>
<p>Caption 1</p>
</figcaption>
</figure>
<figure id="__figure-caption_1_1">
<p>Paragraph</p>
<figcaption>
<p><span class="caption-prefix">Figure 1.1.</span> Caption 1.1</p>
</figcaption>
</figure>
<figure id="__figure-caption_1_1_1">
<p>Paragraph</p>
<figcaption>
<p><span class="caption-prefix">Figure 1.1.1.</span> Caption 1.1.1</p>
</figcaption>
</figure>
<figure id="__figure-caption_1_1_1_1">
<p>Paragraph</p>
<figcaption>
<p><span class="caption-prefix">Figure 1.1.1.1.</span> Caption 1.1.1.1</p>
</figcaption>
</figure>
""",
True
)
| TestBlocksCaptionPrefix |
python | ansible__ansible | test/lib/ansible_test/_internal/data.py | {
"start": 1074,
"end": 9532
} | class ____:
"""Data context providing details about the current execution environment for ansible-test."""
def __init__(self) -> None:
content_path = os.environ.get('ANSIBLE_TEST_CONTENT_ROOT')
current_path = os.getcwd()
layout_providers = get_path_provider_classes(LayoutProvider)
source_providers = get_path_provider_classes(SourceProvider)
self.__layout_providers = layout_providers
self.__source_providers = source_providers
self.__ansible_source: t.Optional[tuple[tuple[str, str], ...]] = None
self.payload_callbacks: list[c.Callable[[PayloadConfig], None]] = []
if content_path:
content, source_provider = self.__create_content_layout(layout_providers, source_providers, content_path, False)
elif ANSIBLE_SOURCE_ROOT and is_subdir(current_path, ANSIBLE_SOURCE_ROOT):
content, source_provider = self.__create_content_layout(layout_providers, source_providers, ANSIBLE_SOURCE_ROOT, False)
else:
content, source_provider = self.__create_content_layout(layout_providers, source_providers, current_path, True)
self.content: ContentLayout = content
self.source_provider = source_provider
def create_collection_layouts(self) -> list[ContentLayout]:
"""
Return a list of collection layouts, one for each collection in the same collection root as the current collection layout.
An empty list is returned if the current content layout is not a collection layout.
"""
layout = self.content
collection = layout.collection
if not collection:
return []
root_path = os.path.join(collection.root, 'ansible_collections')
display.info('Scanning collection root: %s' % root_path, verbosity=1)
namespace_names = sorted(name for name in os.listdir(root_path) if os.path.isdir(os.path.join(root_path, name)))
collections = []
for namespace_name in namespace_names:
namespace_path = os.path.join(root_path, namespace_name)
collection_names = sorted(name for name in os.listdir(namespace_path) if os.path.isdir(os.path.join(namespace_path, name)))
for collection_name in collection_names:
collection_path = os.path.join(namespace_path, collection_name)
if collection_path == os.path.join(collection.root, collection.directory):
collection_layout = layout
else:
collection_layout = self.__create_content_layout(self.__layout_providers, self.__source_providers, collection_path, False)[0]
file_count = len(collection_layout.all_files())
if not file_count:
continue
display.info('Including collection: %s (%d files)' % (collection_layout.collection.full_name, file_count), verbosity=1)
collections.append(collection_layout)
return collections
@staticmethod
def __create_content_layout(
layout_providers: list[t.Type[LayoutProvider]],
source_providers: list[t.Type[SourceProvider]],
root: str,
walk: bool,
) -> t.Tuple[ContentLayout, SourceProvider]:
"""Create a content layout using the given providers and root path."""
try:
layout_provider = find_path_provider(LayoutProvider, layout_providers, root, walk)
except ProviderNotFoundForPath:
layout_provider = UnsupportedLayout(root)
try:
# Begin the search for the source provider at the layout provider root.
# This intentionally ignores version control within subdirectories of the layout root, a condition which was previously an error.
# Doing so allows support for older git versions for which it is difficult to distinguish between a super project and a sub project.
# It also provides a better user experience, since the solution for the user would effectively be the same -- to remove the nested version control.
if isinstance(layout_provider, UnsupportedLayout):
source_provider: SourceProvider = UnsupportedSource(layout_provider.root)
else:
source_provider = find_path_provider(SourceProvider, source_providers, layout_provider.root, walk)
except ProviderNotFoundForPath:
source_provider = UnversionedSource(layout_provider.root)
layout = layout_provider.create(layout_provider.root, source_provider.get_paths(layout_provider.root))
return layout, source_provider
def __create_ansible_source(self):
"""Return a tuple of Ansible source files with both absolute and relative paths."""
if not ANSIBLE_SOURCE_ROOT:
sources = []
source_provider = InstalledSource(ANSIBLE_LIB_ROOT)
sources.extend((os.path.join(source_provider.root, path), os.path.join('lib', 'ansible', path))
for path in source_provider.get_paths(source_provider.root))
source_provider = InstalledSource(ANSIBLE_TEST_ROOT)
sources.extend((os.path.join(source_provider.root, path), os.path.join('test', 'lib', 'ansible_test', path))
for path in source_provider.get_paths(source_provider.root))
return tuple(sources)
if self.content.is_ansible:
return tuple((os.path.join(self.content.root, path), path) for path in self.content.all_files())
try:
source_provider = find_path_provider(SourceProvider, self.__source_providers, ANSIBLE_SOURCE_ROOT, False)
except ProviderNotFoundForPath:
source_provider = UnversionedSource(ANSIBLE_SOURCE_ROOT)
return tuple((os.path.join(source_provider.root, path), path) for path in source_provider.get_paths(source_provider.root))
@property
def ansible_source(self) -> tuple[tuple[str, str], ...]:
"""Return a tuple of Ansible source files with both absolute and relative paths."""
if not self.__ansible_source:
self.__ansible_source = self.__create_ansible_source()
return self.__ansible_source
def register_payload_callback(self, callback: c.Callable[[PayloadConfig], None]) -> None:
"""Register the given payload callback."""
self.payload_callbacks.append(callback)
def check_layout(self) -> None:
"""Report an error if the layout is unsupported."""
if self.content.unsupported:
raise ApplicationError(self.explain_working_directory())
def explain_working_directory(self) -> str:
"""Return a message explaining the working directory requirements."""
blocks = [
'The current working directory must be within the source tree being tested.',
'',
]
if ANSIBLE_SOURCE_ROOT:
blocks.append(f'Testing Ansible: {ANSIBLE_SOURCE_ROOT}/')
blocks.append('')
cwd = os.getcwd()
blocks.append('Testing an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/')
blocks.append('Example #1: community.general -> ~/code/ansible_collections/community/general/')
blocks.append('Example #2: ansible.util -> ~/.ansible/collections/ansible_collections/ansible/util/')
blocks.append('')
blocks.append(f'Current working directory: {cwd}/')
if os.path.basename(os.path.dirname(cwd)) == 'ansible_collections':
blocks.append(f'Expected parent directory: {os.path.dirname(cwd)}/{{namespace}}/{{collection}}/')
elif os.path.basename(cwd) == 'ansible_collections':
blocks.append(f'Expected parent directory: {cwd}/{{namespace}}/{{collection}}/')
elif 'ansible_collections' not in cwd.split(os.path.sep):
blocks.append('No "ansible_collections" parent directory was found.')
if isinstance(self.content.unsupported, list):
blocks.extend(self.content.unsupported)
message = '\n'.join(blocks)
return message
@cache
def data_context() -> DataContext:
"""Initialize provider plugins."""
provider_types = (
'layout',
'source',
)
for provider_type in provider_types:
import_plugins('provider/%s' % provider_type)
context = DataContext()
return context
@dataclasses.dataclass(frozen=True)
| DataContext |
python | un33k__django-uuslug | uuslug/apps.py | {
"start": 110,
"end": 272
} | class ____(DjangoAppConfig):
"""
Configuration entry point for the uuslug app
"""
label = name = 'uuslug'
verbose_name = _("uuslug app")
| AppConfig |
python | scipy__scipy | scipy/stats/tests/test_qmc.py | {
"start": 55291,
"end": 58233
} | class ____:
def test_lloyd(self):
# quite sensible seed as it can go up before going further down
rng = np.random.RandomState(1809831)
sample = rng.uniform(0, 1, size=(128, 2))
base_l1 = _l1_norm(sample)
base_l2 = l2_norm(sample)
for _ in range(4):
sample_lloyd = _lloyd_centroidal_voronoi_tessellation(
sample, maxiter=1,
)
curr_l1 = _l1_norm(sample_lloyd)
curr_l2 = l2_norm(sample_lloyd)
# higher is better for the distance measures
assert base_l1 < curr_l1
assert base_l2 < curr_l2
base_l1 = curr_l1
base_l2 = curr_l2
sample = sample_lloyd
def test_lloyd_non_mutating(self):
"""
Verify that the input samples are not mutated in place and that they do
not share memory with the output.
"""
sample_orig = np.array([[0.1, 0.1],
[0.1, 0.2],
[0.2, 0.1],
[0.2, 0.2]])
sample_copy = sample_orig.copy()
new_sample = _lloyd_centroidal_voronoi_tessellation(
sample=sample_orig
)
assert_allclose(sample_orig, sample_copy)
assert not np.may_share_memory(sample_orig, new_sample)
def test_lloyd_errors(self):
with pytest.raises(ValueError, match=r"`sample` is not a 2D array"):
sample = [0, 1, 0.5]
_lloyd_centroidal_voronoi_tessellation(sample)
msg = r"`sample` dimension is not >= 2"
with pytest.raises(ValueError, match=msg):
sample = [[0], [0.4], [1]]
_lloyd_centroidal_voronoi_tessellation(sample)
msg = r"`sample` is not in unit hypercube"
with pytest.raises(ValueError, match=msg):
sample = [[-1.1, 0], [0.1, 0.4], [1, 2]]
_lloyd_centroidal_voronoi_tessellation(sample)
# mindist
def l2_norm(sample):
return distance.pdist(sample).min()
@pytest.mark.parametrize('engine', [qmc.Halton, qmc.Sobol,
qmc.LatinHypercube, qmc.PoissonDisk])
def test_deterministic(engine):
seed_number = 2359834584
rng = np.random.RandomState(seed_number)
res1 = engine(d=1, seed=rng).random(4)
rng = np.random.RandomState(seed_number)
res2 = engine(d=1, seed=rng).random(4)
assert_equal(res1, res2)
rng = np.random.default_rng(seed_number)
res1 = engine(d=1, seed=rng).random(4)
res2 = engine(d=1, rng=seed_number).random(4)
assert_equal(res1, res2)
rng = np.random.default_rng(seed_number)
res3 = engine(d=1, rng=rng).random(4)
assert_equal(res2, res1)
assert_equal(res3, res1)
message = "got multiple values for argument now known as `rng`"
with pytest.raises(TypeError, match=message):
engine(d=1, rng=seed_number, seed=seed_number)
| TestLloyd |
python | tensorflow__tensorflow | tensorflow/python/ops/math_ops_test.py | {
"start": 24577,
"end": 27577
} | class ____(test_util.TensorFlowTestCase):
def testPartials(self):
"""Test that previously revealed a bug in buffer forwarding for AddN."""
partials = []
for _ in range(98):
partials.append(math_ops.add_n([constant_op.constant(1)]))
partials.append(
math_ops.add_n([constant_op.constant(1),
constant_op.constant(1)]))
res = math_ops.add_n(partials) + constant_op.constant(0)
with test_util.use_gpu():
self.assertAllEqual(res, 100)
def testFloat(self):
np.random.seed(12345)
for num_inputs in range(1, 10):
x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(num_inputs)]
tf_x = ops.convert_n_to_tensor(x)
with test_util.use_gpu():
self.assertAllClose(sum(x), math_ops.add_n(tf_x))
self.assertAllClose(x[0] * num_inputs,
math_ops.add_n([tf_x[0]] * num_inputs))
def testInt(self):
np.random.seed(54321)
for num_inputs in range(1, 10):
x = [
np.random.randint(-128, 128, (5, 4, 3, 2, 1))
for _ in range(num_inputs)
]
tf_x = ops.convert_n_to_tensor(x)
with test_util.use_gpu():
self.assertAllEqual(sum(x), math_ops.add_n(tf_x))
self.assertAllEqual(x[0] * num_inputs,
math_ops.add_n([tf_x[0]] * num_inputs))
def testGrad(self):
np.random.seed(42)
for num_inputs in range(1, 10):
with test_util.use_gpu():
input_vars = [
variables.Variable(10.0 * np.random.random())
for _ in range(0, num_inputs)
]
self.evaluate(variables.global_variables_initializer())
if context.executing_eagerly():
with backprop.GradientTape() as tape:
tape.watch(input_vars)
addn = math_ops.add_n(input_vars)
add_n_grad = tape.gradient(addn, input_vars)
else:
addn = math_ops.add_n(input_vars)
add_n_grad = gradients.gradients(addn, input_vars)
self.assertAllEqual(
np.repeat(1.0, num_inputs), # d/dx (x + y + ...) = 1
[self.evaluate(g) for g in add_n_grad])
def testIndexedSlices(self):
slc = indexed_slices.IndexedSlices(
array_ops.constant([1, 2], shape=[1, 2]), array_ops.constant([1]),
array_ops.constant([2, 2]))
slc_as_dense = np.array([[0, 0], [1, 2]])
with test_util.use_gpu():
# add_n currently always converts IndexedSlices to dense
self.assertAllEqual(slc_as_dense, math_ops.add_n([slc]))
self.assertAllEqual(2 * slc_as_dense, math_ops.add_n([slc, slc]))
def test_iterable(self):
"""Test that add_n supports iterables (e.g. generators and dict values)."""
def fn():
yield 1
yield 2
values_dict = {"a": 1, "b": 2}
with test_util.use_gpu():
self.assertAllEqual(3, math_ops.add_n(fn()))
self.assertAllEqual(3, math_ops.add_n(values_dict.values()))
@test_util.run_all_in_graph_and_eager_modes
| AddNTest |
python | dagster-io__dagster | python_modules/automation/automation/dagster_docs/public_api_validator.py | {
"start": 1057,
"end": 16765
} | class ____:
"""Validates consistency between @public decorators and RST documentation."""
def __init__(self, dagster_root: Path):
self.dagster_root = dagster_root
self.python_modules_dir = dagster_root / "python_modules"
self.rst_docs_dir = dagster_root / "docs" / "sphinx" / "sections" / "api" / "apidocs"
self._package_paths_cache: Optional[dict[str, Path]] = None
def _discover_packages(self) -> dict[str, Path]:
"""Discover all available packages and their filesystem paths.
Returns:
Dict mapping package names (with underscores) to their filesystem paths
"""
if self._package_paths_cache is not None:
return self._package_paths_cache
packages = get_public_dagster_packages(self.dagster_root)
self._package_paths_cache = {pkg.module_name: pkg.path for pkg in packages}
return self._package_paths_cache
def find_public_symbols(self, exclude_modules: Optional[set[str]] = None) -> list[PublicSymbol]:
"""Find all symbols marked with @public decorator in dagster modules.
Args:
exclude_modules: Set of module paths to exclude from scanning
Returns:
List of PublicSymbol objects
"""
exclude_modules = exclude_modules or set()
public_symbols = []
# Scan dagster core module
dagster_dir = self.python_modules_dir / "dagster" / "dagster"
public_symbols.extend(
self._scan_directory_for_public(dagster_dir, "dagster", exclude_modules)
)
# Scan library modules
libraries_dir = self.python_modules_dir / "libraries"
if libraries_dir.exists():
for lib_dir in libraries_dir.iterdir():
if lib_dir.is_dir() and lib_dir.name.startswith("dagster-"):
lib_package_dir = lib_dir / lib_dir.name.replace("-", "_")
if lib_package_dir.exists():
public_symbols.extend(
self._scan_directory_for_public(
lib_package_dir, lib_dir.name.replace("-", "_"), exclude_modules
)
)
return public_symbols
def _scan_directory_for_public(
self, directory: Path, base_module: str, exclude_modules: set[str]
) -> list[PublicSymbol]:
"""Recursively scan a directory for @public decorated symbols."""
public_symbols = []
for py_file in directory.rglob("*.py"):
if py_file.name.startswith("_") and py_file.name != "__init__.py":
continue
relative_path = py_file.relative_to(directory)
module_parts = [base_module] + list(relative_path.with_suffix("").parts)
if relative_path.name == "__init__.py":
module_parts = module_parts[:-1]
module_path = ".".join(module_parts)
if module_path in exclude_modules:
continue
# Skip dagster_airbyte generated classes
if self._is_dagster_airbyte_generated(module_path):
continue
try:
symbols = self._extract_public_symbols_from_file(py_file, module_path)
public_symbols.extend(symbols)
except Exception:
# Skip files that can't be parsed
continue
return public_symbols
def _extract_public_symbols_from_file(
self, file_path: Path, module_path: str
) -> list[PublicSymbol]:
"""Extract @public decorated symbols from a Python file."""
public_symbols = []
try:
with open(file_path, encoding="utf-8") as f:
content = f.read()
except Exception:
return public_symbols
try:
tree = ast.parse(content)
except SyntaxError:
return public_symbols
# Look for @public decorated symbols at module level only
# We exclude methods since they don't need to be individually documented in RST
for node in ast.walk(tree):
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
if self._has_public_decorator(node) and self._is_module_level_symbol(tree, node):
symbol_type = "class" if isinstance(node, ast.ClassDef) else "function"
# Check if this symbol is exported at top-level
is_exported = self._is_symbol_exported(module_path, node.name)
# Use the exported module path if the symbol is exported
exported_module_path = self._get_exported_module_path(
module_path, node.name, is_exported
)
public_symbols.append(
PublicSymbol(
module_path=exported_module_path,
symbol_name=node.name,
symbol_type=symbol_type,
is_exported=is_exported,
source_file=str(file_path),
)
)
return public_symbols
def _has_public_decorator(
self, node: Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]
) -> bool:
"""Check if a node has @public decorator."""
for decorator in node.decorator_list:
if isinstance(decorator, ast.Name) and decorator.id == "public":
return True
elif isinstance(decorator, ast.Attribute) and decorator.attr == "public":
return True
return False
def _is_module_level_symbol(self, tree: ast.Module, node: ast.AST) -> bool:
"""Check if a node is defined at module level (not inside a class)."""
# Check if the node is directly in the module body
return node in tree.body
def _is_dagster_airbyte_generated(self, module_path: str) -> bool:
"""Check if this is a dagster_airbyte generated module that should be excluded."""
return module_path.startswith(
"dagster_airbyte.managed.generated.sources"
) or module_path.startswith("dagster_airbyte.managed.generated.destinations")
def _is_symbol_exported(self, module_path: str, symbol_name: str) -> bool:
"""Check if a symbol is available as a top-level export."""
try:
if module_path.startswith("dagster."):
# Check if it's exported from main dagster module
return self._check_module_export("dagster", symbol_name)
elif module_path.startswith("dagster_"):
# Check if it's exported from the library's top-level
lib_name = module_path.split(".")[0]
return self._check_module_export(lib_name, symbol_name)
except Exception:
pass
return False
def _get_exported_module_path(
self, internal_module_path: str, symbol_name: str, is_exported: bool
) -> str:
"""Get the canonical exported module path for a symbol.
If a symbol is exported to a top-level module (dagster or library),
use that as the canonical path instead of the internal implementation path.
"""
if not is_exported:
return internal_module_path
# Check if it's exported from main dagster module
if internal_module_path.startswith("dagster.") and self._check_module_export(
"dagster", symbol_name
):
return "dagster"
# Check if it's exported from a library top-level
elif internal_module_path.startswith("dagster_"):
lib_name = internal_module_path.split(".")[0]
if self._check_module_export(lib_name, symbol_name):
return lib_name
# Fallback to internal path if not clearly exported elsewhere
return internal_module_path
def _check_module_export(self, module_name: str, symbol_name: str) -> bool:
"""Check if symbol is exported from the specified module using dynamic import."""
try:
import importlib
import sys
# Use discovered packages to find the correct path
packages = self._discover_packages()
if module_name not in packages:
return False
package_path = str(packages[module_name])
if package_path not in sys.path:
sys.path.insert(0, package_path)
# Import the module and check if symbol exists
module = importlib.import_module(module_name)
return hasattr(module, symbol_name)
except Exception:
# Don't fall back to regex - fail the test to surface import issues
return False
def find_rst_documented_symbols(
self, exclude_files: Optional[set[str]] = None
) -> list[RstSymbol]:
"""Find all symbols documented in RST files.
Args:
exclude_files: Set of RST file paths to exclude
Returns:
List of RstSymbol objects
"""
exclude_files = exclude_files or set()
rst_symbols = []
for rst_file in self.rst_docs_dir.rglob("*.rst"):
if str(rst_file) in exclude_files:
continue
try:
symbols = self._extract_symbols_from_rst(rst_file)
rst_symbols.extend(symbols)
except Exception:
# Skip files that can't be processed
continue
return rst_symbols
def _extract_symbols_from_rst(self, rst_file: Path) -> list[RstSymbol]:
"""Extract documented symbols from an RST file."""
rst_symbols = []
try:
with open(rst_file, encoding="utf-8") as f:
content = f.read()
except Exception:
return rst_symbols
# Look for Sphinx autodoc directives
patterns = [
(r"^\.\. autoclass:: ([^\s]+)", "autoclass"),
(r"^\.\. autofunction:: ([^\s]+)", "autofunction"),
(r"^\.\. autodecorator:: ([^\s]+)", "autodecorator"),
]
for pattern, directive in patterns:
matches = re.finditer(pattern, content, re.MULTILINE)
for match in matches:
symbol_path = match.group(1)
# Parse module and symbol name
if "." in symbol_path:
parts = symbol_path.split(".")
module_path = ".".join(parts[:-1])
symbol_name = parts[-1]
else:
# Assume it's in current module context
module_path = self._infer_module_from_rst_path(rst_file)
symbol_name = symbol_path
rst_symbols.append(
RstSymbol(
module_path=module_path,
symbol_name=symbol_name,
rst_directive=directive,
rst_file=str(rst_file),
)
)
return rst_symbols
def _infer_module_from_rst_path(self, rst_file: Path) -> str:
"""Infer the module path from RST file location.
For libraries/dagster-some-library.rst files, we assume symbols are exported
at the top-level of that library (dagster_some_library).
"""
relative_path = rst_file.relative_to(self.rst_docs_dir)
if relative_path.parts[0] == "dagster":
return "dagster"
elif relative_path.parts[0] == "libraries":
if len(relative_path.parts) > 1:
# For library RST files like libraries/dagster-airlift.rst,
# assume symbols are exported at library top-level: dagster_airlift
lib_file = relative_path.parts[1]
if lib_file.endswith(".rst"):
lib_name = lib_file[:-4].replace("-", "_") # Remove .rst and convert dashes
return lib_name
else:
lib_name = lib_file.replace("-", "_")
return lib_name
return "unknown"
def validate_public_in_rst(
self,
public_symbols: list[PublicSymbol],
rst_symbols: list[RstSymbol],
exclude_symbols: Optional[set[str]] = None,
) -> list[ValidationIssue]:
"""Validate that @public symbols are documented in RST files.
Args:
public_symbols: List of symbols marked with @public
rst_symbols: List of symbols documented in RST
exclude_symbols: Set of symbols to exclude from validation
Returns:
List of validation issues found
"""
exclude_symbols = exclude_symbols or set()
issues = []
# Create lookup for RST symbols
rst_lookup = {(sym.module_path, sym.symbol_name) for sym in rst_symbols}
for pub_sym in public_symbols:
symbol_key = f"{pub_sym.module_path}.{pub_sym.symbol_name}"
if symbol_key in exclude_symbols:
continue
# Check if @public symbol has RST documentation
if (pub_sym.module_path, pub_sym.symbol_name) not in rst_lookup:
issues.append(
ValidationIssue(
issue_type="missing_rst",
symbol_name=pub_sym.symbol_name,
module_path=pub_sym.module_path,
details="Symbol marked @public but not documented in RST files",
)
)
# Check if @public symbol is exported at top-level
if not pub_sym.is_exported:
issues.append(
ValidationIssue(
issue_type="missing_export",
symbol_name=pub_sym.symbol_name,
module_path=pub_sym.module_path,
details="Symbol marked @public but not available as top-level export",
)
)
return issues
def validate_rst_has_public(
self,
rst_symbols: list[RstSymbol],
public_symbols: list[PublicSymbol],
exclude_symbols: Optional[set[str]] = None,
) -> list[ValidationIssue]:
"""Validate that RST documented symbols have @public decorators.
Args:
rst_symbols: List of symbols documented in RST
public_symbols: List of symbols marked with @public
exclude_symbols: Set of symbols to exclude from validation
Returns:
List of validation issues found
"""
exclude_symbols = exclude_symbols or set()
issues = []
# Create lookup for @public symbols
public_lookup = {(sym.module_path, sym.symbol_name) for sym in public_symbols}
for rst_sym in rst_symbols:
symbol_key = f"{rst_sym.module_path}.{rst_sym.symbol_name}"
if symbol_key in exclude_symbols:
continue
# Check if RST documented symbol has @public decorator
if (rst_sym.module_path, rst_sym.symbol_name) not in public_lookup:
issues.append(
ValidationIssue(
issue_type="missing_public",
symbol_name=rst_sym.symbol_name,
module_path=rst_sym.module_path,
details="Symbol documented in RST but missing @public decorator",
)
)
return issues
| PublicApiValidator |
python | python-rapidjson__python-rapidjson | tests/test_streams.py | {
"start": 503,
"end": 1329
} | class ____(io.StringIO):
def __init__(self):
super().__init__()
self.chunks = []
def write(self, s):
super().write(s)
self.chunks.append(s)
def test_chunked_stream():
stream = ChunkedStream()
rj.dump('1234567890', stream)
assert len(stream.chunks) == 1
stream = ChunkedStream()
rj.dump('1234567890', stream, chunk_size=4)
assert len(stream.chunks) == 3
assert stream.chunks == ['"123', '4567', '890"']
stream = ChunkedStream()
rj.dump('~𓆙~', stream, ensure_ascii=False, chunk_size=4)
assert len(stream.chunks) == 3
assert stream.chunks == ['"~', '𓆙', '~"']
stream = ChunkedStream()
rj.dump('~𓆙~', stream, chunk_size=4)
assert len(stream.chunks) == 4
assert stream.chunks == ['"~\\u', 'D80C', '\\uDD', '99~"']
| ChunkedStream |
python | google__jax | jax/experimental/mosaic/gpu/profiler.py | {
"start": 10422,
"end": 14668
} | class ____:
def __init__(
self,
spec: ProfilerSpec,
smem_buffer: ir.Value,
gmem_buffer: ir.Value,
wrap_in_custom_primitive: bool,
):
i32 = ir.IntegerType.get_signless(32)
index = ir.IndexType.get()
self.spec = spec
self.entries_per_wg = spec.entries_per_warpgroup
self.wrap_in_custom_primitive = wrap_in_custom_primitive
wg_idx = warpgroup_idx(sync=False)
wg_offset = arith.index_cast(
index, arith.muli(wg_idx, c(self.entries_per_wg, i32))
)
smem_buffer = memref_slice(smem_buffer, ds(wg_offset, self.entries_per_wg))
is_profiling_thread = arith.cmpi(
arith.CmpIPredicate.eq,
arith.remui(thread_idx(), c(WARPGROUP_SIZE, i32)),
c(0, i32),
)
# Hopefully mem2reg will remove the allocation.
offset = memref.alloca(ir.MemRefType.get((), index), [], [])
memref.store(c(0, index), offset, [])
self.ctx = _ProfilerCtx(
start=globaltimer("low"),
is_profiling_thread=is_profiling_thread,
smem_buffer=smem_buffer,
gmem_buffer=gmem_buffer,
offset=offset,
)
@contextlib.contextmanager
def _profiler_ctx(self):
if not self.wrap_in_custom_primitive:
yield self.ctx
return
def fields(obj) -> list[ir.Value]:
return [getattr(obj, field.name) for field in dataclasses.fields(obj)]
op = dialect.CustomPrimitiveOp(
result=[],
operands_=fields(self.ctx),
in_layouts=[],
in_transforms=[ir.ArrayAttr.get([])],
out_layouts=[],
)
args_ty = [arg.type for arg in op.operands_]
block = op.body.blocks.append(*args_ty)
with ir.InsertionPoint(block):
yield _ProfilerCtx(*block.arguments)
dialect.return_([])
@contextlib.contextmanager
def record(self, name: str):
i32 = ir.IntegerType.get_signless(32)
index = ir.IndexType.get()
name_id = self.spec.intern_name(name)
def store(modifier):
with self._profiler_ctx() as ctx:
# smem_buffer[offset] = modifier | name_id
# smem_buffer[offset + 1] = %clock
# offset += 2
offset = memref.load(ctx.offset, [])
base_ref = memref_slice(ctx.smem_buffer, offset)
base_ptr = memref_ptr(base_ref, memory_space=3)
i64 = ir.IntegerType.get_signless(64)
base_addr = llvm.ptrtoint(i64, base_ptr)
llvm.inline_asm(
ir.Type.parse("!llvm.void"),
[ctx.is_profiling_thread, base_addr, c(modifier | name_id, i32)],
"""
@$0 st.shared.v2.u32 [$1], {$2, %clock};
""",
"b,l,r",
has_side_effects=True,
)
new_offset = arith.addi(offset, c(2, index))
memref.store(new_offset, ctx.offset, [])
store(ProfilerSpec.ENTER)
yield
store(ProfilerSpec.EXIT)
def finalize(self, grid: tuple[int, ...], block: tuple[int, ...]):
index = ir.IndexType.get()
i32 = ir.IntegerType.get_signless(32)
with self._profiler_ctx() as ctx:
gpu.barrier() # Make sure all warpgroups are done.
block_idx = c(0, index)
for dim in gpu.Dimension: # pytype: disable=wrong-arg-types
block_idx = arith.addi(
arith.muli(block_idx, gpu.grid_dim(dim)), gpu.block_id(dim)
)
wg_idx = warpgroup_idx(sync=False)
wg_per_block = math.prod(block) // WARPGROUP_SIZE
global_wg_idx = arith.addi(
arith.muli(block_idx, c(wg_per_block, index)),
arith.index_cast(index, wg_idx),
)
start_offset = arith.muli(global_wg_idx, c(self.entries_per_wg, index))
wg_gmem_buffer = memref_slice(
ctx.gmem_buffer, ds(start_offset, self.entries_per_wg)
)
with when(ctx.is_profiling_thread):
memref.store(ctx.start, wg_gmem_buffer, [c(0, index)])
memref.store(smid(), wg_gmem_buffer, [c(1, index)])
num_traces = arith.index_cast(i32, memref.load(ctx.offset, []))
memref.store(num_traces, wg_gmem_buffer, [c(2, index)])
traces = vector.load(
ir.VectorType.get((self.entries_per_wg - 3,), i32),
ctx.smem_buffer,
[c(0, index)],
)
vector.store(traces, wg_gmem_buffer, [c(3, index)])
| OnDeviceProfiler |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-agentql/tests/test_browser_spec.py | {
"start": 516,
"end": 2423
} | class ____:
@pytest.fixture(autouse=True)
async def agentql_browser_tool(self):
test_data = get_testing_data()
# Use playwright tool to navigate to the test url
async_browser = await PlaywrightToolSpec.create_async_playwright_browser()
playwright_tool = PlaywrightToolSpec.from_async_browser(async_browser)
await playwright_tool.navigate_to(test_data["TEST_URL"])
# initialize extract data browser tool
agentql_browser_tool = AgentQLBrowserToolSpec(async_browser=async_browser)
yield agentql_browser_tool
await async_browser.close()
@pytest.fixture
def agent(self, agentql_browser_tool):
return FunctionAgent(
tools=agentql_browser_tool.to_tool_list(),
llm=OpenAI(model="gpt-4o"),
)
@pytest.mark.skipif(
"OPENAI_API_KEY" not in os.environ or "AGENTQL_API_KEY" not in os.environ,
reason="OPENAI_API_KEY or AGENTQL_API_KEY is not set",
)
@pytest.mark.asyncio
async def test_extract_web_data_browser_tool_call(self, agent):
test_data = get_testing_data()
res = await agent.run(
f"""
extract data with the following agentql query: {test_data["TEST_QUERY"]}
"""
)
tool_output = res.tool_calls[0]
assert tool_output.tool_name == "extract_web_data_from_browser"
assert tool_output.tool_kwargs == {
"query": test_data["TEST_QUERY"],
}
@pytest.mark.skipif(
"AGENTQL_API_KEY" not in os.environ,
reason="AGENTQL_API_KEY is not set",
)
async def test_get_web_element_browser_tool_call(self, agentql_browser_tool):
next_page_button = await agentql_browser_tool.get_web_element_from_browser(
prompt="button for buying it now",
)
assert next_page_button == "[tf623_id='965']"
| TestExtractDataBrowserTool |
python | pypa__setuptools | setuptools/_vendor/typeguard/_exceptions.py | {
"start": 617,
"end": 1121
} | class ____(Exception):
"""
Raised by typeguard's type checkers when a type mismatch is detected.
"""
def __init__(self, message: str):
super().__init__(message)
self._path: Deque[str] = deque()
def append_path_element(self, element: str) -> None:
self._path.append(element)
def __str__(self) -> str:
if self._path:
return " of ".join(self._path) + " " + str(self.args[0])
else:
return str(self.args[0])
| TypeCheckError |
python | tensorflow__tensorflow | tensorflow/python/distribute/moving_averages_test.py | {
"start": 2085,
"end": 7417
} | class ____(test.TestCase, parameterized.TestCase):
@combinations.generate(all_combinations)
def testReplicaModeWithoutZeroDebias(self, distribution):
replica_id = [0]
def replica_fn():
var = variables.Variable([10.0, 11.0])
val = constant_op.constant([1.0 + replica_id[0], 2.0 - replica_id[0]])
replica_id[0] += 1
decay = 0.25
assign = moving_averages.assign_moving_average(
var, val, decay, zero_debias=False)
return var, assign
with distribution.scope():
var, assign = distribution.extended.call_for_each_replica(replica_fn)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([10.0, 11.0], self.evaluate(var))
self.evaluate(distribution.experimental_local_results(assign))
# Mean of val across calls to replica_fn().
average_val = [1.0 + 0.5 * (replica_id[0] - 1),
2.0 - 0.5 * (replica_id[0] - 1)]
val_weight = 1.0 - 0.25
self.assertAllClose(
[10.0 * 0.25 + average_val[0] * val_weight,
11.0 * 0.25 + average_val[1] * val_weight],
self.evaluate(var))
@combinations.generate(all_combinations)
def testReplicaMode(self, distribution):
replica_id = [0]
def replica_fn():
var = variables.Variable([0.0, 0.0])
val = constant_op.constant([1.0 + replica_id[0], 2.0 - replica_id[0]])
replica_id[0] += 1
decay = 0.25
assign = moving_averages.assign_moving_average(var, val, decay)
return var, assign.op
with distribution.scope():
var, assign_op = distribution.extended.call_for_each_replica(replica_fn)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([0.0, 0.0], self.evaluate(var))
self.evaluate(distribution.experimental_local_results(assign_op))
# Mean of val across calls to replica_fn().
average_val = [1.0 + 0.5 * (replica_id[0] - 1),
2.0 - 0.5 * (replica_id[0] - 1)]
self.assertAllClose(average_val, self.evaluate(var))
@combinations.generate(all_combinations)
def testCrossDeviceWithoutZeroDebias(self, distribution):
with distribution.scope():
var = variables.Variable([10.0, 11.0])
val = constant_op.constant([1.0, 2.0])
decay = 0.25
# NOTE(josh11b): We currently generate an error if val is a PerReplica
# value.
assign = moving_averages.assign_moving_average(
var, val, decay, zero_debias=False)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([10.0, 11.0], self.evaluate(var))
self.evaluate(assign)
average_val = [1.0, 2.0]
val_weight = 1.0 - 0.25
self.assertAllClose(
[10.0 * 0.25 + average_val[0] * val_weight,
11.0 * 0.25 + average_val[1] * val_weight],
self.evaluate(var))
# Also try assign.op.
self.evaluate(assign.op)
orig_weight = 0.25 * 0.25
val_weight = 1.0 - orig_weight
self.assertAllClose(
[10.0 * orig_weight + average_val[0] * val_weight,
11.0 * orig_weight + average_val[1] * val_weight],
self.evaluate(var))
@combinations.generate(all_combinations)
def testCrossDevice(self, distribution):
with distribution.scope():
var = variables.Variable([0.0, 0.0])
val = variables.Variable([1.0, 2.0])
decay = 0.25
# NOTE(josh11b): We currently generate an error if val is a PerReplica
# value.
assign = moving_averages.assign_moving_average(var, val, decay)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([0.0, 0.0], self.evaluate(var))
self.evaluate(assign)
self.assertAllClose([1.0, 2.0], self.evaluate(var))
@combinations.generate(all_combinations_eager)
def testUpdateContext(self, distribution, use_function):
with distribution.scope():
var1 = variables.Variable([0.0, 0.0])
var2 = variables.Variable([0.0, 0.0])
var3 = variables.Variable([0.0, 0.0])
def update_fn(v, value):
v.assign_add(value)
moving_averages.assign_moving_average(var2, [2.0, 4.0], decay=0.25)
moving_averages.assign_moving_average(
var3, [2.0, 4.0], decay=0.25, zero_debias=False)
distribution.extended.update(var1, update_fn, ([1.0, 1.0],))
self.assertAllClose([2.0, 4.0], var2.read_value())
self.assertAllClose([1.5, 3.0], var3.read_value())
@combinations.generate(all_combinations)
def testAssignVariable(self, distribution):
def replica_fn():
var = variables.Variable([10.0, 11.0])
# Here we expect to check the case when input value are variable.
val = variables.Variable([1., 2.])
decay = 0.25
assign = moving_averages.assign_moving_average(
var, val, decay, zero_debias=False)
return var, assign
with distribution.scope():
var, assign = distribution.extended.call_for_each_replica(replica_fn)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([10.0, 11.0], self.evaluate(var))
self.evaluate(distribution.experimental_local_results(assign))
self.assertAllClose(
[10 * 0.25 + 1. * (1 - 0.25), 11 * 0.25 + 2. * (1 - 0.25)],
self.evaluate(var))
| AssignMovingAveragesTest |
python | doocs__leetcode | solution/2800-2899/2843.Count Symmetric Integers/Solution.py | {
"start": 0,
"end": 340
} | class ____:
def countSymmetricIntegers(self, low: int, high: int) -> int:
def f(x: int) -> bool:
s = str(x)
if len(s) & 1:
return False
n = len(s) // 2
return sum(map(int, s[:n])) == sum(map(int, s[n:]))
return sum(f(x) for x in range(low, high + 1))
| Solution |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 219299,
"end": 219534
} | class ____(VegaLiteSchema):
"""ConditionalAxisColor schema wrapper."""
_schema = {"$ref": "#/definitions/ConditionalAxisColor"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ConditionalAxisColor |
python | matplotlib__matplotlib | lib/mpl_toolkits/axisartist/axis_artist.py | {
"start": 3725,
"end": 6888
} | class ____(AttributeCopier, Line2D):
"""
Ticks are derived from `.Line2D`, and note that ticks themselves
are markers. Thus, you should use set_mec, set_mew, etc.
To change the tick size (length), you need to use
`set_ticksize`. To change the direction of the ticks (ticks are
in opposite direction of ticklabels by default), use
``set_tick_out(False)``
"""
def __init__(self, ticksize, tick_out=False, *, axis=None, **kwargs):
self._ticksize = ticksize
self.locs_angles_labels = []
self.set_tick_out(tick_out)
self._axis = axis
if self._axis is not None:
if "color" not in kwargs:
kwargs["color"] = "auto"
if "mew" not in kwargs and "markeredgewidth" not in kwargs:
kwargs["markeredgewidth"] = "auto"
Line2D.__init__(self, [0.], [0.], **kwargs)
self.set_snap(True)
def get_ref_artist(self):
# docstring inherited
return self._axis.majorTicks[0].tick1line
def set_color(self, color):
# docstring inherited
# Unlike the base Line2D.set_color, this also supports "auto".
if not cbook._str_equal(color, "auto"):
mcolors._check_color_like(color=color)
self._color = color
self.stale = True
def get_color(self):
return self.get_attribute_from_ref_artist("color")
def get_markeredgecolor(self):
return self.get_attribute_from_ref_artist("markeredgecolor")
def get_markeredgewidth(self):
return self.get_attribute_from_ref_artist("markeredgewidth")
def set_tick_out(self, b):
"""Set whether ticks are drawn inside or outside the axes."""
self._tick_out = b
def get_tick_out(self):
"""Return whether ticks are drawn inside or outside the axes."""
return self._tick_out
def set_ticksize(self, ticksize):
"""Set length of the ticks in points."""
self._ticksize = ticksize
def get_ticksize(self):
"""Return length of the ticks in points."""
return self._ticksize
def set_locs_angles(self, locs_angles):
self.locs_angles = locs_angles
_tickvert_path = Path([[0., 0.], [1., 0.]])
def draw(self, renderer):
if not self.get_visible():
return
gc = renderer.new_gc()
gc.set_foreground(self.get_markeredgecolor())
gc.set_linewidth(self.get_markeredgewidth())
gc.set_alpha(self._alpha)
path_trans = self.get_transform()
marker_transform = (Affine2D()
.scale(renderer.points_to_pixels(self._ticksize)))
if self.get_tick_out():
marker_transform.rotate_deg(180)
for loc, angle in self.locs_angles:
locs = path_trans.transform_non_affine(np.array([loc]))
if self.axes and not self.axes.viewLim.contains(*locs[0]):
continue
renderer.draw_markers(
gc, self._tickvert_path,
marker_transform + Affine2D().rotate_deg(angle),
Path(locs), path_trans.get_affine())
gc.restore()
| Ticks |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/serializers/test_workflow_serializer.py | {
"start": 496,
"end": 5660
} | class ____(TestCase):
def test_serialize_simple(self) -> None:
workflow = self.create_workflow(
name="hojicha",
organization_id=self.organization.id,
config={},
)
result = serialize(workflow)
assert result == {
"id": str(workflow.id),
"name": str(workflow.name),
"organizationId": str(self.organization.id),
"config": {},
"createdBy": None,
"dateCreated": workflow.date_added,
"dateUpdated": workflow.date_updated,
"triggers": None,
"actionFilters": [],
"environment": None,
"detectorIds": [],
"enabled": workflow.enabled,
"lastTriggered": None,
}
def test_serialize_full(self) -> None:
when_condition_group = self.create_data_condition_group(
organization_id=self.organization.id,
logic_type=DataConditionGroup.Type.ANY,
)
trigger_condition = self.create_data_condition(
condition_group=when_condition_group,
type=Condition.FIRST_SEEN_EVENT,
comparison=True,
condition_result=True,
)
workflow = self.create_workflow(
name="hojicha",
organization_id=self.organization.id,
config={},
when_condition_group=when_condition_group,
environment=self.environment,
created_by_id=self.user.id,
)
condition_group = self.create_data_condition_group(
organization_id=self.organization.id,
logic_type=DataConditionGroup.Type.ALL,
)
action = self.create_action(
type=Action.Type.EMAIL,
data={},
config={
"target_identifier": "123",
"target_type": ActionTarget.USER.value,
},
)
self.create_data_condition_group_action(condition_group=condition_group, action=action)
condition = self.create_data_condition(
condition_group=condition_group,
type=Condition.GREATER,
comparison=100,
condition_result=DetectorPriorityLevel.HIGH,
)
self.create_workflow_data_condition_group(
condition_group=condition_group,
workflow=workflow,
)
detector = self.create_detector()
self.create_detector_workflow(
detector=detector,
workflow=workflow,
)
history = WorkflowFireHistory.objects.create(
workflow=workflow,
group=self.group,
event_id=self.event.event_id,
)
# Too old, shouldn't be used.
WorkflowFireHistory.objects.create(
workflow=workflow,
group=self.group,
event_id=self.event.event_id,
)
history.date_added = workflow.date_added + timedelta(seconds=1)
history.save()
result = serialize(workflow)
assert result == {
"id": str(workflow.id),
"name": str(workflow.name),
"organizationId": str(self.organization.id),
"config": {},
"createdBy": str(self.user.id),
"dateCreated": workflow.date_added,
"dateUpdated": workflow.date_updated,
"triggers": {
"id": str(when_condition_group.id),
"organizationId": str(self.organization.id),
"logicType": DataConditionGroup.Type.ANY.value,
"conditions": [
{
"id": str(trigger_condition.id),
"type": "first_seen_event",
"comparison": True,
"conditionResult": True,
}
],
"actions": [],
},
"actionFilters": [
{
"id": str(condition_group.id),
"organizationId": str(self.organization.id),
"logicType": DataConditionGroup.Type.ALL.value,
"conditions": [
{
"id": str(condition.id),
"type": "gt",
"comparison": 100,
"conditionResult": DetectorPriorityLevel.HIGH.value,
}
],
"actions": [
{
"id": str(action.id),
"type": "email",
"data": {},
"integrationId": None,
"config": {"targetType": "user", "targetIdentifier": "123"},
"status": "active",
}
],
},
],
"environment": self.environment.name,
"detectorIds": [str(detector.id)],
"enabled": workflow.enabled,
"lastTriggered": history.date_added,
}
| TestWorkflowSerializer |
python | getsentry__sentry | src/sentry/snuba/dataset.py | {
"start": 1652,
"end": 2372
} | class ____(Enum):
Events = "events"
Sessions = "sessions"
Spans = "spans"
EAPItemsSpan = "eap_items_span"
EAPItems = "eap_items"
Transactions = "transactions"
MetricsSets = "metrics_sets"
MetricsCounters = "metrics_counters"
OrgMetricsCounters = "org_metrics_counters"
MetricsDistributions = "metrics_distributions"
GenericMetricsDistributions = "generic_metrics_distributions"
GenericMetricsSets = "generic_metrics_sets"
GenericMetricsCounters = "generic_metrics_counters"
GenericMetricsGauges = "generic_metrics_gauges"
GenericOrgMetricsCounters = "generic_org_metrics_counters"
IssuePlatform = "search_issues"
Functions = "functions"
@unique
| EntityKey |
python | gevent__gevent | src/gevent/backdoor.py | {
"start": 847,
"end": 2295
} | class ____(Greenlet):
# A greenlet that replaces sys.std[in/out/err] while running.
__slots__ = (
'stdin',
'stdout',
'prev_stdin',
'prev_stdout',
'prev_stderr',
)
def __init__(self, *args, **kwargs):
Greenlet.__init__(self, *args, **kwargs)
self.stdin = None
self.stdout = None
self.prev_stdin = None
self.prev_stdout = None
self.prev_stderr = None
def switch(self, *args, **kw):
if self.stdin is not None:
self.switch_in()
Greenlet.switch(self, *args, **kw)
def switch_in(self):
self.prev_stdin = sys.stdin
self.prev_stdout = sys.stdout
self.prev_stderr = sys.stderr
sys.stdin = self.stdin
sys.stdout = self.stdout
sys.stderr = self.stdout
def switch_out(self):
sys.stdin = self.prev_stdin
sys.stdout = self.prev_stdout
sys.stderr = self.prev_stderr
self.prev_stdin = self.prev_stdout = self.prev_stderr = None
def throw(self, *args, **kwargs):
# pylint:disable=arguments-differ
if self.prev_stdin is None and self.stdin is not None:
self.switch_in()
Greenlet.throw(self, *args, **kwargs)
def run(self):
try:
return Greenlet.run(self)
finally:
# Make sure to restore the originals.
self.switch_out()
| _Greenlet_stdreplace |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol8.py | {
"start": 369,
"end": 550
} | class ____(_BaseClass):
def __init__(self, p1: str, p2: str): ...
# This should generate an error because the
# parameter types don't match.
func1(_Class1)
func1(_Class2)
| _Class2 |
python | fastai__fastai | fastai/medical/imaging.py | {
"start": 6081,
"end": 14837
} | class ____(PILBase): _open_args,_tensor_cls,_show_args = {},TensorCTScan,TensorCTScan._show_args
# %% ../../nbs/60_medical.imaging.ipynb 50
@patch
@delegates(show_image)
def show(self:DcmDataset, scale=True, cmap=plt.cm.bone, min_px=-1100, max_px=None, **kwargs):
"Display a normalized dicom image by default"
px = (self.windowed(*scale) if isinstance(scale,tuple)
else self.hist_scaled(min_px=min_px,max_px=max_px,brks=scale) if isinstance(scale,(ndarray,Tensor))
else self.hist_scaled(min_px=min_px,max_px=max_px) if scale
else self.scaled_px)
show_image(px, cmap=cmap, **kwargs)
# %% ../../nbs/60_medical.imaging.ipynb 54
@patch
def show(self:DcmDataset, frames=1, scale=True, cmap=plt.cm.bone, min_px=-1100, max_px=None, **kwargs):
"Adds functionality to view dicom images where each file may have more than 1 frame"
px = (self.windowed(*scale) if isinstance(scale,tuple)
else self.hist_scaled(min_px=min_px,max_px=max_px,brks=scale) if isinstance(scale,(ndarray,Tensor))
else self.hist_scaled(min_px=min_px,max_px=max_px) if scale
else self.scaled_px)
if px.ndim > 2:
gh=[]
p = px.shape; print(f'{p[0]} frames per file')
for i in range(frames): u = px[i]; gh.append(u)
show_images(gh, **kwargs)
else: show_image(px, cmap=cmap, **kwargs)
# %% ../../nbs/60_medical.imaging.ipynb 56
@patch
def pct_in_window(dcm:DcmDataset, w, l):
"% of pixels in the window `(w,l)`"
px = dcm.scaled_px
return ((px > l-w//2) & (px < l+w//2)).float().mean().item()
# %% ../../nbs/60_medical.imaging.ipynb 59
def uniform_blur2d(x,s):
"Uniformly apply blurring"
w = x.new_ones(1,1,1,s)/s
# Factor 2d conv into 2 1d convs
x = unsqueeze(x, dim=0, n=4-x.dim())
r = (F.conv2d(x, w, padding=s//2))
r = (F.conv2d(r, w.transpose(-1,-2), padding=s//2)).cpu()[:,0]
return r.squeeze()
# %% ../../nbs/60_medical.imaging.ipynb 61
def gauss_blur2d(x,s):
"Apply gaussian_blur2d kornia filter"
s2 = int(s/4)*2+1
x2 = unsqueeze(x, dim=0, n=4-x.dim())
res = kornia.filters.gaussian_blur2d(x2, (s2,s2), (s,s), 'replicate')
return res.squeeze()
# %% ../../nbs/60_medical.imaging.ipynb 64
@patch
def mask_from_blur(x:Tensor, window, sigma=0.3, thresh=0.05, remove_max=True):
"Create a mask from the blurred image"
p = x.windowed(*window)
if remove_max: p[p==1] = 0
return gauss_blur2d(p, s=sigma*x.shape[-1])>thresh
# %% ../../nbs/60_medical.imaging.ipynb 65
@patch
def mask_from_blur(x:DcmDataset, window, sigma=0.3, thresh=0.05, remove_max=True):
"Create a mask from the blurred image"
return to_device(x.scaled_px).mask_from_blur(window, sigma, thresh, remove_max=remove_max)
# %% ../../nbs/60_medical.imaging.ipynb 67
def _px_bounds(x, dim):
c = x.sum(dim).nonzero().cpu()
idxs,vals = torch.unique(c[:,0],return_counts=True)
vs = torch.split_with_sizes(c[:,1],tuple(vals))
d = {k.item():v for k,v in zip(idxs,vs)}
default_u = tensor([0,x.shape[-1]-1])
b = [d.get(o,default_u) for o in range(x.shape[0])]
b = [tensor([o.min(),o.max()]) for o in b]
return torch.stack(b)
# %% ../../nbs/60_medical.imaging.ipynb 68
def mask2bbox(mask):
no_batch = mask.dim()==2
if no_batch: mask = mask[None]
bb1 = _px_bounds(mask,-1).t()
bb2 = _px_bounds(mask,-2).t()
res = torch.stack([bb1,bb2],dim=1).to(mask.device)
return res[...,0] if no_batch else res
# %% ../../nbs/60_medical.imaging.ipynb 70
def _bbs2sizes(crops, init_sz, use_square=True):
bb = crops.flip(1)
szs = (bb[1]-bb[0])
if use_square: szs = szs.max(0)[0][None].repeat((2,1))
overs = (szs+bb[0])>init_sz
bb[0][overs] = init_sz-szs[overs]
lows = (bb[0]/float(init_sz))
return lows,szs/float(init_sz)
# %% ../../nbs/60_medical.imaging.ipynb 71
def crop_resize(x, crops, new_sz):
# NB assumes square inputs. Not tested for non-square anythings!
bs = x.shape[0]
lows,szs = _bbs2sizes(crops, x.shape[-1])
if not isinstance(new_sz,(list,tuple)): new_sz = (new_sz,new_sz)
id_mat = tensor([[1.,0,0],[0,1,0]])[None].repeat((bs,1,1)).to(x.device)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
sp = F.affine_grid(id_mat, (bs,1,*new_sz))+1.
grid = sp*unsqueeze(szs.t(),1,n=2)+unsqueeze(lows.t()*2.,1,n=2)
return F.grid_sample(x.unsqueeze(1), grid-1)
# %% ../../nbs/60_medical.imaging.ipynb 75
@patch
def to_nchan(x:Tensor, wins, bins=None):
res = [x.windowed(*win) for win in wins]
if not isinstance(bins,int) or bins!=0: res.append(x.hist_scaled(bins).clamp(0,1))
dim = [0,1][x.dim()==3]
return TensorCTScan(torch.stack(res, dim=dim))
# %% ../../nbs/60_medical.imaging.ipynb 76
@patch
def to_nchan(x:DcmDataset, wins, bins=None):
return x.scaled_px.to_nchan(wins, bins)
# %% ../../nbs/60_medical.imaging.ipynb 80
@patch
def to_3chan(x:Tensor, win1, win2, bins=None):
return x.to_nchan([win1,win2],bins=bins)
# %% ../../nbs/60_medical.imaging.ipynb 81
@patch
def to_3chan(x:DcmDataset, win1, win2, bins=None):
return x.scaled_px.to_3chan(win1, win2, bins)
# %% ../../nbs/60_medical.imaging.ipynb 83
@patch
def save_jpg(x:Tensor|DcmDataset, path, wins, bins=None, quality=90):
"Save tensor or dicom image into `jpg` format"
fn = Path(path).with_suffix('.jpg')
x = (x.to_nchan(wins, bins)*255).byte()
im = Image.fromarray(x.permute(1,2,0).numpy(), mode=['RGB','CMYK'][x.shape[0]==4])
im.save(fn, quality=quality)
# %% ../../nbs/60_medical.imaging.ipynb 84
@patch
def to_uint16(x:Tensor|DcmDataset, bins=None):
"Convert into a unit16 array"
d = x.hist_scaled(bins).clamp(0,1) * 2**16
return d.numpy().astype(np.uint16)
# %% ../../nbs/60_medical.imaging.ipynb 85
@patch
def save_tif16(x:Tensor|DcmDataset, path, bins=None, compress=True):
"Save tensor or dicom image into `tiff` format"
fn = Path(path).with_suffix('.tif')
Image.fromarray(x.to_uint16(bins)).save(str(fn), compression='tiff_deflate' if compress else None)
# %% ../../nbs/60_medical.imaging.ipynb 87
@patch
def set_pixels(self:DcmDataset, px):
self.PixelData = px.tobytes()
self.Rows,self.Columns = px.shape
DcmDataset.pixel_array = property(DcmDataset.pixel_array.fget, set_pixels)
# %% ../../nbs/60_medical.imaging.ipynb 88
@patch
def zoom(self:DcmDataset, ratio):
"Zoom image by specified ratio"
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
self.set_pixels(ndimage.zoom(self.pixel_array, ratio))
# %% ../../nbs/60_medical.imaging.ipynb 92
@patch
def zoom_to(self:DcmDataset, sz):
"Change image size to specified pixel size"
if not isinstance(sz,(list,tuple)): sz=(sz,sz)
rows,cols = sz
self.zoom((rows/self.Rows,cols/self.Columns))
# %% ../../nbs/60_medical.imaging.ipynb 94
@patch(as_prop=True)
def shape(self:DcmDataset):
"Returns the shape of a dicom image as rows and columns"
return self.Rows,self.Columns
# %% ../../nbs/60_medical.imaging.ipynb 97
def _cast_dicom_special(x):
cls = type(x)
if not cls.__module__.startswith('pydicom'): return x
if cls.__base__ == object: return x
return cls.__base__(x)
def _split_elem(vals):
res = dict()
for val in vals:
k, v = val.keyword, val.value
if not isinstance(v,DcmMultiValue):
res[k] = v
continue
res[f'Multi{k}'] = 1
for i,o in enumerate(v): res[f'{k}{"" if i==0 else i}'] = o
return {k: _cast_dicom_special(v) for k, v in res.items()}
# %% ../../nbs/60_medical.imaging.ipynb 98
@patch
def as_dict(self:DcmDataset, px_summ=True, window=dicom_windows.brain):
"Convert the header of a dicom into a dictionary"
pxdata = (0x7fe0,0x0010)
vals = [self[o] for o in self.keys() if o != pxdata]
res = _split_elem(vals)
res['fname'] = self.filename
if not px_summ: return res
stats = 'min','max','mean','std'
try:
pxs = self.pixel_array
for f in stats: res['img_'+f] = getattr(pxs,f)()
res['img_pct_window'] = self.pct_in_window(*window)
except Exception as e:
for f in stats: res['img_'+f] = 0
print(res,e)
return res
# %% ../../nbs/60_medical.imaging.ipynb 101
def _dcm2dict(fn, window=dicom_windows.brain, px_summ=True, **kwargs):
return fn.dcmread().as_dict(window=window, px_summ=px_summ, **kwargs)
# %% ../../nbs/60_medical.imaging.ipynb 102
@delegates(parallel)
def _from_dicoms(cls, fns, n_workers=0, **kwargs):
return pd.DataFrame(parallel(_dcm2dict, fns, n_workers=n_workers, **kwargs))
pd.DataFrame.from_dicoms = classmethod(_from_dicoms)
# %% ../../nbs/60_medical.imaging.ipynb 105
| PILCTScan |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py | {
"start": 104363,
"end": 112663
} | class ____(GeneratedAirbyteDestination):
class OAuth20:
@public
def __init__(
self,
access_token: str,
refresh_token: str,
auth_type: Optional[str] = None,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
):
self.auth_type = check.opt_str_param(auth_type, "auth_type")
self.client_id = check.opt_str_param(client_id, "client_id")
self.client_secret = check.opt_str_param(client_secret, "client_secret")
self.access_token = check.str_param(access_token, "access_token")
self.refresh_token = check.str_param(refresh_token, "refresh_token")
class KeyPairAuthentication:
@public
def __init__(
self,
private_key: str,
auth_type: Optional[str] = None,
private_key_password: Optional[str] = None,
):
self.auth_type = check.opt_str_param(auth_type, "auth_type")
self.private_key = check.str_param(private_key, "private_key")
self.private_key_password = check.opt_str_param(
private_key_password, "private_key_password"
)
class UsernameAndPassword:
@public
def __init__(self, password: str):
self.password = check.str_param(password, "password")
class SelectAnotherOption:
@public
def __init__(self, method: str):
self.method = check.str_param(method, "method")
class RecommendedInternalStaging:
@public
def __init__(self, method: str):
self.method = check.str_param(method, "method")
class NoEncryption:
@public
def __init__(
self,
):
self.encryption_type = "none"
class AESCBCEnvelopeEncryption:
@public
def __init__(self, key_encrypting_key: Optional[str] = None):
self.encryption_type = "aes_cbc_envelope"
self.key_encrypting_key = check.opt_str_param(key_encrypting_key, "key_encrypting_key")
class AWSS3Staging:
@public
def __init__(
self,
method: str,
s3_bucket_name: str,
access_key_id: str,
secret_access_key: str,
encryption: Union[
"SnowflakeDestination.NoEncryption", "SnowflakeDestination.AESCBCEnvelopeEncryption"
],
s3_bucket_region: Optional[str] = None,
purge_staging_data: Optional[bool] = None,
file_name_pattern: Optional[str] = None,
):
self.method = check.str_param(method, "method")
self.s3_bucket_name = check.str_param(s3_bucket_name, "s3_bucket_name")
self.s3_bucket_region = check.opt_str_param(s3_bucket_region, "s3_bucket_region")
self.access_key_id = check.str_param(access_key_id, "access_key_id")
self.secret_access_key = check.str_param(secret_access_key, "secret_access_key")
self.purge_staging_data = check.opt_bool_param(purge_staging_data, "purge_staging_data")
self.encryption = check.inst_param(
encryption,
"encryption",
(SnowflakeDestination.NoEncryption, SnowflakeDestination.AESCBCEnvelopeEncryption),
)
self.file_name_pattern = check.opt_str_param(file_name_pattern, "file_name_pattern")
class GoogleCloudStorageStaging:
@public
def __init__(self, method: str, project_id: str, bucket_name: str, credentials_json: str):
self.method = check.str_param(method, "method")
self.project_id = check.str_param(project_id, "project_id")
self.bucket_name = check.str_param(bucket_name, "bucket_name")
self.credentials_json = check.str_param(credentials_json, "credentials_json")
class AzureBlobStorageStaging:
@public
def __init__(
self,
method: str,
azure_blob_storage_account_name: str,
azure_blob_storage_container_name: str,
azure_blob_storage_sas_token: str,
azure_blob_storage_endpoint_domain_name: Optional[str] = None,
):
self.method = check.str_param(method, "method")
self.azure_blob_storage_endpoint_domain_name = check.opt_str_param(
azure_blob_storage_endpoint_domain_name, "azure_blob_storage_endpoint_domain_name"
)
self.azure_blob_storage_account_name = check.str_param(
azure_blob_storage_account_name, "azure_blob_storage_account_name"
)
self.azure_blob_storage_container_name = check.str_param(
azure_blob_storage_container_name, "azure_blob_storage_container_name"
)
self.azure_blob_storage_sas_token = check.str_param(
azure_blob_storage_sas_token, "azure_blob_storage_sas_token"
)
    @public
    def __init__(
        self,
        name: str,
        host: str,
        role: str,
        warehouse: str,
        database: str,
        schema: str,
        username: str,
        credentials: Union[
            "SnowflakeDestination.OAuth20",
            "SnowflakeDestination.KeyPairAuthentication",
            "SnowflakeDestination.UsernameAndPassword",
        ],
        loading_method: Union[
            "SnowflakeDestination.SelectAnotherOption",
            "SnowflakeDestination.RecommendedInternalStaging",
            "SnowflakeDestination.AWSS3Staging",
            "SnowflakeDestination.GoogleCloudStorageStaging",
            "SnowflakeDestination.AzureBlobStorageStaging",
        ],
        jdbc_url_params: Optional[str] = None,
    ):
        """Airbyte Destination for Snowflake.

        Documentation can be found at https://docs.airbyte.com/integrations/destinations/snowflake

        Args:
            name (str): The name of the destination.
            host (str): Enter your Snowflake account's locator (in the format ...snowflakecomputing.com)
            role (str): Enter the role that you want to use to access Snowflake
            warehouse (str): Enter the name of the warehouse that you want to sync data into
            database (str): Enter the name of the database you want to sync data into
            schema (str): Enter the name of the default schema
            username (str): Enter the name of the user you want to use to access the database
            credentials (Union[SnowflakeDestination.OAuth20, SnowflakeDestination.KeyPairAuthentication, SnowflakeDestination.UsernameAndPassword]): Authentication configuration; one of the nested credential classes
            jdbc_url_params (Optional[str]): Enter the additional properties to pass to the JDBC URL string when connecting to the database (formatted as key=value pairs separated by the symbol &). Example: key1=value1&key2=value2&key3=value3
            loading_method (Union[SnowflakeDestination.SelectAnotherOption, SnowflakeDestination.RecommendedInternalStaging, SnowflakeDestination.AWSS3Staging, SnowflakeDestination.GoogleCloudStorageStaging, SnowflakeDestination.AzureBlobStorageStaging]): Select a data staging method
        """
        self.host = check.str_param(host, "host")
        self.role = check.str_param(role, "role")
        self.warehouse = check.str_param(warehouse, "warehouse")
        self.database = check.str_param(database, "database")
        self.schema = check.str_param(schema, "schema")
        self.username = check.str_param(username, "username")
        # Credentials must be an instance of one of the nested auth classes.
        self.credentials = check.inst_param(
            credentials,
            "credentials",
            (
                SnowflakeDestination.OAuth20,
                SnowflakeDestination.KeyPairAuthentication,
                SnowflakeDestination.UsernameAndPassword,
            ),
        )
        self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")
        # Loading method must be an instance of one of the nested staging classes.
        self.loading_method = check.inst_param(
            loading_method,
            "loading_method",
            (
                SnowflakeDestination.SelectAnotherOption,
                SnowflakeDestination.RecommendedInternalStaging,
                SnowflakeDestination.AWSS3Staging,
                SnowflakeDestination.GoogleCloudStorageStaging,
                SnowflakeDestination.AzureBlobStorageStaging,
            ),
        )
        # Base class records the destination type label and user-given name.
        super().__init__("Snowflake", name)
| SnowflakeDestination |
python | spyder-ide__spyder | spyder/plugins/explorer/widgets/explorer.py | {
"start": 5782,
"end": 63579
} | class ____(QTreeView, SpyderWidgetMixin):
"""Base file/directory tree view."""
# Signals
sig_file_created = Signal(str)
"""
This signal is emitted when a file is created
Parameters
----------
module: str
Path to the created file.
"""
sig_open_interpreter_requested = Signal(str)
"""
This signal is emitted when the interpreter opened is requested
Parameters
----------
module: str
Path to use as working directory of interpreter.
"""
sig_module_created = Signal(str)
"""
This signal is emitted when a new python module is created.
Parameters
----------
module: str
Path to the new module created.
"""
sig_redirect_stdio_requested = Signal(bool)
"""
This signal is emitted when redirect stdio is requested.
Parameters
----------
enable: bool
Enable/Disable standard input/output redirection.
"""
sig_removed = Signal(str)
"""
This signal is emitted when a file is removed.
Parameters
----------
path: str
File path removed.
"""
sig_renamed = Signal(str, str)
"""
This signal is emitted when a file is renamed.
Parameters
----------
old_path: str
Old path for renamed file.
new_path: str
New path for renamed file.
"""
sig_run_requested = Signal(str)
"""
This signal is emitted to request running a file.
Parameters
----------
path: str
File path to run.
"""
sig_tree_removed = Signal(str)
"""
This signal is emitted when a folder is removed.
Parameters
----------
path: str
Folder to remove.
"""
sig_tree_renamed = Signal(str, str)
"""
This signal is emitted when a folder is renamed.
Parameters
----------
old_path: str
Old path for renamed folder.
new_path: str
New path for renamed folder.
"""
sig_open_file_requested = Signal(str)
"""
This signal is emitted to request opening a new file with Spyder.
Parameters
----------
path: str
File path to run.
"""
    def __init__(self, parent=None):
        """Initialize the DirView.

        Parameters
        ----------
        parent: QWidget
            Parent QWidget of the widget.
        """
        if not PYSIDE2:
            super().__init__(parent=parent, class_parent=parent)
        else:
            # NOTE(review): explicit dual-base init appears to work around a
            # PySide2 limitation with the cooperative super() call — confirm.
            QTreeView.__init__(self, parent)
            SpyderWidgetMixin.__init__(self, class_parent=parent)

        # Attributes
        self._parent = parent
        self._last_column = 0       # last column passed to sortByColumn
        self._last_order = True     # toggled on every sortByColumn call
        self._scrollbar_positions = None
        self._to_be_loaded = None
        self.__expanded_state = None
        self.common_actions = None
        self.filter_on = False      # whether name filters are currently applied
        self.expanded_or_colapsed_by_mouse = False

        # Widgets
        self.fsmodel = None
        self.menu = None
        self.header_menu = None
        header = self.header()

        # Signals
        header.customContextMenuRequested.connect(self.show_header_menu)

        # Style adjustments
        self._style = DirViewStyle(None)
        self._style.setParent(self)
        self.setStyle(self._style)
        self.setItemDelegate(DirViewItemDelegate(self))

        # Setup
        self.setup_fs_model()
        self.setSelectionMode(
            QAbstractItemView.SelectionMode.ExtendedSelection
        )
        header.setContextMenuPolicy(Qt.CustomContextMenu)

        # Track mouse movements. This activates the mouseMoveEvent declared
        # below.
        self.setMouseTracking(True)
# ---- SpyderWidgetMixin API
# ------------------------------------------------------------------------
def setup(self):
self.setup_view()
# New actions
new_file_action = self.create_action(
DirViewActions.NewFile,
text=_("File..."),
icon=self.create_icon('TextFileIcon'),
triggered=self.new_file,
)
new_module_action = self.create_action(
DirViewActions.NewModule,
text=_("Python file..."),
icon=self.create_icon('python'),
triggered=self.new_module,
)
new_folder_action = self.create_action(
DirViewActions.NewFolder,
text=_("Folder..."),
icon=self.create_icon('folder_new'),
triggered=self.new_folder,
)
new_package_action = self.create_action(
DirViewActions.NewPackage,
text=_("Python package..."),
icon=self.create_icon('package_new'),
triggered=self.new_package,
)
# Open actions
self.open_with_spyder_action = self.create_action(
DirViewActions.OpenWithSpyder,
text=_("Open in Spyder"),
icon=self.create_icon('edit'),
triggered=self.open,
)
self.open_external_action = self.create_action(
DirViewActions.OpenWithSystem,
text=_("Open externally"),
triggered=self.open_external,
)
self.open_external_action_2 = self.create_action(
DirViewActions.OpenWithSystem2,
text=_("Default external application"),
triggered=self.open_external,
register_shortcut=False,
)
# File management actions
delete_action = self.create_action(
DirViewActions.Delete,
text=_("Delete..."),
icon=self.create_icon('editdelete'),
triggered=self.delete,
)
rename_action = self.create_action(
DirViewActions.Rename,
text=_("Rename..."),
icon=self.create_icon('rename'),
triggered=self.rename,
)
self.move_action = self.create_action(
DirViewActions.Move,
text=_("Move..."),
icon=self.create_icon('move'),
triggered=self.move,
)
# Copy/Paste actions
self.copy_action = self.create_action(
DirViewActions.Copy,
text=_("Copy"),
icon=self.create_icon('editcopy'),
triggered=self.copy_file_clipboard,
register_shortcut=True
)
self.paste_action = self.create_action(
DirViewActions.Paste,
text=_("Paste"),
icon=self.create_icon('editpaste'),
triggered=self.save_file_clipboard,
register_shortcut=True,
)
self.copy_absolute_path_action = self.create_action(
DirViewActions.CopyAbsolutePath,
text=_("Copy absolute path"),
triggered=self.copy_absolute_path,
register_shortcut=True,
)
self.copy_relative_path_action = self.create_action(
DirViewActions.CopyRelativePath,
text=_("Copy relative path"),
triggered=self.copy_relative_path,
register_shortcut=True
)
# Show actions
if sys.platform == 'darwin':
show_in_finder_text = _("Show in Finder")
else:
show_in_finder_text = _("Show in folder")
show_in_system_explorer_action = self.create_action(
DirViewActions.ShowInSystemExplorer,
text=show_in_finder_text,
triggered=self.show_in_external_file_explorer,
)
# Version control actions
self.vcs_commit_action = self.create_action(
DirViewActions.VersionControlCommit,
# Don't translate this text because it makes little sense in
# languages other than English.
# Fixes spyder-ide/spyder#21959
text="Git commit",
icon=self.create_icon('vcs_commit'),
triggered=lambda: self.vcs_command('commit'),
)
self.vcs_log_action = self.create_action(
DirViewActions.VersionControlBrowse,
text=_("Browse Git repository"),
icon=self.create_icon('vcs_browse'),
triggered=lambda: self.vcs_command('browse'),
)
# Common actions
self.hidden_action = self.create_action(
DirViewActions.ToggleHiddenFiles,
text=_("Show hidden files"),
toggled=True,
initial=self.get_conf('show_hidden'),
option='show_hidden'
)
self.create_action(
DirViewActions.ToggleSingleClick,
text=_("Single click to open"),
toggled=True,
initial=self.get_conf('single_click_to_open'),
option='single_click_to_open'
)
# IPython console actions
# TODO: Move this option to the ipython console setup
self.open_interpreter_action = self.create_action(
DirViewActions.OpenInterpreter,
text=_("Open IPython console here"),
icon=self.create_icon('ipython_console'),
triggered=self.open_interpreter,
)
# TODO: Move this option to the ipython console setup
run_action = self.create_action(
DirViewActions.Run,
text=_("Run"),
icon=self.create_icon('run'),
triggered=self.run,
)
# Notebook Actions
ipynb_convert_action = self.create_action(
DirViewActions.ConvertNotebook,
_("Convert to Python file"),
icon=ima.icon('python'),
triggered=self.convert_notebooks
)
# Header Actions
size_column_action = self.create_action(
DirViewActions.ToggleSizeColumn,
text=_('Size'),
toggled=True,
initial=self.get_conf('size_column'),
register_shortcut=False,
option='size_column'
)
type_column_action = self.create_action(
DirViewActions.ToggleTypeColumn,
text=_('Type') if sys.platform == 'darwin' else _('Type'),
toggled=True,
initial=self.get_conf('type_column'),
register_shortcut=False,
option='type_column'
)
date_column_action = self.create_action(
DirViewActions.ToggleDateColumn,
text=_("Date modified"),
toggled=True,
initial=self.get_conf('date_column'),
register_shortcut=False,
option='date_column'
)
# Header Context Menu
self.header_menu = self.create_menu(DirViewMenus.Header)
for item in [size_column_action, type_column_action,
date_column_action]:
self.add_item_to_menu(
item,
menu=self.header_menu,
section=DirViewHeaderMenuSections.Main,
)
# New submenu
new_submenu = self.create_menu(
DirViewMenus.New,
_('New'),
)
for item in [new_file_action, new_folder_action]:
self.add_item_to_menu(
item,
menu=new_submenu,
section=DirViewNewSubMenuSections.General,
)
for item in [new_module_action, new_package_action]:
self.add_item_to_menu(
item,
menu=new_submenu,
section=DirViewNewSubMenuSections.Language,
)
# Open with submenu
self.open_with_submenu = self.create_menu(
DirViewMenus.OpenWith,
_('Open with'),
)
# Context submenu
self.context_menu = self.create_menu(DirViewMenus.Context)
for item in [new_submenu, run_action,
self.open_with_spyder_action,
self.open_with_submenu,
self.open_external_action,
delete_action, rename_action, self.move_action]:
self.add_item_to_menu(
item,
menu=self.context_menu,
section=DirViewContextMenuSections.New,
)
# Copy/Paste section
for item in [self.copy_action, self.paste_action,
self.copy_absolute_path_action,
self.copy_relative_path_action]:
self.add_item_to_menu(
item,
menu=self.context_menu,
section=DirViewContextMenuSections.CopyPaste,
)
self.add_item_to_menu(
show_in_system_explorer_action,
menu=self.context_menu,
section=DirViewContextMenuSections.System,
)
# Version control section
for item in [self.vcs_commit_action, self.vcs_log_action]:
self.add_item_to_menu(
item,
menu=self.context_menu,
section=DirViewContextMenuSections.VersionControl
)
for item in [self.open_interpreter_action, ipynb_convert_action]:
self.add_item_to_menu(
item,
menu=self.context_menu,
section=DirViewContextMenuSections.Extras,
)
# Signals
self.context_menu.aboutToShow.connect(self.update_actions)
@on_conf_change(option=['size_column', 'type_column', 'date_column',
'name_filters', 'show_hidden',
'single_click_to_open'])
def on_conf_update(self, option, value):
if option == 'size_column':
self.setColumnHidden(DirViewColumns.Size, not value)
elif option == 'type_column':
self.setColumnHidden(DirViewColumns.Type, not value)
elif option == 'date_column':
self.setColumnHidden(DirViewColumns.Date, not value)
elif option == 'name_filters':
if self.filter_on:
self.filter_files(value)
elif option == 'show_hidden':
self.set_show_hidden(value)
elif option == 'single_click_to_open':
self.set_single_click_to_open(value)
    def update_actions(self):
        """Show/enable context-menu actions according to the selection."""
        fnames = self.get_selected_filenames()
        if fnames:
            if osp.isdir(fnames[0]):
                dirname = fnames[0]
            else:
                dirname = osp.dirname(fnames[0])
            basedir = fixpath(osp.dirname(fnames[0]))
            only_dirs = fnames and all([osp.isdir(fname) for fname in fnames])
            only_files = all([osp.isfile(fname) for fname in fnames])
            only_valid = all([encoding.is_text_file(fna) for fna in fnames])
        else:
            only_files = False
            only_valid = False
            only_dirs = False
            dirname = ''
            basedir = ''

        vcs_visible = vcs.is_vcs_repository(dirname)

        # Make actions visible conditionally
        # Moving is only offered for files sharing the same parent folder.
        self.move_action.setVisible(
            all(
                [fixpath(osp.dirname(fname)) == basedir for fname in fnames])
            and only_files
        )
        self.open_external_action.setVisible(False)
        self.open_interpreter_action.setVisible(only_dirs)
        self.open_with_spyder_action.setVisible(only_files and only_valid)
        self.open_with_submenu.menuAction().setVisible(False)
        clipboard = QApplication.clipboard()
        has_urls = clipboard.mimeData().hasUrls()
        # Paste only makes sense when the clipboard holds file URLs.
        self.paste_action.setDisabled(not has_urls)

        # VCS support is quite limited for now, so we are enabling the VCS
        # related actions only when a single file/folder is selected:
        self.vcs_commit_action.setVisible(vcs_visible)
        self.vcs_log_action.setVisible(vcs_visible)

        if only_files:
            if len(fnames) == 1:
                assoc = self.get_file_associations(fnames[0])
            elif len(fnames) > 1:
                assoc = self.get_common_file_associations(fnames)

            if len(assoc) >= 1:
                # There are registered applications: rebuild the
                # "Open with" submenu from them.
                actions = self._create_file_associations_actions()
                self.open_with_submenu.menuAction().setVisible(True)
                self.open_with_submenu.clear_actions()
                for action in actions:
                    self.add_item_to_menu(
                        action,
                        menu=self.open_with_submenu,
                        section=DirViewOpenWithSubMenuSections.Main,
                    )
            else:
                self.open_external_action.setVisible(True)

        fnames = self.get_selected_filenames()
        only_notebooks = all([osp.splitext(fname)[1] == '.ipynb'
                              for fname in fnames])
        only_modules = all([osp.splitext(fname)[1] in ('.py', '.pyw', '.ipy')
                            for fname in fnames])

        # Notebook conversion needs nbconvert to be importable.
        nb_visible = only_notebooks and nbexporter is not None
        self.get_action(DirViewActions.ConvertNotebook).setVisible(nb_visible)
        self.get_action(DirViewActions.Run).setVisible(only_modules)
def _create_file_associations_actions(self, fnames=None):
"""
Create file association actions.
"""
if fnames is None:
fnames = self.get_selected_filenames()
actions = []
only_files = all([osp.isfile(fname) for fname in fnames])
if only_files:
if len(fnames) == 1:
assoc = self.get_file_associations(fnames[0])
elif len(fnames) > 1:
assoc = self.get_common_file_associations(fnames)
if len(assoc) >= 1:
for app_name, fpath in assoc:
text = app_name
if not (os.path.isfile(fpath) or os.path.isdir(fpath)):
text += _(' (Application not found!)')
try:
# Action might have been created already
open_assoc = self.open_association
open_with_action = self.create_action(
app_name,
text=text,
triggered=lambda x, y=fpath: open_assoc(y),
register_shortcut=False,
)
except Exception:
open_with_action = self.get_action(app_name)
# Disconnect previous signal in case the app path
# changed
try:
open_with_action.triggered.disconnect()
except Exception:
pass
# Reconnect the trigger signal
open_with_action.triggered.connect(
lambda x, y=fpath: self.open_association(y)
)
if not (os.path.isfile(fpath) or os.path.isdir(fpath)):
open_with_action.setDisabled(True)
actions.append(open_with_action)
actions.append(self.open_external_action_2)
return actions
# ---- Qt overrides
# ------------------------------------------------------------------------
    def sortByColumn(self, column, order=Qt.AscendingOrder):
        """Override Qt method.

        Sort the view by *column* and remember the column/order used.
        """
        header = self.header()
        header.setSortIndicatorShown(True)
        QTreeView.sortByColumn(self, column, order)
        # NOTE(review): the sort indicator is always drawn on column 0,
        # even when sorting by another column — confirm this is intended.
        header.setSortIndicator(0, order)
        self._last_column = column
        # NOTE(review): the stored order is toggled on every call rather
        # than taken from *order* — confirm this is intended.
        self._last_order = not self._last_order
def viewportEvent(self, event):
"""Reimplement Qt method"""
# Prevent Qt from crashing or showing warnings like:
# "QSortFilterProxyModel: index from wrong model passed to
# mapFromSource", probably due to the fact that the file system model
# is being built. See spyder-ide/spyder#1250.
#
# This workaround was inspired by the following KDE bug:
# https://bugs.kde.org/show_bug.cgi?id=172198
#
# Apparently, this is a bug from Qt itself.
self.executeDelayedItemsLayout()
return QTreeView.viewportEvent(self, event)
def contextMenuEvent(self, event):
"""Override Qt method"""
# Needed to handle not initialized menu.
# See spyder-ide/spyder#6975
try:
self.context_menu.popup(event.globalPos())
except AttributeError:
pass
    def keyPressEvent(self, event):
        """Handle keyboard shortcuts and special keys."""
        # String form of the pressed keys, comparable with the string form
        # of the registered action shortcuts below.
        key_seq = keyevent_to_keysequence_str(event)

        if event.key() in (Qt.Key_Enter, Qt.Key_Return):
            self.clicked()
        elif event.key() == Qt.Key_F2:
            self.rename()
        elif event.key() == Qt.Key_Delete:
            self.delete()
        elif event.key() == Qt.Key_Backspace:
            self.go_to_parent_directory()
        elif key_seq == self.copy_action.shortcut().toString():
            self.copy_file_clipboard()
        elif key_seq == self.paste_action.shortcut().toString():
            self.save_file_clipboard()
        elif key_seq == self.copy_absolute_path_action.shortcut().toString():
            self.copy_absolute_path()
        elif key_seq == self.copy_relative_path_action.shortcut().toString():
            self.copy_relative_path()
        else:
            # Let Qt handle navigation and any other keys.
            QTreeView.keyPressEvent(self, event)
def mouseDoubleClickEvent(self, event):
"""Handle double clicks."""
super().mouseDoubleClickEvent(event)
if not self.get_conf('single_click_to_open'):
self.clicked(index=self.indexAt(event.pos()))
    def mousePressEvent(self, event):
        """
        Detect when a directory was expanded or collapsed by clicking
        on its arrow.

        Taken from https://stackoverflow.com/a/13142586/438386
        """
        clicked_index = self.indexAt(event.pos())
        if clicked_index.isValid():
            vrect = self.visualRect(clicked_index)
            item_identation = vrect.x() - self.visualRect(self.rootIndex()).x()
            if event.pos().x() < item_identation:
                # Click landed left of the item text, i.e. on the
                # expand/collapse arrow.
                self.expanded_or_colapsed_by_mouse = True
            else:
                self.expanded_or_colapsed_by_mouse = False
        else:
            # Clear selection if users click on an empty region. This improves
            # the context menu UX because it makes the current directory to be
            # used for its operations (i.e. creating a new folder or directory,
            # copying its path, etc).
            self.selectionModel().clear()
        super().mousePressEvent(event)
def mouseReleaseEvent(self, event):
"""Handle single clicks."""
super().mouseReleaseEvent(event)
if self.get_conf('single_click_to_open'):
self.clicked(index=self.indexAt(event.pos()))
def mouseMoveEvent(self, event):
"""Actions to take with mouse movements."""
# To hide previous tooltip
QToolTip.hideText()
index = self.indexAt(event.pos())
if index.isValid():
if self.get_conf('single_click_to_open'):
vrect = self.visualRect(index)
item_identation = (
vrect.x() - self.visualRect(self.rootIndex()).x()
)
if event.pos().x() > item_identation:
# When hovering over directories or files
self.setCursor(Qt.PointingHandCursor)
else:
# On every other element
self.setCursor(Qt.ArrowCursor)
self.setToolTip(self.get_filename(index))
super().mouseMoveEvent(event)
def dragEnterEvent(self, event):
"""Drag and Drop - Enter event"""
event.setAccepted(event.mimeData().hasFormat("text/plain"))
def dragMoveEvent(self, event):
"""Drag and Drop - Move event"""
if (event.mimeData().hasFormat("text/plain")):
event.setDropAction(Qt.MoveAction)
event.accept()
else:
event.ignore()
def startDrag(self, dropActions):
"""Reimplement Qt Method - handle drag event"""
data = QMimeData()
data.setUrls(
[
QUrl.fromLocalFile(fname)
for fname in self.get_selected_filenames()
]
)
drag = QDrag(self)
drag.setMimeData(data)
drag.exec_()
# ---- Model
# ------------------------------------------------------------------------
def setup_fs_model(self):
"""Setup filesystem model"""
self.fsmodel = QFileSystemModel(self)
self.fsmodel.setNameFilterDisables(False)
    def install_model(self):
        """Install the filesystem model as this view's model."""
        self.setModel(self.fsmodel)
def setup_view(self):
"""Setup view"""
self.install_model()
self.fsmodel.directoryLoaded.connect(
lambda: self.resizeColumnToContents(0))
self.setAnimated(False)
self.setSortingEnabled(True)
self.sortByColumn(0, Qt.AscendingOrder)
self.fsmodel.modelReset.connect(self.reset_icon_provider)
self.reset_icon_provider()
# ---- File/Dir Helpers
# ------------------------------------------------------------------------
def get_filename(self, index):
"""Return filename associated with *index*"""
if index:
return osp.normpath(str(self.fsmodel.filePath(index)))
else:
return osp.normpath(str(self.fsmodel.rootPath()))
    def get_index(self, filename):
        """Return the model index associated with *filename*."""
        return self.fsmodel.index(filename)
def get_selected_filenames(self):
"""Return selected filenames"""
fnames = []
if (
self.selectionMode()
== QAbstractItemView.SelectionMode.ExtendedSelection
):
if self.selectionModel() is not None:
fnames = [self.get_filename(idx) for idx in
self.selectionModel().selectedRows()]
else:
fnames = [self.get_filename(self.currentIndex())]
if not fnames:
fnames = [self.get_filename(self.currentIndex())]
return fnames
def get_dirname(self, index):
"""Return dirname associated with *index*"""
fname = self.get_filename(index)
if fname:
if osp.isdir(fname):
return fname
else:
return osp.dirname(fname)
# ---- General actions API
# ------------------------------------------------------------------------
    def show_header_menu(self, pos):
        """Display the header menu at *pos* (widget coordinates)."""
        self.header_menu.popup(self.mapToGlobal(pos))
    def clicked(self, index=None):
        """
        Selected item was single/double-clicked or enter/return was pressed.
        """
        fnames = self.get_selected_filenames()

        # Don't do anything when clicking on the arrow next to a directory
        # to expand/collapse it. If clicking on its name, use it as `fnames`.
        if index and index.isValid():
            fname = self.get_filename(index)
            if osp.isdir(fname):
                if self.expanded_or_colapsed_by_mouse:
                    return
                else:
                    fnames = [fname]

        # Open files or directories
        for fname in fnames:
            if osp.isdir(fname):
                self.directory_clicked(fname, index)
            else:
                # Files with a registered association open with it;
                # otherwise fall back to the generic open() path.
                if len(fnames) == 1:
                    assoc = self.get_file_associations(fnames[0])
                elif len(fnames) > 1:
                    assoc = self.get_common_file_associations(fnames)
                if assoc:
                    self.open_association(assoc[0][-1])
                else:
                    self.open([fname])
    def directory_clicked(self, dirname, index):
        """
        Handle directories being clicked.

        Subclasses must override this; the base implementation raises.

        Parameters
        ----------
        dirname: str
            Path to the clicked directory.
        index: QModelIndex
            Index of the directory.
        """
        raise NotImplementedError('To be implemented by subclasses')
@Slot()
def open(self, fnames=None):
"""Open files with the appropriate application"""
if fnames is None or isinstance(fnames, bool):
fnames = self.get_selected_filenames()
for fname in fnames:
if osp.isfile(fname) and encoding.is_text_file(fname):
self.sig_open_file_requested.emit(fname)
else:
self.open_outside_spyder([fname])
    @Slot()
    def open_association(self, app_path):
        """Open files with given application executable path."""
        if not (os.path.isdir(app_path) or os.path.isfile(app_path)):
            # Application is missing: fake a non-zero return code so the
            # launch-error check below reports it to the user.
            return_codes = {app_path: 1}
            app_path = None
        else:
            return_codes = {}

        if app_path:
            fnames = self.get_selected_filenames()
            return_codes = programs.open_files_with_application(app_path,
                                                                fnames)
        self.check_launch_error_codes(return_codes)
@Slot()
def open_external(self, fnames=None):
"""Open files with default application"""
if fnames is None or isinstance(fnames, bool):
fnames = self.get_selected_filenames()
for fname in fnames:
self.open_outside_spyder([fname])
def open_outside_spyder(self, fnames):
"""
Open file outside Spyder with the appropriate application.
If this does not work, opening unknown file in Spyder, as text file.
"""
for path in sorted(fnames):
path = file_uri(path)
ok = start_file(path)
if not ok and encoding.is_text_file(path):
self.sig_open_file_requested.emit(path)
    def remove_tree(self, dirname):
        """
        Remove whole directory tree

        Reimplemented in project explorer widget
        """
        # Keep moving to trash until the directory is gone.
        # NOTE(review): this loops indefinitely if moveToTrash keeps
        # failing — confirm that's acceptable here.
        while osp.exists(dirname):
            QFile.moveToTrash(dirname)
    def delete_file(self, fname, multiple, yes_to_all):
        """Delete file

        Parameters
        ----------
        fname: str
            Path (file or directory) to move to the trash.
        multiple: bool
            True when part of a multi-file deletion, which adds the
            "Yes to All" and "Cancel" buttons to the prompt.
        yes_to_all: bool or None
            None while the user is still prompted per file; True once
            "Yes to All" was chosen (skips further prompts).

        Returns
        -------
        bool or None
            The updated yes_to_all flag, or False when the user canceled
            or an error occurred.
        """
        if multiple:
            buttons = (QMessageBox.Yes | QMessageBox.YesToAll |
                       QMessageBox.No | QMessageBox.Cancel)
        else:
            buttons = QMessageBox.Yes | QMessageBox.No
        if yes_to_all is None:
            answer = QMessageBox.warning(
                self, _("Delete"),
                _("Do you really want to delete <b>%s</b>?\n"
                  "<br><br>"
                  "<b>Note</b>: This file or directory will be moved to the "
                  "trash can."
                  ) % osp.basename(fname), buttons)
            if answer == QMessageBox.No:
                return yes_to_all
            elif answer == QMessageBox.Cancel:
                return False
            elif answer == QMessageBox.YesToAll:
                yes_to_all = True
        try:
            if osp.isfile(fname):
                misc.remove_file(fname)
                self.sig_removed.emit(fname)
            else:
                self.remove_tree(fname)
                self.sig_tree_removed.emit(fname)
            return yes_to_all
        except EnvironmentError as error:
            action_str = _('delete')
            QMessageBox.critical(
                self, _("Project Explorer"),
                _("<b>Unable to %s <i>%s</i></b><br><br>Error message:<br>%s"
                  ) % (action_str, fname, str(error)))
        return False
    @Slot()
    def delete(self, fnames=None):
        """Delete files

        Directories containing a Spyder project are protected and must be
        removed through the Projects menu instead.
        """
        if fnames is None or isinstance(fnames, bool):
            fnames = self.get_selected_filenames()
        multiple = len(fnames) > 1
        yes_to_all = None
        for fname in fnames:
            spyproject_path = osp.join(fname, '.spyproject')
            if osp.isdir(fname) and osp.exists(spyproject_path):
                QMessageBox.information(
                    self, _('File Explorer'),
                    _("The current directory contains a project.<br><br>"
                      "If you want to delete the project, please go to "
                      "<b>Projects</b> » <b>Delete Project</b>"))
            else:
                yes_to_all = self.delete_file(fname, multiple, yes_to_all)
                if yes_to_all is not None and not yes_to_all:
                    # Canceled
                    break
    def rename_file(self, fname):
        """Rename file

        Prompts for a new name, confirms overwrites, emits the appropriate
        renamed signal and returns the new path on success.
        """
        path, valid = QInputDialog.getText(
            self, _('Rename'), _('New name:'), QLineEdit.Normal,
            osp.basename(fname))
        if valid:
            path = osp.join(osp.dirname(fname), str(path))
            if path == fname:
                # Name unchanged: nothing to do.
                return
            if osp.exists(path):
                answer = QMessageBox.warning(
                    self, _("Rename"),
                    _("Do you really want to rename <b>%s</b> and "
                      "overwrite the existing file <b>%s</b>?"
                      ) % (osp.basename(fname), osp.basename(path)),
                    QMessageBox.Yes | QMessageBox.No)
                if answer == QMessageBox.No:
                    return
            try:
                misc.rename_file(fname, path)
                if osp.isfile(path):
                    self.sig_renamed.emit(fname, path)
                else:
                    self.sig_tree_renamed.emit(fname, path)
                return path
            except EnvironmentError as error:
                QMessageBox.critical(
                    self, _("Rename"),
                    _("<b>Unable to rename file <i>%s</i></b>"
                      "<br><br>Error message:<br>%s"
                      ) % (osp.basename(fname), str(error)))
    @Slot()
    def show_in_external_file_explorer(self, fnames=None):
        """Show file in external file explorer

        Parameters
        ----------
        fnames: list of str, optional
            Paths to reveal; defaults to the current selection.
        """
        if fnames is None or isinstance(fnames, bool):
            fnames = self.get_selected_filenames()
        try:
            show_in_external_file_explorer(fnames)
        except FileNotFoundError as error:
            # The helper apparently relies on xdg-open on Linux (per the
            # check below), which may not be installed.
            if "xdg-open" in str(error):
                msg_title = _("Error")
                msg = _(
                    "Spyder can't show this file in the external file "
                    "explorer because the <tt>xdg-utils</tt> package is not "
                    "available on your system."
                )
                QMessageBox.critical(
                    self._parent, msg_title, msg, QMessageBox.Ok
                )
@Slot()
def rename(self, fnames=None):
"""Rename files"""
if fnames is None or isinstance(fnames, bool):
fnames = self.get_selected_filenames()
if not isinstance(fnames, (tuple, list)):
fnames = [fnames]
for fname in fnames:
self.rename_file(fname)
    @Slot()
    def move(self, fnames=None, directory=None):
        """Move files/directories

        Parameters
        ----------
        fnames: list of str, optional
            Paths to move; defaults to the current selection.
        directory: str, optional
            Target directory; when None the user is asked to pick one.
        """
        if fnames is None or isinstance(fnames, bool):
            fnames = self.get_selected_filenames()
        orig = fixpath(osp.dirname(fnames[0]))
        while True:
            # Disable stdio redirection while the native dialog is open.
            self.sig_redirect_stdio_requested.emit(False)
            if directory is None:
                folder = getexistingdirectory(
                    self, _("Select directory"), orig)
            else:
                folder = directory
            self.sig_redirect_stdio_requested.emit(True)
            if folder:
                folder = fixpath(folder)
                if folder != orig:
                    # A different target was chosen: proceed with the move.
                    break
            else:
                # Dialog canceled.
                return
        for fname in fnames:
            basename = osp.basename(fname)
            try:
                misc.move_file(fname, osp.join(folder, basename))
            except EnvironmentError as error:
                QMessageBox.critical(
                    self, _("Error"),
                    _("<b>Unable to move <i>%s</i></b>"
                      "<br><br>Error message:<br>%s"
                      ) % (basename, str(error)))
    def create_new_folder(self, current_path, title, subtitle, is_package):
        """Create new folder

        Parameters
        ----------
        current_path: str or None
            Directory (or file, whose directory is used) to create in.
        title: str
            Dialog title.
        subtitle: str
            Dialog prompt label.
        is_package: bool
            When True, also create an __init__.py inside the new folder.
        """
        if current_path is None:
            current_path = ''
        if osp.isfile(current_path):
            current_path = osp.dirname(current_path)
        name, valid = QInputDialog.getText(
            self, title, subtitle, QLineEdit.Normal, ""
        )
        if valid:
            dirname = osp.join(current_path, str(name))
            try:
                os.mkdir(dirname)
            except OSError as error:
                QMessageBox.critical(
                    self,
                    title,
                    _(
                        "<b>Unable to create folder <i>%s</i></b>"
                        "<br><br>Error message:<br>%s"
                    )
                    % (dirname, str(error)),
                )
            finally:
                # NOTE(review): this runs even when mkdir failed; the open()
                # below then raises and shows a second error dialog —
                # confirm this is intended.
                if is_package:
                    fname = osp.join(dirname, '__init__.py')
                    try:
                        with open(fname, 'wb') as f:
                            f.write(b'#')
                    except OSError as error:
                        QMessageBox.critical(
                            self,
                            title,
                            _(
                                "<b>Unable to create file <i>%s</i></b>"
                                "<br><br>Error message:<br>%s"
                            )
                            % (fname, str(error)),
                        )
def get_selected_dir(self):
""" Get selected dir
If file is selected the directory containing file is returned.
If multiple items are selected, first item is chosen.
"""
selected_path = self.get_selected_filenames()[0]
if osp.isfile(selected_path):
selected_path = osp.dirname(selected_path)
return fixpath(selected_path)
@Slot()
def new_folder(self, basedir=None):
"""New folder."""
if basedir is None or isinstance(basedir, bool):
basedir = self.get_selected_dir()
title = _('New folder')
subtitle = _('Folder name:')
self.create_new_folder(basedir, title, subtitle, is_package=False)
    def create_new_file(self, current_path, title, filters, create_func):
        """Create new file

        Returns the created file's path if successful, otherwise None."""
        if current_path is None:
            current_path = ''
        if osp.isfile(current_path):
            current_path = osp.dirname(current_path)
        # Disable stdio redirection while the native save dialog is open.
        self.sig_redirect_stdio_requested.emit(False)
        fname, _selfilter = getsavefilename(self, title, current_path, filters)
        self.sig_redirect_stdio_requested.emit(True)
        if fname:
            try:
                create_func(fname)
                return fname
            except EnvironmentError as error:
                QMessageBox.critical(
                    self, _("New file"),
                    _("<b>Unable to create file <i>%s</i>"
                      "</b><br><br>Error message:<br>%s"
                      ) % (fname, str(error)))
@Slot()
def new_file(self, basedir=None):
"""New file"""
if basedir is None or isinstance(basedir, bool):
basedir = self.get_selected_dir()
title = _("New file")
filters = _("All files")+" (*)"
def create_func(fname):
"""File creation callback"""
if osp.splitext(fname)[1] in ('.py', '.pyw', '.ipy'):
create_script(fname)
else:
with open(fname, 'wb') as f:
f.write(b'')
fname = self.create_new_file(basedir, title, filters, create_func)
if fname is not None:
self.open([fname])
@Slot()
def run(self, fnames=None):
"""Run Python scripts"""
if fnames is None or isinstance(fnames, bool):
fnames = self.get_selected_filenames()
for fname in fnames:
self.sig_run_requested.emit(fname)
def copy_path(self, fnames=None, method="absolute"):
"""Copy absolute or relative path to given file(s)/folders(s)."""
cb = QApplication.clipboard()
explorer_dir = self.fsmodel.rootPath()
if fnames is None:
fnames = self.get_selected_filenames()
if not isinstance(fnames, (tuple, list)):
fnames = [fnames]
fnames = [_fn.replace(os.sep, "/") for _fn in fnames]
if len(fnames) > 1:
if method == "absolute":
clipboard_files = ',\n'.join('"' + _fn + '"' for _fn in fnames)
elif method == "relative":
clipboard_files = ',\n'.join('"' +
osp.relpath(_fn, explorer_dir).
replace(os.sep, "/") + '"'
for _fn in fnames)
else:
if method == "absolute":
clipboard_files = fnames[0]
elif method == "relative":
clipboard_files = (osp.relpath(fnames[0], explorer_dir).
replace(os.sep, "/"))
copied_from = self._parent.__class__.__name__
if copied_from == 'ProjectExplorerWidget' and method == 'relative':
clipboard_files = [path.strip(',"') for path in
clipboard_files.splitlines()]
clipboard_files = ['/'.join(path.strip('/').split('/')[1:]) for
path in clipboard_files]
if len(clipboard_files) > 1:
clipboard_files = ',\n'.join('"' + _fn + '"' for _fn in
clipboard_files)
else:
clipboard_files = clipboard_files[0]
cb.setText(clipboard_files, mode=QClipboard.Mode.Clipboard)
@Slot()
def copy_absolute_path(self):
"""Copy absolute paths of named files/directories to the clipboard."""
self.copy_path(method="absolute")
@Slot()
def copy_relative_path(self):
"""Copy relative paths of named files/directories to the clipboard."""
self.copy_path(method="relative")
@Slot()
def copy_file_clipboard(self, fnames=None):
"""Copy file(s)/folders(s) to clipboard."""
if fnames is None or isinstance(fnames, bool):
fnames = self.get_selected_filenames()
if not isinstance(fnames, (tuple, list)):
fnames = [fnames]
try:
file_content = QMimeData()
file_content.setUrls([QUrl.fromLocalFile(_fn) for _fn in fnames])
cb = QApplication.clipboard()
cb.setMimeData(file_content, mode=QClipboard.Mode.Clipboard)
except Exception as e:
QMessageBox.critical(
self, _('File/Folder copy error'),
_("Cannot copy this type of file(s) or "
"folder(s). The error was:\n\n") + str(e))
@Slot()
def save_file_clipboard(self, fnames=None):
"""Paste file from clipboard into file/project explorer directory."""
if fnames is None or isinstance(fnames, bool):
fnames = self.get_selected_filenames()
if not isinstance(fnames, (tuple, list)):
fnames = [fnames]
if len(fnames) >= 1:
selected_item = osp.commonpath(fnames)
if osp.isfile(selected_item):
parent_path = osp.dirname(selected_item)
else:
parent_path = osp.normpath(selected_item)
cb_data = QApplication.clipboard().mimeData()
if cb_data.hasUrls():
urls = cb_data.urls()
for url in urls:
source_name = url.toLocalFile()
base_name = osp.basename(source_name)
if osp.isfile(source_name):
try:
while base_name in os.listdir(parent_path):
file_no_ext, file_ext = osp.splitext(base_name)
end_number = re.search(r'\d+$', file_no_ext)
if end_number:
new_number = int(end_number.group()) + 1
else:
new_number = 1
left_string = re.sub(r'\d+$', '', file_no_ext)
left_string += str(new_number)
base_name = left_string + file_ext
destination = osp.join(parent_path, base_name)
else:
destination = osp.join(parent_path, base_name)
shutil.copy(source_name, destination)
except Exception as e:
QMessageBox.critical(self, _('Error pasting file'),
_("Unsupported copy operation"
". The error was:\n\n")
+ str(e))
else:
try:
while base_name in os.listdir(parent_path):
end_number = re.search(r'\d+$', base_name)
if end_number:
new_number = int(end_number.group()) + 1
else:
new_number = 1
left_string = re.sub(r'\d+$', '', base_name)
base_name = left_string + str(new_number)
destination = osp.join(parent_path, base_name)
else:
destination = osp.join(parent_path, base_name)
if osp.realpath(destination).startswith(
osp.realpath(source_name) + os.sep):
QMessageBox.critical(self,
_('Recursive copy'),
_("Source is an ancestor"
" of destination"
" folder."))
continue
shutil.copytree(source_name, destination)
except Exception as e:
QMessageBox.critical(self,
_('Error pasting folder'),
_("Unsupported copy"
" operation. The error was:"
"\n\n") + str(e))
else:
QMessageBox.critical(self, _("No file in clipboard"),
_("No file in the clipboard. Please copy"
" a file to the clipboard first."))
else:
if QApplication.clipboard().mimeData().hasUrls():
QMessageBox.critical(self, _('Blank area'),
_("Cannot paste in the blank area."))
else:
pass
@Slot()
def open_interpreter(self, fnames=None):
"""Open interpreter"""
if fnames is None or isinstance(fnames, bool):
fnames = self.get_selected_filenames()
for path in sorted(fnames):
self.sig_open_interpreter_requested.emit(path)
def filter_files(self, name_filters=None):
"""Filter files given the defined list of filters."""
if name_filters is None:
name_filters = self.get_conf('name_filters')
if self.filter_on:
self.fsmodel.setNameFilters(name_filters)
else:
self.fsmodel.setNameFilters([])
# ---- File Associations
# ------------------------------------------------------------------------
def get_common_file_associations(self, fnames):
"""
Return the list of common matching file associations for all fnames.
"""
all_values = []
for fname in fnames:
values = self.get_file_associations(fname)
all_values.append(values)
common = set(all_values[0])
for index in range(1, len(all_values)):
common = common.intersection(all_values[index])
return list(sorted(common))
def get_file_associations(self, fname):
"""Return the list of matching file associations for `fname`."""
for exts, values in self.get_conf('file_associations', {}).items():
clean_exts = [ext.strip() for ext in exts.split(',')]
for ext in clean_exts:
if fname.endswith((ext, ext[1:])):
values = values
break
else:
continue # Only excecuted if the inner loop did not break
break # Only excecuted if the inner loop did break
else:
values = []
return values
# ---- File/Directory actions
# ------------------------------------------------------------------------
def check_launch_error_codes(self, return_codes):
"""Check return codes and display message box if errors found."""
errors = [cmd for cmd, code in return_codes.items() if code != 0]
if errors:
if len(errors) == 1:
msg = _('The following command did not launch successfully:')
else:
msg = _('The following commands did not launch successfully:')
msg += '<br><br>' if len(errors) == 1 else '<br><br><ul>'
for error in errors:
if len(errors) == 1:
msg += '<code>{}</code>'.format(error)
else:
msg += '<li><code>{}</code></li>'.format(error)
msg += '' if len(errors) == 1 else '</ul>'
QMessageBox.warning(self, 'Application', msg, QMessageBox.Ok)
return not bool(errors)
# ---- VCS actions
# ------------------------------------------------------------------------
def vcs_command(self, action):
"""VCS action (commit, browse)"""
fnames = self.get_selected_filenames()
# Get dirname of selection
if osp.isdir(fnames[0]):
dirname = fnames[0]
else:
dirname = osp.dirname(fnames[0])
# Run action
try:
for path in sorted(fnames):
vcs.run_vcs_tool(dirname, action)
except vcs.ActionToolNotFound as error:
msg = _("For %s support, please install one of the<br/> "
"following tools:<br/><br/> %s")\
% (error.vcsname, ', '.join(error.tools))
QMessageBox.critical(
self, _("Error"),
_("""<b>Unable to find external program.</b><br><br>%s"""
) % str(msg))
# ---- Settings
# ------------------------------------------------------------------------
def get_scrollbar_position(self):
"""Return scrollbar positions"""
return (self.horizontalScrollBar().value(),
self.verticalScrollBar().value())
def set_scrollbar_position(self, position):
"""Set scrollbar positions"""
# Scrollbars will be restored after the expanded state
self._scrollbar_positions = position
if self._to_be_loaded is not None and len(self._to_be_loaded) == 0:
self.restore_scrollbar_positions()
def restore_scrollbar_positions(self):
"""Restore scrollbar positions once tree is loaded"""
hor, ver = self._scrollbar_positions
self.horizontalScrollBar().setValue(hor)
self.verticalScrollBar().setValue(ver)
def get_expanded_state(self):
"""Return expanded state"""
self.save_expanded_state()
return self.__expanded_state
def set_expanded_state(self, state):
"""Set expanded state"""
self.__expanded_state = state
self.restore_expanded_state()
def save_expanded_state(self):
"""Save all items expanded state"""
model = self.model()
# If model is not installed, 'model' will be None: this happens when
# using the Project Explorer without having selected a workspace yet
if model is not None:
self.__expanded_state = []
for idx in model.persistentIndexList():
if self.isExpanded(idx):
self.__expanded_state.append(self.get_filename(idx))
def restore_directory_state(self, fname):
"""Restore directory expanded state"""
root = osp.normpath(str(fname))
if not osp.exists(root):
# Directory has been (re)moved outside Spyder
return
for basename in os.listdir(root):
path = osp.normpath(osp.join(root, basename))
if osp.isdir(path) and path in self.__expanded_state:
self.__expanded_state.pop(self.__expanded_state.index(path))
if self._to_be_loaded is None:
self._to_be_loaded = []
self._to_be_loaded.append(path)
self.setExpanded(self.get_index(path), True)
if not self.__expanded_state:
self.fsmodel.directoryLoaded.disconnect(
self.restore_directory_state)
def follow_directories_loaded(self, fname):
"""Follow directories loaded during startup"""
if self._to_be_loaded is None:
return
path = osp.normpath(str(fname))
if path in self._to_be_loaded:
self._to_be_loaded.remove(path)
if self._to_be_loaded is not None and len(self._to_be_loaded) == 0:
self.fsmodel.directoryLoaded.disconnect(
self.follow_directories_loaded)
if self._scrollbar_positions is not None:
# The tree view need some time to render branches:
QTimer.singleShot(50, self.restore_scrollbar_positions)
def restore_expanded_state(self):
"""Restore all items expanded state"""
if self.__expanded_state is not None:
# In the old project explorer, the expanded state was a
# dictionary:
if isinstance(self.__expanded_state, list):
self.fsmodel.directoryLoaded.connect(
self.restore_directory_state)
self.fsmodel.directoryLoaded.connect(
self.follow_directories_loaded)
# ---- Options
# ------------------------------------------------------------------------
def set_single_click_to_open(self, value):
"""Set single click to open items."""
# Reset cursor shape
if not value:
self.unsetCursor()
def set_file_associations(self, value):
"""Set file associations open items."""
self.set_conf('file_associations', value)
def set_name_filters(self, name_filters):
"""Set name filters"""
if self.get_conf('name_filters') == ['']:
self.set_conf('name_filters', [])
else:
self.set_conf('name_filters', name_filters)
def set_show_hidden(self, state):
"""Toggle 'show hidden files' state"""
filters = (QDir.AllDirs | QDir.Files | QDir.Drives |
QDir.NoDotAndDotDot)
if state:
filters = (QDir.AllDirs | QDir.Files | QDir.Drives |
QDir.NoDotAndDotDot | QDir.Hidden)
self.fsmodel.setFilter(filters)
def reset_icon_provider(self):
"""Reset file system model icon provider
The purpose of this is to refresh files/directories icons"""
self.fsmodel.setIconProvider(IconProvider())
def convert_notebook(self, fname):
"""Convert an IPython notebook to a Python script in editor"""
try:
script = nbexporter().from_filename(fname)[0]
except Exception as e:
QMessageBox.critical(
self, _('Conversion error'),
_("It was not possible to convert this "
"notebook. The error is:\n\n") + str(e))
return
self.sig_file_created.emit(script)
@Slot()
def convert_notebooks(self):
"""Convert IPython notebooks to Python scripts in editor"""
fnames = self.get_selected_filenames()
if not isinstance(fnames, (tuple, list)):
fnames = [fnames]
for fname in fnames:
self.convert_notebook(fname)
@Slot()
def new_package(self, basedir=None):
"""New package"""
if basedir is None or isinstance(basedir, bool):
basedir = self.get_selected_dir()
title = _('New package')
subtitle = _('Package name:')
self.create_new_folder(basedir, title, subtitle, is_package=True)
@Slot()
def new_module(self, basedir=None):
"""New module"""
if basedir is None or isinstance(basedir, bool):
basedir = self.get_selected_dir()
title = _("New module")
filters = _("Python files")+" (*.py *.pyw *.ipy)"
def create_func(fname):
self.sig_module_created.emit(fname)
self.create_new_file(basedir, title, filters, create_func)
def go_to_parent_directory(self):
pass
| DirView |
python | plotly__plotly.py | codegen/utils.py | {
"start": 29828,
"end": 31597
} | class ____(PlotlyNode):
"""
Class representing datatypes in the layout hierarchy
"""
# Constructor
def __init__(self, plotly_schema, node_path=(), parent=None):
# Get main layout properties
layout = plotly_schema["layout"]["layoutAttributes"]
# Get list of additional layout properties for each trace
trace_layouts = [
plotly_schema["traces"][trace].get("layoutAttributes", {})
for trace in plotly_schema["traces"]
if trace != "barpolar"
]
extra_polar_nodes = plotly_schema["traces"]["barpolar"].get(
"layoutAttributes", {}
)
layout["polar"].update(extra_polar_nodes)
# Chain together into layout_data
self.layout_data = ChainMap(layout, *trace_layouts)
# Call superclass constructor
super().__init__(plotly_schema, node_path, parent)
@property
def name_base_datatype(self):
if len(self.node_path) == 0:
return "BaseLayoutType"
else:
return "BaseLayoutHierarchyType"
@property
def root_name(self):
return "layout"
@property
def plotly_name(self) -> str:
if len(self.node_path) == 0:
return self.root_name
else:
return self.node_path[-1]
# Description
@property
def description(self) -> str:
desc = self.node_data.get("description", "")
if isinstance(desc, list):
desc = "".join(desc)
return format_description(desc)
# Raw data
@property
def node_data(self) -> dict:
node_data = self.layout_data
for prop_name in self.node_path:
node_data = node_data[prop_name]
return node_data
| LayoutNode |
python | qdrant__qdrant-client | qdrant_client/http/api/collections_api.py | {
"start": 1311,
"end": 5466
} | class ____:
def __init__(self, api_client: "Union[ApiClient, AsyncApiClient]"):
self.api_client = api_client
def _build_for_collection_exists(
self,
collection_name: str,
):
"""
Returns \"true\" if the given collection name exists, and \"false\" otherwise
"""
path_params = {
"collection_name": str(collection_name),
}
headers = {}
return self.api_client.request(
type_=m.InlineResponse2006,
method="GET",
url="/collections/{collection_name}/exists",
headers=headers if headers else None,
path_params=path_params,
)
def _build_for_create_collection(
self,
collection_name: str,
timeout: int = None,
create_collection: m.CreateCollection = None,
):
"""
Create new collection with given parameters
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(create_collection)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse200,
method="PUT",
url="/collections/{collection_name}",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
def _build_for_delete_collection(
self,
collection_name: str,
timeout: int = None,
):
"""
Drop collection and all associated data
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
return self.api_client.request(
type_=m.InlineResponse200,
method="DELETE",
url="/collections/{collection_name}",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
)
def _build_for_get_collection(
self,
collection_name: str,
):
"""
Get detailed information about specified existing collection
"""
path_params = {
"collection_name": str(collection_name),
}
headers = {}
return self.api_client.request(
type_=m.InlineResponse2004,
method="GET",
url="/collections/{collection_name}",
headers=headers if headers else None,
path_params=path_params,
)
def _build_for_get_collections(
self,
):
"""
Get list name of all existing collections
"""
headers = {}
return self.api_client.request(
type_=m.InlineResponse2003,
method="GET",
url="/collections",
headers=headers if headers else None,
)
def _build_for_update_collection(
self,
collection_name: str,
timeout: int = None,
update_collection: m.UpdateCollection = None,
):
"""
Update parameters of the existing collection
"""
path_params = {
"collection_name": str(collection_name),
}
query_params = {}
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(update_collection)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse200,
method="PATCH",
url="/collections/{collection_name}",
headers=headers if headers else None,
path_params=path_params,
params=query_params,
content=body,
)
| _CollectionsApi |
python | HIPS__autograd | autograd/tracer.py | {
"start": 2715,
"end": 2923
} | class ____:
def __init__(self):
self.top = -1
@contextmanager
def new_trace(self):
self.top += 1
yield self.top
self.top -= 1
trace_stack = TraceStack()
| TraceStack |
python | mkdocs__mkdocs | mkdocs/config/config_options.py | {
"start": 22416,
"end": 23260
} | class ____(Type[str]):
def __init__(self, repo_url_key: str) -> None:
super().__init__(str)
self.repo_url_key = repo_url_key
def post_validation(self, config: Config, key_name: str):
repo_name = config.get(key_name)
repo_url = config.get(self.repo_url_key)
# derive repo_name from repo_url if unset
if repo_url is not None and repo_name is None:
repo_host = urlsplit(config['repo_url']).netloc.lower()
if repo_host == 'github.com':
repo_name = 'GitHub'
elif repo_host == 'bitbucket.org':
repo_name = 'Bitbucket'
elif repo_host == 'gitlab.com':
repo_name = 'GitLab'
else:
repo_name = repo_host.split('.')[0].title()
config[key_name] = repo_name
| RepoName |
python | getsentry__sentry | tests/sentry/uptime/test_grouptype.py | {
"start": 10075,
"end": 14080
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.uptime_monitor = self.create_uptime_detector()
def test_detector_correct_schema(self) -> None:
self.create_detector(
name=self.uptime_monitor.name,
project_id=self.project.id,
type=UptimeDomainCheckFailure.slug,
config={
"mode": UptimeMonitorMode.MANUAL,
"environment": "hi",
"recovery_threshold": 1,
"downtime_threshold": 3,
},
)
def test_incorrect_config(self) -> None:
with pytest.raises(ValidationError):
self.create_detector(
name=self.uptime_monitor.name,
project_id=self.project.id,
type=UptimeDomainCheckFailure.slug,
config=["some", "stuff"],
)
def test_mismatched_schema(self) -> None:
with pytest.raises(ValidationError):
self.create_detector(
name=self.uptime_monitor.name,
project_id=self.project.id,
type=UptimeDomainCheckFailure.slug,
config={
"mode": "hi",
"environment": "hi",
},
)
with pytest.raises(ValidationError):
self.create_detector(
name=self.uptime_monitor.name,
project_id=self.project.id,
type=UptimeDomainCheckFailure.slug,
config={
"mode": UptimeMonitorMode.MANUAL,
"environment": 1,
},
)
with pytest.raises(ValidationError):
self.create_detector(
name=self.uptime_monitor.name,
project_id=self.project.id,
type=UptimeDomainCheckFailure.slug,
config={
"mode": 0,
"environment": "hi",
},
)
with pytest.raises(ValidationError):
self.create_detector(
name=self.uptime_monitor.name,
project_id=self.project.id,
type=UptimeDomainCheckFailure.slug,
config={
"bad_mode": UptimeMonitorMode.MANUAL,
"environment": "hi",
},
)
with pytest.raises(ValidationError):
self.create_detector(
name=self.uptime_monitor.name,
project_id=self.project.id,
type=UptimeDomainCheckFailure.slug,
config={
"mode": UptimeMonitorMode.MANUAL,
"environment": "hi",
"junk": "hi",
},
)
def test_missing_required(self) -> None:
with pytest.raises(ValidationError):
self.create_detector(
name=self.uptime_monitor.name,
project_id=self.project.id,
type=UptimeDomainCheckFailure.slug,
config={},
)
with pytest.raises(ValidationError):
self.create_detector(
name=self.uptime_monitor.name,
project_id=self.project.id,
type=UptimeDomainCheckFailure.slug,
)
with pytest.raises(ValidationError):
self.create_detector(
name=self.uptime_monitor.name,
project_id=self.project.id,
type=UptimeDomainCheckFailure.slug,
config={
"mode": UptimeMonitorMode.MANUAL,
},
)
with pytest.raises(ValidationError):
self.create_detector(
name=self.uptime_monitor.name,
project_id=self.project.id,
type=UptimeDomainCheckFailure.slug,
config={"environment": "hi"},
)
| TestUptimeDomainCheckFailureDetectorConfig |
python | kubernetes-client__python | kubernetes/client/models/v1_resource_quota.py | {
"start": 383,
"end": 7286
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1ResourceQuotaSpec',
'status': 'V1ResourceQuotaStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1ResourceQuota - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1ResourceQuota. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ResourceQuota. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ResourceQuota.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ResourceQuota. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1ResourceQuota. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ResourceQuota. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ResourceQuota.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ResourceQuota. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ResourceQuota. # noqa: E501
:return: The metadata of this V1ResourceQuota. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ResourceQuota.
:param metadata: The metadata of this V1ResourceQuota. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1ResourceQuota. # noqa: E501
:return: The spec of this V1ResourceQuota. # noqa: E501
:rtype: V1ResourceQuotaSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1ResourceQuota.
:param spec: The spec of this V1ResourceQuota. # noqa: E501
:type: V1ResourceQuotaSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1ResourceQuota. # noqa: E501
:return: The status of this V1ResourceQuota. # noqa: E501
:rtype: V1ResourceQuotaStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1ResourceQuota.
:param status: The status of this V1ResourceQuota. # noqa: E501
:type: V1ResourceQuotaStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ResourceQuota):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ResourceQuota):
return True
return self.to_dict() != other.to_dict()
| V1ResourceQuota |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/named_types.py | {
"start": 950,
"end": 4646
} | class ____(schema.SchemaVisitable, sqltypes.TypeEngine):
"""Base for named types."""
__abstract__ = True
DDLGenerator: Type[NamedTypeGenerator]
DDLDropper: Type[NamedTypeDropper]
create_type: bool
def create(
self, bind: _CreateDropBind, checkfirst: bool = True, **kw: Any
) -> None:
"""Emit ``CREATE`` DDL for this type.
:param bind: a connectable :class:`_engine.Engine`,
:class:`_engine.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type does not exist already before
creating.
"""
bind._run_ddl_visitor(self.DDLGenerator, self, checkfirst=checkfirst)
def drop(
self, bind: _CreateDropBind, checkfirst: bool = True, **kw: Any
) -> None:
"""Emit ``DROP`` DDL for this type.
:param bind: a connectable :class:`_engine.Engine`,
:class:`_engine.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type actually exists before dropping.
"""
bind._run_ddl_visitor(self.DDLDropper, self, checkfirst=checkfirst)
def _check_for_name_in_memos(
self, checkfirst: CheckFirst, kw: Dict[str, Any]
) -> bool:
"""Look in the 'ddl runner' for 'memos', then
note our name in that collection.
This to ensure a particular named type is operated
upon only once within any kind of create/drop
sequence without relying upon "checkfirst".
"""
if not self.create_type:
return True
if "_ddl_runner" in kw:
ddl_runner = kw["_ddl_runner"]
type_name = f"pg_{self.__visit_name__}"
if type_name in ddl_runner.memo:
existing = ddl_runner.memo[type_name]
else:
existing = ddl_runner.memo[type_name] = set()
present = (self.schema, self.name) in existing
existing.add((self.schema, self.name))
return present
else:
return False
def _on_table_create(
self,
target: schema.Table,
bind: _CreateDropBind,
checkfirst: Union[bool, CheckFirst] = CheckFirst.NONE,
**kw: Any,
) -> None:
checkfirst = CheckFirst(checkfirst) & CheckFirst.TYPES
if not self._check_for_name_in_memos(checkfirst, kw):
self.create(bind=bind, checkfirst=bool(checkfirst))
def _on_table_drop(
self,
target: Any,
bind: _CreateDropBind,
checkfirst: CheckFirst = CheckFirst.NONE,
**kw: Any,
) -> None:
# do nothing since the enum is attached to a metadata
assert self.metadata is not None
def _on_metadata_create(
self,
target: schema.MetaData,
bind: _CreateDropBind,
checkfirst: Union[bool, CheckFirst] = CheckFirst.NONE,
**kw: Any,
) -> None:
checkfirst = CheckFirst(checkfirst) & CheckFirst.TYPES
if not self._check_for_name_in_memos(checkfirst, kw):
self.create(bind=bind, checkfirst=bool(checkfirst))
def _on_metadata_drop(
self,
target: schema.MetaData,
bind: _CreateDropBind,
checkfirst: Union[bool, CheckFirst] = CheckFirst.NONE,
**kw: Any,
) -> None:
checkfirst = CheckFirst(checkfirst) & CheckFirst.TYPES
if not self._check_for_name_in_memos(checkfirst, kw):
self.drop(bind=bind, checkfirst=bool(checkfirst))
| NamedType |
python | spyder-ide__spyder | spyder/widgets/tabs.py | {
"start": 8562,
"end": 15197
} | class ____(QTabBar):
"""Tabs base class with drag and drop support"""
sig_move_tab = Signal((int, int), (str, int, int))
sig_name_changed = Signal(str)
def __init__(self, parent, ancestor, rename_tabs=False, split_char='',
split_index=0):
QTabBar.__init__(self, parent)
self.ancestor = ancestor
self.setObjectName('pane-tabbar')
# Dragging tabs
self.__drag_start_pos = QPoint()
self.setAcceptDrops(True)
self.setUsesScrollButtons(True)
self.setMovable(True)
# Tab name editor
self.rename_tabs = rename_tabs
if self.rename_tabs:
# Creates tab name editor
self.tab_name_editor = EditTabNamePopup(self, split_char,
split_index)
else:
self.tab_name_editor = None
self.close_btn_side = QTabBar.LeftSide if MAC else QTabBar.RightSide
# Signals
self.currentChanged.connect(self._on_tab_changed)
self.tabMoved.connect(self._on_tab_moved)
def refresh_style(self):
"""Refresh the widget style."""
self._on_tab_changed(self.currentIndex())
def _on_tab_changed(self, index):
"""Actions to take when the current tab has changed."""
# Repaint background color of close buttons
for i in range(self.count()):
close_btn: CloseTabButton = self.tabButton(i, self.close_btn_side)
if close_btn:
close_btn.index = i
if i == index:
close_btn.set_selected_color()
else:
close_btn.set_not_selected_color()
def _on_tab_moved(self, index_from, index_to):
"""Actions to take when drag and drop a tab to a different place."""
# Repaint background color of switched buttons
close_btn_from = self.tabButton(index_from, self.close_btn_side)
close_btn_to = self.tabButton(index_to, self.close_btn_side)
close_btn_from.index, close_btn_to.index = index_from, index_to
close_btn_from.set_not_selected_color()
close_btn_to.set_selected_color()
def mousePressEvent(self, event):
"""Reimplement Qt method"""
if event.button() == Qt.LeftButton:
self.__drag_start_pos = QPoint(event.pos())
QTabBar.mousePressEvent(self, event)
def mouseMoveEvent(self, event):
"""Override Qt method"""
# FIXME: This was added by Pierre presumably to move tabs
# between plugins, but righit now it's breaking the regular
# Qt drag behavior for tabs, so we're commenting it for
# now
#if event.buttons() == Qt.MouseButtons(Qt.LeftButton) and \
# (event.pos() - self.__drag_start_pos).manhattanLength() > \
# QApplication.startDragDistance():
# drag = QDrag(self)
# mimeData = QMimeData()#
# ancestor_id = to_text_string(id(self.ancestor))
# parent_widget_id = to_text_string(id(self.parentWidget()))
# self_id = to_text_string(id(self))
# source_index = to_text_string(self.tabAt(self.__drag_start_pos))
# mimeData.setData("parent-id", to_binary_string(ancestor_id))
# mimeData.setData("tabwidget-id",
# to_binary_string(parent_widget_id))
# mimeData.setData("tabbar-id", to_binary_string(self_id))
# mimeData.setData("source-index", to_binary_string(source_index))
# drag.setMimeData(mimeData)
# drag.exec_()
QTabBar.mouseMoveEvent(self, event)
def dragEnterEvent(self, event):
"""Override Qt method"""
mimeData = event.mimeData()
formats = list(mimeData.formats())
if (
"parent-id" in formats
and int(mimeData.data("parent-id")) == id(self.ancestor)
):
event.acceptProposedAction()
QTabBar.dragEnterEvent(self, event)
def dropEvent(self, event):
"""Override Qt method"""
mimeData = event.mimeData()
index_from = int(mimeData.data("source-index"))
index_to = self.tabAt(event.pos())
if index_to == -1:
index_to = self.count()
if int(mimeData.data("tabbar-id")) != id(self):
tabwidget_from = str(mimeData.data("tabwidget-id"))
# We pass self object ID as a QString, because otherwise it would
# depend on the platform: long for 64bit, int for 32bit. Replacing
# by long all the time is not working on some 32bit platforms.
# See spyder-ide/spyder#1094 and spyder-ide/spyder#1098.
self.sig_move_tab[(str, int, int)].emit(tabwidget_from, index_from,
index_to)
event.acceptProposedAction()
elif index_from != index_to:
self.sig_move_tab.emit(index_from, index_to)
event.acceptProposedAction()
QTabBar.dropEvent(self, event)
def mouseDoubleClickEvent(self, event):
"""Override Qt method to trigger the tab name editor."""
if self.rename_tabs is True and event.button() == Qt.LeftButton:
# Tab index
index = self.tabAt(event.pos())
if index >= 0:
# Tab is valid, call tab name editor
self.tab_name_editor.edit_tab(index)
else:
# Event is not interesting, raise to parent
QTabBar.mouseDoubleClickEvent(self, event)
def tabInserted(self, index):
"""Actions to take when a new tab is added or inserted."""
# Use our own close button because we can style it to our needs.
close_button = CloseTabButton(self, index)
# Request to close the tab when the close button is clicked
close_button.sig_clicked.connect(self.tabCloseRequested)
# Set close button
self.setTabButton(index, self.close_btn_side, close_button)
def tabRemoved(self, index):
"""Actions to take when a tab is removed."""
# A tab removal makes the following ones to change their `index` (`-1`)
# Following that, there is a need to update the `index` attribute that
# the custom close button instances have. Otherwise, for example on the
# Editor, an `IndexError` can be raised.
# See spyder-ide/spyder#22033
for i in range(index, self.count()):
close_btn: CloseTabButton = self.tabButton(i, self.close_btn_side)
if close_btn:
close_btn.index = i
| TabBar |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_health.py | {
"start": 4033,
"end": 5572
} | class ____(graphene.Union):
class Meta:
types = (
GrapheneAssetHealthMaterializationDegradedPartitionedMeta,
GrapheneAssetHealthMaterializationHealthyPartitionedMeta,
GrapheneAssetHealthMaterializationDegradedNotPartitionedMeta,
)
name = "AssetHealthMaterializationMeta"
@staticmethod
def from_metadata_class(
metadata: AssetHealthMaterializationMetadata,
) -> "GrapheneAssetHealthMaterializationMeta":
if isinstance(metadata, AssetHealthMaterializationDegradedNotPartitionedMeta):
return GrapheneAssetHealthMaterializationDegradedNotPartitionedMeta(
failedRunId=metadata.failed_run_id
)
elif isinstance(metadata, AssetHealthMaterializationHealthyPartitionedMeta):
return GrapheneAssetHealthMaterializationHealthyPartitionedMeta(
numMissingPartitions=metadata.num_missing_partitions,
totalNumPartitions=metadata.total_num_partitions,
)
elif isinstance(metadata, AssetHealthMaterializationDegradedPartitionedMeta):
return GrapheneAssetHealthMaterializationDegradedPartitionedMeta(
numFailedPartitions=metadata.num_failed_partitions,
numMissingPartitions=metadata.num_missing_partitions,
totalNumPartitions=metadata.total_num_partitions,
)
else:
raise ValueError(f"Unknown metadata class: {type(metadata)}")
| GrapheneAssetHealthMaterializationMeta |
python | spack__spack | lib/spack/spack/install_test.py | {
"start": 41662,
"end": 42113
} | class ____(spack.error.SpackError):
"""Raised when package tests have failed for an installation."""
def __init__(self, failures: List[TestFailureType]):
# Failures are all exceptions
num = len(failures)
msg = "{} failed.\n".format(plural(num, "test"))
for failure, message in failures:
msg += "\n\n%s\n" % str(failure)
msg += "\n%s\n" % message
super().__init__(msg)
| TestFailure |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_check_commands.py | {
"start": 199,
"end": 8598
} | class ____:
"""Test suite for check docstrings commands."""
def setup_method(self):
"""Set up test fixtures."""
self.runner = CliRunner()
def test_check_docstrings_symbol_dagster_asset(self):
"""Test validating dagster.asset symbol docstring."""
result = self.runner.invoke(check, ["docstrings", "--symbol", "dagster.asset"])
# Should complete successfully (dagster.asset should have good docstring)
assert result.exit_code == 0
assert "Validating docstring for: dagster.asset" in result.output
assert "✓" in result.output # Success indicator
def test_check_docstrings_symbol_dagster_op(self):
"""Test validating dagster.op symbol docstring."""
result = self.runner.invoke(check, ["docstrings", "--symbol", "dagster.op"])
# Should complete successfully
assert result.exit_code == 0
assert "Validating docstring for: dagster.op" in result.output
def test_check_docstrings_symbol_dagster_job(self):
"""Test validating dagster.job symbol docstring."""
result = self.runner.invoke(check, ["docstrings", "--symbol", "dagster.job"])
# Should complete successfully
assert result.exit_code == 0
assert "Validating docstring for: dagster.job" in result.output
def test_check_docstrings_symbol_automation_validator(self):
"""Test validating automation docstring validator symbol."""
result = self.runner.invoke(
check,
["docstrings", "--symbol", "automation.dagster_docs.validator.SymbolImporter"],
)
# Should complete successfully
assert result.exit_code == 0
assert (
"Validating docstring for: automation.dagster_docs.validator.SymbolImporter"
in result.output
)
def test_check_docstrings_symbol_nonexistent(self):
"""Test validating nonexistent symbol should fail."""
result = self.runner.invoke(check, ["docstrings", "--symbol", "nonexistent.symbol"])
# Should fail with exit code 1
assert result.exit_code == 1
assert "Error:" in result.output or "ERRORS:" in result.output or "✗" in result.output
def test_check_docstrings_package_automation(self):
"""Test validating all docstrings in automation.dagster_docs package."""
result = self.runner.invoke(check, ["docstrings", "--package", "automation.dagster_docs"])
# Should complete successfully
assert result.exit_code == 0
assert "Validating" in result.output
assert "public symbols in automation.dagster_docs" in result.output
assert "Summary:" in result.output
def test_check_docstrings_package_dagster_subset(self):
"""Test validating docstrings in a small dagster subpackage."""
# Use a smaller package to avoid long test times
result = self.runner.invoke(check, ["docstrings", "--package", "dagster._core.errors"])
# Should complete (may have warnings/errors but should not crash)
assert result.exit_code in [0, 1] # 0 for success, 1 for validation errors
assert "Validating" in result.output
assert "Summary:" in result.output
def test_check_docstrings_package_nonexistent(self):
"""Test validating nonexistent package should fail."""
result = self.runner.invoke(check, ["docstrings", "--package", "nonexistent.package"])
# Should fail with exit code 1
assert result.exit_code == 1
assert "Error: Could not import package" in result.output
@patch("automation.dagster_docs.commands.check.git_changed_files")
@patch("automation.dagster_docs.commands.check._find_git_root")
def test_check_docstrings_changed_no_files(self, mock_find_git_root, mock_git_changed_files):
"""Test validating changed files when no files are changed."""
# Mock git root and no changed files
mock_find_git_root.return_value = Path("/fake/git/root")
mock_git_changed_files.return_value = []
result = self.runner.invoke(check, ["docstrings", "--changed"])
# Should complete successfully with no files message
assert result.exit_code == 0
assert "No changed Python files found" in result.output
@patch("automation.dagster_docs.commands.check._find_git_root")
def test_check_docstrings_changed_no_git_repo(self, mock_find_git_root):
"""Test validating changed files when not in git repo."""
# Mock no git root found
mock_find_git_root.return_value = None
result = self.runner.invoke(check, ["docstrings", "--changed"])
# Should fail with exit code 2 (special code for no git repo)
assert result.exit_code == 2
assert "Error: Not in a git repository" in result.output
def test_check_docstrings_no_options_fails(self):
"""Test that check docstrings without options fails."""
result = self.runner.invoke(check, ["docstrings"])
# Should fail with exit code 1
assert result.exit_code == 1
assert (
"Error: Exactly one of --changed, --symbol, --all, or --package must be provided"
in result.output
)
def test_check_docstrings_multiple_options_fails(self):
"""Test that check docstrings with multiple options fails."""
result = self.runner.invoke(
check, ["docstrings", "--symbol", "dagster.asset", "--package", "dagster"]
)
# Should fail with exit code 1
assert result.exit_code == 1
assert (
"Error: Exactly one of --changed, --symbol, --all, or --package must be provided"
in result.output
)
def test_check_docstrings_all_runs_successfully(self):
"""Test that check docstrings --all executes without crashing."""
result = self.runner.invoke(check, ["docstrings", "--all"])
# Should complete (may have warnings/errors but should not crash)
assert result.exit_code in [0, 1] # 0 for success, 1 for validation errors
assert "Validating docstrings across" in result.output
assert "public Dagster packages" in result.output
assert "Overall Summary:" in result.output
def test_check_docstrings_all_shows_summary_statistics(self):
"""Test that --all shows package count and symbol statistics."""
result = self.runner.invoke(check, ["docstrings", "--all"])
# Should show statistics about packages and symbols processed
assert result.exit_code in [0, 1]
assert "symbols processed across" in result.output
assert "packages" in result.output
assert "Total:" in result.output
# Should show counts for errors and warnings
assert "errors," in result.output
assert "warnings" in result.output
def test_check_docstrings_all_respects_exclude_lists_by_default(self):
"""Test that exclude lists are applied by default in --all mode."""
result = self.runner.invoke(check, ["docstrings", "--all"])
# Should complete and may show exclusion information
assert result.exit_code in [0, 1]
# If exclusions exist, should mention them
# Note: We can't assert specific exclusion counts as they may change,
# but we can check the format is correct
if "excluded from validation" in result.output:
assert "symbols excluded from validation" in result.output
def test_check_docstrings_all_handles_import_errors_gracefully(self):
"""Test graceful handling when some packages fail to import."""
result = self.runner.invoke(check, ["docstrings", "--all"])
# Should complete successfully even if some packages fail to import
assert result.exit_code in [0, 1]
# May show warnings about import failures, but should continue
if "Warning: Could not import package" in result.output:
# Should still show overall summary despite import failures
assert "Overall Summary:" in result.output
def test_check_help_command(self):
"""Test that check help works."""
result = self.runner.invoke(check, ["--help"])
assert result.exit_code == 0
assert "Check documentation aspects" in result.output
assert "docstrings" in result.output
| TestCheckDocstringsCommands |
python | davidhalter__parso | parso/python/pep8.py | {
"start": 5278,
"end": 32876
} | class ____(ErrorFinder):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._previous_part = None
self._previous_leaf = None
self._on_newline = True
self._newline_count = 0
self._wanted_newline_count = None
self._max_new_lines_in_prefix = 0
self._new_statement = True
self._implicit_indentation_possible = False
# The top of stack of the indentation nodes.
self._indentation_tos = self._last_indentation_tos = \
IndentationNode(self._config, indentation='')
self._in_suite_introducer = False
if ' ' in self._config.indentation:
self._indentation_type = 'spaces'
self._wrong_indentation_char = '\t'
else:
self._indentation_type = 'tabs'
self._wrong_indentation_char = ' '
@contextmanager
def visit_node(self, node):
with super().visit_node(node):
with self._visit_node(node):
yield
@contextmanager
def _visit_node(self, node):
typ = node.type
if typ in 'import_name':
names = node.get_defined_names()
if len(names) > 1:
for name in names[:1]:
self.add_issue(name, 401, 'Multiple imports on one line')
elif typ == 'lambdef':
expr_stmt = node.parent
# Check if it's simply defining a single name, not something like
# foo.bar or x[1], where using a lambda could make more sense.
if expr_stmt.type == 'expr_stmt' and any(n.type == 'name'
for n in expr_stmt.children[:-2:2]):
self.add_issue(node, 731, 'Do not assign a lambda expression, use a def')
elif typ == 'try_stmt':
for child in node.children:
# Here we can simply check if it's an except, because otherwise
# it would be an except_clause.
if child.type == 'keyword' and child.value == 'except':
self.add_issue(child, 722, 'Do not use bare except, specify exception instead')
elif typ == 'comparison':
for child in node.children:
if child.type not in ('atom_expr', 'power'):
continue
if len(child.children) > 2:
continue
trailer = child.children[1]
atom = child.children[0]
if trailer.type == 'trailer' and atom.type == 'name' \
and atom.value == 'type':
self.add_issue(node, 721, "Do not compare types, use 'isinstance()")
break
elif typ == 'file_input':
endmarker = node.children[-1]
prev = endmarker.get_previous_leaf()
prefix = endmarker.prefix
if (not prefix.endswith('\n') and not prefix.endswith('\r') and (
prefix or prev is None or prev.value not in {'\n', '\r\n', '\r'})):
self.add_issue(endmarker, 292, "No newline at end of file")
if typ in _IMPORT_TYPES:
simple_stmt = node.parent
module = simple_stmt.parent
if module.type == 'file_input':
index = module.children.index(simple_stmt)
for child in module.children[:index]:
children = [child]
if child.type == 'simple_stmt':
# Remove the newline.
children = child.children[:-1]
found_docstring = False
for c in children:
if c.type == 'string' and not found_docstring:
continue
found_docstring = True
if c.type == 'expr_stmt' and \
all(_is_magic_name(n) for n in c.get_defined_names()):
continue
if c.type in _IMPORT_TYPES or isinstance(c, Flow):
continue
self.add_issue(node, 402, 'Module level import not at top of file')
break
else:
continue
break
implicit_indentation_possible = typ in _IMPLICIT_INDENTATION_TYPES
in_introducer = typ in _SUITE_INTRODUCERS
if in_introducer:
self._in_suite_introducer = True
elif typ == 'suite':
if self._indentation_tos.type == IndentationTypes.BACKSLASH:
self._indentation_tos = self._indentation_tos.parent
self._indentation_tos = IndentationNode(
self._config,
self._indentation_tos.indentation + self._config.indentation,
parent=self._indentation_tos
)
elif implicit_indentation_possible:
self._implicit_indentation_possible = True
yield
if typ == 'suite':
assert self._indentation_tos.type == IndentationTypes.SUITE
self._indentation_tos = self._indentation_tos.parent
# If we dedent, no lines are needed anymore.
self._wanted_newline_count = None
elif implicit_indentation_possible:
self._implicit_indentation_possible = False
if self._indentation_tos.type == IndentationTypes.IMPLICIT:
self._indentation_tos = self._indentation_tos.parent
elif in_introducer:
self._in_suite_introducer = False
if typ in ('classdef', 'funcdef'):
self._wanted_newline_count = self._get_wanted_blank_lines_count()
def _check_tabs_spaces(self, spacing):
if self._wrong_indentation_char in spacing.value:
self.add_issue(spacing, 101, 'Indentation contains ' + self._indentation_type)
return True
return False
def _get_wanted_blank_lines_count(self):
suite_node = self._indentation_tos.get_latest_suite_node()
return int(suite_node.parent is None) + 1
def _reset_newlines(self, spacing, leaf, is_comment=False):
self._max_new_lines_in_prefix = \
max(self._max_new_lines_in_prefix, self._newline_count)
wanted = self._wanted_newline_count
if wanted is not None:
# Need to substract one
blank_lines = self._newline_count - 1
if wanted > blank_lines and leaf.type != 'endmarker':
# In case of a comment we don't need to add the issue, yet.
if not is_comment:
# TODO end_pos wrong.
code = 302 if wanted == 2 else 301
message = "expected %s blank line, found %s" \
% (wanted, blank_lines)
self.add_issue(spacing, code, message)
self._wanted_newline_count = None
else:
self._wanted_newline_count = None
if not is_comment:
wanted = self._get_wanted_blank_lines_count()
actual = self._max_new_lines_in_prefix - 1
val = leaf.value
needs_lines = (
val == '@' and leaf.parent.type == 'decorator'
or (
val == 'class'
or val == 'async' and leaf.get_next_leaf() == 'def'
or val == 'def' and self._previous_leaf != 'async'
) and leaf.parent.parent.type != 'decorated'
)
if needs_lines and actual < wanted:
func_or_cls = leaf.parent
suite = func_or_cls.parent
if suite.type == 'decorated':
suite = suite.parent
# The first leaf of a file or a suite should not need blank
# lines.
if suite.children[int(suite.type == 'suite')] != func_or_cls:
code = 302 if wanted == 2 else 301
message = "expected %s blank line, found %s" \
% (wanted, actual)
self.add_issue(spacing, code, message)
self._max_new_lines_in_prefix = 0
self._newline_count = 0
def visit_leaf(self, leaf):
super().visit_leaf(leaf)
for part in leaf._split_prefix():
if part.type == 'spacing':
# This part is used for the part call after for.
break
self._visit_part(part, part.create_spacing_part(), leaf)
self._analyse_non_prefix(leaf)
self._visit_part(leaf, part, leaf)
# Cleanup
self._last_indentation_tos = self._indentation_tos
self._new_statement = leaf.type == 'newline'
# TODO does this work? with brackets and stuff?
if leaf.type == 'newline' and \
self._indentation_tos.type == IndentationTypes.BACKSLASH:
self._indentation_tos = self._indentation_tos.parent
if leaf.value == ':' and leaf.parent.type in _SUITE_INTRODUCERS:
self._in_suite_introducer = False
elif leaf.value == 'elif':
self._in_suite_introducer = True
if not self._new_statement:
self._reset_newlines(part, leaf)
self._max_blank_lines = 0
self._previous_leaf = leaf
return leaf.value
def _visit_part(self, part, spacing, leaf):
value = part.value
type_ = part.type
if type_ == 'error_leaf':
return
if value == ',' and part.parent.type == 'dictorsetmaker':
self._indentation_tos = self._indentation_tos.parent
node = self._indentation_tos
if type_ == 'comment':
if value.startswith('##'):
# Whole blocks of # should not raise an error.
if value.lstrip('#'):
self.add_issue(part, 266, "Too many leading '#' for block comment.")
elif self._on_newline:
if not re.match(r'#:? ', value) and not value == '#' \
and not (value.startswith('#!') and part.start_pos == (1, 0)):
self.add_issue(part, 265, "Block comment should start with '# '")
else:
if not re.match(r'#:? [^ ]', value):
self.add_issue(part, 262, "Inline comment should start with '# '")
self._reset_newlines(spacing, leaf, is_comment=True)
elif type_ == 'newline':
if self._newline_count > self._get_wanted_blank_lines_count():
self.add_issue(part, 303, "Too many blank lines (%s)" % self._newline_count)
elif leaf in ('def', 'class') \
and leaf.parent.parent.type == 'decorated':
self.add_issue(part, 304, "Blank lines found after function decorator")
self._newline_count += 1
if type_ == 'backslash':
# TODO is this enough checking? What about ==?
if node.type != IndentationTypes.BACKSLASH:
if node.type != IndentationTypes.SUITE:
self.add_issue(part, 502, 'The backslash is redundant between brackets')
else:
indentation = node.indentation
if self._in_suite_introducer and node.type == IndentationTypes.SUITE:
indentation += self._config.indentation
self._indentation_tos = BackslashNode(
self._config,
indentation,
part,
spacing,
parent=self._indentation_tos
)
elif self._on_newline:
indentation = spacing.value
if node.type == IndentationTypes.BACKSLASH \
and self._previous_part.type == 'newline':
self._indentation_tos = self._indentation_tos.parent
if not self._check_tabs_spaces(spacing):
should_be_indentation = node.indentation
if type_ == 'comment':
# Comments can be dedented. So we have to care for that.
n = self._last_indentation_tos
while True:
if len(indentation) > len(n.indentation):
break
should_be_indentation = n.indentation
self._last_indentation_tos = n
if n == node:
break
n = n.parent
if self._new_statement:
if type_ == 'newline':
if indentation:
self.add_issue(spacing, 291, 'Trailing whitespace')
elif indentation != should_be_indentation:
s = '%s %s' % (len(self._config.indentation), self._indentation_type)
self.add_issue(part, 111, 'Indentation is not a multiple of ' + s)
else:
if value in '])}':
should_be_indentation = node.bracket_indentation
else:
should_be_indentation = node.indentation
if self._in_suite_introducer and indentation == \
node.get_latest_suite_node().indentation \
+ self._config.indentation:
self.add_issue(part, 129, "Line with same indent as next logical block")
elif indentation != should_be_indentation:
if not self._check_tabs_spaces(spacing) and part.value not in \
{'\n', '\r\n', '\r'}:
if value in '])}':
if node.type == IndentationTypes.VERTICAL_BRACKET:
self.add_issue(
part,
124,
"Closing bracket does not match visual indentation"
)
else:
self.add_issue(
part,
123,
"Losing bracket does not match "
"indentation of opening bracket's line"
)
else:
if len(indentation) < len(should_be_indentation):
if node.type == IndentationTypes.VERTICAL_BRACKET:
self.add_issue(
part,
128,
'Continuation line under-indented for visual indent'
)
elif node.type == IndentationTypes.BACKSLASH:
self.add_issue(
part,
122,
'Continuation line missing indentation or outdented'
)
elif node.type == IndentationTypes.IMPLICIT:
self.add_issue(part, 135, 'xxx')
else:
self.add_issue(
part,
121,
'Continuation line under-indented for hanging indent'
)
else:
if node.type == IndentationTypes.VERTICAL_BRACKET:
self.add_issue(
part,
127,
'Continuation line over-indented for visual indent'
)
elif node.type == IndentationTypes.IMPLICIT:
self.add_issue(part, 136, 'xxx')
else:
self.add_issue(
part,
126,
'Continuation line over-indented for hanging indent'
)
else:
self._check_spacing(part, spacing)
self._check_line_length(part, spacing)
# -------------------------------
# Finalizing. Updating the state.
# -------------------------------
if value and value in '()[]{}' and type_ != 'error_leaf' \
and part.parent.type != 'error_node':
if value in _OPENING_BRACKETS:
self._indentation_tos = BracketNode(
self._config, part,
parent=self._indentation_tos,
in_suite_introducer=self._in_suite_introducer
)
else:
assert node.type != IndentationTypes.IMPLICIT
self._indentation_tos = self._indentation_tos.parent
elif value in ('=', ':') and self._implicit_indentation_possible \
and part.parent.type in _IMPLICIT_INDENTATION_TYPES:
indentation = node.indentation
self._indentation_tos = ImplicitNode(
self._config, part, parent=self._indentation_tos
)
self._on_newline = type_ in ('newline', 'backslash', 'bom')
self._previous_part = part
self._previous_spacing = spacing
def _check_line_length(self, part, spacing):
if part.type == 'backslash':
last_column = part.start_pos[1] + 1
else:
last_column = part.end_pos[1]
if last_column > self._config.max_characters \
and spacing.start_pos[1] <= self._config.max_characters:
# Special case for long URLs in multi-line docstrings or comments,
# but still report the error when the 72 first chars are whitespaces.
report = True
if part.type == 'comment':
splitted = part.value[1:].split()
if len(splitted) == 1 \
and (part.end_pos[1] - len(splitted[0])) < 72:
report = False
if report:
self.add_issue(
part,
501,
'Line too long (%s > %s characters)' %
(last_column, self._config.max_characters),
)
def _check_spacing(self, part, spacing):
def add_if_spaces(*args):
if spaces:
return self.add_issue(*args)
def add_not_spaces(*args):
if not spaces:
return self.add_issue(*args)
spaces = spacing.value
prev = self._previous_part
if prev is not None and prev.type == 'error_leaf' or part.type == 'error_leaf':
return
type_ = part.type
if '\t' in spaces:
self.add_issue(spacing, 223, 'Used tab to separate tokens')
elif type_ == 'comment':
if len(spaces) < self._config.spaces_before_comment:
self.add_issue(spacing, 261, 'At least two spaces before inline comment')
elif type_ == 'newline':
add_if_spaces(spacing, 291, 'Trailing whitespace')
elif len(spaces) > 1:
self.add_issue(spacing, 221, 'Multiple spaces used')
else:
if prev in _OPENING_BRACKETS:
message = "Whitespace after '%s'" % part.value
add_if_spaces(spacing, 201, message)
elif part in _CLOSING_BRACKETS:
message = "Whitespace before '%s'" % part.value
add_if_spaces(spacing, 202, message)
elif part in (',', ';') or part == ':' \
and part.parent.type not in _POSSIBLE_SLICE_PARENTS:
message = "Whitespace before '%s'" % part.value
add_if_spaces(spacing, 203, message)
elif prev == ':' and prev.parent.type in _POSSIBLE_SLICE_PARENTS:
pass # TODO
elif prev in (',', ';', ':'):
add_not_spaces(spacing, 231, "missing whitespace after '%s'")
elif part == ':': # Is a subscript
# TODO
pass
elif part in ('*', '**') and part.parent.type not in _NON_STAR_TYPES \
or prev in ('*', '**') \
and prev.parent.type not in _NON_STAR_TYPES:
# TODO
pass
elif prev in _FACTOR and prev.parent.type == 'factor':
pass
elif prev == '@' and prev.parent.type == 'decorator':
pass # TODO should probably raise an error if there's a space here
elif part in _NEEDS_SPACE or prev in _NEEDS_SPACE:
if part == '=' and part.parent.type in ('argument', 'param') \
or prev == '=' and prev.parent.type in ('argument', 'param'):
if part == '=':
param = part.parent
else:
param = prev.parent
if param.type == 'param' and param.annotation:
add_not_spaces(spacing, 252, 'Expected spaces around annotation equals')
else:
add_if_spaces(
spacing,
251,
'Unexpected spaces around keyword / parameter equals'
)
elif part in _BITWISE_OPERATOR or prev in _BITWISE_OPERATOR:
add_not_spaces(
spacing,
227,
'Missing whitespace around bitwise or shift operator'
)
elif part == '%' or prev == '%':
add_not_spaces(spacing, 228, 'Missing whitespace around modulo operator')
else:
message_225 = 'Missing whitespace between tokens'
add_not_spaces(spacing, 225, message_225)
elif type_ == 'keyword' or prev.type == 'keyword':
add_not_spaces(spacing, 275, 'Missing whitespace around keyword')
else:
prev_spacing = self._previous_spacing
if prev in _ALLOW_SPACE and spaces != prev_spacing.value \
and '\n' not in self._previous_leaf.prefix \
and '\r' not in self._previous_leaf.prefix:
message = "Whitespace before operator doesn't match with whitespace after"
self.add_issue(spacing, 229, message)
if spaces and part not in _ALLOW_SPACE and prev not in _ALLOW_SPACE:
message_225 = 'Missing whitespace between tokens'
# self.add_issue(spacing, 225, message_225)
# TODO why only brackets?
if part in _OPENING_BRACKETS:
message = "Whitespace before '%s'" % part.value
add_if_spaces(spacing, 211, message)
def _analyse_non_prefix(self, leaf):
typ = leaf.type
if typ == 'name' and leaf.value in ('l', 'O', 'I'):
if leaf.is_definition():
message = "Do not define %s named 'l', 'O', or 'I' one line"
if leaf.parent.type == 'class' and leaf.parent.name == leaf:
self.add_issue(leaf, 742, message % 'classes')
elif leaf.parent.type == 'function' and leaf.parent.name == leaf:
self.add_issue(leaf, 743, message % 'function')
else:
self.add_issuadd_issue(741, message % 'variables', leaf)
elif leaf.value == ':':
if isinstance(leaf.parent, (Flow, Scope)) and leaf.parent.type != 'lambdef':
next_leaf = leaf.get_next_leaf()
if next_leaf.type != 'newline':
if leaf.parent.type == 'funcdef':
self.add_issue(next_leaf, 704, 'Multiple statements on one line (def)')
else:
self.add_issue(next_leaf, 701, 'Multiple statements on one line (colon)')
elif leaf.value == ';':
if leaf.get_next_leaf().type in ('newline', 'endmarker'):
self.add_issue(leaf, 703, 'Statement ends with a semicolon')
else:
self.add_issue(leaf, 702, 'Multiple statements on one line (semicolon)')
elif leaf.value in ('==', '!='):
comparison = leaf.parent
index = comparison.children.index(leaf)
left = comparison.children[index - 1]
right = comparison.children[index + 1]
for node in left, right:
if node.type == 'keyword' or node.type == 'name':
if node.value == 'None':
message = "comparison to None should be 'if cond is None:'"
self.add_issue(leaf, 711, message)
break
elif node.value in ('True', 'False'):
message = "comparison to False/True should be " \
"'if cond is True:' or 'if cond:'"
self.add_issue(leaf, 712, message)
break
elif leaf.value in ('in', 'is'):
comparison = leaf.parent
if comparison.type == 'comparison' and comparison.parent.type == 'not_test':
if leaf.value == 'in':
self.add_issue(leaf, 713, "test for membership should be 'not in'")
else:
self.add_issue(leaf, 714, "test for object identity should be 'is not'")
elif typ == 'string':
# Checking multiline strings
for i, line in enumerate(leaf.value.splitlines()[1:]):
indentation = re.match(r'[ \t]*', line).group(0)
start_pos = leaf.line + i, len(indentation)
# TODO check multiline indentation.
start_pos
elif typ == 'endmarker':
if self._newline_count >= 2:
self.add_issue(leaf, 391, 'Blank line at end of file')
def add_issue(self, node, code, message):
if self._previous_leaf is not None:
if self._previous_leaf.search_ancestor('error_node') is not None:
return
if self._previous_leaf.type == 'error_leaf':
return
if node.search_ancestor('error_node') is not None:
return
if code in (901, 903):
# 901 and 903 are raised by the ErrorFinder.
super().add_issue(node, code, message)
else:
# Skip ErrorFinder here, because it has custom behavior.
super(ErrorFinder, self).add_issue(node, code, message)
| PEP8Normalizer |
python | huggingface__transformers | src/transformers/models/marian/convert_marian_to_pytorch.py | {
"start": 17074,
"end": 27113
} | class ____:
def __init__(self, source_dir, eos_token_id=0):
npz_path = find_model_file(source_dir)
self.state_dict = np.load(npz_path)
cfg = load_config_from_state_dict(self.state_dict)
if cfg["dim-vocabs"][0] != cfg["dim-vocabs"][1]:
raise ValueError
if "Wpos" in self.state_dict:
raise ValueError("Wpos key in state dictionary")
self.state_dict = dict(self.state_dict)
if cfg["tied-embeddings-all"]:
cfg["tied-embeddings-src"] = True
cfg["tied-embeddings"] = True
self.share_encoder_decoder_embeddings = cfg["tied-embeddings-src"]
# create the tokenizer here because we need to know the eos_token_id
self.source_dir = source_dir
self.tokenizer = self.load_tokenizer()
# retrieve EOS token and set correctly
tokenizer_has_eos_token_id = (
hasattr(self.tokenizer, "eos_token_id") and self.tokenizer.eos_token_id is not None
)
eos_token_id = self.tokenizer.eos_token_id if tokenizer_has_eos_token_id else 0
if cfg["tied-embeddings-src"]:
self.wemb, self.final_bias = add_emb_entries(self.state_dict["Wemb"], self.state_dict[BIAS_KEY], 1)
self.pad_token_id = self.wemb.shape[0] - 1
cfg["vocab_size"] = self.pad_token_id + 1
else:
self.wemb, _ = add_emb_entries(self.state_dict["encoder_Wemb"], self.state_dict[BIAS_KEY], 1)
self.dec_wemb, self.final_bias = add_emb_entries(
self.state_dict["decoder_Wemb"], self.state_dict[BIAS_KEY], 1
)
# still assuming that vocab size is same for encoder and decoder
self.pad_token_id = self.wemb.shape[0] - 1
cfg["vocab_size"] = self.pad_token_id + 1
cfg["decoder_vocab_size"] = self.pad_token_id + 1
if cfg["vocab_size"] != self.tokenizer.vocab_size:
raise ValueError(
f"Original vocab size {cfg['vocab_size']} and new vocab size {len(self.tokenizer.encoder)} mismatched."
)
# self.state_dict['Wemb'].sha
self.state_keys = list(self.state_dict.keys())
if "Wtype" in self.state_dict:
raise ValueError("Wtype key in state dictionary")
self._check_layer_entries()
self.cfg = cfg
hidden_size, intermediate_shape = self.state_dict["encoder_l1_ffn_W1"].shape
if hidden_size != cfg["dim-emb"]:
raise ValueError(f"Hidden size {hidden_size} and configured size {cfg['dim_emb']} mismatched")
# Process decoder.yml
decoder_yml = cast_marian_config(load_yaml(source_dir / "decoder.yml"))
check_marian_cfg_assumptions(cfg)
self.hf_config = MarianConfig(
vocab_size=cfg["vocab_size"],
decoder_vocab_size=cfg.get("decoder_vocab_size", cfg["vocab_size"]),
share_encoder_decoder_embeddings=cfg["tied-embeddings-src"],
decoder_layers=cfg["dec-depth"],
encoder_layers=cfg["enc-depth"],
decoder_attention_heads=cfg["transformer-heads"],
encoder_attention_heads=cfg["transformer-heads"],
decoder_ffn_dim=cfg["transformer-dim-ffn"],
encoder_ffn_dim=cfg["transformer-dim-ffn"],
d_model=cfg["dim-emb"],
activation_function=cfg["transformer-ffn-activation"],
pad_token_id=self.pad_token_id,
eos_token_id=eos_token_id,
forced_eos_token_id=eos_token_id,
bos_token_id=0,
max_position_embeddings=cfg["dim-emb"],
scale_embedding=True,
normalize_embedding="n" in cfg["transformer-preprocess"],
static_position_embeddings=not cfg["transformer-train-position-embeddings"],
tie_word_embeddings=cfg["tied-embeddings"],
dropout=0.1, # see opus-mt-train repo/transformer-dropout param.
# default: add_final_layer_norm=False,
num_beams=decoder_yml["beam-size"],
decoder_start_token_id=self.pad_token_id,
bad_words_ids=[[self.pad_token_id]],
max_length=512,
)
def _check_layer_entries(self):
self.encoder_l1 = self.sub_keys("encoder_l1")
self.decoder_l1 = self.sub_keys("decoder_l1")
self.decoder_l2 = self.sub_keys("decoder_l2")
if len(self.encoder_l1) != 16:
warnings.warn(f"Expected 16 keys for each encoder layer, got {len(self.encoder_l1)}")
if len(self.decoder_l1) != 26:
warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l1)}")
if len(self.decoder_l2) != 26:
warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l1)}")
@property
def extra_keys(self):
extra = []
for k in self.state_keys:
if (
k.startswith("encoder_l")
or k.startswith("decoder_l")
or k in [CONFIG_KEY, "Wemb", "encoder_Wemb", "decoder_Wemb", "Wpos", "decoder_ff_logit_out_b"]
):
continue
else:
extra.append(k)
return extra
def sub_keys(self, layer_prefix):
return [remove_prefix(k, layer_prefix) for k in self.state_dict if k.startswith(layer_prefix)]
    def load_tokenizer(self) -> "MarianTokenizer":
        """Build a MarianTokenizer from the source directory.

        First rewrites the vocab files to include Marian's special tokens; a
        separate decoder vocab is produced when encoder/decoder embeddings are
        not shared.
        """
        # save tokenizer
        add_special_tokens_to_vocab(self.source_dir, not self.share_encoder_decoder_embeddings)
        return MarianTokenizer.from_pretrained(str(self.source_dir))
    def load_marian_model(self) -> MarianMTModel:
        """Materialize a ``MarianMTModel`` from the parsed config and npz weights.

        Loads per-layer encoder/decoder weights, wires up the (possibly shared)
        token embeddings, the final logits bias, and optional position
        embeddings, then verifies every checkpoint tensor was consumed.

        Raises:
            ValueError: on config/weight inconsistencies or leftover tensors.
            NotImplementedError: if the checkpoint uses embedding layer-norm.
        """
        state_dict, cfg = self.state_dict, self.hf_config

        if not cfg.static_position_embeddings:
            raise ValueError("config.static_position_embeddings should be True")
        model = MarianMTModel(cfg)

        if "hidden_size" in cfg.to_dict():
            raise ValueError("hidden_size is in config")
        load_layers_(
            model.model.encoder.layers,
            state_dict,
            BART_CONVERTER,
        )
        load_layers_(model.model.decoder.layers, state_dict, BART_CONVERTER, is_decoder=True)

        # handle tensors not associated with layers
        if self.cfg["tied-embeddings-src"]:
            # One embedding matrix backs encoder, decoder, and (via tying) the LM head.
            wemb_tensor = nn.Parameter(torch.FloatTensor(self.wemb))
            bias_tensor = nn.Parameter(torch.FloatTensor(self.final_bias))
            model.model.shared.weight = wemb_tensor
            model.model.encoder.embed_tokens = model.model.decoder.embed_tokens = model.model.shared
        else:
            # Separate source/target embedding matrices.
            wemb_tensor = nn.Parameter(torch.FloatTensor(self.wemb))
            model.model.encoder.embed_tokens.weight = wemb_tensor

            decoder_wemb_tensor = nn.Parameter(torch.FloatTensor(self.dec_wemb))
            bias_tensor = nn.Parameter(torch.FloatTensor(self.final_bias))
            model.model.decoder.embed_tokens.weight = decoder_wemb_tensor

            # handle tied embeddings, otherwise "from_pretrained" loads them incorrectly
            if self.cfg["tied-embeddings"]:
                model.lm_head.weight.data = model.model.decoder.embed_tokens.weight.data.clone()

        model.final_logits_bias = bias_tensor

        if "Wpos" in state_dict:
            print("Unexpected: got Wpos")
            wpos_tensor = torch.tensor(state_dict["Wpos"])
            model.model.encoder.embed_positions.weight = wpos_tensor
            model.model.decoder.embed_positions.weight = wpos_tensor

        if cfg.normalize_embedding:
            # Embedding layer-norm conversion is unsupported; fail loudly.
            if "encoder_emb_ln_scale_pre" not in state_dict:
                raise ValueError("encoder_emb_ln_scale_pre is not in state dictionary")
            raise NotImplementedError("Need to convert layernorm_embedding")

        if self.extra_keys:
            raise ValueError(f"Failed to convert {self.extra_keys}")

        if model.get_input_embeddings().padding_idx != self.pad_token_id:
            raise ValueError(
                f"Padding tokens {model.get_input_embeddings().padding_idx} and {self.pad_token_id} mismatched"
            )
        return model
def download_and_unzip(url, dest_dir):
    """Download the zip archive at ``url``, extract it into ``dest_dir``, then delete it."""
    try:
        import wget
    except ImportError:
        raise ImportError("you must pip install wget")

    archive_path = wget.download(url)
    unzip(archive_path, dest_dir)
    os.remove(archive_path)
def convert(source_dir: Path, dest_dir) -> None:
    """Convert a Marian checkpoint in ``source_dir`` to HF format under ``dest_dir``.

    Writes the tokenizer files and the fp16 model weights/config, then reloads
    the saved model as a round-trip sanity check.
    """
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)

    opus_state = OpusState(source_dir)

    # save tokenizer
    opus_state.tokenizer.save_pretrained(dest_dir)

    # save_json(opus_state.cfg, dest_dir / "marian_original_config.json")
    # ^^ Uncomment to save human readable marian config for debugging

    model = opus_state.load_marian_model()
    # Marian checkpoints store fp16 weights; halve to match.
    model = model.half()
    model.save_pretrained(dest_dir)
    model.from_pretrained(dest_dir)  # sanity check
def load_yaml(path):
    """Parse the YAML file at ``path`` with ``BaseLoader`` (scalars stay strings)."""
    import yaml

    with open(path, encoding="utf-8") as stream:
        return yaml.load(stream, Loader=yaml.BaseLoader)
def save_json(content: Union[dict, list], path: str) -> None:
    """Serialize ``content`` to ``path`` as JSON.

    Opens the file with an explicit UTF-8 encoding so output does not depend
    on the platform's default locale encoding.
    """
    with open(path, "w", encoding="utf-8") as f:
        json.dump(content, f)
def unzip(zip_path: str, dest_dir: str) -> None:
    """Extract every member of the zip archive at ``zip_path`` into ``dest_dir``."""
    with ZipFile(zip_path, "r") as archive:
        archive.extractall(dest_dir)
if __name__ == "__main__":
    """
    Tatoeba conversion instructions in scripts/tatoeba/README.md
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src",
        type=str,
        help="path to marian model sub dir. yaml.load will be used to load the configuration file, please be wary of which file you're loading.",
        default="en-de",
    )
    parser.add_argument("--dest", type=str, default=None, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    source_dir = Path(args.src)
    if not source_dir.exists():
        raise ValueError(f"Source directory {source_dir} not found")
    # Default output directory is "converted-<model name>" in the cwd.
    dest_dir = f"converted-{source_dir.name}" if args.dest is None else args.dest
    convert(source_dir, dest_dir)
| OpusState |
python | tensorflow__tensorflow | tensorflow/python/util/nest_test.py | {
"start": 3500,
"end": 81496
} | class ____(parameterized.TestCase, test.TestCase):
PointXY = collections.namedtuple("Point", ["x", "y"]) # pylint: disable=invalid-name
unsafe_map_pattern = ("nest cannot guarantee that it is safe to map one to "
"the other.")
bad_pack_pattern = ("Attempted to pack value:\n .+\ninto a structure, but "
"found incompatible type `<(type|class) 'str'>` instead.")
  # attr-based fixtures; only defined when the optional `attr` package exists.
  if attr:

    class BadAttr(object):
      """Class that has a non-iterable __attrs_attrs__."""
      __attrs_attrs__ = None

    @attr.s
    class SampleAttr(object):
      field1 = attr.ib()
      field2 = attr.ib()

    @attr.s
    class UnsortedSampleAttr(object):
      # Deliberately non-alphabetical: attrs preserves declaration order.
      field3 = attr.ib()
      field1 = attr.ib()
      field2 = attr.ib()
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassCustomProtocol(self):
    """MaskedTensor implements the CustomNestProtocol interface."""
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    self.assertIsInstance(mt, CustomNestProtocol)
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassIsNested(self):
    """A protocol-implementing dataclass counts as a nested structure."""
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    self.assertTrue(nest.is_nested(mt))
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassFlatten(self):
    """Flattening a MaskedTensor yields only its tensor component."""
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    leaves = nest.flatten(mt)
    self.assertLen(leaves, 1)
    self.assertAllEqual(leaves[0], [1])
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassFlattenUpToCompatible(self):
    """flatten_up_to works both directions when structures match in depth."""
    simple_list = [2]
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    flattened_mt = nest.flatten_up_to(
        shallow_tree=simple_list, input_tree=mt, check_types=False
    )
    # Expected flat_path_mt = [Tensor([1])]
    self.assertAllEqual(flattened_mt[0], [1])

    flattened_list = nest.flatten_up_to(
        shallow_tree=mt, input_tree=simple_list, check_types=False
    )
    self.assertEqual(flattened_list, [2])

    nested_list = [[2]]
    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([3])
    )
    flattened_nmt = nest.flatten_up_to(
        shallow_tree=nested_list, input_tree=nmt, check_types=False
    )
    # Expected flattened_nmt = [Tensor([3])]
    self.assertAllEqual(flattened_nmt[0], [3])

    flat_path_nested_list = nest.flatten_up_to(
        shallow_tree=nmt, input_tree=nested_list, check_types=False
    )
    self.assertAllEqual(flat_path_nested_list, [2])
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassFlattenUpToIncompatible(self):
    """flatten_up_to rejects type mismatches and too-deep shallow trees."""
    simple_list = [2]
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    # When `check_types=True` is set, `flatten_up_to` would fail when input_tree
    # and shallow_tree args don't have the same type
    with self.assertRaisesWithLiteralMatch(  # pylint: disable=g-error-prone-assert-raises
        TypeError,
        nest.STRUCTURES_HAVE_MISMATCHING_TYPES.format(
            shallow_type=type(simple_list), input_type=type(mt)
        ),
    ):
      nest.flatten_up_to(
          shallow_tree=simple_list, input_tree=mt, check_types=True
      )
    with self.assertRaisesWithLiteralMatch(  # pylint: disable=g-error-prone-assert-raises
        TypeError,
        nest.STRUCTURES_HAVE_MISMATCHING_TYPES.format(
            shallow_type=type(mt), input_type=type(simple_list)
        ),
    ):
      nest.flatten_up_to(
          shallow_tree=mt, input_tree=simple_list, check_types=True
      )

    nested_list = [[1]]
    # Although `check_types=False` is set, this assertion would fail because the
    # shallow_tree component has a deeper structure than the input_tree
    # component.
    with self.assertRaisesRegex(  # pylint: disable=g-error-prone-assert-raises
        TypeError,
        "If shallow structure is a sequence, input must also be a sequence",
    ):
      nest.flatten_up_to(
          shallow_tree=nested_list, input_tree=mt, check_types=False
      )
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassFlattenWithTuplePathsUpToCompatible(self):
    """flatten_with_tuple_paths_up_to pairs each leaf with its index path."""
    simple_list = [2]
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    flat_path_mt = nest.flatten_with_tuple_paths_up_to(
        shallow_tree=simple_list, input_tree=mt, check_types=False
    )
    # Expected flat_path_mt = [((0,), Tensor([1]))]
    self.assertEqual(flat_path_mt[0][0], (0,))
    self.assertAllEqual(flat_path_mt[0][1], [1])

    flat_path_list = nest.flatten_with_tuple_paths_up_to(
        shallow_tree=mt, input_tree=simple_list, check_types=False
    )
    self.assertAllEqual(flat_path_list, [[(0,), 2]])

    nested_list = [[2]]
    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([3])
    )
    flat_path_nmt = nest.flatten_with_tuple_paths_up_to(
        shallow_tree=nested_list, input_tree=nmt, check_types=False
    )
    # Expected flat_path_nmt = [((0,), Tensor([3]))]
    self.assertAllEqual(flat_path_nmt[0][0], [0, 0])
    self.assertAllEqual(flat_path_nmt[0][1], [3])

    flat_path_nested_list = nest.flatten_with_tuple_paths_up_to(
        shallow_tree=nmt, input_tree=nested_list, check_types=False
    )
    self.assertAllEqual(flat_path_nested_list, [[(0, 0), 2]])
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassFlattenWithTuplePathsUpToIncompatible(self):
    """flatten_with_tuple_paths_up_to rejects mismatched structure types."""
    simple_list = [2]
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    with self.assertRaisesWithLiteralMatch(  # pylint: disable=g-error-prone-assert-raises
        TypeError,
        nest.STRUCTURES_HAVE_MISMATCHING_TYPES.format(
            shallow_type=type(simple_list), input_type=type(mt)
        ),
    ):
      nest.flatten_with_tuple_paths_up_to(
          shallow_tree=simple_list, input_tree=mt, check_types=True
      )
    with self.assertRaisesWithLiteralMatch(  # pylint: disable=g-error-prone-assert-raises
        TypeError,
        nest.STRUCTURES_HAVE_MISMATCHING_TYPES.format(
            shallow_type=type(mt), input_type=type(simple_list)
        ),
    ):
      nest.flatten_with_tuple_paths_up_to(
          shallow_tree=mt, input_tree=simple_list, check_types=True
      )

    nested_list2 = [[[2]]]
    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([3])
    )
    # Although `check_types=False` is set, this assertion would fail because the
    # shallow_tree component has a deeper structure than the input_tree
    # component.
    with self.assertRaisesRegex(  # pylint: disable=g-error-prone-assert-raises
        TypeError,
        "If shallow structure is a sequence, input must also be a sequence",
    ):
      nest.flatten_up_to(
          shallow_tree=nested_list2, input_tree=nmt, check_types=False
      )
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassFlattenAndPack(self):
    """flatten followed by pack_sequence_as round-trips a MaskedTensor."""
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    leaves = nest.flatten(mt)
    reconstructed_mt = nest.pack_sequence_as(mt, leaves)
    self.assertIsInstance(reconstructed_mt, MaskedTensor)
    self.assertEqual(reconstructed_mt, mt)
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassMapStructure(self):
    """map_structure maps over the tensor component, preserving metadata."""
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    mt_doubled = nest.map_structure(lambda x: x * 2, mt)
    self.assertIsInstance(mt_doubled, MaskedTensor)
    self.assertEqual(mt_doubled.mask, True)
    self.assertAllEqual(mt_doubled.value, [2])
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassMapStructureWithPaths(self):
    """map_structure_with_paths passes string paths and preserves metadata."""
    mt = MaskedTensor(mask=False, value=constant_op.constant([1]))
    mt2 = MaskedTensor(mask=True, value=constant_op.constant([2]))
    mt3 = MaskedTensor(mask=True, value=constant_op.constant([3]))

    def path_sum(path, *tensors):
      return (path, sum(tensors))

    mt_combined_with_path = nest.map_structure_with_paths(
        path_sum, mt, mt2, mt3
    )
    self.assertIsInstance(mt_combined_with_path, MaskedTensor)
    # metadata uses the one from the first input (mt).
    self.assertEqual(mt_combined_with_path.mask, False)
    # Tensor index is '0' for the only component in MaskedTensor.
    self.assertAllEqual(mt_combined_with_path.value[0], "0")
    # sum of all input tensors.
    self.assertAllEqual(mt_combined_with_path.value[1], [6])

    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([4])
    )
    nmt2 = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=False, inner_value=constant_op.constant([5])
    )
    nmt_combined_with_path = nest.map_structure_with_paths(path_sum, nmt, nmt2)
    self.assertIsInstance(nmt_combined_with_path, NestedMaskedTensor)
    self.assertEqual(nmt_combined_with_path.mask, True)
    self.assertEqual(nmt_combined_with_path.value.mask, False)
    self.assertAllEqual(nmt_combined_with_path.value.value[0], "0/0")
    self.assertAllEqual(nmt_combined_with_path.value.value[1], [9])
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassMapStructureWithTuplePaths(self):
    """map_structure_with_tuple_paths passes tuple paths to the map fn."""
    mt = MaskedTensor(mask=False, value=constant_op.constant([1]))
    mt2 = MaskedTensor(mask=True, value=constant_op.constant([2]))
    mt3 = MaskedTensor(mask=True, value=constant_op.constant([3]))

    def tuple_path_sum(tuple_path, *tensors):
      return (tuple_path, sum(tensors))

    mt_combined_with_path = nest.map_structure_with_tuple_paths(
        tuple_path_sum, mt, mt2, mt3
    )
    self.assertIsInstance(mt_combined_with_path, MaskedTensor)
    # metadata uses the one from the first input (mt).
    self.assertEqual(mt_combined_with_path.mask, False)
    # Tensor index is 0 for the only component in MaskedTensor.
    self.assertAllEqual(mt_combined_with_path.value[0], (0,))
    # sum of all input tensors.
    self.assertAllEqual(mt_combined_with_path.value[1], [6])

    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([4])
    )
    nmt2 = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=False, inner_value=constant_op.constant([5])
    )
    nmt_combined_with_path = nest.map_structure_with_tuple_paths(
        tuple_path_sum, nmt, nmt2
    )
    self.assertIsInstance(nmt_combined_with_path, NestedMaskedTensor)
    self.assertEqual(nmt_combined_with_path.mask, True)
    self.assertEqual(nmt_combined_with_path.value.mask, False)
    self.assertAllEqual(nmt_combined_with_path.value.value[0], (0, 0))
    self.assertAllEqual(nmt_combined_with_path.value.value[1], [9])
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassMapStructureUpTo(self):
    """map_structure_up_to takes structure/metadata from the template arg."""
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    mt2 = MaskedTensor(mask=True, value=constant_op.constant([2]))
    mt3 = MaskedTensor(mask=True, value=constant_op.constant([3]))
    mt_out_template = MaskedTensor(mask=False, value=constant_op.constant([4]))

    def sum_tensors(*tensors):
      return sum(tensors)

    mt_combined_with_path = nest.map_structure_up_to(
        mt_out_template, sum_tensors, mt, mt2, mt3
    )
    self.assertIsInstance(mt_combined_with_path, MaskedTensor)
    # metadata uses the one from the first arg (mt_out_template).
    self.assertEqual(mt_combined_with_path.mask, False)
    # sum of all input tensors.
    self.assertAllEqual(mt_combined_with_path.value, [6])

    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([4])
    )
    nmt2 = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([5])
    )
    nmt_out = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=False, inner_value=constant_op.constant([6])
    )
    nmt_combined_with_path = nest.map_structure_up_to(
        nmt_out, sum_tensors, nmt, nmt2
    )
    self.assertIsInstance(nmt_combined_with_path, NestedMaskedTensor)
    self.assertEqual(nmt_combined_with_path.mask, False)
    self.assertEqual(nmt_combined_with_path.value.mask, True)
    self.assertAllEqual(nmt_combined_with_path.value.value, [9])
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  # NOTE(review): "UoTo" in the method name looks like a typo for "UpTo";
  # left unchanged since test names are discovered by the runner.
  def testDataclassMapStructureWithTuplePathsUoTo(self):
    """map_structure_with_tuple_paths_up_to combines template + tuple paths."""
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    mt2 = MaskedTensor(mask=True, value=constant_op.constant([2]))
    mt3 = MaskedTensor(mask=True, value=constant_op.constant([3]))
    mt_out_template = MaskedTensor(mask=False, value=constant_op.constant([4]))

    def tuple_path_sum(tuple_path, *tensors):
      return (tuple_path, sum(tensors))

    mt_combined_with_path = nest.map_structure_with_tuple_paths_up_to(
        mt_out_template, tuple_path_sum, mt, mt2, mt3
    )
    self.assertIsInstance(mt_combined_with_path, MaskedTensor)
    # metadata uses the one from the first arg (mt_out_template).
    self.assertEqual(mt_combined_with_path.mask, False)
    # Tensor index is 0 for the only component in MaskedTensor.
    self.assertAllEqual(mt_combined_with_path.value[0], (0,))
    # sum of all input tensors.
    self.assertAllEqual(mt_combined_with_path.value[1], [6])

    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([4])
    )
    nmt2 = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([5])
    )
    nmt_out = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=False, inner_value=constant_op.constant([6])
    )
    nmt_combined_with_path = nest.map_structure_with_tuple_paths_up_to(
        nmt_out, tuple_path_sum, nmt, nmt2
    )
    self.assertIsInstance(nmt_combined_with_path, NestedMaskedTensor)
    self.assertEqual(nmt_combined_with_path.mask, False)
    self.assertEqual(nmt_combined_with_path.value.mask, True)
    self.assertAllEqual(nmt_combined_with_path.value.value[0], (0, 0))
    self.assertAllEqual(nmt_combined_with_path.value.value[1], [9])
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testNestedDataclassIsNested(self):
    """Both flat and nested protocol dataclasses register as nested."""
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    self.assertTrue(nest.is_nested(mt))

    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([1])
    )
    self.assertTrue(nest.is_nested(nmt))
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassAssertShallowStructure(self):
    """assert_shallow_structure compares dataclasses by component count."""
    # These assertions are expected to pass: two dataclasses with the same
    # component size are considered to have the same shallow structure.
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    mt2 = MaskedTensor(mask=False, value=constant_op.constant([2, 3]))
    nest.assert_shallow_structure(
        shallow_tree=mt, input_tree=mt2, check_types=True
    )
    nest.assert_shallow_structure(
        shallow_tree=mt2, input_tree=mt, check_types=True
    )

    mt3 = MaskedTensor2(mask=True, value=constant_op.constant([1]))
    # These assertions are expected to pass: two dataclasses with the same
    # component size are considered to have the same shallow structure.
    nest.assert_shallow_structure(
        shallow_tree=mt, input_tree=mt3, check_types=False
    )
    nest.assert_shallow_structure(
        shallow_tree=mt3, input_tree=mt, check_types=False
    )

    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([1])
    )
    # This assertion is expected to fail, when `check_types=True`, because the
    # shallow_tree type is not the same as input_tree.
    with self.assertRaisesWithLiteralMatch(  # pylint: disable=g-error-prone-assert-raises
        TypeError,
        nest.STRUCTURES_HAVE_MISMATCHING_TYPES.format(
            shallow_type=type(mt), input_type=type(nmt)
        ),
    ):
      nest.assert_shallow_structure(
          shallow_tree=mt, input_tree=nmt, check_types=True
      )
    # This assertion is expected to pass: the shallow_tree component contains
    # the shallow structure of the input_tree component.
    nest.assert_shallow_structure(
        shallow_tree=mt, input_tree=nmt, check_types=False
    )
    # This assertion is expected to fail: the shallow_tree component has
    # a deeper structure than the input_tree component.
    with self.assertRaisesRegex(  # pylint: disable=g-error-prone-assert-raises
        TypeError,
        "If shallow structure is a sequence, input must also be a sequence",
    ):
      nest.assert_shallow_structure(
          shallow_tree=nmt, input_tree=mt, check_types=False
      )
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassGetTraverseShallowStructure(self):
    """get_traverse_shallow_structure honors the traverse predicate."""
    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([1])
    )

    traverse_result = nest.get_traverse_shallow_structure(
        lambda s: isinstance(s, (NestedMaskedTensor, MaskedTensor)), nmt
    )
    self.assertIsInstance(traverse_result, NestedMaskedTensor)
    self.assertEqual(traverse_result.mask, nmt.mask)
    self.assertIsInstance(traverse_result.value, MaskedTensor)
    self.assertEqual(traverse_result.value.value, False)
    nest.assert_shallow_structure(traverse_result, nmt)

    traverse_result2 = nest.get_traverse_shallow_structure(
        lambda s: not isinstance(s, list), nmt
    )
    self.assertIsInstance(traverse_result2, NestedMaskedTensor)
    self.assertEqual(traverse_result2.mask, nmt.mask)
    self.assertIsInstance(traverse_result2.value, MaskedTensor)
    # Expected traverse_result2.value.value is True since it can pass the
    # traverse function, but there is no more flattening for the Tensor value.
    self.assertEqual(traverse_result2.value.value, True)
    nest.assert_shallow_structure(traverse_result2, nmt)

    traverse_result3 = nest.get_traverse_shallow_structure(
        lambda s: isinstance(s, tensor.Tensor), nmt
    )
    # Expected `traverse_result3 = False` because `nmt` doesn't pass the
    # traverse function.
    self.assertEqual(traverse_result3, False)
    nest.assert_shallow_structure(traverse_result3, nmt)
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testNestedDataclassFlatten(self):
    """Flattening a nested dataclass yields only the innermost tensor."""
    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([1])
    )
    leaves = nest.flatten(nmt)
    self.assertLen(leaves, 1)
    self.assertAllEqual(leaves[0], [1])
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testNestedDataclassFlattenAndPack(self):
    """flatten + pack_sequence_as round-trips a nested dataclass."""
    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([1])
    )
    leaves = nest.flatten(nmt)
    reconstructed_mt = nest.pack_sequence_as(nmt, leaves)
    self.assertIsInstance(reconstructed_mt, NestedMaskedTensor)
    self.assertEqual(reconstructed_mt, nmt)
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testNestedDataclassMapStructure(self):
    """map_structure maps the inner tensor, keeping both mask levels."""
    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([1])
    )
    mt_doubled = nest.map_structure(lambda x: x * 2, nmt)
    expected = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([2])
    )
    self.assertIsInstance(mt_doubled, NestedMaskedTensor)
    self.assertEqual(mt_doubled.mask, expected.mask)
    self.assertEqual(mt_doubled.value.mask, expected.value.mask)
    self.assertAllEqual(mt_doubled.value.value, expected.value.value)
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassYieldFlatPaths(self):
    """yield_flat_paths emits index tuples into dataclass components."""
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    mt_flat_paths = list(nest.yield_flat_paths(mt))
    self.assertEqual(mt_flat_paths, [(0,)])

    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([2])
    )
    nmt_flat_paths = list(nest.yield_flat_paths(nmt))
    self.assertEqual(nmt_flat_paths, [(0, 0)])

    # Dict keys are visited in sorted order; paths mix keys and indices.
    dict_mt_nmt = {"mt": mt, "nmt": nmt, "mt_nmt_list": [mt, nmt]}
    dict_mt_nmt_flat_paths = list(nest.yield_flat_paths(dict_mt_nmt))
    self.assertEqual(
        dict_mt_nmt_flat_paths,
        [
            ("mt", 0),
            ("mt_nmt_list", 0, 0),
            ("mt_nmt_list", 1, 0, 0),
            ("nmt", 0, 0),
        ],
    )
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassFlattenWithStringPaths(self):
    """flatten_with_joined_string_paths joins path elements with a separator."""
    sep = "/"
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    mt_flat_paths = nest.flatten_with_joined_string_paths(mt, separator=sep)
    self.assertEqual(mt_flat_paths[0][0], "0")
    self.assertAllEqual(mt_flat_paths[0][1], [1])

    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([2])
    )
    nmt_flat_paths = nest.flatten_with_joined_string_paths(nmt, separator=sep)
    self.assertEqual(nmt_flat_paths[0][0], "0/0")
    self.assertAllEqual(nmt_flat_paths[0][1], [2])

    dict_mt_nmt = {"mt": mt, "nmt": nmt}
    dict_mt_nmt_flat_paths = nest.flatten_with_joined_string_paths(
        dict_mt_nmt, separator=sep
    )
    self.assertEqual(dict_mt_nmt_flat_paths[0][0], "mt/0")
    self.assertAllEqual(dict_mt_nmt_flat_paths[0][1], [1])
    self.assertEqual(dict_mt_nmt_flat_paths[1][0], "nmt/0/0")
    self.assertAllEqual(dict_mt_nmt_flat_paths[1][1], [2])
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassFlattenWithTuplePaths(self):
    """flatten_with_tuple_paths pairs each leaf with its tuple path."""
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    mt_flat_paths = nest.flatten_with_tuple_paths(mt)
    self.assertEqual(mt_flat_paths[0][0], (0,))
    self.assertAllEqual(mt_flat_paths[0][1], [1])

    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([2])
    )
    nmt_flat_paths = nest.flatten_with_tuple_paths(nmt)
    self.assertEqual(nmt_flat_paths[0][0], (0, 0))
    self.assertAllEqual(nmt_flat_paths[0][1], [2])

    dict_mt_nmt = {"mt": mt, "nmt": nmt}
    dict_mt_nmt_flat_paths = nest.flatten_with_tuple_paths(dict_mt_nmt)
    self.assertEqual(dict_mt_nmt_flat_paths[0][0], ("mt", 0))
    self.assertAllEqual(dict_mt_nmt_flat_paths[0][1], [1])
    self.assertEqual(dict_mt_nmt_flat_paths[1][0], ("nmt", 0, 0))
    self.assertAllEqual(dict_mt_nmt_flat_paths[1][1], [2])
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testDataclassListToTuple(self):
    """list_to_tuple converts lists to tuples but keeps dataclass nodes."""
    mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
    nmt = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=True, inner_value=constant_op.constant([2])
    )
    input_sequence = [mt, (nmt, {"a": [mt, nmt, (mt,)]}, None, nmt, [[[mt]]])]
    # Structure comparison only: values of mt2/nmt2 differ from mt/nmt.
    mt2 = MaskedTensor(mask=True, value=constant_op.constant([3]))
    nmt2 = NestedMaskedTensor.nested_masked_tensor_with_opposite_masks(
        mask=False, inner_value=constant_op.constant([2])
    )
    results = nest.list_to_tuple(input_sequence)
    expected = (
        mt2,
        (nmt2, {"a": (mt2, nmt2, (mt2,))}, None, nmt2, (((mt2,),),)),
    )
    nest.assert_same_structure(results, expected)
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testAttrsFlattenAndPack(self):
    """attrs classes flatten to field values and pack back round-trip."""
    if attr is None:
      self.skipTest("attr module is unavailable.")
    field_values = [1, 2]
    sample_attr = NestTest.SampleAttr(*field_values)
    self.assertFalse(nest._is_attrs(field_values))
    self.assertTrue(nest._is_attrs(sample_attr))
    flat = nest.flatten(sample_attr)
    self.assertEqual(field_values, flat)
    restructured_from_flat = nest.pack_sequence_as(sample_attr, flat)
    self.assertIsInstance(restructured_from_flat, NestTest.SampleAttr)
    self.assertEqual(restructured_from_flat, sample_attr)

    # Check that flatten fails if attributes are not iterable
    with self.assertRaisesRegex(TypeError, "object is not iterable"):
      flat = nest.flatten(NestTest.BadAttr())
  @parameterized.parameters(
      {"values": [1, 2, 3]},
      {"values": [{"B": 10, "A": 20}, [1, 2], 3]},
      {"values": [(1, 2), [3, 4], 5]},
      {"values": [PointXY(1, 2), 3, 4]},
  )
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testAttrsMapStructure(self, values):
    """map_structure with the identity fn preserves an attrs structure."""
    if attr is None:
      self.skipTest("attr module is unavailable.")
    structure = NestTest.UnsortedSampleAttr(*values)
    new_structure = nest.map_structure(lambda x: x, structure)
    self.assertEqual(structure, new_structure)
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testFlattenAndPack(self):
    """flatten/pack round-trip for tuples, namedtuples, and scalars."""
    structure = ((3, 4), 5, (6, 7, (9, 10), 8))
    flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
    self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
    self.assertEqual(
        nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
                                                 ("d", "e", ("f", "g"), "h")))
    structure = (NestTest.PointXY(x=4, y=2),
                 ((NestTest.PointXY(x=1, y=0),),))
    flat = [4, 2, 1, 0]
    self.assertEqual(nest.flatten(structure), flat)
    restructured_from_flat = nest.pack_sequence_as(structure, flat)
    self.assertEqual(restructured_from_flat, structure)
    self.assertEqual(restructured_from_flat[0].x, 4)
    self.assertEqual(restructured_from_flat[0].y, 2)
    self.assertEqual(restructured_from_flat[1][0][0].x, 1)
    self.assertEqual(restructured_from_flat[1][0][0].y, 0)

    # Atoms flatten to a singleton list and pack back to themselves.
    self.assertEqual([5], nest.flatten(5))
    self.assertEqual([np.array([5])], nest.flatten(np.array([5])))

    self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
    self.assertEqual(
        np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))

    with self.assertRaisesRegex(ValueError, self.unsafe_map_pattern):
      nest.pack_sequence_as("scalar", [4, 5])

    with self.assertRaisesRegex(TypeError, self.bad_pack_pattern):
      nest.pack_sequence_as([4, 5], "bad_sequence")

    with self.assertRaises(ValueError):
      nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
  @parameterized.parameters({"mapping_type": collections.OrderedDict},
                            {"mapping_type": _CustomMapping})
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testFlattenDictOrder(self, mapping_type):
    """`flatten` orders dicts by key, including OrderedDicts."""
    ordered = mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
    plain = {"d": 3, "b": 1, "a": 0, "c": 2}
    ordered_flat = nest.flatten(ordered)
    plain_flat = nest.flatten(plain)
    # Sorted-by-key order, regardless of insertion order.
    self.assertEqual([0, 1, 2, 3], ordered_flat)
    self.assertEqual([0, 1, 2, 3], plain_flat)
  @parameterized.parameters({"mapping_type": collections.OrderedDict},
                            {"mapping_type": _CustomMapping})
  def testPackDictOrder(self, mapping_type):
    """Packing orders dicts by key, including OrderedDicts."""
    custom = mapping_type([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
    plain = {"d": 0, "b": 0, "a": 0, "c": 0}
    seq = [0, 1, 2, 3]
    custom_reconstruction = nest.pack_sequence_as(custom, seq)
    plain_reconstruction = nest.pack_sequence_as(plain, seq)
    # The mapping type of the template is preserved in the result.
    self.assertIsInstance(custom_reconstruction, mapping_type)
    self.assertIsInstance(plain_reconstruction, dict)
    self.assertEqual(
        mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
        custom_reconstruction)
    self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testFlattenAndPackMappingViews(self):
    """Dict keys()/values()/items() views flatten and pack like sequences."""
    ordered = collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)])

    # test flattening
    ordered_keys_flat = nest.flatten(ordered.keys())
    ordered_values_flat = nest.flatten(ordered.values())
    ordered_items_flat = nest.flatten(ordered.items())
    # Views keep insertion order (unlike dicts, which flatten sorted by key).
    self.assertEqual([3, 1, 0, 2], ordered_values_flat)
    self.assertEqual(["d", "b", "a", "c"], ordered_keys_flat)
    self.assertEqual(["d", 3, "b", 1, "a", 0, "c", 2], ordered_items_flat)

    # test packing
    self.assertEqual([("d", 3), ("b", 1), ("a", 0), ("c", 2)],
                     nest.pack_sequence_as(ordered.items(), ordered_items_flat))
Abc = collections.namedtuple("A", ("b", "c")) # pylint: disable=invalid-name
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testFlattenAndPack_withDicts(self):
    """Round-trip through a mix of tuples, lists, dicts, and OrderedDicts."""
    # A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
    mess = [
        "z",
        NestTest.Abc(3, 4), {
            "d": _CustomMapping({
                41: 4
            }),
            "c": [
                1,
                collections.OrderedDict([
                    ("b", 3),
                    ("a", 2),
                ]),
            ],
            "b": 5
        }, 17
    ]

    flattened = nest.flatten(mess)
    self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 4, 17])

    structure_of_mess = [
        14,
        NestTest.Abc("a", True),
        {
            "d": _CustomMapping({
                41: 42
            }),
            "c": [
                0,
                collections.OrderedDict([
                    ("b", 9),
                    ("a", 8),
                ]),
            ],
            "b": 3
        },
        "hi everybody",
    ]

    unflattened = nest.pack_sequence_as(structure_of_mess, flattened)
    self.assertEqual(unflattened, mess)

    # Check also that the OrderedDict was created, with the correct key order.
    unflattened_ordered_dict = unflattened[2]["c"][1]
    self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
    self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])

    unflattened_custom_mapping = unflattened[2]["d"]
    self.assertIsInstance(unflattened_custom_mapping, _CustomMapping)
    self.assertEqual(list(unflattened_custom_mapping.keys()), [41])
  def testFlatten_numpyIsNotFlattened(self):
    """numpy arrays are atoms: flatten does not descend into them."""
    structure = np.array([1, 2, 3])
    flattened = nest.flatten(structure)
    self.assertLen(flattened, 1)
def testFlatten_stringIsNotFlattened(self):
structure = "lots of letters"
flattened = nest.flatten(structure)
self.assertLen(flattened, 1)
unflattened = nest.pack_sequence_as("goodbye", flattened)
self.assertEqual(structure, unflattened)
  def testPackSequenceAs_notIterableError(self):
    """Packing a non-sequence flat argument raises TypeError."""
    # self.bad_pack_pattern is presumably a regex set up elsewhere in this
    # test class (e.g. setUp) — confirm against the class definition.
    with self.assertRaisesRegex(TypeError, self.bad_pack_pattern):
      nest.pack_sequence_as("hi", "bye")
def testPackSequenceAs_wrongLengthsError(self):
with self.assertRaisesRegex(
ValueError, "Structure had 2 atoms, but flat_sequence had 3 items."):
nest.pack_sequence_as(["hello", "world"],
["and", "goodbye", "again"])
  def testPackSequenceAs_CompositeTensor(self):
    """With expand_composites=True, a RaggedTensor counts as two atoms."""
    val = ragged_tensor.RaggedTensor.from_row_splits(values=[1],
                                                     row_splits=[0, 1])
    # Expanding the composite yields two component tensors (values and
    # row_splits), so a single-item flat sequence is rejected.
    with self.assertRaisesRegex(
        ValueError, "Structure had 2 atoms, but flat_sequence had 1 items."):
      nest.pack_sequence_as(val, [val], expand_composites=True)
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testIsNested(self):
    """is_nested: sequences, dicts and dict views are nested; atoms are not."""
    self.assertFalse(nest.is_nested("1234"))
    self.assertTrue(nest.is_nested([1, 3, [4, 5]]))
    self.assertTrue(nest.is_nested(((7, 8), (5, 6))))
    self.assertTrue(nest.is_nested([]))
    self.assertTrue(nest.is_nested({"a": 1, "b": 2}))
    self.assertTrue(nest.is_nested({"a": 1, "b": 2}.keys()))
    self.assertTrue(nest.is_nested({"a": 1, "b": 2}.values()))
    self.assertTrue(nest.is_nested({"a": 1, "b": 2}.items()))
    # Sets are unordered, so they are deliberately not treated as nested.
    self.assertFalse(nest.is_nested(set([1, 2])))
    # Tensors and numpy arrays are atoms.
    ones = array_ops.ones([2, 3])
    self.assertFalse(nest.is_nested(ones))
    self.assertFalse(nest.is_nested(math_ops.tanh(ones)))
    self.assertFalse(nest.is_nested(np.ones((4, 5))))
  @parameterized.parameters({"mapping_type": _CustomMapping},
                            {"mapping_type": dict})
  def testFlattenDictItems(self, mapping_type):
    """flatten_dict_items pairs up nested keys with nested values.

    Parameterized over dict and a custom Mapping type.
    """
    dictionary = mapping_type({(4, 5, (6, 8)): ("a", "b", ("c", "d"))})
    flat = {4: "a", 5: "b", 6: "c", 8: "d"}
    self.assertEqual(nest.flatten_dict_items(dictionary), flat)
    # Non-mapping input is rejected.
    with self.assertRaises(TypeError):
      nest.flatten_dict_items(4)
    # Duplicate flattened keys (4 appears twice) are rejected.
    bad_dictionary = mapping_type({(4, 5, (4, 8)): ("a", "b", ("c", "d"))})
    with self.assertRaisesRegex(ValueError, "not unique"):
      nest.flatten_dict_items(bad_dictionary)
    # Key/value structures with different atom counts are rejected.
    another_bad_dictionary = mapping_type({
        (4, 5, (6, 8)): ("a", "b", ("c", ("d", "e")))
    })
    with self.assertRaisesRegex(
        ValueError, "Key had [0-9]* elements, but value had [0-9]* elements"):
      nest.flatten_dict_items(another_bad_dictionary)
# pylint does not correctly recognize these as class names and
# suggests to use variable style under_score naming.
# pylint: disable=invalid-name
Named0ab = collections.namedtuple("named_0", ("a", "b"))
Named1ab = collections.namedtuple("named_1", ("a", "b"))
SameNameab = collections.namedtuple("same_name", ("a", "b"))
SameNameab2 = collections.namedtuple("same_name", ("a", "b"))
SameNamexy = collections.namedtuple("same_name", ("x", "y"))
SameName1xy = collections.namedtuple("same_name_1", ("x", "y"))
SameName1xy2 = collections.namedtuple("same_name_1", ("x", "y"))
NotSameName = collections.namedtuple("not_same_name", ("a", "b"))
# pylint: enable=invalid-name
  class SameNamedType1(SameNameab):
    """Subclass of SameNameab; treated as a distinct type by the tests."""
    pass
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testAssertSameStructure(self):
    """assert_same_structure: matching shapes pass; mismatches raise.

    Covers atom/sequence mismatches, differing nesting, differing sequence
    types (with and without check_types), dict key mismatches, and the
    namedtuple identity rules (same name + same fields == same type).
    """
    structure1 = (((1, 2), 3), 4, (5, 6))
    structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
    structure_different_num_elements = ("spam", "eggs")
    structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
    nest.assert_same_structure(structure1, structure2)
    # Atoms always match each other, regardless of type.
    nest.assert_same_structure("abc", 1.0)
    nest.assert_same_structure("abc", np.array([0, 1]))
    nest.assert_same_structure("abc", constant_op.constant([0, 1]))
    with self.assertRaisesRegex(
        ValueError,
        ("The two structures don't have the same nested structure\\.\n\n"
         "First structure:.*?\n\n"
         "Second structure:.*\n\n"
         "More specifically: Substructure "
         r'"type=tuple str=\(\(1, 2\), 3\)" is a sequence, while '
         'substructure "type=str str=spam" is not\n'
         "Entire first structure:\n"
         r"\(\(\(\., \.\), \.\), \., \(\., \.\)\)\n"
         "Entire second structure:\n"
         r"\(\., \.\)")):
      nest.assert_same_structure(structure1, structure_different_num_elements)
    with self.assertRaisesRegex(
        ValueError,
        ("The two structures don't have the same nested structure\\.\n\n"
         "First structure:.*?\n\n"
         "Second structure:.*\n\n"
         r'More specifically: Substructure "type=list str=\[0, 1\]" '
         r'is a sequence, while substructure "type=ndarray str=\[0 1\]" '
         "is not")):
      nest.assert_same_structure([0, 1], np.array([0, 1]))
    with self.assertRaisesRegex(
        ValueError,
        ("The two structures don't have the same nested structure\\.\n\n"
         "First structure:.*?\n\n"
         "Second structure:.*\n\n"
         r'More specifically: Substructure "type=list str=\[0, 1\]" '
         'is a sequence, while substructure "type=int str=0" '
         "is not")):
      nest.assert_same_structure(0, [0, 1])
    # tuple vs list is a type mismatch (TypeError), not a shape mismatch.
    self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), [0, 1])
    with self.assertRaisesRegex(ValueError,
                                ("don't have the same nested structure\\.\n\n"
                                 "First structure: .*?\n\nSecond structure: ")):
      nest.assert_same_structure(structure1, structure_different_nesting)
    self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
                      NestTest.Named0ab("a", "b"))
    nest.assert_same_structure(NestTest.Named0ab(3, 4),
                               NestTest.Named0ab("a", "b"))
    self.assertRaises(TypeError, nest.assert_same_structure,
                      NestTest.Named0ab(3, 4), NestTest.Named1ab(3, 4))
    with self.assertRaisesRegex(ValueError,
                                ("don't have the same nested structure\\.\n\n"
                                 "First structure: .*?\n\nSecond structure: ")):
      nest.assert_same_structure(NestTest.Named0ab(3, 4),
                                 NestTest.Named0ab([3], 4))
    with self.assertRaisesRegex(ValueError,
                                ("don't have the same nested structure\\.\n\n"
                                 "First structure: .*?\n\nSecond structure: ")):
      nest.assert_same_structure([[3], 4], [3, [4]])
    structure1_list = [[[1, 2], 3], 4, [5, 6]]
    with self.assertRaisesRegex(TypeError, "don't have the same sequence type"):
      nest.assert_same_structure(structure1, structure1_list)
    # check_types=False relaxes the sequence-type requirement.
    nest.assert_same_structure(structure1, structure2, check_types=False)
    nest.assert_same_structure(structure1, structure1_list, check_types=False)
    with self.assertRaisesRegex(ValueError, "don't have the same set of keys"):
      nest.assert_same_structure({"a": 1}, {"b": 1})
    nest.assert_same_structure(NestTest.SameNameab(0, 1),
                               NestTest.SameNameab2(2, 3))
    # This assertion is expected to pass: two namedtuples with the same
    # name and field names are considered to be identical.
    nest.assert_same_structure(
        NestTest.SameNameab(NestTest.SameName1xy(0, 1), 2),
        NestTest.SameNameab2(NestTest.SameName1xy2(2, 3), 4))
    expected_message = "The two structures don't have the same.*"
    with self.assertRaisesRegex(ValueError, expected_message):
      nest.assert_same_structure(
          NestTest.SameNameab(0, NestTest.SameNameab2(1, 2)),
          NestTest.SameNameab2(NestTest.SameNameab(0, 1), 2))
    # Different type name or different field names make namedtuples distinct.
    self.assertRaises(TypeError, nest.assert_same_structure,
                      NestTest.SameNameab(0, 1), NestTest.NotSameName(2, 3))
    self.assertRaises(TypeError, nest.assert_same_structure,
                      NestTest.SameNameab(0, 1), NestTest.SameNamexy(2, 3))
    self.assertRaises(TypeError, nest.assert_same_structure,
                      NestTest.SameNameab(0, 1), NestTest.SameNamedType1(2, 3))
EmptyNT = collections.namedtuple("empty_nt", "") # pylint: disable=invalid-name
def testHeterogeneousComparison(self):
nest.assert_same_structure({"a": 4}, _CustomMapping(a=3))
nest.assert_same_structure(_CustomMapping(b=3), {"b": 4})
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testMapStructure(self):
    """map_structure applies a function leafwise over one or more structures.

    Also checks empty-structure handling, keyword-argument validation, and
    the structure/type mismatch errors.
    """
    structure1 = (((1, 2), 3), 4, (5, 6))
    structure2 = (((7, 8), 9), 10, (11, 12))
    structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
    nest.assert_same_structure(structure1, structure1_plus1)
    self.assertAllEqual(
        [2, 3, 4, 5, 6, 7],
        nest.flatten(structure1_plus1))
    # Multi-structure form: the function receives one leaf from each input.
    structure1_plus_structure2 = nest.map_structure(
        lambda x, y: x + y, structure1, structure2)
    self.assertEqual(
        (((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
        structure1_plus_structure2)
    # Scalars are valid structures of a single atom.
    self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
    self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
    structure3 = collections.defaultdict(list)
    structure3["a"] = [1, 2, 3, 4]
    structure3["b"] = [2, 3, 4, 5]
    expected_structure3 = collections.defaultdict(list)
    expected_structure3["a"] = [2, 3, 4, 5]
    expected_structure3["b"] = [3, 4, 5, 6]
    self.assertEqual(expected_structure3,
                     nest.map_structure(lambda x: x + 1, structure3))
    # Empty structures
    self.assertEqual((), nest.map_structure(lambda x: x + 1, ()))
    self.assertEqual([], nest.map_structure(lambda x: x + 1, []))
    self.assertEqual({}, nest.map_structure(lambda x: x + 1, {}))
    self.assertEqual(NestTest.EmptyNT(), nest.map_structure(lambda x: x + 1,
                                                            NestTest.EmptyNT()))
    # This is checking actual equality of types, empty list != empty tuple
    self.assertNotEqual((), nest.map_structure(lambda x: x + 1, []))
    with self.assertRaisesRegex(TypeError, "callable"):
      nest.map_structure("bad", structure1_plus1)
    with self.assertRaisesRegex(ValueError, "at least one structure"):
      nest.map_structure(lambda x: x)
    with self.assertRaisesRegex(ValueError, "same number of elements"):
      nest.map_structure(lambda x, y: None, (3, 4), (3, 4, 5))
    with self.assertRaisesRegex(ValueError, "same nested structure"):
      nest.map_structure(lambda x, y: None, 3, (3,))
    with self.assertRaisesRegex(TypeError, "same sequence type"):
      nest.map_structure(lambda x, y: None, ((3, 4), 5), [(3, 4), 5])
    with self.assertRaisesRegex(ValueError, "same nested structure"):
      nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
    structure1_list = [[[1, 2], 3], 4, [5, 6]]
    with self.assertRaisesRegex(TypeError, "same sequence type"):
      nest.map_structure(lambda x, y: None, structure1, structure1_list)
    # check_types=False tolerates tuple/list mismatch but not shape mismatch.
    nest.map_structure(lambda x, y: None, structure1, structure1_list,
                       check_types=False)
    with self.assertRaisesRegex(ValueError, "same nested structure"):
      nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
                         check_types=False)
    # Only check_types and expand_composites are accepted keyword arguments.
    with self.assertRaisesRegex(ValueError, "Only valid keyword argument.*foo"):
      nest.map_structure(lambda x: None, structure1, foo="a")
    with self.assertRaisesRegex(ValueError, "Only valid keyword argument.*foo"):
      nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
ABTuple = collections.namedtuple("ab_tuple", "a, b") # pylint: disable=invalid-name
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testMapStructureWithStrings(self):
    """map_structure works leafwise on string atoms inside namedtuples."""
    inp_a = NestTest.ABTuple(a="foo", b=("bar", "baz"))
    inp_b = NestTest.ABTuple(a=2, b=(1, 3))
    # String repetition pairs each string leaf with its repeat count.
    out = nest.map_structure(lambda string, repeats: string * repeats,
                             inp_a,
                             inp_b)
    self.assertEqual("foofoo", out.a)
    self.assertEqual("bar", out.b[0])
    self.assertEqual("bazbazbaz", out.b[1])
    nt = NestTest.ABTuple(a=("something", "something_else"),
                          b="yet another thing")
    rev_nt = nest.map_structure(lambda x: x[::-1], nt)
    # Check the output is the correct structure, and all strings are reversed.
    nest.assert_same_structure(nt, rev_nt)
    self.assertEqual(nt.a[0][::-1], rev_nt.a[0])
    self.assertEqual(nt.a[1][::-1], rev_nt.a[1])
    self.assertEqual(nt.b[::-1], rev_nt.b)
  def testMapStructureOverPlaceholders(self):
    """map_structure composes graph ops over placeholder structures."""
    # Test requires placeholders and thus requires graph mode
    with ops.Graph().as_default():
      inp_a = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
               array_ops.placeholder(dtypes.float32, shape=[3, 7]))
      inp_b = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
               array_ops.placeholder(dtypes.float32, shape=[3, 7]))
      output = nest.map_structure(lambda x1, x2: x1 + x2, inp_a, inp_b)
      nest.assert_same_structure(output, inp_a)
      self.assertShapeEqual(np.zeros((3, 4)), output[0])
      self.assertShapeEqual(np.zeros((3, 7)), output[1])
      # Feeding tuples of placeholders with tuples of arrays exercises
      # structured feed_dict handling.
      feed_dict = {
          inp_a: (np.random.randn(3, 4), np.random.randn(3, 7)),
          inp_b: (np.random.randn(3, 4), np.random.randn(3, 7))
      }
      with self.cached_session() as sess:
        output_np = sess.run(output, feed_dict=feed_dict)
      self.assertAllClose(output_np[0],
                          feed_dict[inp_a][0] + feed_dict[inp_b][0])
      self.assertAllClose(output_np[1],
                          feed_dict[inp_a][1] + feed_dict[inp_b][1])
  def testAssertShallowStructure(self):
    """assert_shallow_structure: shallow tree must be a prefix of the input."""
    inp_ab = ["a", "b"]
    inp_abc = ["a", "b", "c"]
    # Length mismatch between shallow and input trees is rejected.
    with self.assertRaisesWithLiteralMatch(  # pylint: disable=g-error-prone-assert-raises
        ValueError,
        nest.STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
            input_length=len(inp_ab), shallow_length=len(inp_abc)
        ),
    ):
      nest.assert_shallow_structure(inp_abc, inp_ab)
    inp_ab1 = [(1, 1), (2, 2)]
    inp_ab2 = [[1, 1], [2, 2]]
    # tuple vs list at the same level is a type mismatch unless
    # check_types=False.
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest.STRUCTURES_HAVE_MISMATCHING_TYPES.format(
            shallow_type=type(inp_ab2[0]), input_type=type(inp_ab1[0])
        ),
    ):
      nest.assert_shallow_structure(inp_ab2, inp_ab1)
    nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
    # Dict keys in the shallow tree must exist in the input tree.
    inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}}
    inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}}
    with self.assertRaisesWithLiteralMatch(
        ValueError, nest.SHALLOW_TREE_HAS_INVALID_KEYS.format(["d"])
    ):
      nest.assert_shallow_structure(inp_ab2, inp_ab1)
    # Key order of OrderedDicts does not matter; only the key sets do.
    inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
    inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)])
    nest.assert_shallow_structure(inp_ab, inp_ba)
    # This assertion is expected to pass: two namedtuples with the same
    # name and field names are considered to be identical.
    inp_shallow = NestTest.SameNameab(1, 2)
    inp_deep = NestTest.SameNameab2(1, [1, 2, 3])
    nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=False)
    nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=True)
    # This assertion is expected to pass: two list-types with same number
    # of fields are considered identical.
    inp_shallow = _CustomList([1, 2])
    inp_deep = [1, 2]
    nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=False)
    nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=True)
    # This assertion is expected to pass: a VariableSpec with alias_id and
    # a Variable are considered identical.
    inp_shallow = resource_variable_ops.VariableSpec(None, alias_id=0)
    inp_deep = resource_variable_ops.ResourceVariable(1.)
    nest.assert_shallow_structure(inp_shallow, inp_deep,
                                  expand_composites=False)
    nest.assert_shallow_structure(inp_shallow, inp_deep,
                                  expand_composites=True)
  def testFlattenUpTo(self):
    """flatten_up_to flattens the input only down to the shallow tree's depth."""
    # Shallow tree ends at scalar.
    input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
    shallow_tree = [[True, True], [False, True]]
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
    self.assertEqual(flattened_shallow_tree, [True, True, False, True])
    # Shallow tree ends at string.
    input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
    shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    input_tree_flattened = nest.flatten(input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [("a", 1), ("b", 2), ("c", 3), ("d", 4)])
    self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
    # Make sure dicts are correctly flattened, yielding values, not keys.
    input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
    shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [1, {"c": 2}, 3, (4, 5)])
    # Namedtuples.
    ab_tuple = NestTest.ABTuple
    input_tree = ab_tuple(a=[0, 1], b=2)
    shallow_tree = ab_tuple(a=0, b=1)
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [[0, 1], 2])
    # Nested dicts, OrderedDicts and namedtuples.
    input_tree = collections.OrderedDict(
        [("a", ab_tuple(a=[0, {"b": 1}], b=2)),
         ("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
    shallow_tree = input_tree
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
    shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [ab_tuple(a=[0, {"b": 1}], b=2),
                      3,
                      collections.OrderedDict([("f", 4)])])
    shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [ab_tuple(a=[0, {"b": 1}], b=2),
                      {"d": 3, "e": collections.OrderedDict([("f", 4)])}])
    ## Shallow non-list edge-case.
    # Using iterable elements.
    input_tree = ["input_tree"]
    shallow_tree = "shallow_tree"
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    input_tree = ["input_tree_0", "input_tree_1"]
    shallow_tree = "shallow_tree"
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    # Using non-iterable elements.
    input_tree = [0]
    shallow_tree = 9
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    input_tree = [0, 1]
    shallow_tree = 9
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    ## Both non-list edge-case.
    # Using iterable elements.
    input_tree = "input_tree"
    shallow_tree = "shallow_tree"
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    # Using non-iterable elements.
    input_tree = 0
    shallow_tree = 0
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    ## Input non-list edge-case.
    # A sequence-shaped shallow tree requires a sequence input.
    # Using iterable elements.
    input_tree = "input_tree"
    shallow_tree = ["shallow_tree"]
    expected_message = ("If shallow structure is a sequence, input must also "
                        "be a sequence. Input has type: <(type|class) 'str'>.")
    with self.assertRaisesRegex(TypeError, expected_message):
      flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    input_tree = "input_tree"
    shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
    with self.assertRaisesRegex(TypeError, expected_message):
      flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    # Using non-iterable elements.
    input_tree = 0
    shallow_tree = [9]
    expected_message = ("If shallow structure is a sequence, input must also "
                        "be a sequence. Input has type: <(type|class) 'int'>.")
    with self.assertRaisesRegex(TypeError, expected_message):
      flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    input_tree = 0
    shallow_tree = [9, 8]
    with self.assertRaisesRegex(TypeError, expected_message):
      flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    # A longer input than shallow tree is a length mismatch.
    input_tree = [(1,), (2,), 3]
    shallow_tree = [(1,), (2,)]
    expected_message = nest.STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
        input_length=len(input_tree), shallow_length=len(shallow_tree)
    )
    with self.assertRaisesRegex(ValueError, expected_message):  # pylint: disable=g-error-prone-assert-raises
      nest.assert_shallow_structure(shallow_tree, input_tree)
  def testFlattenWithTuplePathsUpTo(self):
    """flatten_with_tuple_paths_up_to yields (path, value) pairs per leaf.

    Mirrors testFlattenUpTo, additionally checking the tuple path (indices
    and dict keys) that leads to each flattened value.
    """
    def get_paths_and_values(shallow_tree, input_tree):
      # Helper: split the (path, value) pairs into parallel lists.
      path_value_pairs = nest.flatten_with_tuple_paths_up_to(
          shallow_tree, input_tree)
      paths = [p for p, _ in path_value_pairs]
      values = [v for _, v in path_value_pairs]
      return paths, values
    # Shallow tree ends at scalar.
    input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
    shallow_tree = [[True, True], [False, True]]
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths,
                     [(0, 0), (0, 1), (1, 0), (1, 1)])
    self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
    self.assertEqual(flattened_shallow_tree_paths,
                     [(0, 0), (0, 1), (1, 0), (1, 1)])
    self.assertEqual(flattened_shallow_tree, [True, True, False, True])
    # Shallow tree ends at string.
    input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
    shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    input_tree_flattened_paths = [p for p, _ in
                                  nest.flatten_with_tuple_paths(input_tree)]
    input_tree_flattened = nest.flatten(input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [(0, 0), (0, 1, 0), (0, 1, 1, 0), (0, 1, 1, 1, 0)])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [("a", 1), ("b", 2), ("c", 3), ("d", 4)])
    self.assertEqual(input_tree_flattened_paths,
                     [(0, 0, 0), (0, 0, 1),
                      (0, 1, 0, 0), (0, 1, 0, 1),
                      (0, 1, 1, 0, 0), (0, 1, 1, 0, 1),
                      (0, 1, 1, 1, 0, 0), (0, 1, 1, 1, 0, 1)])
    self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
    # Make sure dicts are correctly flattened, yielding values, not keys.
    input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
    shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a",), ("b",), ("d", 0), ("d", 1)])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [1, {"c": 2}, 3, (4, 5)])
    # Namedtuples.
    ab_tuple = collections.namedtuple("ab_tuple", "a, b")
    input_tree = ab_tuple(a=[0, 1], b=2)
    shallow_tree = ab_tuple(a=0, b=1)
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a",), ("b",)])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [[0, 1], 2])
    # Nested dicts, OrderedDicts and namedtuples.
    input_tree = collections.OrderedDict(
        [("a", ab_tuple(a=[0, {"b": 1}], b=2)),
         ("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
    shallow_tree = input_tree
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a", "a", 0),
                      ("a", "a", 1, "b"),
                      ("a", "b"),
                      ("c", "d"),
                      ("c", "e", "f")])
    self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
    shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a",),
                      ("c", "d"),
                      ("c", "e")])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [ab_tuple(a=[0, {"b": 1}], b=2),
                      3,
                      collections.OrderedDict([("f", 4)])])
    shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a",), ("c",)])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [ab_tuple(a=[0, {"b": 1}], b=2),
                      {"d": 3, "e": collections.OrderedDict([("f", 4)])}])
    ## Shallow non-list edge-case.
    # Using iterable elements.
    input_tree = ["input_tree"]
    shallow_tree = "shallow_tree"
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    input_tree = ["input_tree_0", "input_tree_1"]
    shallow_tree = "shallow_tree"
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    # Test case where len(shallow_tree) < len(input_tree)
    input_tree = {"a": "A", "b": "B", "c": "C"}
    shallow_tree = {"a": 1, "c": 2}
    with self.assertRaisesWithLiteralMatch(  # pylint: disable=g-error-prone-assert-raises
        ValueError,
        nest.STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
            input_length=len(input_tree), shallow_length=len(shallow_tree)
        ),
    ):
      get_paths_and_values(shallow_tree, input_tree)
    # Using non-iterable elements.
    input_tree = [0]
    shallow_tree = 9
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    input_tree = [0, 1]
    shallow_tree = 9
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    ## Both non-list edge-case.
    # Using iterable elements.
    input_tree = "input_tree"
    shallow_tree = "shallow_tree"
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    # Using non-iterable elements.
    input_tree = 0
    shallow_tree = 0
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    ## Input non-list edge-case.
    # A sequence-shaped shallow tree requires a sequence input.
    # Using iterable elements.
    input_tree = "input_tree"
    shallow_tree = ["shallow_tree"]
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest.IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree)),
    ):
      (flattened_input_tree_paths,
       flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree_paths, [(0,)])
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    input_tree = "input_tree"
    shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest.IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree)),
    ):
      (flattened_input_tree_paths,
       flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree_paths, [(0,), (1,)])
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    # Using non-iterable elements.
    input_tree = 0
    shallow_tree = [9]
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest.IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree)),
    ):
      (flattened_input_tree_paths,
       flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree_paths, [(0,)])
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    input_tree = 0
    shallow_tree = [9, 8]
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest.IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree)),
    ):
      (flattened_input_tree_paths,
       flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree_paths, [(0,), (1,)])
    self.assertEqual(flattened_shallow_tree, shallow_tree)
  def testMapStructureUpTo(self):
    """map_structure_up_to maps only down to the shallow structure's leaves."""
    # Named tuples.
    ab_tuple = collections.namedtuple("ab_tuple", "a, b")
    op_tuple = collections.namedtuple("op_tuple", "add, mul")
    inp_val = ab_tuple(a=2, b=3)
    inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
    # The inner op_tuples are below the shallow structure's leaves, so each
    # is passed whole to the function.
    out = nest.map_structure_up_to(
        inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
    self.assertEqual(out.a, 6)
    self.assertEqual(out.b, 15)
    # Lists.
    data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
    name_list = ["evens", ["odds", "primes"]]
    out = nest.map_structure_up_to(
        name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
        name_list, data_list)
    self.assertEqual(out, ["first_4_evens", ["first_5_odds", "first_3_primes"]])
    # Dicts.
    inp_val = dict(a=2, b=3)
    inp_ops = dict(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
    out = nest.map_structure_up_to(
        inp_val,
        lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
    self.assertEqual(out["a"], 6)
    self.assertEqual(out["b"], 15)
    # Non-equal dicts.
    inp_val = dict(a=2, b=3)
    inp_ops = dict(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
    with self.assertRaisesWithLiteralMatch(
        ValueError, nest.SHALLOW_TREE_HAS_INVALID_KEYS.format(["b"])
    ):
      nest.map_structure_up_to(
          inp_val,
          lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
    # Dict+custom mapping.
    inp_val = dict(a=2, b=3)
    inp_ops = _CustomMapping(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
    out = nest.map_structure_up_to(
        inp_val,
        lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
    self.assertEqual(out["a"], 6)
    self.assertEqual(out["b"], 15)
    # Non-equal dict/mapping.
    inp_val = dict(a=2, b=3)
    inp_ops = _CustomMapping(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
    with self.assertRaisesWithLiteralMatch(
        ValueError, nest.SHALLOW_TREE_HAS_INVALID_KEYS.format(["b"])
    ):
      nest.map_structure_up_to(
          inp_val,
          lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
def testGetTraverseShallowStructure(self):
scalar_traverse_input = [3, 4, (1, 2, [0]), [5, 6], {"a": (7,)}, []]
scalar_traverse_r = nest.get_traverse_shallow_structure(
lambda s: not isinstance(s, tuple),
scalar_traverse_input)
self.assertEqual(scalar_traverse_r,
[True, True, False, [True, True], {"a": False}, []])
nest.assert_shallow_structure(scalar_traverse_r,
scalar_traverse_input)
structure_traverse_input = [(1, [2]), ([1], 2)]
structure_traverse_r = nest.get_traverse_shallow_structure(
lambda s: (True, False) if isinstance(s, tuple) else True,
structure_traverse_input)
self.assertEqual(structure_traverse_r,
[(True, False), ([True], False)])
nest.assert_shallow_structure(structure_traverse_r,
structure_traverse_input)
with self.assertRaisesRegex(TypeError, "returned structure"):
nest.get_traverse_shallow_structure(lambda _: [True], 0)
with self.assertRaisesRegex(TypeError, "returned a non-bool scalar"):
nest.get_traverse_shallow_structure(lambda _: 1, [1])
with self.assertRaisesRegex(TypeError,
"didn't return a depth=1 structure of bools"):
nest.get_traverse_shallow_structure(lambda _: [1], [1])
def testYieldFlatStringPaths(self):
for inputs_expected in ({"inputs": [], "expected": []},
{"inputs": 3, "expected": [()]},
{"inputs": [3], "expected": [(0,)]},
{"inputs": {"a": 3}, "expected": [("a",)]},
{"inputs": {"a": {"b": 4}},
"expected": [("a", "b")]},
{"inputs": [{"a": 2}], "expected": [(0, "a")]},
{"inputs": [{"a": [2]}], "expected": [(0, "a", 0)]},
{"inputs": [{"a": [(23, 42)]}],
"expected": [(0, "a", 0, 0), (0, "a", 0, 1)]},
{"inputs": [{"a": ([23], 42)}],
"expected": [(0, "a", 0, 0), (0, "a", 1)]},
{"inputs": {"a": {"a": 2}, "c": [[[4]]]},
"expected": [("a", "a"), ("c", 0, 0, 0)]},
{"inputs": {"0": [{"1": 23}]},
"expected": [("0", 0, "1")]}):
inputs = inputs_expected["inputs"]
expected = inputs_expected["expected"]
self.assertEqual(list(nest.yield_flat_paths(inputs)), expected)
# We cannot define namedtuples within @parameterized argument lists.
# pylint: disable=invalid-name
Foo = collections.namedtuple("Foo", ["a", "b"])
Bar = collections.namedtuple("Bar", ["c", "d"])
# pylint: enable=invalid-name
@parameterized.parameters([
dict(inputs=[], expected=[]),
dict(inputs=[23, "42"], expected=[("0", 23), ("1", "42")]),
dict(inputs=[[[[108]]]], expected=[("0/0/0/0", 108)]),
dict(inputs=Foo(a=3, b=Bar(c=23, d=42)),
expected=[("a", 3), ("b/c", 23), ("b/d", 42)]),
dict(inputs=Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="thing")),
expected=[("a/c", 23), ("a/d", 42), ("b/c", 0), ("b/d", "thing")]),
dict(inputs=Bar(c=42, d=43),
expected=[("c", 42), ("d", 43)]),
dict(inputs=Bar(c=[42], d=43),
expected=[("c/0", 42), ("d", 43)]),
])
def testFlattenWithStringPaths(self, inputs, expected):
self.assertEqual(
nest.flatten_with_joined_string_paths(inputs, separator="/"),
expected)
@parameterized.parameters([
dict(inputs=[], expected=[]),
dict(inputs=[23, "42"], expected=[((0,), 23), ((1,), "42")]),
dict(inputs=[[[[108]]]], expected=[((0, 0, 0, 0), 108)]),
dict(inputs=Foo(a=3, b=Bar(c=23, d=42)),
expected=[(("a",), 3), (("b", "c"), 23), (("b", "d"), 42)]),
dict(inputs=Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="thing")),
expected=[(("a", "c"), 23), (("a", "d"), 42), (("b", "c"), 0),
(("b", "d"), "thing")]),
dict(inputs=Bar(c=42, d=43),
expected=[(("c",), 42), (("d",), 43)]),
dict(inputs=Bar(c=[42], d=43),
expected=[(("c", 0), 42), (("d",), 43)]),
])
def testFlattenWithTuplePaths(self, inputs, expected):
self.assertEqual(nest.flatten_with_tuple_paths(inputs), expected)
@parameterized.named_parameters(
("tuples", (1, 2), (3, 4), True, (("0", 4), ("1", 6))),
("dicts", {"a": 1, "b": 2}, {"b": 4, "a": 3}, True,
{"a": ("a", 4), "b": ("b", 6)}),
("mixed", (1, 2), [3, 4], False, (("0", 4), ("1", 6))),
("nested",
{"a": [2, 3], "b": [1, 2, 3]}, {"b": [5, 6, 7], "a": [8, 9]}, True,
{"a": [("a/0", 10), ("a/1", 12)],
"b": [("b/0", 6), ("b/1", 8), ("b/2", 10)]}))
def testMapWithPathsCompatibleStructures(self, s1, s2, check_types, expected):
def format_sum(path, *values):
return (path, sum(values))
result = nest.map_structure_with_paths(format_sum, s1, s2,
check_types=check_types)
self.assertEqual(expected, result)
@parameterized.named_parameters(
("tuples", (1, 2, 3), (4, 5), ValueError),
("dicts", {"a": 1}, {"b": 2}, ValueError),
("mixed", (1, 2), [3, 4], TypeError),
("nested",
{"a": [2, 3, 4], "b": [1, 3]},
{"b": [5, 6], "a": [8, 9]},
ValueError
))
def testMapWithPathsIncompatibleStructures(self, s1, s2, error_type):
with self.assertRaises(error_type):
nest.map_structure_with_paths(lambda path, *s: 0, s1, s2)
@parameterized.named_parameters([
dict(testcase_name="Tuples", s1=(1, 2), s2=(3, 4),
check_types=True, expected=(((0,), 4), ((1,), 6))),
dict(testcase_name="Dicts", s1={"a": 1, "b": 2}, s2={"b": 4, "a": 3},
check_types=True, expected={"a": (("a",), 4), "b": (("b",), 6)}),
dict(testcase_name="Mixed", s1=(1, 2), s2=[3, 4],
check_types=False, expected=(((0,), 4), ((1,), 6))),
dict(testcase_name="Nested",
s1={"a": [2, 3], "b": [1, 2, 3]},
s2={"b": [5, 6, 7], "a": [8, 9]},
check_types=True,
expected={"a": [(("a", 0), 10), (("a", 1), 12)],
"b": [(("b", 0), 6), (("b", 1), 8), (("b", 2), 10)]}),
])
def testMapWithTuplePathsCompatibleStructures(
self, s1, s2, check_types, expected):
def path_and_sum(path, *values):
return path, sum(values)
result = nest.map_structure_with_tuple_paths(
path_and_sum, s1, s2, check_types=check_types)
self.assertEqual(expected, result)
@parameterized.named_parameters([
dict(testcase_name="Tuples", s1=(1, 2, 3), s2=(4, 5),
error_type=ValueError),
dict(testcase_name="Dicts", s1={"a": 1}, s2={"b": 2},
error_type=ValueError),
dict(testcase_name="Mixed", s1=(1, 2), s2=[3, 4], error_type=TypeError),
dict(testcase_name="Nested",
s1={"a": [2, 3, 4], "b": [1, 3]},
s2={"b": [5, 6], "a": [8, 9]},
error_type=ValueError)
])
def testMapWithTuplePathsIncompatibleStructures(self, s1, s2, error_type):
with self.assertRaises(error_type):
nest.map_structure_with_tuple_paths(lambda path, *s: 0, s1, s2)
def testFlattenCustomSequenceThatRaisesException(self): # b/140746865
seq = _CustomSequenceThatRaisesException()
with self.assertRaisesRegex(ValueError, "Cannot get item"):
nest.flatten(seq)
def testListToTuple(self):
input_sequence = [1, (2, {3: [4, 5, (6,)]}, None, 7, [[[8]]])]
expected = (1, (2, {3: (4, 5, (6,))}, None, 7, (((8,),),)))
nest.assert_same_structure(
nest.list_to_tuple(input_sequence),
expected,
)
def testInvalidCheckTypes(self):
with self.assertRaises((ValueError, TypeError)):
nest.assert_same_structure(
nest1=array_ops.zeros((1)),
nest2=array_ops.ones((1, 1, 1)),
check_types=array_ops.ones((2)))
with self.assertRaises((ValueError, TypeError)):
nest.assert_same_structure(
nest1=array_ops.zeros((1)),
nest2=array_ops.ones((1, 1, 1)),
expand_composites=array_ops.ones((2)))
def testIsNamedtuple(self):
# A classic namedtuple.
Foo = collections.namedtuple("Foo", ["a", "b"])
self.assertTrue(nest.is_namedtuple(Foo(1, 2)))
# A subclass of it.
class SubFoo(Foo):
def extra_method(self, x):
return self.a + x
self.assertTrue(nest.is_namedtuple(SubFoo(1, 2)))
# A typing.NamedTuple.
class TypedFoo(NamedTuple):
a: int
b: int
self.assertTrue(nest.is_namedtuple(TypedFoo(1, 2)))
# Their types are not namedtuple values themselves.
self.assertFalse(nest.is_namedtuple(Foo))
self.assertFalse(nest.is_namedtuple(SubFoo))
self.assertFalse(nest.is_namedtuple(TypedFoo))
# These values don't have namedtuple types.
self.assertFalse(nest.is_namedtuple(123))
self.assertFalse(nest.is_namedtuple("abc"))
self.assertFalse(nest.is_namedtuple((123, "abc")))
class SomethingElseWithFields(tuple):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._fields = [1, 2, 3] # Not str, as expected for a namedtuple.
self.assertFalse(nest.is_namedtuple(SomethingElseWithFields()))
def testSameNamedtuples(self):
# A classic namedtuple and an equivalent cppy.
Foo1 = collections.namedtuple("Foo", ["a", "b"])
Foo2 = collections.namedtuple("Foo", ["a", "b"])
self.assertTrue(nest.same_namedtuples(Foo1(1, 2), Foo1(3, 4)))
self.assertTrue(nest.same_namedtuples(Foo1(1, 2), Foo2(3, 4)))
# Non-equivalent namedtuples.
Bar = collections.namedtuple("Bar", ["a", "b"])
self.assertFalse(nest.same_namedtuples(Foo1(1, 2), Bar(1, 2)))
FooXY = collections.namedtuple("Foo", ["x", "y"])
self.assertFalse(nest.same_namedtuples(Foo1(1, 2), FooXY(1, 2)))
# An equivalent subclass from the typing module
class Foo(NamedTuple):
a: int
b: int
self.assertTrue(nest.same_namedtuples(Foo1(1, 2), Foo(3, 4)))
| NestTest |
python | mlflow__mlflow | mlflow/store/tracking/dbmodels/models.py | {
"start": 21106,
"end": 22556
} | class ____(Base):
__tablename__ = "input_tags"
__table_args__ = (PrimaryKeyConstraint("input_uuid", "name", name="input_tags_pk"),)
input_uuid = Column(String(36), ForeignKey("inputs.input_uuid"), nullable=False)
"""
Input UUID: `String` (limit 36 characters). Defined as *Non-null* in schema.
*Foreign Key* into ``inputs`` table. Part of *Primary Key* for ``input_tags`` table.
"""
name = Column(String(255), nullable=False)
"""
Param name: `String` (limit 255 characters). Defined as *Non-null* in schema.
Part of *Primary Key* for ``input_tags`` table.
"""
value = Column(String(500), nullable=False)
"""
Param value: `String` (limit 500 characters). Defined as *Non-null* in schema.
Part of *Primary Key* for ``input_tags`` table.
"""
def __repr__(self):
return f"<SqlInputTag ({self.input_uuid}, {self.name}, {self.value})>"
def to_mlflow_entity(self):
"""
Convert DB model to corresponding MLflow entity.
Returns:
mlflow.entities.InputTag: Description of the return value.
"""
return InputTag(key=self.name, value=self.value)
#######################################################################################
# Below are Tracing models. We may refactor them to be in a separate module in the future.
#######################################################################################
| SqlInputTag |
python | huggingface__transformers | src/transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py | {
"start": 14176,
"end": 15410
} | class ____(MixtralForCausalLM):
def __init__(self, config):
PreTrainedModel.__init__(self, config)
self.model = Ernie4_5_MoeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=config.use_bias)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.moe_num_experts
self.num_experts_per_tok = config.moe_k
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, **super_kwargs):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
super().forward(**super_kwargs)
__all__ = [
"Ernie4_5_MoeForCausalLM",
"Ernie4_5_MoeModel",
"Ernie4_5_MoePreTrainedModel",
]
| Ernie4_5_MoeForCausalLM |
python | jmcnamara__XlsxWriter | xlsxwriter/test/workbook/test_workbook03.py | {
"start": 343,
"end": 1859
} | class ____(unittest.TestCase):
"""
Test assembling a complete Workbook file.
"""
def test_assemble_xml_file(self):
"""Test writing a workbook with user specified names."""
self.maxDiff = None
fh = StringIO()
workbook = Workbook()
workbook._set_filehandle(fh)
workbook.add_worksheet("Non Default Name")
workbook.add_worksheet("Another Name")
workbook._assemble_xml_file()
workbook.fileclosed = 1
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<workbook xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<fileVersion appName="xl" lastEdited="4" lowestEdited="4" rupBuild="4505"/>
<workbookPr defaultThemeVersion="124226"/>
<bookViews>
<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660"/>
</bookViews>
<sheets>
<sheet name="Non Default Name" sheetId="1" r:id="rId1"/>
<sheet name="Another Name" sheetId="2" r:id="rId2"/>
</sheets>
<calcPr calcId="124519" fullCalcOnLoad="1"/>
</workbook>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorkbook |
python | great-expectations__great_expectations | tests/expectations/test_expectation.py | {
"start": 10028,
"end": 17850
} | class ____:
"""Tests around the suite_parameter_options property of Expectations.
Note: evaluation_parameter_options is currently a sorted tuple, but doesn't necessarily have to be
""" # noqa: E501 # FIXME CoP
SUITE_PARAMETER_MIN = "my_min"
SUITE_PARAMETER_MAX = "my_max"
SUITE_PARAMETER_VALUE = "my_value"
SUITE_PARAMETER_MOSTLY = "my_mostly"
@pytest.mark.unit
def test_expectation_without_evaluation_parameter(self):
expectation = gxe.ExpectColumnValuesToBeBetween(column="foo", min_value=0, max_value=10)
assert expectation.suite_parameter_options == tuple()
@pytest.mark.unit
def test_expectation_with_evaluation_parameter(self):
expectation = gxe.ExpectColumnValuesToBeBetween(
column="foo",
min_value=0,
max_value={"$PARAMETER": self.SUITE_PARAMETER_MAX},
)
assert expectation.suite_parameter_options == (self.SUITE_PARAMETER_MAX,)
@pytest.mark.unit
def test_column_map_expectation_with_evaluation_parameter(self):
expectation = gxe.ExpectColumnValuesToBeNull(
column="foo", mostly={"$PARAMETER": self.SUITE_PARAMETER_MOSTLY}
)
assert expectation.suite_parameter_options == (self.SUITE_PARAMETER_MOSTLY,)
@pytest.mark.unit
def test_expectation_with_multiple_suite_parameters(self):
expectation = gxe.ExpectColumnValuesToBeBetween(
column="foo",
min_value={"$PARAMETER": self.SUITE_PARAMETER_MIN},
max_value={"$PARAMETER": self.SUITE_PARAMETER_MAX},
)
assert expectation.suite_parameter_options == (
self.SUITE_PARAMETER_MAX,
self.SUITE_PARAMETER_MIN,
)
@pytest.mark.unit
def test_expectation_with_duplicate_suite_parameters(self):
expectation = gxe.ExpectColumnValuesToBeBetween(
column="foo",
min_value={"$PARAMETER": self.SUITE_PARAMETER_VALUE},
max_value={"$PARAMETER": self.SUITE_PARAMETER_VALUE},
)
assert expectation.suite_parameter_options == (self.SUITE_PARAMETER_VALUE,)
@pytest.mark.unit
@pytest.mark.parametrize(
"column_a,column_b,expected",
[
pytest.param("foo", "foo", True, id="equivalent_columns"),
pytest.param("foo", "bar", False, id="different_columns"),
],
)
def test_expectation_equality(column_a: str, column_b: str, expected: bool):
expectation_a = gxe.ExpectColumnValuesToBeBetween(column=column_a, min_value=0, max_value=10)
expectation_b = gxe.ExpectColumnValuesToBeBetween(column=column_b, min_value=0, max_value=10)
assert (expectation_a == expectation_b) is expected
@pytest.mark.unit
@pytest.mark.parametrize(
"notes_a,notes_b,expected",
[
pytest.param(None, None, True, id="both_none"),
pytest.param([], None, True, id="both_falsy"),
pytest.param("my_notes", None, False, id="missing_notes"),
pytest.param("my_notes", "my_other_notes", False, id="different_notes"),
pytest.param("my_notes", "my_notes", True, id="equivalent_notes"),
],
)
def test_expectation_equality_with_notes(
notes_a: str | list[str] | None, notes_b: str | list[str] | None, expected: bool
):
expectation_a = gxe.ExpectColumnValuesToBeBetween(
column="foo", min_value=0, max_value=10, notes=notes_a
)
expectation_b = gxe.ExpectColumnValuesToBeBetween(
column="foo", min_value=0, max_value=10, notes=notes_b
)
assert (expectation_a == expectation_b) is expected
@pytest.mark.unit
@pytest.mark.parametrize(
"meta_a,meta_b,expected",
[
pytest.param(None, None, True, id="both_none"),
pytest.param({}, None, True, id="both_falsy"),
pytest.param({"author": "Bob Dylan"}, None, False, id="missing_meta"),
pytest.param(
{"author": "Bob Dylan"}, {"author": "John Lennon"}, False, id="different_meta"
),
pytest.param({"author": "Bob Dylan"}, {"author": "Bob Dylan"}, True, id="equivalent_meta"),
],
)
def test_expectation_equality_with_meta(meta_a: dict | None, meta_b: dict | None, expected: bool):
expectation_a = gxe.ExpectColumnValuesToBeBetween(
column="foo", min_value=0, max_value=10, meta=meta_a
)
expectation_b = gxe.ExpectColumnValuesToBeBetween(
column="foo", min_value=0, max_value=10, meta=meta_b
)
assert (expectation_a == expectation_b) is expected
@pytest.mark.unit
def test_expectation_equality_ignores_rendered_content():
column = "whatever"
min_value = 0
max_value = 10
expectation_a = gxe.ExpectColumnValuesToBeBetween(
column=column,
min_value=min_value,
max_value=max_value,
)
expectation_a.render()
assert expectation_a.rendered_content
expectation_b = gxe.ExpectColumnValuesToBeBetween(
column=column,
min_value=min_value,
max_value=max_value,
)
expectation_b.rendered_content = None
assert expectation_a == expectation_b
@pytest.mark.unit
def test_expectation_with_row_condition_generates_rendered_content():
condition = ComparisonCondition(
column=Column("status"), operator=Operator.EQUAL, parameter="active"
)
condition_2 = Column("age") > 18
group_condition = AndCondition(conditions=[condition, condition_2])
expectation = gxe.ExpectColumnValuesToBeBetween(
column="foo",
min_value=0,
max_value=10,
row_condition=group_condition,
)
expectation.render()
assert expectation.rendered_content is not None
assert expectation.row_condition == group_condition
@pytest.mark.unit
@pytest.mark.parametrize(
"expectation_a, expectation_b, expected_result",
[
pytest.param(
gxe.ExpectColumnValuesToBeBetween(column="foo", min_value=0),
{},
False,
id="different_objects",
),
pytest.param(
gxe.ExpectColumnDistinctValuesToBeInSet(column="bar", value_set=[1, 2, 3]),
gxe.ExpectColumnValuesToBeBetween(column="foo", min_value=0),
True,
id="different_expectation_types",
),
pytest.param(
gxe.ExpectColumnValuesToBeBetween(column="foo", min_value=0),
gxe.ExpectColumnValuesToBeBetween(column="foo", min_value=0),
False,
id="equivalent_expectations",
),
pytest.param(
gxe.ExpectColumnValuesToBeBetween(
column="foo", min_value=0, id="bbbe648e-0a43-431b-81a0-04e68f1473ae"
),
gxe.ExpectColumnValuesToBeBetween(
column="foo", min_value=0, id="aaae648e-0a43-431b-81a0-04e68f1473ae"
),
False,
id="equiv_expectations_with_ids",
),
],
)
def test_expectations___lt__(expectation_a, expectation_b, expected_result):
assert (expectation_a < expectation_b) is expected_result
@pytest.mark.unit
def test_expectation_sorting():
expectation_a = gxe.ExpectColumnValuesToBeBetween(
column="foo", min_value=0, id="80b6d508-a843-426e-97c0-7ff64d35ac04"
)
expectation_b = gxe.ExpectColumnValuesToBeBetween(
column="foo", min_value=0, id="4cd1e63a-880b-46ea-93e8-c11636df18b8"
)
expectation_c = gxe.ExpectTableColumnCountToBeBetween()
expectation_d = gxe.ExpectColumnMaxToBeBetween(column="foo", min_value=0, max_value=10)
expectation_e = gxe.ExpectColumnMedianToBeBetween(column="foo", min_value=0, max_value=10)
expectations = [expectation_a, expectation_b, expectation_c, expectation_d, expectation_e]
assert sorted(expectations) == [
expectation_d,
expectation_e,
expectation_b,
expectation_a,
expectation_c,
]
| TestSuiteParameterOptions |
python | pytorch__pytorch | test/functorch/test_vmap.py | {
"start": 2662,
"end": 3007
} | class ____:
def __enter__(self):
self.prev_state = torch._C._debug_only_are_vmap_fallback_warnings_enabled()
torch._C._debug_only_display_vmap_fallback_warnings(True)
def __exit__(self, *ignored):
torch._C._debug_only_display_vmap_fallback_warnings(self.prev_state)
@markDynamoStrictTest
| EnableVmapFallbackWarnings |
python | ray-project__ray | python/ray/air/_internal/device_manager/cpu.py | {
"start": 164,
"end": 813
} | class ____(TorchDeviceManager):
"""CPU device manager"""
def is_available(self) -> bool():
return True
def get_devices(self) -> List[torch.device]:
"""Gets the correct torch device list configured for this process."""
return [torch.device("cpu")]
def supports_stream(self) -> bool:
"""Validate if the device type support create a stream"""
return False
def get_stream_context(self, stream):
"""Return empty context mananger for CPU."""
@contextmanager
def default_context_manager():
yield
return default_context_manager()
| CPUTorchDeviceManager |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/attrs/test_inference.py | {
"start": 3175,
"end": 3942
} | class ____:
_x: int = attr.ib()
@pytest.mark.parametrize("s", [st.just(42)])
def test_private_attribute(s):
check_can_generate_examples(st.builds(HasPrivateAttribute, x=s))
def test_private_attribute_underscore_fails():
with pytest.raises(TypeError, match="unexpected keyword argument '_x'"):
check_can_generate_examples(st.builds(HasPrivateAttribute, _x=st.just(42)))
def test_private_attribute_underscore_infer_fails():
# this has a slightly different failure case, because it goes through
# attrs-specific resolution logic.
with pytest.raises(
TypeError, match="Unexpected keyword argument _x for attrs class"
):
check_can_generate_examples(st.builds(HasPrivateAttribute, _x=...))
@attr.s
| HasPrivateAttribute |
python | PrefectHQ__prefect | tests/runtime/test_deployment.py | {
"start": 6136,
"end": 9591
} | class ____:
async def test_parameters_is_attribute(self):
assert "parameters" in dir(deployment)
async def test_parameters_is_empty_when_not_set(self):
assert deployment.parameters == {}
async def test_parameters_are_loaded_when_run_id_known(
self, deployment_id, monkeypatch, prefect_client
):
flow_run = await prefect_client.create_flow_run_from_deployment(deployment_id)
assert deployment.parameters == {}
monkeypatch.setenv(name="PREFECT__FLOW_RUN_ID", value=str(flow_run.id))
assert deployment.parameters == {"foo": "bar"} # see fixture at top of file
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id, parameters={"foo": 42}
)
monkeypatch.setenv(name="PREFECT__FLOW_RUN_ID", value=str(flow_run.id))
assert deployment.parameters == {"foo": 42}
async def test_parameters_accessible_in_nested_flows(
self, deployment_id, prefect_client
):
"""Test that deployment.parameters is accessible in nested flows (issue #19329)."""
seen_params = {}
@flow
async def nested_flow():
seen_params["nested"] = deployment.parameters
return deployment.parameters
@flow
async def parent_flow():
seen_params["parent"] = deployment.parameters
result = await nested_flow()
return {"parent": deployment.parameters, "nested": result}
# Create flow run from deployment with custom parameters
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id, parameters={"foo": "from_deployment"}
)
# Execute the flow using the flow engine helper
await run_flow_async(parent_flow, flow_run=flow_run)
# Both parent and nested should see the deployment parameters
assert seen_params["parent"] == {"foo": "from_deployment"}
assert seen_params["nested"] == {"foo": "from_deployment"}
async def test_parameters_accessible_in_deeply_nested_flows(
self, deployment_id, prefect_client
):
"""Test that deployment.parameters propagates through multiple nesting levels."""
# Track what we saw at each level
seen_params = {}
@flow
async def doubly_nested_flow():
seen_params["doubly_nested"] = deployment.parameters
return deployment.parameters
@flow
async def nested_flow():
seen_params["nested"] = deployment.parameters
result = await doubly_nested_flow()
return result
@flow
async def parent_flow():
seen_params["parent"] = deployment.parameters
result = await nested_flow()
return result
# Create flow run from deployment with custom parameters
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id, parameters={"foo": "from_deployment", "level": "deep"}
)
# Execute the flow using the flow engine helper
await run_flow_async(parent_flow, flow_run=flow_run)
# All levels should see the deployment parameters
expected = {"foo": "from_deployment", "level": "deep"}
assert seen_params["parent"] == expected
assert seen_params["nested"] == expected
assert seen_params["doubly_nested"] == expected
| TestParameters |
python | spyder-ide__spyder | external-deps/python-lsp-server/test/plugins/test_autoimport.py | {
"start": 5819,
"end": 11841
} | class ____:
def test_dot(self) -> None:
assert not should_insert("""str.""", 4)
def test_dot_partial(self) -> None:
assert not should_insert("""str.metho\n""", 9)
def test_comment(self) -> None:
assert not should_insert("""#""", 1)
def test_comment_indent(self) -> None:
assert not should_insert(""" # """, 5)
def test_from(self) -> None:
assert not should_insert("""from """, 5)
assert should_insert("""from """, 4)
def test_sort_sources() -> None:
result1 = _get_score(1, "import pathlib", "pathlib", "pathli")
result2 = _get_score(2, "import pathlib", "pathlib", "pathli")
assert result1 < result2
def test_sort_statements() -> None:
result1 = _get_score(
2, "from importlib_metadata import pathlib", "pathlib", "pathli"
)
result2 = _get_score(2, "import pathlib", "pathlib", "pathli")
assert result1 > result2
def test_sort_both() -> None:
result1 = _get_score(
3, "from importlib_metadata import pathlib", "pathlib", "pathli"
)
result2 = _get_score(2, "import pathlib", "pathlib", "pathli")
assert result1 > result2
def test_get_names() -> None:
source = """
from a import s as e
import blah, bleh
hello = "str"
a, b = 1, 2
def someone():
soemthing
class sfa:
sfiosifo
"""
results = get_names(jedi.Script(code=source))
assert results == {"blah", "bleh", "e", "hello", "someone", "sfa", "a", "b"}
# Tests ruff, flake8 and pyflakes messages
@pytest.mark.parametrize(
"message",
["Undefined name `os`", "F821 undefined name 'numpy'", "undefined name 'numpy'"],
)
def test_autoimport_code_actions_get_correct_module_name(
autoimport_workspace, message
) -> None:
source = "os.path.join('a', 'b')"
autoimport_workspace.put_document(DOC_URI, source=source)
doc = autoimport_workspace.get_document(DOC_URI)
diagnostic = {
"range": {
"start": {"line": 0, "character": 0},
"end": {"line": 0, "character": 2},
},
"message": message,
}
module_name = get_name_or_module(doc, diagnostic)
autoimport_workspace.rm_document(DOC_URI)
assert module_name == "os"
def make_context(module_name, line, character_start, character_end):
return {
"diagnostics": [
{
"message": f"undefined name '{module_name}'",
"range": {
"start": {"line": line, "character": character_start},
"end": {"line": line, "character": character_end},
},
}
]
}
def position(line, character):
return {"line": line, "character": character}
@pytest.mark.skipif(IS_WIN, reason="Flaky on Windows")
def test_autoimport_code_actions_and_completions_for_notebook_document(
client_server_pair,
) -> None:
client, server = client_server_pair
send_initialize_request(
client,
{
"pylsp": {
"plugins": {
"rope_autoimport": {
"memory": True,
"enabled": True,
"completions": {"enabled": True},
},
}
}
},
)
with patch.object(server._endpoint, "notify") as mock_notify:
# Expectations:
# 1. We receive an autoimport suggestion for "os" in the first cell because
# os is imported after that.
# 2. We don't receive an autoimport suggestion for "os" in the second cell because it's
# already imported in the second cell.
# 3. We don't receive an autoimport suggestion for "os" in the third cell because it's
# already imported in the second cell.
# 4. We receive an autoimport suggestion for "sys" because it's not already imported.
# 5. If diagnostics doesn't contain "undefined name ...", we send empty quick fix suggestions.
send_notebook_did_open(client, ["os", "import os\nos", "os", "sys"])
wait_for_condition(lambda: mock_notify.call_count >= 4)
# We received diagnostics messages for every cell
assert all(
"textDocument/publishDiagnostics" in c.args
for c in mock_notify.call_args_list
)
rope_autoimport_settings = server.workspace._config.plugin_settings(
"rope_autoimport"
)
assert rope_autoimport_settings.get("completions", {}).get("enabled", False) is True
assert rope_autoimport_settings.get("memory", False) is True
wait_for_condition(lambda: not cache.is_blocked())
# 1.
quick_fixes = server.code_actions("cell_1_uri", {}, make_context("os", 0, 0, 2))
assert any(s for s in quick_fixes if contains_autoimport_quickfix(s, "os"))
completions = server.completions("cell_1_uri", position(0, 2)).get("items")
assert any(s for s in completions if contains_autoimport_completion(s, "os"))
# 2.
# We don't test code actions here as in this case, there would be no code actions sent bc
# there wouldn't be a diagnostics message.
completions = server.completions("cell_2_uri", position(1, 2)).get("items")
assert not any(s for s in completions if contains_autoimport_completion(s, "os"))
# 3.
# Same as in 2.
completions = server.completions("cell_3_uri", position(0, 2)).get("items")
assert not any(s for s in completions if contains_autoimport_completion(s, "os"))
# 4.
quick_fixes = server.code_actions("cell_4_uri", {}, make_context("sys", 0, 0, 3))
assert any(s for s in quick_fixes if contains_autoimport_quickfix(s, "sys"))
completions = server.completions("cell_4_uri", position(0, 3)).get("items")
assert any(s for s in completions if contains_autoimport_completion(s, "sys"))
# 5.
context = {"diagnostics": [{"message": "A random message"}]}
quick_fixes = server.code_actions("cell_4_uri", {}, context)
assert len(quick_fixes) == 0
| TestShouldInsert |
python | apache__airflow | airflow-core/src/airflow/timetables/simple.py | {
"start": 5203,
"end": 6979
} | class ____(_TrivialTimetable):
"""
Timetable that never schedules anything.
This should not be directly used anywhere, but only set if a DAG is triggered by assets.
:meta private:
"""
description: str = "Triggered by assets"
def __init__(self, assets: BaseAsset) -> None:
super().__init__()
self.asset_condition = assets
@classmethod
def deserialize(cls, data: dict[str, Any]) -> Timetable:
from airflow.serialization.serialized_objects import decode_asset_condition
return cls(decode_asset_condition(data["asset_condition"]))
@property
def summary(self) -> str:
return "Asset"
def serialize(self) -> dict[str, Any]:
from airflow.serialization.serialized_objects import encode_asset_condition
return {"asset_condition": encode_asset_condition(self.asset_condition)}
def generate_run_id(
self,
*,
run_type: DagRunType,
data_interval: DataInterval | None,
run_after: DateTime,
**extra,
) -> str:
"""
Generate Run ID based on Run Type, run_after and logical Date.
:param run_type: type of DagRun
:param data_interval: the data interval
:param run_after: the date before which dag run won't start.
"""
from airflow.models.dagrun import DagRun
logical_date = data_interval.start if data_interval is not None else run_after
return DagRun.generate_run_id(run_type=run_type, logical_date=logical_date, run_after=run_after)
def next_dagrun_info(
self,
*,
last_automated_data_interval: DataInterval | None,
restriction: TimeRestriction,
) -> DagRunInfo | None:
return None
| AssetTriggeredTimetable |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_getitem_test.py | {
"start": 1329,
"end": 4305
} | class ____:
"""Helper to construct arguments for __getitem__.
Usage: _SliceBuilder()[<expr>] slice_spec Python generates for <expr>.
"""
def __getitem__(self, slice_spec):
return slice_spec
SLICE_BUILDER = _SliceBuilder()
def _make_tensor_slice_spec(slice_spec, use_constant=True):
"""Wraps all integers in an extended slice spec w/ a tensor.
This function is used to help test slicing when the slice spec contains
tensors, rather than integers.
Args:
slice_spec: The extended slice spec.
use_constant: If true, then wrap each integer with a tf.constant. If false,
then wrap each integer with a tf.placeholder.
Returns:
A copy of slice_spec, but with each integer i replaced with tf.constant(i).
"""
def make_piece_scalar(piece):
if isinstance(piece, int):
scalar = constant_op.constant(piece)
if use_constant:
return scalar
else:
return array_ops.placeholder_with_default(scalar, [])
elif isinstance(piece, slice):
return slice(
make_piece_scalar(piece.start), make_piece_scalar(piece.stop),
make_piece_scalar(piece.step))
else:
return piece
if isinstance(slice_spec, tuple):
return tuple(make_piece_scalar(piece) for piece in slice_spec)
else:
return make_piece_scalar(slice_spec)
# Example 2D ragged tensor value with one ragged dimension and with scalar
# values, expressed as nested python lists and as splits+values.
EXAMPLE_RAGGED_TENSOR_2D = [[b'a', b'b'], [b'c', b'd', b'e'], [b'f'], [],
[b'g']]
EXAMPLE_RAGGED_TENSOR_2D_SPLITS = [0, 2, 5, 6, 6, 7]
EXAMPLE_RAGGED_TENSOR_2D_VALUES = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
# Example 4D ragged tensor value, with two ragged dimensions and with values
# whose shape is [2], expressed as nested python lists and as splits+values.
EXAMPLE_RAGGED_TENSOR_4D = [
[ # rt[0]
[[1, 2], [3, 4], [5, 6]], # rt[0][0]
[[7, 8], [9, 10], [11, 12]]], # rt[0][1]
[], # rt[1]
[ # rt[2]
[[13, 14], [15, 16], [17, 18]]], # rt[2][0]
[ # rt[3]
[[19, 20]]] # rt[3][0]
] # pyformat: disable
EXAMPLE_RAGGED_TENSOR_4D_SPLITS1 = [0, 2, 2, 3, 4]
EXAMPLE_RAGGED_TENSOR_4D_SPLITS2 = [0, 3, 6, 9, 10]
EXAMPLE_RAGGED_TENSOR_4D_VALUES = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16], [17, 18],
[19, 20]]
# Example 3D ragged tensor with uniform_row_lengths.
EXAMPLE_RAGGED_TENSOR_3D = [[[1, 2, 3], [4], [5, 6]], [[], [7, 8, 9], []]]
EXAMPLE_RAGGED_TENSOR_3D_ROWLEN = 3
EXAMPLE_RAGGED_TENSOR_3D_SPLITS = [0, 3, 4, 6, 6, 9, 9]
EXAMPLE_RAGGED_TENSOR_3D_VALUES = [1, 2, 3, 4, 5, 6, 7, 8, 9]
@test_util.run_all_in_graph_and_eager_modes
| _SliceBuilder |
python | scrapy__scrapy | scrapy/http/request/form.py | {
"start": 1077,
"end": 8857
} | class ____(Request):
valid_form_methods = ["GET", "POST"]
def __init__(
self, *args: Any, formdata: FormdataType = None, **kwargs: Any
) -> None:
if formdata and kwargs.get("method") is None:
kwargs["method"] = "POST"
super().__init__(*args, **kwargs)
if formdata:
items = formdata.items() if isinstance(formdata, dict) else formdata
form_query_str = _urlencode(items, self.encoding)
if self.method == "POST":
self.headers.setdefault(
b"Content-Type", b"application/x-www-form-urlencoded"
)
self._set_body(form_query_str)
else:
self._set_url(
urlunsplit(urlsplit(self.url)._replace(query=form_query_str))
)
@classmethod
def from_response(
cls,
response: TextResponse,
formname: str | None = None,
formid: str | None = None,
formnumber: int = 0,
formdata: FormdataType = None,
clickdata: dict[str, str | int] | None = None,
dont_click: bool = False,
formxpath: str | None = None,
formcss: str | None = None,
**kwargs: Any,
) -> Self:
kwargs.setdefault("encoding", response.encoding)
if formcss is not None:
formxpath = HTMLTranslator().css_to_xpath(formcss)
form = _get_form(response, formname, formid, formnumber, formxpath)
formdata = _get_inputs(form, formdata, dont_click, clickdata)
url = _get_form_url(form, kwargs.pop("url", None))
method = kwargs.pop("method", form.method)
if method is not None:
method = method.upper()
if method not in cls.valid_form_methods:
method = "GET"
return cls(url=url, method=method, formdata=formdata, **kwargs)
def _get_form_url(form: FormElement, url: str | None) -> str:
assert form.base_url is not None # typing
if url is None:
action = form.get("action")
if action is None:
return form.base_url
return urljoin(form.base_url, strip_html5_whitespace(action))
return urljoin(form.base_url, url)
def _urlencode(seq: Iterable[FormdataKVType], enc: str) -> str:
values = [
(to_bytes(k, enc), to_bytes(v, enc))
for k, vs in seq
for v in (cast("Iterable[str]", vs) if is_listlike(vs) else [cast("str", vs)])
]
return urlencode(values, doseq=True)
def _get_form(
response: TextResponse,
formname: str | None,
formid: str | None,
formnumber: int,
formxpath: str | None,
) -> FormElement:
"""Find the wanted form element within the given response."""
root = response.selector.root
forms = root.xpath("//form")
if not forms:
raise ValueError(f"No <form> element found in {response}")
if formname is not None:
f = root.xpath(f'//form[@name="{formname}"]')
if f:
return cast("FormElement", f[0])
if formid is not None:
f = root.xpath(f'//form[@id="{formid}"]')
if f:
return cast("FormElement", f[0])
# Get form element from xpath, if not found, go up
if formxpath is not None:
nodes = root.xpath(formxpath)
if nodes:
el = nodes[0]
while True:
if el.tag == "form":
return cast("FormElement", el)
el = el.getparent()
if el is None:
break
raise ValueError(f"No <form> element found with {formxpath}")
# If we get here, it means that either formname was None or invalid
try:
form = forms[formnumber]
except IndexError:
raise IndexError(f"Form number {formnumber} not found in {response}")
return cast("FormElement", form)
def _get_inputs(
form: FormElement,
formdata: FormdataType,
dont_click: bool,
clickdata: dict[str, str | int] | None,
) -> list[FormdataKVType]:
"""Return a list of key-value pairs for the inputs found in the given form."""
try:
formdata_keys = dict(formdata or ()).keys()
except (ValueError, TypeError):
raise ValueError("formdata should be a dict or iterable of tuples")
if not formdata:
formdata = []
inputs = form.xpath(
"descendant::textarea"
"|descendant::select"
"|descendant::input[not(@type) or @type["
' not(re:test(., "^(?:submit|image|reset)$", "i"))'
" and (../@checked or"
' not(re:test(., "^(?:checkbox|radio)$", "i")))]]',
namespaces={"re": "http://exslt.org/regular-expressions"},
)
values: list[FormdataKVType] = [
(k, "" if v is None else v)
for k, v in (_value(e) for e in inputs)
if k and k not in formdata_keys
]
if not dont_click:
clickable = _get_clickable(clickdata, form)
if clickable and clickable[0] not in formdata and clickable[0] is not None:
values.append(clickable)
formdata_items = formdata.items() if isinstance(formdata, dict) else formdata
values.extend((k, v) for k, v in formdata_items if v is not None)
return values
def _value(
ele: InputElement | SelectElement | TextareaElement,
) -> tuple[str | None, str | MultipleSelectOptions | None]:
n = ele.name
v = ele.value
if ele.tag == "select":
return _select_value(cast("SelectElement", ele), n, v)
return n, v
def _select_value(
ele: SelectElement, n: str | None, v: str | MultipleSelectOptions | None
) -> tuple[str | None, str | MultipleSelectOptions | None]:
multiple = ele.multiple
if v is None and not multiple:
# Match browser behaviour on simple select tag without options selected
# And for select tags without options
o = ele.value_options
return (n, o[0]) if o else (None, None)
return n, v
def _get_clickable(
clickdata: dict[str, str | int] | None, form: FormElement
) -> tuple[str, str] | None:
"""
Returns the clickable element specified in clickdata,
if the latter is given. If not, it returns the first
clickable element found
"""
clickables = list(
form.xpath(
'descendant::input[re:test(@type, "^(submit|image)$", "i")]'
'|descendant::button[not(@type) or re:test(@type, "^submit$", "i")]',
namespaces={"re": "http://exslt.org/regular-expressions"},
)
)
if not clickables:
return None
# If we don't have clickdata, we just use the first clickable element
if clickdata is None:
el = clickables[0]
return (el.get("name"), el.get("value") or "")
# If clickdata is given, we compare it to the clickable elements to find a
# match. We first look to see if the number is specified in clickdata,
# because that uniquely identifies the element
nr = clickdata.get("nr", None)
if nr is not None:
assert isinstance(nr, int)
try:
el = list(form.inputs)[nr]
except IndexError:
pass
else:
return (cast("str", el.get("name")), el.get("value") or "")
# We didn't find it, so now we build an XPath expression out of the other
# arguments, because they can be used as such
xpath = ".//*" + "".join(f'[@{k}="{v}"]' for k, v in clickdata.items())
el = form.xpath(xpath)
if len(el) == 1:
return (el[0].get("name"), el[0].get("value") or "")
if len(el) > 1:
raise ValueError(
f"Multiple elements found ({el!r}) matching the "
f"criteria in clickdata: {clickdata!r}"
)
raise ValueError(f"No clickable element matching clickdata: {clickdata!r}")
| FormRequest |
python | PyCQA__pylint | tests/functional/i/init_not_called.py | {
"start": 307,
"end": 395
} | class ____:
"""ancestor 2"""
def __init__(self):
print("init", self)
| BBBB |
python | django__django | tests/fixtures_regress/models.py | {
"start": 5721,
"end": 5926
} | class ____(models.Model):
name = models.CharField(max_length=255, unique=True)
def natural_key(self):
return (self.name,)
natural_key.dependencies = ["fixtures_regress.circle4"]
| Circle6 |
python | sympy__sympy | sympy/physics/quantum/pauli.py | {
"start": 580,
"end": 1044
} | class ____(Operator):
"""Pauli sigma operator, base class"""
@property
def name(self):
return self.args[0]
@property
def use_name(self):
return bool(self.args[0]) is not False
@classmethod
def default_args(self):
return (False,)
def __new__(cls, *args, **hints):
return Operator.__new__(cls, *args, **hints)
def _eval_commutator_BosonOp(self, other, **hints):
return S.Zero
| SigmaOpBase |
python | spack__spack | lib/spack/spack/util/file_cache.py | {
"start": 6976,
"end": 7015
} | class ____(SpackError):
pass
| CacheError |
python | walkccc__LeetCode | solutions/165. Compare Version Numbers/165.py | {
"start": 0,
"end": 416
} | class ____:
def compareVersion(self, version1: str, version2: str) -> int:
levels1 = version1.split('.')
levels2 = version2.split('.')
length = max(len(levels1), len(levels2))
for i in range(length):
v1 = int(levels1[i]) if i < len(levels1) else 0
v2 = int(levels2[i]) if i < len(levels2) else 0
if v1 < v2:
return -1
if v1 > v2:
return 1
return 0
| Solution |
python | getsentry__sentry | src/sentry/relay/config/__init__.py | {
"start": 13118,
"end": 13190
} | class ____(TypedDict):
source: Literal["url"]
| TransactionNameRuleScope |
python | getsentry__sentry | src/sentry/integrations/types.py | {
"start": 3564,
"end": 3790
} | class ____(Enum):
STARTED = "STARTED"
HALTED = "HALTED"
SUCCESS = "SUCCESS"
FAILURE = "FAILURE"
def __str__(self) -> str:
return self.value.lower()
T = TypeVar("T")
@dataclass
| EventLifecycleOutcome |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/utils/kernel_handler.py | {
"start": 3406,
"end": 21939
} | class ____(QObject):
"""
A class to handle the kernel in several ways and store kernel connection
information.
"""
sig_stdout = Signal(str)
"""
A stdout message was received on the process stdout.
"""
sig_stderr = Signal(str)
"""
A stderr message was received on the process stderr.
"""
sig_fault = Signal(str)
"""
A fault message was received.
"""
sig_kernel_is_ready = Signal()
"""
The kernel is ready.
"""
sig_kernel_connection_error = Signal()
"""
The kernel raised an error while connecting.
"""
_shutdown_thread_list = []
"""List of running shutdown threads"""
_shutdown_thread_list_lock = Lock()
"""
Lock to add threads to _shutdown_thread_list or clear that list.
"""
def __init__(
self,
kernel_client,
connection_file=None,
kernel_manager=None,
known_spyder_kernel=False,
hostname=None,
sshkey=None,
password=None,
ssh_connection=None,
websocket_url=None,
token=None,
aiohttp_session=None,
):
super().__init__()
# Connection Informations
self.connection_file = connection_file
self.kernel_manager = kernel_manager
self.kernel_client = kernel_client
self.known_spyder_kernel = known_spyder_kernel
self.hostname = hostname
self.sshkey = sshkey
self.password = password
self.ssh_connection = ssh_connection
self.websocket_url = websocket_url
self.token = token
self.aiohttp_session = None
self.kernel_error_message = None
self.connection_state = KernelConnectionState.Connecting
# Comm
self.kernel_comm = KernelComm()
self.kernel_comm.sig_comm_ready.connect(self.handle_comm_ready)
# Internal
self._shutdown_lock = Lock()
self._stdout_thread = None
self._stderr_thread = None
self._fault_args = None
self._init_stderr = ""
self._init_stdout = ""
self._shellwidget_connected = False
self._comm_ready_received = False
# Start kernel
self.kernel_client.sig_spyder_kernel_info.connect(
self.check_spyder_kernel_info
)
self.connect_std_pipes()
self.kernel_client.start_channels()
# Open comm and wait for comm ready reply.
# It only works for spyder-kernels, but this is the majority of cases.
# For ipykernels, this does nothing.
self.kernel_comm.open_comm(self.kernel_client)
@property
def is_websocket_client(self):
"""Return the websocket client."""
return isinstance(self.kernel_client, SpyderWSKernelClient)
def connect_(self):
"""Connect to shellwidget."""
self._shellwidget_connected = True
# Emit signal in case the connection is already made
if self.connection_state in [
KernelConnectionState.IpykernelReady,
KernelConnectionState.SpyderKernelReady
]:
# This is necessary for systems in which the kernel takes too much
# time to start because in that case its heartbeat is not detected
# as beating at this point.
# Fixes spyder-ide/spyder#22179
self.kernel_client.hb_channel._beating = True
self.sig_kernel_is_ready.emit()
elif self.connection_state == KernelConnectionState.Error:
self.sig_kernel_connection_error.emit()
# Show initial io
if self._init_stderr:
self.sig_stderr.emit(self._init_stderr)
self._init_stderr = None
if self._init_stdout:
self.sig_stdout.emit(self._init_stdout)
self._init_stdout = None
def check_spyder_kernel_info(self, spyder_kernel_info):
"""
Check if the Spyder-kernels version is the right one after receiving it
from the kernel.
If the kernel is non-locally managed, check if it is a spyder-kernel.
"""
if not spyder_kernel_info:
if self.known_spyder_kernel:
# spyder-kernels version < 3.0
self.kernel_error_message = (
ERROR_SPYDER_KERNEL_VERSION_OLD.format(
SPYDER_KERNELS_MIN_VERSION,
SPYDER_KERNELS_MAX_VERSION,
SPYDER_KERNELS_CONDA,
SPYDER_KERNELS_PIP,
)
)
self.connection_state = KernelConnectionState.Error
self.known_spyder_kernel = False
self.sig_kernel_connection_error.emit()
return
self.connection_state = KernelConnectionState.IpykernelReady
self.sig_kernel_is_ready.emit()
return
version, pyexec = spyder_kernel_info
if not check_version_range(version, SPYDER_KERNELS_VERSION):
# Development versions are acceptable
if "dev0" not in version:
self.kernel_error_message = ERROR_SPYDER_KERNEL_VERSION.format(
pyexec,
version,
SPYDER_KERNELS_MIN_VERSION,
SPYDER_KERNELS_MAX_VERSION,
SPYDER_KERNELS_CONDA,
SPYDER_KERNELS_PIP,
)
self.known_spyder_kernel = False
self.connection_state = KernelConnectionState.Error
self.sig_kernel_connection_error.emit()
return
self.known_spyder_kernel = True
self.connection_state = KernelConnectionState.SpyderKernelWaitComm
if self._comm_ready_received:
self.handle_comm_ready()
def handle_comm_ready(self):
"""The kernel comm is ready"""
self._comm_ready_received = True
if self.connection_state in [
KernelConnectionState.SpyderKernelWaitComm,
KernelConnectionState.Crashed,
]:
# This is necessary for systems in which the kernel takes too much
# time to start because in that case its heartbeat is not detected
# as beating at this point.
# Fixes spyder-ide/spyder#22179
self.kernel_client.hb_channel._beating = True
self.connection_state = KernelConnectionState.SpyderKernelReady
self.sig_kernel_is_ready.emit()
def connect_std_pipes(self):
"""Connect to std pipes."""
self.close_std_threads()
# Connect new threads
if self.kernel_manager is None:
return
stdout = self.kernel_manager.provisioner.process.stdout
stderr = self.kernel_manager.provisioner.process.stderr
if stdout:
self._stdout_thread = StdThread(self, stdout)
self._stdout_thread.sig_out.connect(self.handle_stdout)
self._stdout_thread.start()
if stderr:
self._stderr_thread = StdThread(self, stderr)
self._stderr_thread.sig_out.connect(self.handle_stderr)
self._stderr_thread.start()
def disconnect_std_pipes(self):
"""Disconnect old std pipes."""
if self._stdout_thread and not self._stdout_thread._closing:
self._stdout_thread.sig_out.disconnect(self.handle_stdout)
self._stdout_thread._closing = True
if self._stderr_thread and not self._stderr_thread._closing:
self._stderr_thread.sig_out.disconnect(self.handle_stderr)
self._stderr_thread._closing = True
def close_std_threads(self):
"""Close std threads."""
if self._stdout_thread is not None:
self._stdout_thread.wait()
self._stdout_thread = None
if self._stderr_thread is not None:
self._stderr_thread.wait()
self._stderr_thread = None
@Slot(str)
def handle_stderr(self, err):
"""Handle stderr"""
if self._shellwidget_connected:
self.sig_stderr.emit(err)
else:
self._init_stderr += err
@Slot(str)
def handle_stdout(self, out):
"""Handle stdout"""
if self._shellwidget_connected:
self.sig_stdout.emit(out)
else:
self._init_stdout += out
@staticmethod
def new_connection_file():
"""
Generate a new connection file
Taken from jupyter_client/console_app.py
Licensed under the BSD license
"""
# Check if jupyter_runtime_dir exists (Spyder addition)
if not osp.isdir(jupyter_runtime_dir()):
try:
os.makedirs(jupyter_runtime_dir())
except (IOError, OSError):
return None
cf = ""
while not cf:
ident = str(uuid.uuid4()).split("-")[-1]
cf = os.path.join(jupyter_runtime_dir(), "kernel-%s.json" % ident)
cf = cf if not os.path.exists(cf) else ""
return cf
@classmethod
def new_from_spec(cls, kernel_spec):
"""
Create a new kernel.
Might raise all kinds of exceptions
"""
connection_file = cls.new_connection_file()
if connection_file is None:
raise SpyderKernelError(
PERMISSION_ERROR_MSG.format(jupyter_runtime_dir())
)
# Kernel manager
kernel_manager = SpyderKernelManager(
connection_file=connection_file,
config=None,
autorestart=True,
)
kernel_manager._kernel_spec = kernel_spec
try:
kernel_manager.start_kernel(
stderr=PIPE,
stdout=PIPE,
env=kernel_spec.env,
)
except PermissionError:
# Show a nice error message when jupyter_runtime_dir is not
# writable.
# Fixes spyder-ide/spyder#23124
raise SpyderKernelError(
PERMISSION_ERROR_MSG.format(jupyter_runtime_dir())
)
# Kernel client
kernel_client = kernel_manager.client()
# Increase time (in seconds) to detect if a kernel is alive.
# See spyder-ide/spyder#3444.
kernel_client.hb_channel.time_to_dead = 25.0
return cls(
connection_file=connection_file,
kernel_manager=kernel_manager,
kernel_client=kernel_client,
known_spyder_kernel=True,
)
@classmethod
def from_connection_info(
cls,
connection_info,
hostname=None,
sshkey=None,
password=None,
ssh_connection=None,
):
"""Create kernel for given connection info."""
new_connection_file = cls.new_connection_file()
with open(new_connection_file, "w") as f:
json.dump(connection_info, f)
return cls(
connection_file=new_connection_file,
kernel_client=cls.init_kernel_client(
new_connection_file,
hostname,
sshkey,
password,
ssh_connection,
),
)
@classmethod
def from_connection_file(
cls,
connection_file,
hostname=None,
sshkey=None,
password=None,
ssh_connection=None,
):
"""Create kernel for given connection file."""
return cls(
connection_file=connection_file,
hostname=hostname,
sshkey=sshkey,
password=password,
kernel_client=cls.init_kernel_client(
connection_file,
hostname,
sshkey,
password,
ssh_connection,
),
)
@classmethod
def from_websocket(
cls,
websocket_url,
token=None,
aiohttp_session=None,
):
return cls(
websocket_url=websocket_url,
token=token,
aiohttp_session=aiohttp_session,
kernel_client=cls.init_ws_kernel_client(
websocket_url,
token=token,
aiohttp_session=aiohttp_session,
),
)
@staticmethod
def init_kernel_client(
connection_file,
hostname,
sshkey,
password,
ssh_connection,
):
"""Create kernel client."""
kernel_client = SpyderKernelClient(connection_file=connection_file)
# This is needed for issue spyder-ide/spyder#9304.
try:
kernel_client.load_connection_file()
except Exception as e:
raise SpyderKernelError(
_(
"An error occurred while trying to load "
"the kernel connection file. The error "
"was:\n\n"
)
+ f"<tt>{str(e)}</tt>"
)
if hostname is not None or ssh_connection is not None:
kernel_client.tunnel_to_kernel(
hostname=hostname,
sshkey=sshkey,
password=password,
ssh_connection=ssh_connection,
)
return kernel_client
@staticmethod
def init_ws_kernel_client(
websocket_url,
token=None,
username=None,
aiohttp_session=None,
):
"""Create kernel client."""
return SpyderWSKernelClient(
endpoint=websocket_url,
token=token,
username=username,
aiohttp_session=aiohttp_session,
)
def close(self, shutdown_kernel=True, now=False):
"""Close kernel"""
self.close_comm()
if shutdown_kernel and self.kernel_manager is not None:
km = self.kernel_manager
km.stop_restarter()
self.disconnect_std_pipes()
# This is probably necessary due to a weird interaction between
# `conda run --no-capture-output` and pytest capturing output
# facilities.
if now or running_under_pytest():
km.shutdown_kernel(now=True)
self.after_shutdown()
else:
shutdown_thread = QThread(None)
shutdown_thread.run = self._thread_shutdown_kernel
shutdown_thread.finished.connect(self.after_shutdown)
shutdown_thread.start()
with self._shutdown_thread_list_lock:
self._shutdown_thread_list.append(shutdown_thread)
if (
self.kernel_client is not None
and self.kernel_client.channels_running
):
self.kernel_client.stop_channels()
def after_shutdown(self):
"""Cleanup after shutdown"""
self.close_std_threads()
self.kernel_comm.remove(only_closing=True)
def _thread_shutdown_kernel(self):
"""Shutdown kernel."""
with self._shutdown_lock:
# Avoid calling shutdown_kernel on the same manager twice
# from different threads to avoid crash.
if self.kernel_manager.shutting_down:
return
self.kernel_manager.shutting_down = True
try:
self.kernel_manager.shutdown_kernel()
except Exception:
# kernel was externally killed
pass
@classmethod
def wait_all_shutdown_threads(cls):
"""Wait shutdown thread."""
with cls._shutdown_thread_list_lock:
for thread in cls._shutdown_thread_list:
if thread.isRunning():
try:
thread.kernel_manager._kill_kernel()
except Exception:
pass
thread.quit()
thread.wait()
cls._shutdown_thread_list = []
def copy(self):
"""Copy kernel."""
# Copy kernel infos
# Get new kernel_client
if self.is_websocket_client:
kernel_client = self.init_ws_kernel_client(
self.websocket_url,
token=self.token,
aiohttp_session=self.aiohttp_session,
)
else:
kernel_client = self.init_kernel_client(
self.connection_file,
self.hostname,
self.sshkey,
self.password,
self.ssh_connection,
)
return self.__class__(
connection_file=self.connection_file,
kernel_manager=self.kernel_manager,
known_spyder_kernel=self.known_spyder_kernel,
hostname=self.hostname,
sshkey=self.sshkey,
password=self.password,
ssh_connection=self.ssh_connection,
websocket_url=self.websocket_url,
token=self.token,
aiohttp_session=self.aiohttp_session,
kernel_client=kernel_client,
)
def faulthandler_setup(self, args):
"""Setup faulthandler"""
self._fault_args = args
def poll_fault_text(self):
"""Get a fault from a previous session."""
if self._fault_args is None:
return
self.kernel_comm.remote_call(
callback=self.emit_fault_text
).get_fault_text(*self._fault_args)
self._fault_args = None
def emit_fault_text(self, fault):
"""Emit fault text"""
if not fault:
return
self.sig_fault.emit(fault)
def fault_filename(self):
"""Get fault filename."""
if not self._fault_args:
return
return self._fault_args[0]
def close_comm(self):
"""Close comm"""
self.connection_state = KernelConnectionState.Closed
self.kernel_comm.close()
def reopen_comm(self):
"""Reopen comm (following a crash)"""
self.kernel_comm.remove()
self.connection_state = KernelConnectionState.Crashed
self.kernel_comm.open_comm(self.kernel_client)
def reconnect_kernel(self):
"""Kernel restarted successfully, so reconnect to it."""
self.reopen_comm()
self.disconnect_std_pipes()
self.connect_std_pipes()
def set_time_to_dead(self, time_to_dead):
"""Set time to detect if the kernel is dead in seconds."""
if self.kernel_client is not None:
self.kernel_client.hb_channel.time_to_dead = time_to_dead
| KernelHandler |
python | pydantic__pydantic | tests/test_forward_ref.py | {
"start": 32861,
"end": 33769
} | class ____(Bar):
pass
"""
)
assert module_2.Foo.model_fields['f'].annotation is int
@pytest.mark.xfail(
reason='We should keep a reference to the parent frame, not `f_locals`. '
"It's probably only reasonable to support this in Python 3.14 with PEP 649."
)
def test_can_resolve_forward_refs_in_parent_frame_after_class_definition():
def func():
class Model(BaseModel):
a: 'A'
class A(BaseModel):
pass
return Model
Model = func()
Model.model_rebuild()
def test_uses_correct_global_ns_for_type_defined_in_separate_module(create_module):
@create_module
def module_1():
from dataclasses import dataclass
@dataclass
class Bar:
f: 'A'
A = int
module_2 = create_module(
f"""
from pydantic import BaseModel
from {module_1.__name__} import Bar
A = str
| Foo |
python | django-haystack__django-haystack | test_haystack/test_query.py | {
"start": 14741,
"end": 34489
} | class ____(TestCase):
fixtures = ["base_data.json", "bulk_data.json"]
def setUp(self):
super().setUp()
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.cpkmmsi = CharPKMockModelSearchIndex()
self.uuidmmsi = SimpleMockUUIDModelIndex()
self.ui.build(indexes=[self.bmmsi, self.cpkmmsi, self.uuidmmsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
self.msqs = SearchQuerySet()
# Stow.
reset_search_queries()
def tearDown(self):
# Restore.
connections["default"]._index = self.old_unified_index
super().tearDown()
def test_len(self):
self.assertEqual(len(self.msqs), 23)
def test_repr(self):
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
self.assertRegex(
repr(self.msqs),
r"^<SearchQuerySet: query=<test_haystack.mocks.MockSearchQuery object"
r" at 0x[0-9A-Fa-f]+>, using=None>$",
)
def test_iter(self):
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
msqs = self.msqs.all()
results = [int(res.pk) for res in iter(msqs)]
self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]])
self.assertEqual(len(connections["default"].queries), 3)
def test_slice(self):
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
results = self.msqs.all()
self.assertEqual(
[int(res.pk) for res in results[1:11]],
[res.pk for res in MOCK_SEARCH_RESULTS[1:11]],
)
self.assertEqual(len(connections["default"].queries), 1)
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
results = self.msqs.all()
self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk)
self.assertEqual(len(connections["default"].queries), 1)
def test_manual_iter(self):
results = self.msqs.all()
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
check = [result.pk for result in results._manual_iter()]
self.assertEqual(
check,
[
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"17",
"18",
"19",
"20",
"21",
"22",
"23",
],
)
self.assertEqual(len(connections["default"].queries), 3)
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
# Test to ensure we properly fill the cache, even if we get fewer
# results back (not a handled model) than the hit count indicates.
# This will hang indefinitely if broken.
# CharPK testing
old_ui = self.ui
self.ui.build(indexes=[self.cpkmmsi])
connections["default"]._index = self.ui
self.cpkmmsi.update()
results = self.msqs.all()
loaded = [result.pk for result in results._manual_iter()]
self.assertEqual(loaded, ["sometext", "1234"])
self.assertEqual(len(connections["default"].queries), 1)
# UUID testing
self.ui.build(indexes=[self.uuidmmsi])
connections["default"]._index = self.ui
self.uuidmmsi.update()
results = self.msqs.all()
loaded = [result.pk for result in results._manual_iter()]
self.assertEqual(
loaded,
[
"53554c58-7051-4350-bcc9-dad75eb248a9",
"77554c58-7051-4350-bcc9-dad75eb24888",
],
)
connections["default"]._index = old_ui
def test_cache_is_full(self):
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
self.assertEqual(self.msqs._cache_is_full(), False)
results = self.msqs.all()
fire_the_iterator_and_fill_cache = list(results)
self.assertEqual(23, len(fire_the_iterator_and_fill_cache))
self.assertEqual(results._cache_is_full(), True)
self.assertEqual(len(connections["default"].queries), 4)
def test_all(self):
sqs = self.msqs.all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
def test_filter(self):
sqs = self.msqs.filter(content="foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_exclude(self):
sqs = self.msqs.exclude(content="foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_order_by(self):
sqs = self.msqs.order_by("foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertTrue("foo" in sqs.query.order_by)
def test_models(self):
# Stow.
old_unified_index = connections["default"]._index
ui = UnifiedIndex()
bmmsi = BasicMockModelSearchIndex()
bammsi = BasicAnotherMockModelSearchIndex()
ui.build(indexes=[bmmsi, bammsi])
connections["default"]._index = ui
msqs = SearchQuerySet()
sqs = msqs.all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 0)
sqs = msqs.models(MockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 1)
sqs = msqs.models(MockModel, AnotherMockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 2)
# This will produce a warning.
ui.build(indexes=[bmmsi])
sqs = msqs.models(AnotherMockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 1)
def test_result_class(self):
sqs = self.msqs.all()
self.assertTrue(issubclass(sqs.query.result_class, SearchResult))
# Custom class.
class IttyBittyResult:
pass
sqs = self.msqs.result_class(IttyBittyResult)
self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))
# Reset to default.
sqs = self.msqs.result_class(None)
self.assertTrue(issubclass(sqs.query.result_class, SearchResult))
def test_boost(self):
sqs = self.msqs.boost("foo", 10)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.boost.keys()), 1)
def test_highlight(self):
sqs = self.msqs.highlight()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.query.highlight, True)
def test_spelling_override(self):
sqs = self.msqs.filter(content="not the spellchecking query")
self.assertEqual(sqs.query.spelling_query, None)
sqs = self.msqs.set_spelling_query("override")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.query.spelling_query, "override")
def test_spelling_suggestions(self):
# Test the case where spelling support is disabled.
sqs = self.msqs.filter(content="Indx")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.spelling_suggestion(), None)
self.assertEqual(sqs.spelling_suggestion("indexy"), None)
def test_raw_search(self):
self.assertEqual(len(self.msqs.raw_search("foo")), 23)
self.assertEqual(
len(
self.msqs.raw_search("(content__exact:hello AND content__exact:world)")
),
23,
)
def test_load_all(self):
# Models with character primary keys.
sqs = SearchQuerySet()
sqs.query.backend = CharPKMockSearchBackend("charpk")
results = sqs.load_all().all()
self.assertEqual(len(results._result_cache), 0)
results._fill_cache(0, 2)
self.assertEqual(
len([result for result in results._result_cache if result is not None]), 2
)
# Models with uuid primary keys.
sqs = SearchQuerySet()
sqs.query.backend = UUIDMockSearchBackend("uuid")
results = sqs.load_all().all()
self.assertEqual(len(results._result_cache), 0)
results._fill_cache(0, 2)
self.assertEqual(
len([result for result in results._result_cache if result is not None]), 2
)
# If nothing is handled, you get nothing.
old_ui = connections["default"]._index
ui = UnifiedIndex()
ui.build(indexes=[])
connections["default"]._index = ui
sqs = self.msqs.load_all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs), 0)
connections["default"]._index = old_ui
# For full tests, see the solr_backend.
def test_load_all_read_queryset(self):
# Stow.
old_ui = connections["default"]._index
ui = UnifiedIndex()
gafmmsi = GhettoAFifthMockModelSearchIndex()
ui.build(indexes=[gafmmsi])
connections["default"]._index = ui
gafmmsi.update()
sqs = SearchQuerySet()
results = sqs.load_all().all()
results.query.backend = ReadQuerySetMockSearchBackend("default")
results._fill_cache(0, 2)
# The deleted result isn't returned
self.assertEqual(
len([result for result in results._result_cache if result is not None]), 1
)
# Register a SearchIndex with a read_queryset that returns deleted items
rqstsi = TextReadQuerySetTestSearchIndex()
ui.build(indexes=[rqstsi])
rqstsi.update()
sqs = SearchQuerySet()
results = sqs.load_all().all()
results.query.backend = ReadQuerySetMockSearchBackend("default")
results._fill_cache(0, 2)
# Both the deleted and not deleted items are returned
self.assertEqual(
len([result for result in results._result_cache if result is not None]), 2
)
# Restore.
connections["default"]._index = old_ui
def test_auto_query(self):
sqs = self.msqs.auto_query("test search -stuff")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
"<SQ: AND content__content=test search -stuff>",
)
sqs = self.msqs.auto_query('test "my thing" search -stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
'<SQ: AND content__content=test "my thing" search -stuff>',
)
sqs = self.msqs.auto_query("test \"my thing\" search 'moar quotes' -stuff")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
"<SQ: AND content__content=test \"my thing\" search 'moar quotes' -stuff>",
)
sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
'<SQ: AND content__content=test "my thing" search \'moar quotes\' "foo -stuff>',
)
sqs = self.msqs.auto_query("test - stuff")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter), "<SQ: AND content__content=test - stuff>"
)
# Ensure bits in exact matches get escaped properly as well.
sqs = self.msqs.auto_query('"pants:rule"')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter), '<SQ: AND content__content="pants:rule">'
)
# Now with a different fieldname
sqs = self.msqs.auto_query("test search -stuff", fieldname="title")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter), "<SQ: AND title__content=test search -stuff>"
)
sqs = self.msqs.auto_query('test "my thing" search -stuff', fieldname="title")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
'<SQ: AND title__content=test "my thing" search -stuff>',
)
def test_count(self):
self.assertEqual(self.msqs.count(), 23)
def test_facet_counts(self):
self.assertEqual(self.msqs.facet_counts(), {})
def test_best_match(self):
self.assertTrue(isinstance(self.msqs.best_match(), SearchResult))
def test_latest(self):
self.assertTrue(isinstance(self.msqs.latest("pub_date"), SearchResult))
def test_more_like_this(self):
mock = MockModel()
mock.id = 1
self.assertEqual(len(self.msqs.more_like_this(mock)), 23)
def test_facets(self):
sqs = self.msqs.facet("foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.facets), 1)
sqs2 = self.msqs.facet("foo").facet("bar")
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.facets), 2)
def test_date_facets(self):
try:
sqs = self.msqs.date_facet(
"foo",
start_date=datetime.date(2008, 2, 25),
end_date=datetime.date(2009, 2, 25),
gap_by="smarblaph",
)
self.fail()
except FacetingError as e:
self.assertEqual(
str(e),
"The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.",
)
sqs = self.msqs.date_facet(
"foo",
start_date=datetime.date(2008, 2, 25),
end_date=datetime.date(2009, 2, 25),
gap_by="month",
)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.date_facets), 1)
sqs2 = self.msqs.date_facet(
"foo",
start_date=datetime.date(2008, 2, 25),
end_date=datetime.date(2009, 2, 25),
gap_by="month",
).date_facet(
"bar",
start_date=datetime.date(2007, 2, 25),
end_date=datetime.date(2009, 2, 25),
gap_by="year",
)
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.date_facets), 2)
def test_query_facets(self):
sqs = self.msqs.query_facet("foo", "[bar TO *]")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_facets), 1)
sqs2 = self.msqs.query_facet("foo", "[bar TO *]").query_facet(
"bar", "[100 TO 499]"
)
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.query_facets), 2)
# Test multiple query facets on a single field
sqs3 = (
self.msqs.query_facet("foo", "[bar TO *]")
.query_facet("bar", "[100 TO 499]")
.query_facet("foo", "[1000 TO 1499]")
)
self.assertTrue(isinstance(sqs3, SearchQuerySet))
self.assertEqual(len(sqs3.query.query_facets), 3)
def test_stats(self):
sqs = self.msqs.stats_facet("foo", "bar")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.stats), 1)
sqs2 = self.msqs.stats_facet("foo", "bar").stats_facet("foo", "baz")
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.stats), 1)
sqs3 = self.msqs.stats_facet("foo", "bar").stats_facet("moof", "baz")
self.assertTrue(isinstance(sqs3, SearchQuerySet))
self.assertEqual(len(sqs3.query.stats), 2)
def test_narrow(self):
sqs = self.msqs.narrow("foo:moof")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.narrow_queries), 1)
def test_clone(self):
results = self.msqs.filter(foo="bar", foo__lt="10")
clone = results._clone()
self.assertTrue(isinstance(clone, SearchQuerySet))
self.assertEqual(str(clone.query), str(results.query))
self.assertEqual(clone._result_cache, [])
self.assertEqual(clone._result_count, None)
self.assertEqual(clone._cache_full, False)
self.assertEqual(clone._using, results._using)
def test_using(self):
sqs = SearchQuerySet(using="default")
self.assertNotEqual(sqs.query, None)
self.assertEqual(sqs.query._using, "default")
def test_chaining(self):
sqs = self.msqs.filter(content="foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
# A second instance should inherit none of the changes from above.
sqs = self.msqs.filter(content="bar")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_none(self):
sqs = self.msqs.none()
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
self.assertEqual(len(sqs), 0)
def test___and__(self):
sqs1 = self.msqs.filter(content="foo")
sqs2 = self.msqs.filter(content="bar")
sqs = sqs1 & sqs2
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 2)
def test___or__(self):
sqs1 = self.msqs.filter(content="foo")
sqs2 = self.msqs.filter(content="bar")
sqs = sqs1 | sqs2
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 2)
def test_and_or(self):
"""
Combining AND queries with OR should give
AND(OR(a, b), OR(c, d))
"""
sqs1 = self.msqs.filter(content="foo").filter(content="oof")
sqs2 = self.msqs.filter(content="bar").filter(content="rab")
sqs = sqs1 | sqs2
self.assertEqual(sqs.query.query_filter.connector, "OR")
self.assertEqual(
repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter)
)
self.assertEqual(
repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter)
)
def test_or_and(self):
"""
Combining OR queries with AND should give
OR(AND(a, b), AND(c, d))
"""
sqs1 = self.msqs.filter(content="foo").filter_or(content="oof")
sqs2 = self.msqs.filter(content="bar").filter_or(content="rab")
sqs = sqs1 & sqs2
self.assertEqual(sqs.query.query_filter.connector, "AND")
self.assertEqual(
repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter)
)
self.assertEqual(
repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter)
)
| SearchQuerySetTestCase |
python | google__pytype | pytype/abstract/_typing.py | {
"start": 22385,
"end": 22730
} | class ____(_base.BaseValue):
"""ParamSpec.kwargs."""
def __init__(self, paramspec: ParamSpec, ctx: "context.Context") -> None:
super().__init__(f"{paramspec.name}.kwargs", ctx)
self.paramspec = paramspec
def instantiate(self, node: "cfg.CFGNode", container=None) -> "cfg.Variable":
return self.to_variable(node)
| ParamSpecKwargs |
python | apache__airflow | providers/salesforce/src/airflow/providers/salesforce/operators/bulk.py | {
"start": 1199,
"end": 4946
} | class ____(BaseOperator):
"""
Execute a Salesforce Bulk API and pushes results to xcom.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SalesforceBulkOperator`
:param operation: Bulk operation to be performed
Available operations are in ['insert', 'update', 'upsert', 'delete', 'hard_delete']
:param object_name: The name of the Salesforce object
:param payload: list of dict to be passed as a batch
:param external_id_field: unique identifier field for upsert operations
:param batch_size: number of records to assign for each batch in the job
:param use_serial: Process batches in serial mode
:param salesforce_conn_id: The :ref:`Salesforce Connection id <howto/connection:SalesforceHook>`.
"""
available_operations = ("insert", "update", "upsert", "delete", "hard_delete")
def __init__(
self,
*,
operation: Literal["insert", "update", "upsert", "delete", "hard_delete"],
object_name: str,
payload: list,
external_id_field: str = "Id",
batch_size: int = 10000,
use_serial: bool = False,
salesforce_conn_id: str = "salesforce_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.operation = operation
self.object_name = object_name
self.payload = payload
self.external_id_field = external_id_field
self.batch_size = batch_size
self.use_serial = use_serial
self.salesforce_conn_id = salesforce_conn_id
self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.object_name:
raise ValueError("The required parameter 'object_name' cannot have an empty value.")
if self.operation not in self.available_operations:
raise ValueError(
f"Operation {self.operation!r} not found! "
f"Available operations are {self.available_operations}."
)
def execute(self, context: Context):
"""
Make an HTTP request to Salesforce Bulk API.
:param context: The task context during execution.
:return: API response if do_xcom_push is True
"""
sf_hook = SalesforceHook(salesforce_conn_id=self.salesforce_conn_id)
conn = sf_hook.get_conn()
bulk: SFBulkHandler = cast("SFBulkHandler", conn.__getattr__("bulk"))
result: Iterable = []
if self.operation == "insert":
result = bulk.__getattr__(self.object_name).insert(
data=self.payload, batch_size=self.batch_size, use_serial=self.use_serial
)
elif self.operation == "update":
result = bulk.__getattr__(self.object_name).update(
data=self.payload, batch_size=self.batch_size, use_serial=self.use_serial
)
elif self.operation == "upsert":
result = bulk.__getattr__(self.object_name).upsert(
data=self.payload,
external_id_field=self.external_id_field,
batch_size=self.batch_size,
use_serial=self.use_serial,
)
elif self.operation == "delete":
result = bulk.__getattr__(self.object_name).delete(
data=self.payload, batch_size=self.batch_size, use_serial=self.use_serial
)
elif self.operation == "hard_delete":
result = bulk.__getattr__(self.object_name).hard_delete(
data=self.payload, batch_size=self.batch_size, use_serial=self.use_serial
)
if self.do_xcom_push and result:
return result
return None
| SalesforceBulkOperator |
python | keras-team__keras | guides/making_new_layers_and_models_via_subclassing.py | {
"start": 8459,
"end": 9177
} | class ____(keras.layers.Layer):
def __init__(self):
super().__init__()
self.activity_reg = ActivityRegularizationLayer(1e-2)
def call(self, inputs):
return self.activity_reg(inputs)
layer = OuterLayer()
assert (
len(layer.losses) == 0
) # No losses yet since the layer has never been called
_ = layer(ops.zeros((1, 1)))
assert len(layer.losses) == 1 # We created one loss value
# `layer.losses` gets reset at the start of each __call__
_ = layer(ops.zeros((1, 1)))
assert len(layer.losses) == 1 # This is the loss created during the call above
"""
In addition, the `loss` property also contains regularization losses created
for the weights of any inner layer:
"""
| OuterLayer |
python | scrapy__scrapy | tests/test_loader.py | {
"start": 1036,
"end": 1311
} | class ____(NameItemLoader):
default_input_processor = MapCompose(lambda v: v[:-1])
# test processors
def processor_with_args(value, other=None, loader_context=None):
if "key" in loader_context:
return loader_context["key"]
return value
| DefaultedItemLoader |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 36641,
"end": 37011
} | class ____(_TestBasicOps, __TestCase):
def setUp(self):
self.case = "empty set"
self.values = []
self.set = set(self.values)
self.dup = set(self.values)
self.length = 0
self.repr = "set()"
super().setUp()
#------------------------------------------------------------------------------
| TestBasicOpsEmpty |
python | python__mypy | mypy/tvar_scope.py | {
"start": 549,
"end": 2097
} | class ____(TrivialSyntheticTypeTranslator):
"""Set namespace for all TypeVarLikeTypes types."""
def __init__(
self,
scope: TypeVarLikeScope,
fail_func: FailFunc,
source_tv: TypeVarLikeExpr,
context: Context,
) -> None:
self.scope = scope
self.fail_func = fail_func
self.source_tv = source_tv
self.context = context
super().__init__()
def visit_type_var(self, t: TypeVarType) -> Type:
existing = self.scope.get_binding(t.fullname)
if existing is None:
self._report_unbound_tvar(t)
return AnyType(TypeOfAny.from_error)
return existing
def visit_param_spec(self, t: ParamSpecType) -> Type:
existing = self.scope.get_binding(t.fullname)
if existing is None:
self._report_unbound_tvar(t)
return AnyType(TypeOfAny.from_error)
return existing
def visit_type_var_tuple(self, t: TypeVarTupleType) -> Type:
existing = self.scope.get_binding(t.fullname)
if existing is None:
self._report_unbound_tvar(t)
return AnyType(TypeOfAny.from_error)
return existing
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
return t
def _report_unbound_tvar(self, tvar: TypeVarLikeType) -> None:
self.fail_func(
f"Type variable {tvar.name} referenced in the default"
f" of {self.source_tv.name} is unbound",
self.context,
)
| TypeVarLikeDefaultFixer |
python | astropy__astropy | astropy/visualization/mpl_normalize.py | {
"start": 893,
"end": 7608
} | class ____(Normalize):
"""
Normalization class to be used with Matplotlib.
Parameters
----------
data : ndarray, optional
The image array. This input is used only if ``interval`` is
also input. ``data`` and ``interval`` are used to compute the
vmin and/or vmax values only if ``vmin`` or ``vmax`` are not
input.
interval : `~astropy.visualization.BaseInterval` subclass instance, optional
The interval object to apply to the input ``data`` to determine
the ``vmin`` and ``vmax`` values. This input is used only if
``data`` is also input. ``data`` and ``interval`` are used to
compute the vmin and/or vmax values only if ``vmin`` or ``vmax``
are not input.
vmin, vmax : float, optional
The minimum and maximum levels to show for the data. The
``vmin`` and ``vmax`` inputs override any calculated values from
the ``interval`` and ``data`` inputs.
stretch : `~astropy.visualization.BaseStretch` subclass instance
The stretch object to apply to the data. The default is
`~astropy.visualization.LinearStretch`.
clip : bool, optional
If `True`, data values outside the [0:1] range are clipped to
the [0:1] range.
invalid : None or float, optional
Value to assign NaN values generated by this class. NaNs in the
input ``data`` array are not changed. For matplotlib
normalization, the ``invalid`` value should map to the
matplotlib colormap "under" value (i.e., any finite value < 0).
If `None`, then NaN values are not replaced. This keyword has
no effect if ``clip=True``.
Notes
-----
If ``vmin == vmax``, the input data will be mapped to 0.
"""
def __init__(
self,
data: np.ndarray | None = None,
interval: BaseInterval | None = None,
vmin: float | None = None,
vmax: float | None = None,
stretch: BaseStretch = LinearStretch(),
clip: bool = False,
invalid: float | None = -1.0,
):
# this super call checks for matplotlib
super().__init__(vmin=vmin, vmax=vmax, clip=clip)
self.vmin = vmin
self.vmax = vmax
if stretch is None:
raise ValueError("stretch must be input")
if not isinstance(stretch, BaseStretch):
raise TypeError("stretch must be an instance of a BaseStretch subclass")
self.stretch = stretch
if interval is not None and not isinstance(interval, BaseInterval):
raise TypeError("interval must be an instance of a BaseInterval subclass")
self.interval = interval
self.inverse_stretch = stretch.inverse
self.clip = clip
self.invalid = invalid
# Define vmin and vmax if not None and data was input
if data is not None:
self._set_limits(data)
def _set_limits(self, data):
if self.vmin is not None and self.vmax is not None:
return
# Define vmin and vmax from the interval class if not None
if self.interval is None:
if self.vmin is None:
self.vmin = np.min(data[np.isfinite(data)])
if self.vmax is None:
self.vmax = np.max(data[np.isfinite(data)])
else:
_vmin, _vmax = self.interval.get_limits(data)
if self.vmin is None:
self.vmin = _vmin
if self.vmax is None:
self.vmax = _vmax
# Override the matplotlib method
def autoscale_None(self, A):
"""
If vmin or vmax are not set, set them according to the interval.
If no interval is set, set them to the min/max of the data array.
"""
self._set_limits(A)
def __call__(self, values, clip=None, invalid=None):
"""
Transform values using this normalization.
Parameters
----------
values : array-like
The input values.
clip : bool, optional
If `True`, values outside the [0:1] range are clipped to the
[0:1] range. If `None` then the ``clip`` value from the
`ImageNormalize` instance is used (the default of which is
`False`).
invalid : None or float, optional
Value to assign NaN values generated by this class. NaNs in
the input ``data`` array are not changed. For matplotlib
normalization, the ``invalid`` value should map to the
matplotlib colormap "under" value (i.e., any finite value <
0). If `None`, then the `ImageNormalize` instance value is
used. This keyword has no effect if ``clip=True``.
"""
if clip is None:
clip = self.clip
if invalid is None:
invalid = self.invalid
if isinstance(values, ma.MaskedArray):
if clip:
mask = False
else:
mask = values.mask
values = values.filled(self.vmax)
else:
mask = False
# Make sure scalars get broadcast to 1-d
if np.isscalar(values):
values = np.array([values], dtype=float)
else:
# copy because of in-place operations after
values = np.array(values, copy=True, dtype=float)
# Define vmin and vmax if not None
self._set_limits(values)
if self.vmin == self.vmax:
values *= 0.0
elif self.vmin > self.vmax:
raise ValueError("vmin must be less than or equal to vmax")
else:
# Normalize based on vmin and vmax
np.subtract(values, self.vmin, out=values)
np.true_divide(values, self.vmax - self.vmin, out=values)
# Clip to the 0 to 1 range
if clip:
values = np.clip(values, 0.0, 1.0, out=values)
# Stretch values
if self.stretch._supports_invalid_kw:
values = self.stretch(values, out=values, clip=False, invalid=invalid)
else:
values = self.stretch(values, out=values, clip=False)
# Convert to masked array for matplotlib
return ma.array(values, mask=mask)
def inverse(self, values, invalid=None):
# Find unstretched values in range 0 to 1
if self.inverse_stretch._supports_invalid_kw:
values_norm = self.inverse_stretch(values, clip=False, invalid=invalid)
else:
values_norm = self.inverse_stretch(values, clip=False)
# Scale to original range
return values_norm * (self.vmax - self.vmin) + self.vmin
| ImageNormalize |
python | pypa__warehouse | tests/unit/packaging/test_models.py | {
"start": 1415,
"end": 1544
} | class ____:
def test_repr(self, db_request):
role = DBRoleFactory()
assert isinstance(repr(role), str)
| TestRole |
python | kamyu104__LeetCode-Solutions | Python/get-equal-substrings-within-budget.py | {
"start": 29,
"end": 463
} | class ____(object):
def equalSubstring(self, s, t, maxCost):
"""
:type s: str
:type t: str
:type maxCost: int
:rtype: int
"""
left = 0
for right in xrange(len(s)):
maxCost -= abs(ord(s[right])-ord(t[right]))
if maxCost < 0:
maxCost += abs(ord(s[left])-ord(t[left]))
left += 1
return (right+1)-left
| Solution |
python | cookiecutter__cookiecutter | cookiecutter/extensions.py | {
"start": 2798,
"end": 3175
} | class ____(Extension):
"""Jinja2 Extension to generate uuid4 string."""
def __init__(self, environment: Environment) -> None:
"""Jinja2 Extension constructor."""
super().__init__(environment)
def uuid4() -> str:
"""Generate UUID4."""
return str(uuid.uuid4())
environment.globals.update(uuid4=uuid4)
| UUIDExtension |
python | sqlalchemy__sqlalchemy | test/ext/declarative/test_inheritance.py | {
"start": 1556,
"end": 24434
} | class ____(
RemoveORMEventsGlobally, DeclarativeTestBase, testing.AssertsCompiledSQL
):
def _roundtrip(
self,
Employee,
Manager,
Engineer,
Boss,
polymorphic=True,
explicit_type=False,
):
Base.metadata.create_all(testing.db)
sess = fixture_session()
e1 = Engineer(name="dilbert", primary_language="java")
e2 = Engineer(name="wally", primary_language="c++")
m1 = Manager(name="dogbert", golf_swing="fore!")
e3 = Engineer(name="vlad", primary_language="cobol")
b1 = Boss(name="pointy haired")
if polymorphic:
for obj in [e1, e2, m1, e3, b1]:
if explicit_type:
eq_(obj.type, obj.__mapper__.polymorphic_identity)
else:
assert_raises_message(
AttributeError,
"does not implement attribute .?'type' "
"at the instance level.",
getattr,
obj,
"type",
)
else:
assert "type" not in Engineer.__dict__
assert "type" not in Manager.__dict__
assert "type" not in Boss.__dict__
sess.add_all([e1, e2, m1, e3, b1])
sess.flush()
sess.expunge_all()
if polymorphic:
eq_(
sess.query(Employee).order_by(Employee.name).all(),
[
Engineer(name="dilbert"),
Manager(name="dogbert"),
Boss(name="pointy haired"),
Engineer(name="vlad"),
Engineer(name="wally"),
],
)
else:
eq_(
sess.query(Engineer).order_by(Engineer.name).all(),
[
Engineer(name="dilbert"),
Engineer(name="vlad"),
Engineer(name="wally"),
],
)
eq_(sess.query(Manager).all(), [Manager(name="dogbert")])
eq_(sess.query(Boss).all(), [Boss(name="pointy haired")])
e1 = sess.query(Engineer).order_by(Engineer.name).first()
sess.expire(e1)
eq_(e1.name, "dilbert")
def test_explicit(self):
engineers = Table(
"engineers",
Base.metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50)),
Column("primary_language", String(50)),
)
managers = Table(
"managers",
Base.metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50)),
Column("golf_swing", String(50)),
)
boss = Table(
"boss",
Base.metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50)),
Column("golf_swing", String(50)),
)
punion = polymorphic_union(
{"engineer": engineers, "manager": managers, "boss": boss},
"type",
"punion",
)
class Employee(Base, ComparableEntity):
__table__ = punion
__mapper_args__ = {"polymorphic_on": punion.c.type}
class Engineer(Employee):
__table__ = engineers
__mapper_args__ = {
"polymorphic_identity": "engineer",
"concrete": True,
}
class Manager(Employee):
__table__ = managers
__mapper_args__ = {
"polymorphic_identity": "manager",
"concrete": True,
}
class Boss(Manager):
__table__ = boss
__mapper_args__ = {
"polymorphic_identity": "boss",
"concrete": True,
}
self._roundtrip(Employee, Manager, Engineer, Boss)
def test_concrete_inline_non_polymorphic(self):
"""test the example from the declarative docs."""
class Employee(Base, ComparableEntity):
__tablename__ = "people"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
class Engineer(Employee):
__tablename__ = "engineers"
__mapper_args__ = {"concrete": True}
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
primary_language = Column(String(50))
name = Column(String(50))
class Manager(Employee):
__tablename__ = "manager"
__mapper_args__ = {"concrete": True}
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
golf_swing = Column(String(50))
name = Column(String(50))
class Boss(Manager):
__tablename__ = "boss"
__mapper_args__ = {"concrete": True}
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
golf_swing = Column(String(50))
name = Column(String(50))
self._roundtrip(Employee, Manager, Engineer, Boss, polymorphic=False)
def test_abstract_concrete_base_didnt_configure(self):
class Employee(AbstractConcreteBase, Base, ComparableEntity):
strict_attrs = True
assert_raises_message(
orm_exc.UnmappedClassError,
"Class test.ext.declarative.test_inheritance.Employee is a "
"subclass of AbstractConcreteBase and has a mapping pending "
"until all subclasses are defined. Call the "
r"sqlalchemy.orm.configure_mappers\(\) function after all "
"subclasses have been defined to complete the "
"mapping of this class.",
Session().query,
Employee,
)
Base.registry.configure()
# no subclasses yet.
assert_raises_message(
orm_exc.UnmappedClassError,
".*and has a mapping pending",
Session().query,
Employee,
)
class Manager(Employee):
__tablename__ = "manager"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
golf_swing = Column(String(40))
__mapper_args__ = {
"polymorphic_identity": "manager",
"concrete": True,
}
# didn't call configure_mappers() again
assert_raises_message(
orm_exc.UnmappedClassError,
".*and has a mapping pending",
Session().query,
Employee,
)
Base.registry.configure()
self.assert_compile(
Session().query(Employee),
"SELECT pjoin.employee_id AS pjoin_employee_id, pjoin.type AS "
"pjoin_type, pjoin.name AS pjoin_name, "
"pjoin.golf_swing AS pjoin_golf_swing "
"FROM (SELECT manager.employee_id "
"AS employee_id, manager.name AS name, manager.golf_swing AS "
"golf_swing, 'manager' AS type FROM manager) AS pjoin",
)
def test_abstract_concrete_extension(self):
class Employee(AbstractConcreteBase, Base, ComparableEntity):
name = Column(String(50))
class Manager(Employee):
__tablename__ = "manager"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
golf_swing = Column(String(40))
__mapper_args__ = {
"polymorphic_identity": "manager",
"concrete": True,
}
class Boss(Manager):
__tablename__ = "boss"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
golf_swing = Column(String(40))
__mapper_args__ = {
"polymorphic_identity": "boss",
"concrete": True,
}
class Engineer(Employee):
__tablename__ = "engineer"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
primary_language = Column(String(40))
__mapper_args__ = {
"polymorphic_identity": "engineer",
"concrete": True,
}
self._roundtrip(Employee, Manager, Engineer, Boss)
with expect_raises_message(
sa_exc.InvalidRequestError,
r"Can't instantiate class for Mapper\[Employee\(pjoin\)\]; "
r"mapper is marked polymorphic_abstract=True",
):
Employee()
@testing.combinations(True, False)
def test_abstract_concrete_extension_descriptor_refresh(
self, use_strict_attrs
):
class Employee(AbstractConcreteBase, Base, ComparableEntity):
strict_attrs = use_strict_attrs
@declared_attr
def name(cls):
return Column(String(50))
class Manager(Employee):
__tablename__ = "manager"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
paperwork = Column(String(10))
__mapper_args__ = {
"polymorphic_identity": "manager",
"concrete": True,
}
class Engineer(Employee):
__tablename__ = "engineer"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
@property
def paperwork(self):
return "p"
__mapper_args__ = {
"polymorphic_identity": "engineer",
"concrete": True,
}
Base.metadata.create_all(testing.db)
sess = fixture_session()
sess.add(Engineer(name="d"))
sess.commit()
if use_strict_attrs:
assert "paperwork" not in Engineer.__mapper__.class_manager
else:
assert "paperwork" in Engineer.__mapper__.class_manager
assert "paperwork" not in Engineer.__mapper__.attrs.keys()
# type currently does get mapped, as a
# ConcreteInheritedProperty, which means, "ignore this thing inherited
# from the concrete base". if we didn't specify concrete=True, then
# this one gets stuck in the error condition also.
assert "type" in Engineer.__mapper__.class_manager
assert "type" in Engineer.__mapper__.attrs.keys()
e1 = sess.query(Engineer).first()
eq_(e1.name, "d")
sess.expire(e1)
eq_(e1.name, "d")
def test_concrete_extension(self):
class Employee(ConcreteBase, Base, ComparableEntity):
__tablename__ = "employee"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
__mapper_args__ = {
"polymorphic_identity": "employee",
"concrete": True,
}
class Manager(Employee):
__tablename__ = "manager"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
golf_swing = Column(String(40))
__mapper_args__ = {
"polymorphic_identity": "manager",
"concrete": True,
}
class Boss(Manager):
__tablename__ = "boss"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
golf_swing = Column(String(40))
__mapper_args__ = {
"polymorphic_identity": "boss",
"concrete": True,
}
class Engineer(Employee):
__tablename__ = "engineer"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
primary_language = Column(String(40))
__mapper_args__ = {
"polymorphic_identity": "engineer",
"concrete": True,
}
self._roundtrip(Employee, Manager, Engineer, Boss)
def test_concrete_extension_warn_for_overlap(self):
class Employee(ConcreteBase, Base, ComparableEntity):
__tablename__ = "employee"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
__mapper_args__ = {
"polymorphic_identity": "employee",
"concrete": True,
}
class Manager(Employee):
__tablename__ = "manager"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
type = Column(String(50))
__mapper_args__ = {
"polymorphic_identity": "manager",
"concrete": True,
}
with expect_raises_message(
sa_exc.InvalidRequestError,
"Polymorphic union can't use 'type' as the discriminator "
"column due to mapped column "
r"Column\('type', String\(length=50\), table=<manager>\); "
"please "
"apply the 'typecolname' argument; this is available on "
"ConcreteBase as '_concrete_discriminator_name'",
):
configure_mappers()
def test_concrete_extension_warn_concrete_disc_resolves_overlap(self):
class Employee(ConcreteBase, Base, ComparableEntity):
_concrete_discriminator_name = "_type"
__tablename__ = "employee"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
__mapper_args__ = {
"polymorphic_identity": "employee",
"concrete": True,
}
class Manager(Employee):
__tablename__ = "manager"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
type = Column(String(50))
__mapper_args__ = {
"polymorphic_identity": "manager",
"concrete": True,
}
configure_mappers()
self.assert_compile(
select(Employee),
"SELECT pjoin.employee_id, pjoin.name, pjoin._type, pjoin.type "
"FROM (SELECT employee.employee_id AS employee_id, "
"employee.name AS name, CAST(NULL AS VARCHAR(50)) AS type, "
"'employee' AS _type FROM employee UNION ALL "
"SELECT manager.employee_id AS employee_id, "
"CAST(NULL AS VARCHAR(50)) AS name, manager.type AS type, "
"'manager' AS _type FROM manager) AS pjoin",
)
def test_abs_clean_dir(self):
"""test #8402"""
class Employee(AbstractConcreteBase, Base):
strict_attrs = True
name = Column(String(50))
class Manager(Employee):
__tablename__ = "manager"
id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
"polymorphic_identity": "manager",
"concrete": True,
}
class Engineer(Employee):
__tablename__ = "engineer"
id = Column(Integer, primary_key=True)
name = Column(String(50))
engineer_info = Column(String(40))
__mapper_args__ = {
"polymorphic_identity": "engineer",
"concrete": True,
}
configure_mappers()
eq_(
{n for n in dir(Employee) if not n.startswith("_")},
{"name", "strict_attrs", "registry", "id", "type", "metadata"},
)
eq_(
{n for n in dir(Manager) if not n.startswith("_")},
{
"type",
"strict_attrs",
"metadata",
"name",
"id",
"registry",
"manager_data",
},
)
eq_(
{n for n in dir(Engineer) if not n.startswith("_")},
{
"name",
"strict_attrs",
"registry",
"id",
"type",
"metadata",
"engineer_info",
},
)
def test_abs_concrete_extension_warn_for_overlap(self):
class Employee(AbstractConcreteBase, Base, ComparableEntity):
name = Column(String(50))
__mapper_args__ = {
"polymorphic_identity": "employee",
"concrete": True,
}
class Manager(Employee):
__tablename__ = "manager"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
type = Column(String(50))
__mapper_args__ = {
"polymorphic_identity": "manager",
"concrete": True,
}
with expect_raises_message(
sa_exc.InvalidRequestError,
"Polymorphic union can't use 'type' as the discriminator "
"column due to mapped column "
r"Column\('type', String\(length=50\), table=<manager>\); "
"please "
"apply the 'typecolname' argument; this is available on "
"ConcreteBase as '_concrete_discriminator_name'",
):
configure_mappers()
@testing.combinations(True, False)
def test_abs_concrete_extension_warn_concrete_disc_resolves_overlap(
self, use_strict_attrs
):
class Employee(AbstractConcreteBase, Base, ComparableEntity):
strict_attrs = use_strict_attrs
_concrete_discriminator_name = "_type"
name = Column(String(50))
__mapper_args__ = {
"polymorphic_identity": "employee",
"concrete": True,
}
class Manager(Employee):
__tablename__ = "manager"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
type = Column(String(50))
__mapper_args__ = {
"polymorphic_identity": "manager",
"concrete": True,
}
configure_mappers()
self.assert_compile(
select(Employee),
(
"SELECT pjoin.employee_id, pjoin.name, pjoin._type, "
"pjoin.type "
if use_strict_attrs
else "SELECT pjoin.employee_id, pjoin.type, pjoin.name, "
"pjoin._type "
)
+ "FROM (SELECT manager.employee_id AS employee_id, "
"manager.type AS type, manager.name AS name, 'manager' AS _type "
"FROM manager) AS pjoin",
)
def test_has_inherited_table_doesnt_consider_base(self):
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
assert not has_inherited_table(A)
class B(A):
__tablename__ = "b"
id = Column(Integer, ForeignKey("a.id"), primary_key=True)
assert has_inherited_table(B)
def test_has_inherited_table_in_mapper_args(self):
class Test(Base):
__tablename__ = "test"
id = Column(Integer, primary_key=True)
type = Column(String(20))
@declared_attr
def __mapper_args__(cls):
if not has_inherited_table(cls):
ret = {
"polymorphic_identity": "default",
"polymorphic_on": cls.type,
}
else:
ret = {"polymorphic_identity": cls.__name__}
return ret
class PolyTest(Test):
__tablename__ = "poly_test"
id = Column(Integer, ForeignKey(Test.id), primary_key=True)
configure_mappers()
assert Test.__mapper__.polymorphic_on is Test.__table__.c.type
assert PolyTest.__mapper__.polymorphic_on is Test.__table__.c.type
def test_ok_to_override_type_from_abstract(self):
class Employee(AbstractConcreteBase, Base, ComparableEntity):
name = Column(String(50))
class Manager(Employee):
__tablename__ = "manager"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
golf_swing = Column(String(40))
@property
def type(self):
return "manager"
__mapper_args__ = {
"polymorphic_identity": "manager",
"concrete": True,
}
class Boss(Manager):
__tablename__ = "boss"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
golf_swing = Column(String(40))
@property
def type(self):
return "boss"
__mapper_args__ = {
"polymorphic_identity": "boss",
"concrete": True,
}
class Engineer(Employee):
__tablename__ = "engineer"
employee_id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
primary_language = Column(String(40))
@property
def type(self):
return "engineer"
__mapper_args__ = {
"polymorphic_identity": "engineer",
"concrete": True,
}
self._roundtrip(Employee, Manager, Engineer, Boss, explicit_type=True)
| ConcreteInhTest |
python | pytorch__pytorch | torch/testing/_internal/opinfo/refs.py | {
"start": 6861,
"end": 8039
} | class ____(BinaryUfuncInfo):
"""
An OpInfo for a Python reference of an elementwise binary operation.
"""
def __init__(
self,
name, # the stringname of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
op_db=None, # The database of opinfos to search for the parent opinfo
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo
**kwargs,
): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo_variant_name = torch_opinfo_variant_name
self.torch_opinfo = _find_referenced_opinfo(
torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db
)
assert isinstance(self.torch_opinfo, BinaryUfuncInfo)
inherited = self.torch_opinfo._original_binary_ufunc_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
super().__init__(**ukwargs)
| ElementwiseBinaryPythonRefInfo |
python | wandb__wandb | wandb/sdk/launch/runner/sagemaker_runner.py | {
"start": 683,
"end": 4221
} | class ____(AbstractRun):
"""Instance of ``AbstractRun`` corresponding to a subprocess launched to run an entry point command on aws sagemaker."""
def __init__(
self,
training_job_name: str,
client: "boto3.Client",
log_client: Optional["boto3.Client"] = None,
) -> None:
super().__init__()
self.client = client
self.log_client = log_client
self.training_job_name = training_job_name
self._status = Status("running")
@property
def id(self) -> str:
return f"sagemaker-{self.training_job_name}"
async def get_logs(self) -> Optional[str]:
if self.log_client is None:
return None
try:
describe_log_streams = event_loop_thread_exec(
self.log_client.describe_log_streams
)
describe_res = await describe_log_streams(
logGroupName="/aws/sagemaker/TrainingJobs",
logStreamNamePrefix=self.training_job_name,
)
if len(describe_res["logStreams"]) == 0:
wandb.termwarn(
f"Failed to get logs for training job: {self.training_job_name}"
)
return None
log_name = describe_res["logStreams"][0]["logStreamName"]
get_log_events = event_loop_thread_exec(self.log_client.get_log_events)
res = await get_log_events(
logGroupName="/aws/sagemaker/TrainingJobs",
logStreamName=log_name,
)
assert "events" in res
return "\n".join(
[f"{event['timestamp']}:{event['message']}" for event in res["events"]]
)
except self.log_client.exceptions.ResourceNotFoundException:
wandb.termwarn(
f"Failed to get logs for training job: {self.training_job_name}"
)
return None
except Exception as e:
wandb.termwarn(
f"Failed to handle logs for training job: {self.training_job_name} with error {str(e)}"
)
return None
async def wait(self) -> bool:
while True:
status_state = (await self.get_status()).state
wandb.termlog(
f"{LOG_PREFIX}Training job {self.training_job_name} status: {status_state}"
)
if status_state in ["stopped", "failed", "finished"]:
break
await asyncio.sleep(5)
return status_state == "finished"
async def cancel(self) -> None:
# Interrupt child process if it hasn't already exited
status = await self.get_status()
if status.state == "running":
self.client.stop_training_job(TrainingJobName=self.training_job_name)
await self.wait()
async def get_status(self) -> Status:
describe_training_job = event_loop_thread_exec(
self.client.describe_training_job
)
job_status = (
await describe_training_job(TrainingJobName=self.training_job_name)
)["TrainingJobStatus"]
if job_status == "Completed" or job_status == "Stopped":
self._status = Status("finished")
elif job_status == "Failed":
self._status = Status("failed")
elif job_status == "Stopping":
self._status = Status("stopping")
elif job_status == "InProgress":
self._status = Status("running")
return self._status
| SagemakerSubmittedRun |
python | numba__numba | numba/core/types/abstract.py | {
"start": 9101,
"end": 9391
} | class ____(Type):
"""
Base class for types usable as "dtype" arguments to various Numpy APIs
(e.g. np.empty()).
"""
@property
@abstractmethod
def dtype(self):
"""
The actual dtype denoted by this dtype spec (a Type instance).
"""
| DTypeSpec |
python | PrefectHQ__prefect | src/prefect/server/orchestration/rules.py | {
"start": 8593,
"end": 14965
} | class ____(
OrchestrationContext[orm_models.FlowRun, core.FlowRunPolicy]
):
"""
A container for a flow run state transition, governed by orchestration rules.
When a flow- run attempts to change state, Prefect REST API has an opportunity
to decide whether this transition can proceed. All the relevant information
associated with the state transition is stored in an `OrchestrationContext`,
which is subsequently governed by nested orchestration rules implemented using
the `BaseOrchestrationRule` ABC.
`FlowOrchestrationContext` introduces the concept of a state being `None` in the
context of an intended state transition. An initial state can be `None` if a run
is is attempting to set a state for the first time. The proposed state might be
`None` if a rule governing the transition determines that no state change
should occur at all and nothing is written to the database.
Attributes:
session: a SQLAlchemy database session
run: the flow run attempting to change state
initial_state: the initial state of the run
proposed_state: the proposed state the run is transitioning into
validated_state: a proposed state that has committed to the database
rule_signature: a record of rules that have fired on entry into a
managed context, currently only used for debugging purposes
finalization_signature: a record of rules that have fired on exit from a
managed context, currently only used for debugging purposes
response_status: a SetStateStatus object used to build the API response
response_details:a StateResponseDetails object use to build the API response
Args:
session: a SQLAlchemy database session
run: the flow run attempting to change state
initial_state: the initial state of a run
proposed_state: the proposed state a run is transitioning into
"""
run: orm_models.FlowRun
@inject_db
async def validate_proposed_state(
self,
db: PrefectDBInterface,
):
"""
Validates a proposed state by committing it to the database.
After the `FlowOrchestrationContext` is governed by orchestration rules, the
proposed state can be validated: the proposed state is added to the current
SQLAlchemy session and is flushed. `self.validated_state` set to the flushed
state. The state on the run is set to the validated state as well.
If the proposed state is `None` when this method is called, no state will be
written and `self.validated_state` will be set to the run's current state.
Returns:
None
"""
# (circular import)
from prefect.server.api.server import is_client_retryable_exception
try:
await self._validate_proposed_state()
return
except Exception as exc:
logger.exception("Encountered error during state validation")
self.proposed_state: states.State[Any] | None = None
if is_client_retryable_exception(exc):
# Do not capture retryable database exceptions, this exception will be
# raised as a 503 in the API layer
raise
reason = f"Error validating state: {exc!r}"
self.response_status = SetStateStatus.ABORT
self.response_details: StateResponseDetails = StateAbortDetails(
reason=reason
)
@db_injector
async def _validate_proposed_state(
self,
db: PrefectDBInterface,
):
if self.proposed_state is None:
validated_orm_state = self.run.state
state_data = None
if (
self.run.state is not None
and self.run.state.result_artifact_id is not None
):
# We cannot access `self.run.state.data` directly for unknown reasons
artifact = await artifacts.read_artifact(
self.session, self.run.state.result_artifact_id
)
state_data = artifact.data if artifact else None
else:
state_payload = self.proposed_state.model_dump_for_orm()
state_data: dict[str, Any] | Any | None = state_payload.pop("data", None)
if state_data is not None and not (
isinstance(state_data, dict) and state_data.get("type") == "unpersisted"
):
state_result_artifact = core.Artifact.from_result(state_data)
state_result_artifact.flow_run_id = self.run.id
await artifacts.create_artifact(self.session, state_result_artifact)
state_payload["result_artifact_id"] = state_result_artifact.id
validated_orm_state = db.FlowRunState(
flow_run_id=self.run.id,
**state_payload,
)
if validated_orm_state:
self.session.add(validated_orm_state)
self.run.set_state(validated_orm_state)
await self.session.flush()
self.validated_state = states.State.from_orm_without_result(
validated_orm_state, with_data=state_data
)
else:
self.validated_state = None
def safe_copy(self) -> Self:
"""
Creates a mostly-mutation-safe copy for use in orchestration rules.
Orchestration rules govern state transitions using information stored in
an `OrchestrationContext`. However, mutating objects stored on the context
directly can have unintended side-effects. To guard against this,
`self.safe_copy` can be used to pass information to orchestration rules
without risking mutation.
Note:
`self.run` is an ORM model, and even when copied is unsafe to mutate
Returns:
A mutation-safe copy of `FlowOrchestrationContext`
"""
return super().safe_copy()
@property
def run_settings(self) -> core.FlowRunPolicy:
"""Run-level settings used to orchestrate the state transition."""
return self.run.empirical_policy
async def task_run(self) -> None:
return None
async def flow_run(self) -> orm_models.FlowRun:
return self.run
| FlowOrchestrationContext |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/llvm_client/package.py | {
"start": 219,
"end": 529
} | class ____(CMakePackage):
"""A client package that depends on llvm and needs C and C++ compilers."""
git = "https://github.com/mycpptutorial/helloworld-cmake"
depends_on("c", type="build")
depends_on("cxx", type="build")
version("develop", branch="master")
depends_on("llvm")
| LlvmClient |
python | getsentry__sentry | src/sentry/auth/authenticators/sms.py | {
"start": 595,
"end": 865
} | class ____(Exception):
def __init__(self, phone_number: str, user_id: int | None, remote_ip: str | None) -> None:
super().__init__()
self.phone_number = phone_number
self.user_id = user_id
self.remote_ip = remote_ip
| SMSRateLimitExceeded |
python | wandb__wandb | wandb/integration/openai/resolver.py | {
"start": 654,
"end": 8164
} | class ____:
def __init__(self):
self.define_metrics_called = False
def __call__(
self,
args: Sequence[Any],
kwargs: Dict[str, Any],
response: Response,
start_time: float, # pass to comply with the protocol, but use response["created"] instead
time_elapsed: float,
) -> Optional[Dict[str, Any]]:
request = kwargs
if not self.define_metrics_called:
# define metrics on first call
for key in usage_metric_keys:
wandb.define_metric(key, step_metric="_timestamp")
self.define_metrics_called = True
try:
if response.get("object") == "edit":
return self._resolve_edit(request, response, time_elapsed)
elif response.get("object") == "text_completion":
return self._resolve_completion(request, response, time_elapsed)
elif response.get("object") == "chat.completion":
return self._resolve_chat_completion(request, response, time_elapsed)
else:
# todo: properly treat failed requests
logger.info(
f"Unsupported OpenAI response object: {response.get('object')}"
)
except Exception as e:
logger.warning(f"Failed to resolve request/response: {e}")
return None
@staticmethod
def results_to_trace_tree(
request: Dict[str, Any],
response: Response,
results: List[trace_tree.Result],
time_elapsed: float,
) -> trace_tree.WBTraceTree:
"""Converts the request, response, and results into a trace tree.
params:
request: The request dictionary
response: The response object
results: A list of results object
time_elapsed: The time elapsed in seconds
returns:
A wandb trace tree object.
"""
start_time_ms = int(round(response["created"] * 1000))
end_time_ms = start_time_ms + int(round(time_elapsed * 1000))
span = trace_tree.Span(
name=f"{response.get('model', 'openai')}_{response['object']}_{response.get('created')}",
attributes=dict(response), # type: ignore
start_time_ms=start_time_ms,
end_time_ms=end_time_ms,
span_kind=trace_tree.SpanKind.LLM,
results=results,
)
model_obj = {"request": request, "response": response, "_kind": "openai"}
return trace_tree.WBTraceTree(root_span=span, model_dict=model_obj)
def _resolve_edit(
self,
request: Dict[str, Any],
response: Response,
time_elapsed: float,
) -> Dict[str, Any]:
"""Resolves the request and response objects for `openai.Edit`."""
request_str = (
f"\n\n**Instruction**: {request['instruction']}\n\n"
f"**Input**: {request['input']}\n"
)
choices = [
f"\n\n**Edited**: {choice['text']}\n" for choice in response["choices"]
]
return self._resolve_metrics(
request=request,
response=response,
request_str=request_str,
choices=choices,
time_elapsed=time_elapsed,
)
def _resolve_completion(
self,
request: Dict[str, Any],
response: Response,
time_elapsed: float,
) -> Dict[str, Any]:
"""Resolves the request and response objects for `openai.Completion`."""
request_str = f"\n\n**Prompt**: {request['prompt']}\n"
choices = [
f"\n\n**Completion**: {choice['text']}\n" for choice in response["choices"]
]
return self._resolve_metrics(
request=request,
response=response,
request_str=request_str,
choices=choices,
time_elapsed=time_elapsed,
)
def _resolve_chat_completion(
self,
request: Dict[str, Any],
response: Response,
time_elapsed: float,
) -> Dict[str, Any]:
"""Resolves the request and response objects for `openai.Completion`."""
prompt = io.StringIO()
for message in request["messages"]:
prompt.write(f"\n\n**{message['role']}**: {message['content']}\n")
request_str = prompt.getvalue()
choices = [
f"\n\n**{choice['message']['role']}**: {choice['message']['content']}\n"
for choice in response["choices"]
]
return self._resolve_metrics(
request=request,
response=response,
request_str=request_str,
choices=choices,
time_elapsed=time_elapsed,
)
def _resolve_metrics(
self,
request: Dict[str, Any],
response: Response,
request_str: str,
choices: List[str],
time_elapsed: float,
) -> Dict[str, Any]:
"""Resolves the request and response objects for `openai.Completion`."""
results = [
trace_tree.Result(
inputs={"request": request_str},
outputs={"response": choice},
)
for choice in choices
]
metrics = self._get_metrics_to_log(request, response, results, time_elapsed)
return self._convert_metrics_to_dict(metrics)
@staticmethod
def _get_usage_metrics(response: Response, time_elapsed: float) -> UsageMetrics:
"""Gets the usage stats from the response object."""
if response.get("usage"):
usage_stats = UsageMetrics(**response["usage"])
else:
usage_stats = UsageMetrics()
usage_stats.elapsed_time = time_elapsed
return usage_stats
def _get_metrics_to_log(
self,
request: Dict[str, Any],
response: Response,
results: List[Any],
time_elapsed: float,
) -> Metrics:
model = response.get("model") or request.get("model")
usage_metrics = self._get_usage_metrics(response, time_elapsed)
usage = []
for result in results:
row = {
"request": result.inputs["request"],
"response": result.outputs["response"],
"model": model,
"start_time": datetime.datetime.fromtimestamp(response["created"]),
"end_time": datetime.datetime.fromtimestamp(
response["created"] + time_elapsed
),
"request_id": response.get("id", None),
"api_type": response.get("api_type", "openai"),
"session_id": wandb.run.id,
}
row.update(asdict(usage_metrics))
usage.append(row)
usage_table = wandb.Table(
columns=list(usage[0].keys()),
data=[(item.values()) for item in usage],
)
trace = self.results_to_trace_tree(request, response, results, time_elapsed)
metrics = Metrics(stats=usage_table, trace=trace, usage=usage_metrics)
return metrics
@staticmethod
def _convert_metrics_to_dict(metrics: Metrics) -> Dict[str, Any]:
"""Converts metrics to a dict."""
metrics_dict = {
"stats": metrics.stats,
"trace": metrics.trace,
}
usage_stats = {f"usage/{k}": v for k, v in asdict(metrics.usage).items()}
metrics_dict.update(usage_stats)
return metrics_dict
| OpenAIRequestResponseResolver |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.