language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | tests/pipelines/test_pipelines_zero_shot_image_classification.py | {
"start": 1187,
"end": 9087
} | class ____(unittest.TestCase):
# Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping,
# and only CLIP would be there for now.
# model_mapping = {CLIPConfig: CLIPModel}
# def get_test_pipeline(self, model, tokenizer, processor):
# if tokenizer is None:
# # Side effect of no Fast Tokenizer class for these model, so skipping
# # But the slow tokenizer test should still run as they're quite small
# self.skipTest(reason="No tokenizer available")
# return
# # return None, None
# image_classifier = ZeroShotImageClassificationPipeline(
# model=model, tokenizer=tokenizer, feature_extractor=processor
# )
# # test with a raw waveform
# image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# image2 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# return image_classifier, [image, image2]
# def run_pipeline_test(self, pipe, examples):
# image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# outputs = pipe(image, candidate_labels=["A", "B"])
# self.assertEqual(outputs, {"text": ANY(str)})
# # Batching
# outputs = pipe([image] * 3, batch_size=2, candidate_labels=["A", "B"])
@require_torch
def test_small_model_pt(self, dtype="float32"):
image_classifier = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", dtype=dtype
)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
output = image_classifier(image, candidate_labels=["a", "b", "c"])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(output),
[
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
[{"score": 0.333, "label": "b"}, {"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}],
],
)
output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
self.assertEqual(
nested_simplify(output),
# Pipeline outputs are supposed to be deterministic and
# So we could in theory have real values "A", "B", "C" instead
# of ANY(str).
# However it seems that in this particular case, the floating
# scores are so close, we enter floating error approximation
# and the order is not guaranteed anymore with batching.
[
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
],
)
for single_output in output:
compare_pipeline_output_to_hub_spec(single_output, ZeroShotImageClassificationOutputElement)
@require_torch
def test_small_model_pt_fp16(self):
self.test_small_model_pt(dtype="float16")
@slow
@require_torch
def test_large_model_pt(self):
image_classifier = pipeline(
task="zero-shot-image-classification",
model="openai/clip-vit-base-patch32",
)
# This is an image of 2 cats with remotes and no planes
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
self.assertEqual(
nested_simplify(output),
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
)
output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
self.assertEqual(
nested_simplify(output),
[
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5,
)
@slow
@require_torch
def test_siglip_model_pt(self):
image_classifier = pipeline(
task="zero-shot-image-classification",
model="google/siglip-base-patch16-224",
)
# This is an image of 2 cats with remotes and no planes
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
output = image_classifier(image, candidate_labels=["2 cats", "a plane", "a remote"])
self.assertEqual(
nested_simplify(output),
[
{"score": 0.198, "label": "2 cats"},
{"score": 0.0, "label": "a remote"},
{"score": 0.0, "label": "a plane"},
],
)
output = image_classifier([image] * 5, candidate_labels=["2 cats", "a plane", "a remote"], batch_size=2)
self.assertEqual(
nested_simplify(output),
[
[
{"score": 0.198, "label": "2 cats"},
{"score": 0.0, "label": "a remote"},
{"score": 0.0, "label": "a plane"},
]
]
* 5,
)
@slow
@require_torch
def test_blip2_model_pt(self):
image_classifier = pipeline(
task="zero-shot-image-classification",
model="Salesforce/blip2-itm-vit-g",
)
# This is an image of 2 cats with remotes and no planes
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
output = image_classifier(
image,
candidate_labels=["2 cats", "a plane", "a remote"],
tokenizer_kwargs={"return_token_type_ids": False},
)
self.assertEqual(
nested_simplify(output),
[
{"score": 0.369, "label": "2 cats"},
{"score": 0.333, "label": "a remote"},
{"score": 0.297, "label": "a plane"},
],
)
output = image_classifier(
[image] * 5,
candidate_labels=["2 cats", "a plane", "a remote"],
batch_size=2,
tokenizer_kwargs={"return_token_type_ids": False},
)
self.assertEqual(
nested_simplify(output),
[
[
{"score": 0.369, "label": "2 cats"},
{"score": 0.333, "label": "a remote"},
{"score": 0.297, "label": "a plane"},
]
]
* 5,
)
| ZeroShotImageClassificationPipelineTests |
python | openai__openai-python | tests/api_resources/test_uploads.py | {
"start": 5593,
"end": 11275
} | class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
upload = await async_client.uploads.create(
bytes=0,
filename="filename",
mime_type="mime_type",
purpose="assistants",
)
assert_matches_type(Upload, upload, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
upload = await async_client.uploads.create(
bytes=0,
filename="filename",
mime_type="mime_type",
purpose="assistants",
expires_after={
"anchor": "created_at",
"seconds": 3600,
},
)
assert_matches_type(Upload, upload, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.uploads.with_raw_response.create(
bytes=0,
filename="filename",
mime_type="mime_type",
purpose="assistants",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
upload = response.parse()
assert_matches_type(Upload, upload, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.uploads.with_streaming_response.create(
bytes=0,
filename="filename",
mime_type="mime_type",
purpose="assistants",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
upload = await response.parse()
assert_matches_type(Upload, upload, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
upload = await async_client.uploads.cancel(
"upload_abc123",
)
assert_matches_type(Upload, upload, path=["response"])
@parametrize
async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
response = await async_client.uploads.with_raw_response.cancel(
"upload_abc123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
upload = response.parse()
assert_matches_type(Upload, upload, path=["response"])
@parametrize
async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None:
async with async_client.uploads.with_streaming_response.cancel(
"upload_abc123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
upload = await response.parse()
assert_matches_type(Upload, upload, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
await async_client.uploads.with_raw_response.cancel(
"",
)
@parametrize
async def test_method_complete(self, async_client: AsyncOpenAI) -> None:
upload = await async_client.uploads.complete(
upload_id="upload_abc123",
part_ids=["string"],
)
assert_matches_type(Upload, upload, path=["response"])
@parametrize
async def test_method_complete_with_all_params(self, async_client: AsyncOpenAI) -> None:
upload = await async_client.uploads.complete(
upload_id="upload_abc123",
part_ids=["string"],
md5="md5",
)
assert_matches_type(Upload, upload, path=["response"])
@parametrize
async def test_raw_response_complete(self, async_client: AsyncOpenAI) -> None:
response = await async_client.uploads.with_raw_response.complete(
upload_id="upload_abc123",
part_ids=["string"],
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
upload = response.parse()
assert_matches_type(Upload, upload, path=["response"])
@parametrize
async def test_streaming_response_complete(self, async_client: AsyncOpenAI) -> None:
async with async_client.uploads.with_streaming_response.complete(
upload_id="upload_abc123",
part_ids=["string"],
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
upload = await response.parse()
assert_matches_type(Upload, upload, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_complete(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
await async_client.uploads.with_raw_response.complete(
upload_id="",
part_ids=["string"],
)
| TestAsyncUploads |
python | bokeh__bokeh | src/bokeh/core/property/bases.py | {
"start": 19341,
"end": 20294
} | class ____(ParameterizedProperty[T]):
""" A base class for Container-like type properties.
"""
def _may_have_unstable_default(self) -> bool:
# all containers are mutable, so the default can be modified
return self._default is not Undefined
def validation_on() -> bool:
""" Check if property validation is currently active
Returns:
bool
"""
return Property._should_validate
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
@register_type_link(SingleParameterizedProperty)
def _sphinx_type(obj: SingleParameterizedProperty[Any]):
return f"{property_link(obj)}({type_link(obj.type_param)})"
| ContainerProperty |
python | keras-team__keras | keras/src/optimizers/lamb_test.py | {
"start": 151,
"end": 2705
} | class ____(testing.TestCase):
def test_config(self):
optimizer = Lamb(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Lamb(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [-0.3693, 0.6306, 1.6306, 2.6306], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Lamb(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Lamb(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Lamb(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Lamb()
x = backend.Variable(np.ones([10], dtype="float32"))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
golden = np.tile(
[[0.999], [0.9982], [0.9974], [0.9965], [0.9955]], (1, 10)
)
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Lamb(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Lamb(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
| LambTest |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_datacatalog.py | {
"start": 52340,
"end": 69208
} | class ____:
def setup_method(self):
with pytest.warns(AirflowProviderDeprecationWarning):
with mock.patch(
"airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.hook = CloudDataCatalogHook(gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_create_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.create_entry(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry_id=TEST_ENTRY_ID,
entry=TEST_ENTRY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_create_entry_group(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.create_entry_group(
location=TEST_LOCATION,
entry_group_id=TEST_ENTRY_GROUP_ID,
entry_group=TEST_ENTRY_GROUP,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_create_tag(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.create_tag(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
tag=deepcopy(TEST_TAG),
template_id=TEST_TAG_TEMPLATE_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_create_tag_protobuff(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.create_tag(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
tag=Tag(),
template_id=TEST_TAG_TEMPLATE_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_create_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.create_tag_template(
location=TEST_LOCATION,
tag_template_id=TEST_TAG_TEMPLATE_ID,
tag_template=TEST_TAG_TEMPLATE,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_create_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.create_tag_template_field(
location=TEST_LOCATION,
tag_template=TEST_TAG_TEMPLATE_ID,
tag_template_field_id=TEST_TAG_TEMPLATE_FIELD_ID,
tag_template_field=TEST_TAG_TEMPLATE_FIELD,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_delete_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.delete_entry(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_delete_entry_group(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.delete_entry_group(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_delete_tag(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.delete_tag(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
tag=TEST_TAG_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_delete_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.delete_tag_template(
location=TEST_LOCATION,
tag_template=TEST_TAG_TEMPLATE_ID,
force=TEST_FORCE,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_delete_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.delete_tag_template_field(
location=TEST_LOCATION,
tag_template=TEST_TAG_TEMPLATE_ID,
field=TEST_TAG_TEMPLATE_FIELD_ID,
force=TEST_FORCE,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_get_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.get_entry(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_get_entry_group(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.get_entry_group(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
read_mask=TEST_READ_MASK,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_get_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.get_tag_template(
location=TEST_LOCATION,
tag_template=TEST_TAG_TEMPLATE_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_list_tags(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.list_tags(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
page_size=TEST_PAGE_SIZE,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_get_tag_for_template_name(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
tag_1 = mock.MagicMock(template=TEST_TAG_TEMPLATE_PATH.format("invalid-project"))
tag_2 = mock.MagicMock(template=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_2))
mock_get_conn.return_value.list_tags.return_value = [tag_1, tag_2]
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.get_tag_for_template_name(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
template_name=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_2),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_rename_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.rename_tag_template_field(
location=TEST_LOCATION,
tag_template=TEST_TAG_TEMPLATE_ID,
field=TEST_TAG_TEMPLATE_FIELD_ID,
new_tag_template_field_id=TEST_NEW_TAG_TEMPLATE_FIELD_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_update_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.update_entry(
entry=TEST_ENTRY,
update_mask=TEST_UPDATE_MASK,
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry_id=TEST_ENTRY_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_update_tag(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.update_tag(
tag=deepcopy(TEST_TAG),
update_mask=TEST_UPDATE_MASK,
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
tag_id=TEST_TAG_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_update_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.update_tag_template(
tag_template=TEST_TAG_TEMPLATE,
update_mask=TEST_UPDATE_MASK,
location=TEST_LOCATION,
tag_template_id=TEST_TAG_TEMPLATE_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_update_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.update_tag_template_field(
tag_template_field=TEST_TAG_TEMPLATE_FIELD,
update_mask=TEST_UPDATE_MASK,
tag_template=TEST_TAG_TEMPLATE_ID,
location=TEST_LOCATION,
tag_template_field_id=TEST_TAG_TEMPLATE_FIELD_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
| TestCloudDataCatalogMissingProjectIdHook |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/git_url_top_level/package.py | {
"start": 217,
"end": 2058
} | class ____(Package):
"""Mock package that top-level git and url attributes.
This demonstrates how Spack infers fetch mechanisms from parameters
to the ``version`` directive.
"""
homepage = "http://www.git-fetch-example.com"
git = "https://example.com/some/git/repo"
url = "https://example.com/some/tarball-1.0.tar.gz"
# These resolve to git fetchers
version("develop", branch="develop")
version("submodules", submodules=True)
version("3.4", commit="abc34")
version("3.3", branch="releases/v3.3", commit="abc33")
version("3.2", branch="releases/v3.2")
version("3.1", tag="v3.1", commit="abc31")
version("3.0", tag="v3.0")
# These resolve to URL fetchers
version(
"2.3",
sha256="0000000000000000000000000000000000000000000000000000000000000023",
url="https://www.example.com/foo2.3.tar.gz",
)
version(
"2.2",
sha256="0000000000000000000000000000000000000000000000000000000000000022",
url="https://www.example.com/foo2.2.tar.gz",
)
version("2.1", sha256="0000000000000000000000000000000000000000000000000000000000000021")
version("2.0", sha256="0000000000000000000000000000000000000000000000000000000000000020")
# These result in a FetcherConflict b/c we can't tell what to use
version(
"1.3",
sha256="f66bbef3ccb8b06542c57d69804c5b0aba72051f693c17761ad8525786d259fa",
commit="abc13",
)
version(
"1.2",
sha512="f66bbef3ccb8b06542c57d69804c5b0aba72051f693c17761ad8525786d259fa"
"9ed8f2e950a4fb8a4b936f33e689187784699357bc16e49f33dfcda8ab8b00e4",
branch="releases/v1.2",
)
version("1.1", md5="00000000000000000000000000000011", tag="v1.1")
version("1.0", md5="00000000000000000000000000000011", tag="abc123")
| GitUrlTopLevel |
python | Textualize__textual | src/textual/css/_help_text.py | {
"start": 893,
"end": 30032
} | class ____:
"""
Args:
inline: Information only relevant to users who are using inline styling.
css: Information only relevant to users who are using CSS.
"""
inline: Sequence[Bullet]
css: Sequence[Bullet]
def get_by_context(self, context: StylingContext) -> list[Bullet]:
"""Get the information associated with the given context
Args:
context: The context to retrieve info for.
"""
if context == "inline":
return list(self.inline)
else:
return list(self.css)
def _python_name(property_name: str) -> str:
"""Convert a CSS property name to the corresponding Python attribute name
Args:
property_name: The CSS property name
Returns:
The Python attribute name as found on the Styles object
"""
return property_name.replace("-", "_")
def _css_name(property_name: str) -> str:
"""Convert a Python style attribute name to the corresponding CSS property name
Args:
property_name: The Python property name
Returns:
The CSS property name
"""
return property_name.replace("_", "-")
def _contextualize_property_name(
property_name: str,
context: StylingContext,
) -> str:
"""Convert a property name to CSS or inline by replacing
'-' with '_' or vice-versa
Args:
property_name: The name of the property
context: The context the property is being used in.
Returns:
The property name converted to the given context.
"""
return _css_name(property_name) if context == "css" else _python_name(property_name)
def _spacing_examples(property_name: str) -> ContextSpecificBullets:
"""Returns examples for spacing properties"""
return ContextSpecificBullets(
inline=[
Bullet(
f"Set [i]{property_name}[/] to a tuple to assign spacing to each edge",
examples=[
Example(
f"widget.styles.{property_name} = (1, 2) [dim]# Vertical, horizontal"
),
Example(
f"widget.styles.{property_name} = (1, 2, 3, 4) [dim]# Top, right, bottom, left"
),
],
),
Bullet(
"Or to an integer to assign a single value to all edges",
examples=[Example(f"widget.styles.{property_name} = 2")],
),
],
css=[
Bullet(
"Supply 1, 2 or 4 integers separated by a space",
examples=[
Example(f"{property_name}: 1;"),
Example(f"{property_name}: 1 2; [dim]# Vertical, horizontal"),
Example(
f"{property_name}: 1 2 3 4; [dim]# Top, right, bottom, left"
),
],
),
],
)
def property_invalid_value_help_text(
property_name: str,
context: StylingContext,
*,
suggested_property_name: str | None = None,
) -> HelpText:
"""Help text to show when the user supplies an invalid value for CSS property
property.
Args:
property_name: The name of the property.
context: The context the spacing property is being used in.
Keyword Args:
suggested_property_name: A suggested name for the property (e.g. "width" for "wdth").
Returns:
Renderable for displaying the help text for this property.
"""
property_name = _contextualize_property_name(property_name, context)
summary = f"Invalid CSS property {property_name!r}"
if suggested_property_name:
suggested_property_name = _contextualize_property_name(
suggested_property_name, context
)
summary += f". Did you mean '{suggested_property_name}'?"
return HelpText(summary)
def spacing_wrong_number_of_values_help_text(
    property_name: str,
    num_values_supplied: int,
    context: StylingContext,
) -> HelpText:
    """Build help text for a spacing property (e.g. padding or margin) that
    was given the wrong number of values.

    Args:
        property_name: The name of the property.
        num_values_supplied: The number of values the user supplied (a number other than 1, 2 or 4).
        context: The context the spacing property is being used in.

    Returns:
        Renderable for displaying the help text for this property.
    """
    name = _contextualize_property_name(property_name, context)
    bullets = [
        Bullet(
            f"You supplied {num_values_supplied} values for the [i]{name}[/] property"
        ),
        Bullet(
            "Spacing properties like [i]margin[/] and [i]padding[/] require either 1, 2 or 4 integer values"
        ),
    ]
    bullets.extend(_spacing_examples(name).get_by_context(context))
    return HelpText(
        summary=f"Invalid number of values for the [i]{name}[/] property",
        bullets=bullets,
    )
def spacing_invalid_value_help_text(
    property_name: str,
    context: StylingContext,
) -> HelpText:
    """Build help text for an invalid spacing property value.

    Args:
        property_name: The name of the property.
        context: The context the spacing property is being used in.

    Returns:
        Renderable for displaying the help text for this property.
    """
    name = _contextualize_property_name(property_name, context)
    example_bullets = _spacing_examples(name).get_by_context(context)
    return HelpText(
        summary=f"Invalid value for the [i]{name}[/] property",
        bullets=example_bullets,
    )
def scalar_help_text(
    property_name: str,
    context: StylingContext,
) -> HelpText:
    """Help text to show when the user supplies an invalid value for
    a scalar property.

    Args:
        property_name: The name of the property.
        context: The context the scalar property is being used in.

    Returns:
        Renderable for displaying the help text for this property.
    """
    # NOTE: the previous docstring documented a ``num_values_supplied``
    # parameter that this function has never taken (copy-paste from the
    # spacing help text); it has been removed.
    property_name = _contextualize_property_name(property_name, context)
    return HelpText(
        summary=f"Invalid value for the [i]{property_name}[/] property",
        bullets=[
            Bullet(
                f"Scalar properties like [i]{property_name}[/] require numerical values and an optional unit"
            ),
            Bullet(f"Valid units are {friendly_list(SYMBOL_UNIT)}"),
            # Show Python-assignment examples inline, CSS declarations in CSS context.
            *ContextSpecificBullets(
                inline=[
                    Bullet(
                        "Assign a string, int or Scalar object itself",
                        examples=[
                            Example(f'widget.styles.{property_name} = "50%"'),
                            Example(f"widget.styles.{property_name} = 10"),
                            Example(f"widget.styles.{property_name} = Scalar(...)"),
                        ],
                    ),
                ],
                css=[
                    Bullet(
                        "Write the number followed by the unit",
                        examples=[
                            Example(f"{property_name}: 50%;"),
                            Example(f"{property_name}: 5;"),
                        ],
                    ),
                ],
            ).get_by_context(context),
        ],
    )
def string_enum_help_text(
    property_name: str,
    valid_values: Iterable[str],
    context: StylingContext,
) -> HelpText:
    """Build help text for an invalid string-enum property value.

    Args:
        property_name: The name of the property.
        valid_values: A list of the values that are considered valid.
        context: The context the property is being used in.

    Returns:
        Renderable for displaying the help text for this property.
    """
    name = _contextualize_property_name(property_name, context)
    permitted = Bullet(
        f"The [i]{name}[/] property can only be set to {friendly_list(valid_values)}"
    )
    inline_examples = [
        Example(f'widget.styles.{name} = "{valid_value}"')
        for valid_value in sorted(valid_values)
    ]
    css_examples = [
        Example(f"{name}: {valid_value};") for valid_value in sorted(valid_values)
    ]
    contextual = ContextSpecificBullets(
        inline=[
            Bullet(
                "Assign any of the valid strings to the property",
                examples=inline_examples,
            )
        ],
        css=[
            Bullet(
                "Assign any of the valid strings to the property",
                examples=css_examples,
            )
        ],
    ).get_by_context(context)
    return HelpText(
        summary=f"Invalid value for the [i]{name}[/] property",
        bullets=[permitted, *contextual],
    )
def color_property_help_text(
    property_name: str,
    context: StylingContext,
    *,
    error: Exception | None = None,
    value: str | None = None,
) -> HelpText:
    """Help text to show when the user supplies an invalid value for a color
    property. For example, an unparsable color string.

    Args:
        property_name: The name of the property.
        context: The context the property is being used in.
        error: The error that caused this help text to be displayed.
        value: The invalid value the user supplied, included in the summary
            when available.

    Returns:
        Renderable for displaying the help text for this property.
    """
    property_name = _contextualize_property_name(property_name, context)
    if value is None:
        summary = f"Invalid value for the [i]{property_name}[/] property"
    else:
        summary = f"Invalid value ({value!r}) for the [i]{property_name}[/] property"
    # isinstance() already returns False for None, so no separate
    # truthiness check on `error` is needed.
    suggested_color = (
        error.suggested_color if isinstance(error, ColorParseError) else None
    )
    if suggested_color:
        summary += f". Did you mean '{suggested_color}'?"
    return HelpText(
        summary=summary,
        bullets=[
            Bullet(
                f"The [i]{property_name}[/] property can only be set to a valid color"
            ),
            Bullet("Colors can be specified using hex, RGB, or ANSI color names"),
            *ContextSpecificBullets(
                inline=[
                    Bullet(
                        "Assign colors using strings or Color objects",
                        examples=[
                            Example(f'widget.styles.{property_name} = "#ff00aa"'),
                            Example(
                                f'widget.styles.{property_name} = "rgb(12,231,45)"'
                            ),
                            Example(f'widget.styles.{property_name} = "red"'),
                            Example(
                                f"widget.styles.{property_name} = Color(1, 5, 29, a=0.5)"
                            ),
                        ],
                    )
                ],
                css=[
                    Bullet(
                        "Colors can be set as follows",
                        examples=[
                            Example(f"{property_name}: [#ff00aa]#ff00aa[/];"),
                            Example(f"{property_name}: rgb(12,231,45);"),
                            Example(f"{property_name}: [rgb(255,0,0)]red[/];"),
                        ],
                    )
                ],
            ).get_by_context(context),
        ],
    )
def border_property_help_text(property_name: str, context: StylingContext) -> HelpText:
    """Build help text for an invalid border-style property value.

    Covers properties such as border, border-right and outline.

    Args:
        property_name: The name of the property.
        context: The context the property is being used in.

    Returns:
        Renderable for displaying the help text for this property.
    """
    name = _contextualize_property_name(property_name, context)
    # These two bullets are shared between the inline and CSS contexts.
    border_types = Bullet(
        f"Valid values for <bordertype> are:\n{friendly_list(VALID_BORDER)}"
    )
    color_note = Bullet("Colors can be specified using hex, RGB, or ANSI color names")
    inline_bullets = [
        Bullet(
            f"Set [i]{name}[/] using a tuple of the form (<bordertype>, <color>)",
            examples=[
                Example(f'widget.styles.{name} = ("solid", "red")'),
                Example(f'widget.styles.{name} = ("round", "#f0f0f0")'),
                Example(
                    f'widget.styles.{name} = [("dashed", "#f0f0f0"), ("solid", "blue")] [dim]# Vertical, horizontal'
                ),
            ],
        ),
        border_types,
        color_note,
    ]
    css_bullets = [
        Bullet(
            f"Set [i]{name}[/] using a value of the form [i]<bordertype> <color>[/]",
            examples=[
                Example(f"{name}: solid red;"),
                Example(f"{name}: dashed #00ee22;"),
            ],
        ),
        border_types,
        color_note,
    ]
    contextual = ContextSpecificBullets(
        inline=inline_bullets, css=css_bullets
    ).get_by_context(context)
    return HelpText(
        summary=f"Invalid value for [i]{name}[/] property",
        bullets=list(contextual),
    )
def layout_property_help_text(property_name: str, context: StylingContext) -> HelpText:
    """Build help text for an invalid layout property value.

    Args:
        property_name: The name of the property.
        context: The context the property is being used in.

    Returns:
        Renderable for displaying the help text for this property.
    """
    name = _contextualize_property_name(property_name, context)
    expected = Bullet(
        f"The [i]{name}[/] property expects a value of {friendly_list(VALID_LAYOUT)}"
    )
    return HelpText(
        summary=f"Invalid value for [i]{name}[/] property",
        bullets=[expected],
    )
def dock_property_help_text(property_name: str, context: StylingContext) -> HelpText:
    """Build help text for an invalid dock value.

    Args:
        property_name: The name of the property.
        context: The context the property is being used in.

    Returns:
        Renderable for displaying the help text for this property.
    """
    name = _contextualize_property_name(property_name, context)
    explanation = "The 'dock' rule attaches a widget to the edge of a container."
    contextual = ContextSpecificBullets(
        inline=[
            Bullet(
                explanation,
                examples=[Example('header.styles.dock = "top"')],
            )
        ],
        css=[
            Bullet(
                explanation,
                examples=[Example("dock: top")],
            )
        ],
    ).get_by_context(context)
    return HelpText(
        summary=f"Invalid value for [i]{name}[/] property",
        bullets=[
            Bullet(
                "The value must be one of 'top', 'right', 'bottom', 'left' or 'none'"
            ),
            *contextual,
        ],
    )
def split_property_help_text(property_name: str, context: StylingContext) -> HelpText:
    """Build help text for an invalid split value.

    Args:
        property_name: The name of the property.
        context: The context the property is being used in.

    Returns:
        Renderable for displaying the help text for this property.
    """
    name = _contextualize_property_name(property_name, context)
    explanation = (
        "The 'split' splits the container and aligns the widget to the given edge."
    )
    contextual = ContextSpecificBullets(
        inline=[
            Bullet(
                explanation,
                examples=[Example('header.styles.split = "top"')],
            )
        ],
        css=[
            Bullet(
                explanation,
                examples=[Example("split: top")],
            )
        ],
    ).get_by_context(context)
    return HelpText(
        summary=f"Invalid value for [i]{name}[/] property",
        bullets=[
            Bullet("The value must be one of 'top', 'right', 'bottom' or 'left'"),
            *contextual,
        ],
    )
def fractional_property_help_text(
    property_name: str, context: StylingContext
) -> HelpText:
    """Build help text for an invalid fractional property value.

    Args:
        property_name: The name of the property.
        context: The context the property is being used in.

    Returns:
        Renderable for displaying the help text for this property.
    """
    name = _contextualize_property_name(property_name, context)
    inline_bullets = [
        Bullet(
            f"Set [i]{name}[/] to a string or float value",
            examples=[
                Example(f'widget.styles.{name} = "50%"'),
                Example(f"widget.styles.{name} = 0.25"),
            ],
        )
    ]
    css_bullets = [
        Bullet(
            f"Set [i]{name}[/] to a string or float",
            examples=[
                Example(f"{name}: 50%;"),
                Example(f"{name}: 0.25;"),
            ],
        )
    ]
    contextual = ContextSpecificBullets(
        inline=inline_bullets, css=css_bullets
    ).get_by_context(context)
    return HelpText(
        summary=f"Invalid value for [i]{name}[/] property",
        bullets=list(contextual),
    )
def offset_property_help_text(context: StylingContext) -> HelpText:
    """Build help text for an invalid offset property value.

    Args:
        context: The context the property is being used in.

    Returns:
        Renderable for displaying the help text for this property.
    """
    contextual = ContextSpecificBullets(
        inline=[
            Bullet(
                markup="The [i]offset[/] property expects a tuple of 2 values [i](<horizontal>, <vertical>)[/]",
                examples=[
                    Example("widget.styles.offset = (2, '50%')"),
                ],
            ),
        ],
        css=[
            Bullet(
                markup="The [i]offset[/] property expects a value of the form [i]<horizontal> <vertical>[/]",
                examples=[
                    Example(
                        "offset: 2 3; [dim]# Horizontal offset of 2, vertical offset of 3"
                    ),
                    Example(
                        "offset: 2 50%; [dim]# Horizontal offset of 2, vertical offset of 50%"
                    ),
                ],
            ),
        ],
    ).get_by_context(context)
    scalar_note = Bullet("<horizontal> and <vertical> can be a number or scalar value")
    return HelpText(
        summary="Invalid value for [i]offset[/] property",
        bullets=[*contextual, scalar_note],
    )
def scrollbar_size_property_help_text(context: StylingContext) -> HelpText:
    """Build help text for an invalid scrollbar-size property value.

    Args:
        context: The context the property is being used in.

    Returns:
        Renderable for displaying the help text for this property.
    """
    contextual = ContextSpecificBullets(
        inline=[
            Bullet(
                markup="The [i]scrollbar_size[/] property expects a tuple of 2 values [i](<horizontal>, <vertical>)[/]",
                examples=[
                    Example("widget.styles.scrollbar_size = (2, 1)"),
                ],
            ),
        ],
        css=[
            Bullet(
                markup="The [i]scrollbar-size[/] property expects a value of the form [i]<horizontal> <vertical>[/]",
                examples=[
                    Example(
                        "scrollbar-size: 2 3; [dim]# Horizontal size of 2, vertical size of 3"
                    ),
                ],
            ),
        ],
    ).get_by_context(context)
    integer_note = Bullet("<horizontal> and <vertical> must be non-negative integers.")
    return HelpText(
        summary="Invalid value for [i]scrollbar-size[/] property",
        bullets=[*contextual, integer_note],
    )
def scrollbar_size_single_axis_help_text(property_name: str) -> HelpText:
    """Build help text for an invalid scrollbar-size-* property value.

    Args:
        property_name: The name of the property.

    Returns:
        Renderable for displaying the help text for this property.
    """
    rule = Bullet(
        markup=f"The [i]{property_name}[/] property can only be set to a positive integer, greater than zero",
        examples=[
            Example(f"{property_name}: 2;"),
        ],
    )
    return HelpText(
        summary=f"Invalid value for [i]{property_name}[/]",
        bullets=[rule],
    )
def integer_help_text(property_name: str) -> HelpText:
    """Build help text for an invalid integer value.

    Args:
        property_name: The name of the property.

    Returns:
        Renderable for displaying the help text for this property.
    """
    expectation = Bullet(
        markup="An integer value is expected here",
        examples=[
            Example(f"{property_name}: 2;"),
        ],
    )
    return HelpText(
        summary=f"Invalid value for [i]{property_name}[/]",
        bullets=[expectation],
    )
def align_help_text() -> HelpText:
    """Build help text for an invalid `align` property value.

    Returns:
        Renderable for displaying the help text for this property.
    """
    usage = Bullet(
        markup="The [i]align[/] property expects exactly 2 values",
        examples=[
            Example("align: <horizontal> <vertical>"),
            Example(
                "align: center middle; [dim]# Center vertically & horizontally within parent"
            ),
            Example(
                "align: left middle; [dim]# Align on the middle left of the parent"
            ),
        ],
    )
    horizontal_values = Bullet(
        f"Valid values for <horizontal> are {friendly_list(VALID_ALIGN_HORIZONTAL)}"
    )
    vertical_values = Bullet(
        f"Valid values for <vertical> are {friendly_list(VALID_ALIGN_VERTICAL)}",
    )
    return HelpText(
        summary="Invalid value for [i]align[/] property",
        bullets=[usage, horizontal_values, vertical_values],
    )
def keyline_help_text() -> HelpText:
    """Build help text for an invalid `keyline` property value.

    Returns:
        Renderable for displaying the help text for this property.
    """
    usage = Bullet(
        markup="The [i]keyline[/] property expects exactly 2 values",
        examples=[
            Example("keyline: <type> <color>"),
        ],
    )
    valid_types = Bullet(f"Valid values for <type> are {friendly_list(VALID_KEYLINE)}")
    return HelpText(
        summary="Invalid value for [i]keyline[/] property",
        bullets=[usage, valid_types],
    )
def text_align_help_text() -> HelpText:
    """Build help text for an invalid text-align property value.

    Returns:
        Renderable for displaying the help text for this property.
    """
    guidance = Bullet(
        f"The [i]text-align[/] property must be one of {friendly_list(VALID_TEXT_ALIGN)}",
        examples=[
            Example("text-align: center;"),
            Example("text-align: right;"),
        ],
    )
    return HelpText(
        summary="Invalid value for the [i]text-align[/] property.",
        bullets=[guidance],
    )
def offset_single_axis_help_text(property_name: str) -> HelpText:
    """Build help text for an invalid offset-* property value.

    Args:
        property_name: The name of the property.

    Returns:
        Renderable for displaying the help text for this property.
    """
    usage = Bullet(
        markup=f"The [i]{property_name}[/] property can be set to a number or scalar value",
        examples=[
            Example(f"{property_name}: 10;"),
            Example(f"{property_name}: 50%;"),
        ],
    )
    units = Bullet(f"Valid scalar units are {friendly_list(SYMBOL_UNIT)}")
    return HelpText(
        summary=f"Invalid value for [i]{property_name}[/]",
        bullets=[usage, units],
    )
def position_help_text(property_name: str) -> HelpText:
    """Build help text for an invalid position value.

    Args:
        property_name: The name of the property.

    Returns:
        Renderable for displaying the help text for this property.
    """
    valid = Bullet(f"Valid values are {friendly_list(VALID_POSITION)}")
    return HelpText(
        summary=f"Invalid value for [i]{property_name}[/]",
        bullets=[valid],
    )
def expand_help_text(property_name: str) -> HelpText:
    """Build help text for an invalid expand value.

    Args:
        property_name: The name of the property.

    Returns:
        Renderable for displaying the help text for this property.
    """
    valid = Bullet(f"Valid values are {friendly_list(VALID_EXPAND)}")
    return HelpText(
        summary=f"Invalid value for [i]{property_name}[/]",
        bullets=[valid],
    )
def style_flags_property_help_text(
    property_name: str, value: str, context: StylingContext
) -> HelpText:
    """Help text to show when the user supplies an invalid value for a style flags property.

    Args:
        property_name: The name of the property.
        value: The invalid value the user supplied.
        context: The context the property is being used in.

    Returns:
        Renderable for displaying the help text for this property.
    """
    property_name = _contextualize_property_name(property_name, context)
    return HelpText(
        summary=f"Invalid value '{value}' in [i]{property_name}[/] property",
        bullets=[
            Bullet(
                f"Style flag values such as [i]{property_name}[/] expect space-separated values"
            ),
            Bullet(f"Permitted values are {friendly_list(VALID_STYLE_FLAGS)}"),
            Bullet("The value 'none' cannot be mixed with others"),
            *ContextSpecificBullets(
                inline=[
                    Bullet(
                        markup="Supply a string or Style object",
                        examples=[
                            Example(
                                f'widget.styles.{property_name} = "bold italic underline"'
                            )
                        ],
                    ),
                ],
                css=[
                    Bullet(
                        markup="Supply style flags separated by spaces",
                        examples=[Example(f"{property_name}: bold italic underline;")],
                    )
                ],
            ).get_by_context(context),
        ],
    )
def table_rows_or_columns_help_text(
    property_name: str, value: str, context: StylingContext
) -> HelpText:
    """Help text to show when the user supplies an invalid value for a table
    rows or columns property.

    Args:
        property_name: The name of the property.
        value: The invalid value the user supplied.
        context: The context the property is being used in.

    Returns:
        Renderable for displaying the help text for this property.
    """
    property_name = _contextualize_property_name(property_name, context)
    return HelpText(
        summary=f"Invalid value '{value}' in [i]{property_name}[/] property"
    )
| ContextSpecificBullets |
python | scikit-learn__scikit-learn | sklearn/externals/array_api_compat/common/_typing.py | {
"start": 1341,
"end": 1576
} | class ____(Protocol):
@property
def __class__(self, /) -> type[complex]: ...
@__class__.setter
def __class__(self, value: type[complex], /) -> None: ... # pyright: ignore[reportIncompatibleMethodOverride]
#
| JustComplex |
python | scikit-learn__scikit-learn | sklearn/tests/test_base.py | {
"start": 15717,
"end": 17068
} | class ____(DecisionTreeClassifier):
def __getstate__(self):
return self.__dict__
def test_pickle_version_warning_is_issued_when_no_version_info_in_pickle():
iris = datasets.load_iris()
# TreeNoVersion has no getstate, like pre-0.18
tree = TreeNoVersion().fit(iris.data, iris.target)
tree_pickle_noversion = pickle.dumps(tree)
assert b"_sklearn_version" not in tree_pickle_noversion
message = pickle_error_message.format(
estimator="TreeNoVersion",
old_version="pre-0.18",
current_version=sklearn.__version__,
)
# check we got the warning about using pre-0.18 pickle
with pytest.warns(UserWarning, match=message):
pickle.loads(tree_pickle_noversion)
# The test modifies global state by changing the TreeNoVersion class
@pytest.mark.thread_unsafe
def test_pickle_version_no_warning_is_issued_with_non_sklearn_estimator():
iris = datasets.load_iris()
tree = TreeNoVersion().fit(iris.data, iris.target)
tree_pickle_noversion = pickle.dumps(tree)
try:
module_backup = TreeNoVersion.__module__
TreeNoVersion.__module__ = "notsklearn"
with warnings.catch_warnings():
warnings.simplefilter("error")
pickle.loads(tree_pickle_noversion)
finally:
TreeNoVersion.__module__ = module_backup
| TreeNoVersion |
python | django__django | tests/forms_tests/tests/test_input_formats.py | {
"start": 35033,
"end": 39395
} | class ____(SimpleTestCase):
def test_dateTimeField(self):
"DateTimeFields can parse dates in the default format"
f = forms.DateTimeField()
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("13:30:05 21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean("2010-12-21 13:30:05")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean("12/21/2010 13:30:05")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
def test_localized_dateTimeField(self):
"""
Localized DateTimeFields in a non-localized environment act as
unlocalized widgets.
"""
f = forms.DateTimeField()
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("13:30:05 21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean("2010-12-21 13:30:05")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean("12/21/2010 13:30:05")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
def test_dateTimeField_with_inputformat(self):
"""
DateTimeFields with manually specified input formats can accept those
formats
"""
f = forms.DateTimeField(
input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"]
)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("2010/12/21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean("1:30:05 PM 21.12.2010")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean("1:30 PM 21-12-2010")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:00")
def test_localized_dateTimeField_with_inputformat(self):
"""
Localized DateTimeFields with manually specified input formats can
accept those formats.
"""
f = forms.DateTimeField(
input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"], localize=True
)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("2010/12/21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean("1:30:05 PM 21.12.2010")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean("1:30 PM 21-12-2010")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:00")
| SimpleDateTimeFormatTests |
python | realpython__materials | python-yaml/models.py | {
"start": 74,
"end": 129
} | class ____:
first_name: str
last_name: str
| Person |
python | wandb__wandb | wandb/vendor/pygments/lexers/d.py | {
"start": 9289,
"end": 9530
} | class ____(CrocLexer):
"""
For MiniD source. MiniD is now known as Croc.
"""
name = 'MiniD'
filenames = [] # don't lex .md as MiniD, reserve for Markdown
aliases = ['minid']
mimetypes = ['text/x-minidsrc']
| MiniDLexer |
python | doocs__leetcode | solution/1400-1499/1496.Path Crossing/Solution.py | {
"start": 0,
"end": 469
} | class ____:
def isPathCrossing(self, path: str) -> bool:
i = j = 0
vis = {(0, 0)}
for c in path:
match c:
case 'N':
i -= 1
case 'S':
i += 1
case 'E':
j += 1
case 'W':
j -= 1
if (i, j) in vis:
return True
vis.add((i, j))
return False
| Solution |
python | altair-viz__altair | altair/vegalite/v6/api.py | {
"start": 169796,
"end": 173595
} | class ____(TopLevelMixin, core.TopLevelHConcatSpec):
"""A chart with horizontally-concatenated facets."""
@utils.use_signature(core.TopLevelHConcatSpec)
def __init__(
self,
data: Optional[ChartDataType] = Undefined,
hconcat: Sequence[ConcatType] = (),
**kwargs: Any,
) -> None:
for spec in hconcat:
_check_if_valid_subspec(spec, "HConcatChart")
super().__init__(data=data, hconcat=list(hconcat), **kwargs)
self.hconcat: list[ChartType]
self.params: Optional[Sequence[_Parameter]]
self.data: Optional[ChartDataType]
self.data, self.hconcat = _combine_subchart_data(self.data, self.hconcat)
self.params, self.hconcat = _combine_subchart_params(self.params, self.hconcat)
def __ior__(self, other: ChartType) -> Self:
_check_if_valid_subspec(other, "HConcatChart")
self.hconcat.append(other)
self.data, self.hconcat = _combine_subchart_data(self.data, self.hconcat)
self.params, self.hconcat = _combine_subchart_params(self.params, self.hconcat)
return self
def __or__(self, other: ChartType) -> Self:
copy = self.copy(deep=["hconcat"])
copy |= other
return copy
def transformed_data(
self, row_limit: int | None = None, exclude: Iterable[str] | None = None
) -> list[DataFrameLike]:
"""
Evaluate a HConcatChart's transforms.
Evaluate the data transforms associated with a HConcatChart and return the
transformed data for each subplot as a list of DataFrames
Parameters
----------
row_limit : int (optional)
Maximum number of rows to return for each DataFrame. None (default) for unlimited
exclude : iterable of str
Set of the names of charts to exclude
Returns
-------
list of DataFrame
Transformed data for each subplot as a list of DataFrames
"""
from altair.utils._transformed_data import transformed_data
return transformed_data(self, row_limit=row_limit, exclude=exclude)
def interactive(
self, name: str | None = None, bind_x: bool = True, bind_y: bool = True
) -> Self:
"""
Make chart axes scales interactive.
Parameters
----------
name : string
The parameter name to use for the axes scales. This name should be
unique among all parameters within the chart.
bind_x : boolean, default True
If true, then bind the interactive scales to the x-axis
bind_y : boolean, default True
If true, then bind the interactive scales to the y-axis
Returns
-------
chart :
copy of self, with interactive axes added
"""
encodings: list[SingleDefUnitChannel_T] = []
if bind_x:
encodings.append("x")
if bind_y:
encodings.append("y")
return self.add_params(selection_interval(bind="scales", encodings=encodings))
def add_params(self, *params: Parameter) -> Self:
"""Add one or more parameters to the chart."""
if not params or not self.hconcat:
return self
copy = self.copy()
copy.hconcat = [chart.add_params(*params) for chart in copy.hconcat]
return copy
@utils.deprecated(version="5.0.0", alternative="add_params")
def add_selection(self, *selections) -> Self: # noqa: ANN002
"""'add_selection' is deprecated. Use 'add_params' instead."""
return self.add_params(*selections)
def hconcat(*charts: ConcatType, **kwargs: Any) -> HConcatChart:
"""Concatenate charts horizontally."""
return HConcatChart(hconcat=charts, **kwargs)
| HConcatChart |
python | kamyu104__LeetCode-Solutions | Python/group-shifted-strings.py | {
"start": 54,
"end": 754
} | class ____(object):
# @param {string[]} strings
# @return {string[][]}
def groupStrings(self, strings):
groups = collections.defaultdict(list)
for s in strings: # Grouping.
groups[self.hashStr(s)].append(s)
result = []
for key, val in groups.iteritems():
result.append(sorted(val))
return result
def hashStr(self, s):
base = ord(s[0])
hashcode = ""
for i in xrange(len(s)):
if ord(s[i]) - base >= 0:
hashcode += unichr(ord('a') + ord(s[i]) - base)
else:
hashcode += unichr(ord('a') + ord(s[i]) - base + 26)
return hashcode
| Solution |
python | ansible__ansible | lib/ansible/playbook/delegatable.py | {
"start": 241,
"end": 625
} | class ____:
delegate_to = FieldAttribute(isa='string')
delegate_facts = FieldAttribute(isa='bool')
def _post_validate_delegate_to(self, attr, value, templar):
"""This method exists just to make it clear that ``Task.post_validate``
does not template this value, it is set via ``TaskExecutor._calculate_delegate_to``
"""
return value
| Delegatable |
python | ZoranPandovski__al-go-rithms | recursive_algorithms/shortest_path_algorithm.py | {
"start": 175,
"end": 2616
} | class ____():
def __init__(self, vertices):
self.V = vertices
self.graph = [[0 for column in range(vertices)]
for row in range(vertices)]
def printSolution(self, dist):
print("Vertex \t Distance from Source")
for node in range(self.V):
print(" ", node, "\t\t", dist[node])
# A utility function to find the vertex with
# minimum distance value, from the set of vertices
# not yet included in shortest path tree
def minDistance(self, dist, sptSet):
# Initialize minimum distance for next node
min = sys.maxsize
# Search not nearest vertex not in the
# shortest path tree
for v in range(self.V):
if dist[v] < min and sptSet[v] == False:
min = dist[v]
min_index = v
return min_index
# Funtion that implements Dijkstra's single source
# shortest path algorithm for a graph represented
# using adjacency matrix representation
def dijkstra(self, src):
dist = [sys.maxsize] * self.V
dist[src] = 0
sptSet = [False] * self.V
for cout in range(self.V):
# Pick the minimum distance vertex from
# the set of vertices not yet processed.
# u is always equal to src in first iteration
u = self.minDistance(dist, sptSet)
# Put the minimum distance vertex in the
# shortest path tree
sptSet[u] = True
# Update dist value of the adjacent vertices
# of the picked vertex only if the current
# distance is greater than new distance and
# the vertex in not in the shortest path tree
for v in range(self.V):
if self.graph[u][v] > 0 and sptSet[v] == False and dist[v] > dist[u] + self.graph[u][v]:
dist[v] = dist[u] + self.graph[u][v]
self.printSolution(dist)
# Driver program
g = Graph(9)
g.graph = [[0, 4, 0, 0, 0, 0, 0, 8, 0],
[4, 0, 8, 0, 0, 0, 0, 11, 0],
[0, 8, 0, 7, 0, 4, 0, 0, 2],
[0, 0, 7, 0, 9, 14, 0, 0, 0],
[0, 0, 0, 9, 0, 10, 0, 0, 0],
[0, 0, 4, 14, 10, 0, 2, 0, 0],
[0, 0, 0, 0, 0, 2, 0, 1, 6],
[8, 11, 0, 0, 0, 0, 1, 0, 7],
[0, 0, 2, 0, 0, 0, 6, 7, 0]
]
g.dijkstra(0)
# This code is contributed by Divyanshu Mehta
| Graph |
python | scikit-image__scikit-image | tests/skimage/morphology/test_footprints.py | {
"start": 10144,
"end": 11762
} | class ____:
@pytest.mark.parametrize("i", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("j", [0, 1, 2, 3, 4])
def test_rectangle(self, i, j):
desired = np.ones((i, j), dtype='uint8')
actual = footprint_rectangle((i, j))
assert_equal(actual, desired)
@pytest.mark.parametrize("i", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("j", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("k", [0, 1, 2, 3, 4])
def test_cuboid(self, i, j, k):
desired = np.ones((i, j, k), dtype='uint8')
actual = footprint_rectangle((i, j, k))
assert_equal(actual, desired)
@pytest.mark.parametrize("shape", [(3,), (5, 5), (5, 5, 7)])
@pytest.mark.parametrize("decomposition", ["separable", "sequence"])
def test_decomposition(self, shape, decomposition):
regular = footprint_rectangle(shape)
decomposed = footprint_rectangle(shape, decomposition=decomposition)
recomposed = footprint_from_sequence(decomposed)
assert_equal(recomposed, regular)
@pytest.mark.parametrize("shape", [(2,), (3, 4)])
def test_uneven_sequence_decomposition_warning(self, shape):
"""Should fall back to decomposition="separable" for uneven footprint size."""
desired = footprint_rectangle(shape, decomposition="separable")
regex = "decomposition='sequence' is only supported for uneven footprints"
with pytest.warns(UserWarning, match=regex) as record:
actual = footprint_rectangle(shape, decomposition="sequence")
assert_stacklevel(record)
assert_equal(actual, desired)
| Test_footprint_rectangule |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/pandas_datasource.py | {
"start": 18408,
"end": 23092
} | class ____(Datasource, Generic[_DataAssetT]):
# class attributes
asset_types: ClassVar[Sequence[Type[DataAsset]]] = []
# instance attributes
assets: MutableSequence[_DataAssetT] = []
# Abstract Methods
@property
@override
def execution_engine_type(self) -> Type[PandasExecutionEngine]:
"""Return the PandasExecutionEngine unless the override is set"""
from great_expectations.execution_engine.pandas_execution_engine import (
PandasExecutionEngine,
)
return PandasExecutionEngine
@override
def test_connection(self, test_assets: bool = True) -> None:
"""Test the connection for the _PandasDatasource.
Args:
test_assets: If assets have been passed to the _PandasDatasource,
an attempt can be made to test them as well.
Raises:
TestConnectionError: If the connection test fails.
"""
raise NotImplementedError(
"""One needs to implement "test_connection" on a _PandasDatasource subclass."""
)
# End Abstract Methods
@override
def json( # noqa: PLR0913 # FIXME CoP
self,
*,
include: AbstractSetIntStr | MappingIntStrAny | None = None,
exclude: AbstractSetIntStr | MappingIntStrAny | None = None,
by_alias: bool = False,
# deprecated - use exclude_unset instead
skip_defaults: bool | None = None,
# Default to True to prevent serializing long configs full of unset default values
exclude_unset: bool = True,
exclude_defaults: bool = False,
exclude_none: bool = False,
encoder: Callable[[Any], Any] | None = None,
models_as_dict: bool = True,
**dumps_kwargs: Any,
) -> str:
"""
Generate a JSON representation of the model, `include` and `exclude` arguments
as per `dict()`.
`encoder` is an optional function to supply as `default` to json.dumps(), other
arguments as per `json.dumps()`.
Deviates from pydantic `exclude_unset` `True` by default instead of `False` by
default.
"""
exclude_fields: dict[int | str, Any] = self._include_exclude_to_dict(
include_exclude=exclude
)
if "assets" in self.__fields_set__:
exclude_assets = {}
for asset in self.assets:
# don't check fields that should always be set
check_fields: set[str] = asset.__fields_set__.copy().difference(_FIELDS_ALWAYS_SET)
for field in check_fields:
if isinstance(getattr(asset, field), tuple(_EXCLUDE_TYPES_FROM_JSON)):
exclude_assets[asset.name] = {field: True}
if exclude_assets:
exclude_fields["assets"] = exclude_assets
return super().json(
include=include,
exclude=exclude_fields,
by_alias=by_alias,
skip_defaults=skip_defaults,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
encoder=encoder,
models_as_dict=models_as_dict,
**dumps_kwargs,
)
@override
def _add_asset(self, asset: _DataAssetT, connect_options: dict | None = None) -> _DataAssetT:
"""Adds an asset to this "_PandasDatasource" object.
The reserved asset name "DEFAULT_PANDAS_DATA_ASSET_NAME" undergoes replacement (rather than signaling error).
Args:
asset: The DataAsset to be added to this datasource.
""" # noqa: E501 # FIXME CoP
asset_name: str = asset.name
asset_names: Set[str] = self.get_asset_names()
in_cloud_context: bool = False
if self._data_context:
in_cloud_context = self._data_context._datasource_store.cloud_mode
if asset_name == DEFAULT_PANDAS_DATA_ASSET_NAME:
if in_cloud_context:
# In cloud mode, we need to generate a unique name for the asset so that it gets persisted # noqa: E501 # FIXME CoP
asset_name = f"{asset.type}-{_short_id()}"
logger.info(
f"Generating unique name for '{DEFAULT_PANDAS_DATA_ASSET_NAME}' asset '{asset_name}'" # noqa: E501 # FIXME CoP
)
asset.name = asset_name
elif asset_name in asset_names:
self.delete_asset(name=asset_name)
return super()._add_asset(asset=asset, connect_options=connect_options)
_DYNAMIC_ASSET_TYPES = list(_PANDAS_ASSET_MODELS.values())
@public_api
| _PandasDatasource |
python | pytorch__pytorch | torch/__init__.py | {
"start": 83066,
"end": 86628
} | class ____:
compiler_name = "inductor"
def __init__(self, mode, options, dynamic):
from torch._inductor.compiler_bisector import CompilerBisector
self.config: dict[str, _Any] = {}
self.dynamic = dynamic
self.apply_mode(mode)
self.apply_options(options)
self.apply_options(CompilerBisector.get_config_change("inductor"))
cuda_version = None
if hasattr(torch, "version"):
from torch.torch_version import TorchVersion
cuda_version = TorchVersion(getattr(torch.version, "cuda", "0.0"))
if self.config.get("triton.cudagraphs", False) and (
(cuda_version and cuda_version < "12.6")
or not profiler_allow_cudagraph_cupti_lazy_reinit_cuda12()
):
os.environ["DISABLE_CUPTI_LAZY_REINIT"] = "1"
# FIXME: CUDA Graph does not work well with CUPTI teardown.
# 1) crashes on 1st lazy CUPTI re-init after teardown (CUDA 11)
# 2) crashes on 2nd non-lazy CUPTI re-init after teardown (CUDA 12)
# Workaround: turn off CUPTI teardown when using CUDA Graphs.
os.environ["TEARDOWN_CUPTI"] = "0"
def __eq__(self, other):
return (
isinstance(other, _TorchCompileInductorWrapper)
and self.config == other.config
and self.dynamic == other.dynamic
)
def apply_mode(self, mode: _Optional[str]):
if mode and mode != "default":
from torch._inductor import list_mode_options
self.apply_options(list_mode_options(mode, self.dynamic))
def apply_options(self, options: _Optional[dict[str, _Any]]):
if not options:
return
from torch._inductor import config
current_config: dict[str, _Any] = config.get_config_copy()
for key, val in options.items():
attr_name = key.replace("-", "_")
if attr_name not in current_config:
raise RuntimeError(
f"Unexpected optimization option {key}, known options are {list(current_config.keys())}"
)
attr_type = config.get_type(attr_name) # type: ignore[attr-defined]
# Subscriptable generic types don't support isinstance so skip the type
# check. There doesn't seem to be a good way of checking membership without
# 3rd party libraries.
if _get_origin(attr_type) is None:
if not isinstance(val, attr_type):
val_type_str = type(val).__name__
expected_type_str = type(current_config[attr_name]).__name__
raise RuntimeError(
f"Unexpected type of attr {key}, got {val_type_str} should be {expected_type_str}"
)
self.config[attr_name] = val
def __call__(self, model_, inputs_):
from torch._inductor.compile_fx import compile_fx
return compile_fx(model_, inputs_, config_patches=self.config)
def get_compiler_config(self):
from torch._inductor.compile_fx import get_patched_config_dict
return get_patched_config_dict(config_patches=self.config)
def reset(self):
from torch._inductor import config
if "triton.cudagraphs" in self.config or config.triton.cudagraphs:
if self.config.get("triton.cudagraphs", True):
from torch._inductor.cudagraph_trees import reset_cudagraph_trees
reset_cudagraph_trees()
| _TorchCompileInductorWrapper |
python | geekcomputers__Python | Windows_Wallpaper_Script/wallpaper_extract.py | {
"start": 61,
"end": 4475
} | class ____:
# Set Environment Variables
username = os.environ["USERNAME"]
# An Amazing Code You Will Love To Have
# All file urls
file_urls = {
"wall_src": "C:\\Users\\"
+ username
+ "\\AppData\\Local\\Packages\\Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy\\"
+ "LocalState\\Assets\\",
"wall_dst": os.path.dirname(os.path.abspath(__file__)) + "\\Wallpapers\\",
"wall_mobile": os.path.dirname(os.path.abspath(__file__))
+ "\\Wallpapers\\mobile\\",
"wall_desktop": os.path.dirname(os.path.abspath(__file__))
+ "\\Wallpapers\\desktop\\",
}
msg = """
DDDDD OOOOO NN N EEEEEEE
D D O O N N N E
D D O O N N N E
D D O O N N N EEEE
D D O O N N N E
D D O O N N N E
DDDDD OOOOO N NN EEEEEEE
"""
# A method to showcase time effect
@staticmethod
def time_gap(string):
print(string, end="")
time.sleep(1)
print(".", end="")
time.sleep(1)
print(".")
# A method to import the wallpapers from src folder(dir_src)
@staticmethod
def copy_wallpapers():
w = Wallpaper
w.time_gap("Copying Wallpapers")
# Copy All Wallpapers From Src Folder To Dest Folder
for filename in os.listdir(w.file_urls["wall_src"]):
shutil.copy(w.file_urls["wall_src"] + filename, w.file_urls["wall_dst"])
# A method to Change all the Extensions
@staticmethod
def change_ext():
w = Wallpaper
w.time_gap("Changing Extensions")
# Look into all the files in the executing folder and change extension
for filename in os.listdir(w.file_urls["wall_dst"]):
base_file, ext = os.path.splitext(filename)
if ext == "":
if not os.path.isdir(w.file_urls["wall_dst"] + filename):
os.rename(
w.file_urls["wall_dst"] + filename,
w.file_urls["wall_dst"] + filename + ".jpg",
)
# Remove all files Not having Wallpaper Resolution
@staticmethod
def extract_wall():
w = Wallpaper
w.time_gap("Extracting Wallpapers")
for filename in os.listdir(w.file_urls["wall_dst"]):
base_file, ext = os.path.splitext(filename)
if ext == ".jpg":
try:
im = Image.open(w.file_urls["wall_dst"] + filename)
except IOError:
print("This isn't a picture.", filename)
if list(im.size)[0] != 1920 and list(im.size)[0] != 1080:
im.close()
os.remove(w.file_urls["wall_dst"] + filename)
else:
im.close()
# Arrange the wallpapers into the corresponding folders
@staticmethod
def arr_desk_wallpapers():
w = Wallpaper
w.time_gap("Arranging Desktop wallpapers")
for filename in os.listdir(w.file_urls["wall_dst"]):
base_file, ext = os.path.splitext(filename)
if ext == ".jpg":
try:
im = Image.open(w.file_urls["wall_dst"] + filename)
if list(im.size)[0] == 1920:
im.close()
os.rename(
w.file_urls["wall_dst"] + filename,
w.file_urls["wall_desktop"] + filename,
)
elif list(im.size)[0] == 1080:
im.close()
os.rename(
w.file_urls["wall_dst"] + filename,
w.file_urls["wall_mobile"] + filename,
)
else:
im.close()
except FileExistsError:
print("File Already Exists!")
os.remove(w.file_urls["wall_dst"] + filename)
@staticmethod
def exec_all():
w = Wallpaper
w.copy_wallpapers()
w.change_ext()
w.extract_wall()
w.arr_desk_wallpapers()
print(w.msg)
time.sleep(2)
wall = Wallpaper()
wall.exec_all()
| Wallpaper |
python | tensorflow__tensorflow | tensorflow/python/framework/importer_test.py | {
"start": 1996,
"end": 49764
} | class ____(test.TestCase):
def _MakeGraphDef(self,
text,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
text = "versions: { producer: %d min_consumer: %d };\n%s" % (producer,
min_consumer,
text)
ret = graph_pb2.GraphDef()
text_format.Merge(text, ret)
return ret
def testBasic(self):
with ops.Graph().as_default():
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutputFloatOutput' }
node { name: 'B' op: 'ListOutput'
attr { key: 'T'
value { list { type: DT_INT32 type: DT_FLOAT } } } }
node { name: 'C' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_FLOAT } }
input: 'A:1' input: 'B:1' }
"""),
return_elements=["A", "B", "C", "D"],
name="import")
# Assert that the import process creates distinct tensors.
self.assertNotEqual(a.outputs[0].name, a.outputs[1].name)
self.assertNotEqual(b.outputs[0].name, b.outputs[1].name)
self.assertNotEqual(a.outputs[0].name, b.outputs[0].name)
self.assertNotEqual(a.outputs[0].name, b.outputs[1].name)
self.assertNotEqual(a.outputs[1].name, b.outputs[0].name)
self.assertNotEqual(a.outputs[1].name, b.outputs[1].name)
# Assert that the ops are connected according to the GraphDef topology.
self.assertEqual(c.inputs[0], a.outputs[0])
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], b.outputs[1])
# Check the types of the returned ops and tensors.
self.assertEqual(a.type, "IntOutputFloatOutput")
self.assertEqual(b.type, "ListOutput")
self.assertEqual(c.type, "ListInput")
self.assertEqual(d.type, "ListInput")
self.assertEqual(a.outputs[0].dtype, dtypes.int32)
self.assertEqual(a.outputs[1].dtype, dtypes.float32)
self.assertEqual(b.outputs[0].dtype, dtypes.int32)
self.assertEqual(b.outputs[1].dtype, dtypes.float32)
# Check the names of the returned ops.
self.assertEqual(a.name, "import/A")
self.assertEqual(b.name, "import/B")
self.assertEqual(c.name, "import/C")
self.assertEqual(d.name, "import/D")
# Check that the op_def is still available.
self.assertNotEqual(None, a.op_def)
def testMultipleImport(self):
graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'IntInput' input: 'A:0' }
""")
with ops.Graph().as_default():
# Initial import
a, b = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="")
self.assertEqual(a.name, "A")
self.assertEqual(b.name, "B")
self.assertEqual(list(b.inputs), [a.outputs[0]])
# Repeat the same import
a1, b1 = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="")
self.assertEqual(a1.name, "A_1")
self.assertEqual(b1.name, "B_1")
self.assertEqual(list(b1.inputs), [a1.outputs[0]])
# Repeat the same import again
a2, b2 = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="")
self.assertEqual(a2.name, "A_2")
self.assertEqual(b2.name, "B_2")
self.assertEqual(list(b2.inputs), [a2.outputs[0]])
# Import with an already-used name
a3, b3 = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="A")
self.assertEqual(a3.name, "A_3/A")
self.assertEqual(b3.name, "A_3/B")
self.assertEqual(list(b3.inputs), [a3.outputs[0]])
# Import with an already-used name but with a '/' to indicate an
# "absolute" name scope (see the Graph.name_scope docstring).
a_a, a_b = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="A/")
self.assertEqual(a_a.name, "A/A")
self.assertEqual(a_b.name, "A/B")
self.assertEqual(list(a_b.inputs), [a_a.outputs[0]])
# Repeat the same import.
a_a1, a_b1 = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="A/")
self.assertEqual(a_a1.name, "A/A_1")
self.assertEqual(a_b1.name, "A/B_1")
self.assertEqual(list(a_b1.inputs), [a_a1.outputs[0]])
# Import with existing de-duped node names
a1_1, b1_1 = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A_1' op: 'IntOutput' }
node { name: 'B_1' op: 'IntInput' input: 'A_1:0' }
"""),
return_elements=["A_1", "B_1"],
name="")
self.assertEqual(a1_1.name, "A_1_1")
self.assertEqual(b1_1.name, "B_1_1")
self.assertEqual(list(b1_1.inputs), [a1_1.outputs[0]])
# Create a name scope and then import node with same name
with ops.name_scope("foo"):
constant_op.constant(1)
foo, = importer.import_graph_def(
self._MakeGraphDef("node { name: 'foo' op: 'IntOutput' }"),
return_elements=["foo"],
name="")
self.assertEqual(foo.name, "foo_1")
# Imported node name can't conflict with intermediate name scope (but can
# conflict with outer scope and full name scope)
with ops.name_scope("outer"):
with ops.name_scope("inner"):
c = constant_op.constant(1, name="c")
self.assertEqual(c.op.name, "outer/inner/c")
outer, inner, new_c, outer_inner, outer_inner_c = (
importer.import_graph_def(
self._MakeGraphDef(
"node { name: 'outer' op: 'IntOutput' }"
"node { name: 'inner' op: 'IntOutput' }"
"node { name: 'c' op: 'IntOutput' }"
"node { name: 'outer/inner' op: 'IntOutput' }"
"node { name: 'outer/inner/c' op: 'IntOutput' }"),
return_elements=["outer", "inner", "c", "outer/inner",
"outer/inner/c"],
name=""))
self.assertEqual(outer.name, "outer_1")
self.assertEqual(inner.name, "inner")
self.assertEqual(new_c.name, "c")
self.assertEqual(outer_inner.name, "outer/inner_1")
self.assertEqual(outer_inner_c.name, "outer/inner/c_1")
def testEmptyNameScope(self):
with ops.Graph().as_default():
# Create name scope but don't create any ops with it
with ops.name_scope("foo"):
pass
# Import graph def that uses name scope name
op, = importer.import_graph_def(
self._MakeGraphDef("node { name: 'foo' op: 'IntOutput' }"),
return_elements=["foo"],
name="")
self.assertEqual(op.name, "foo")
def testInputMap(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'TwoIntOutputs' }
node { name: 'C' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={"A:0": feed_a_0,
"B:1": feed_b_1},
return_elements=["A", "B", "C", "D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapBytes(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'TwoIntOutputs' }
node { name: 'C' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={b"A:0": feed_a_0,
b"B:1": feed_b_1},
return_elements=[b"A", b"B", b"C", b"D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapUnicode(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'TwoIntOutputs' }
node { name: 'C' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={u"A:0": feed_a_0,
u"B:1": feed_b_1},
return_elements=[u"A", u"B", u"C", u"D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testImplicitZerothOutput(self):
with ops.Graph().as_default():
a, b = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'IntInput' input: 'A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.inputs[0], a.outputs[0])
def testInputMapImplicitZerothOutput(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
b, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'IntInput' input: 'A:0' }
"""),
input_map={"A": feed_a_0},
return_elements=["B"])
self.assertEqual(b.inputs[0], feed_a_0)
def testWithControlDependency(self):
with ops.Graph().as_default():
a, b = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' input: '^A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.control_inputs, [a])
def testWithRefs(self):
with ops.Graph().as_default():
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'RefOutput' }
node { name: 'B' op: 'IntOutput' }
node { name: 'C' op: 'TwoIntInputs' input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'RefInputIntInput' input: 'A:0' input: 'B:0' }
"""),
return_elements=["A", "B", "C", "D"])
self.assertEqual(c.inputs[0], a.outputs[0])
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[0])
self.assertEqual(d.inputs[1], b.outputs[0])
self.assertEqual(a.outputs[0].dtype, dtypes.int32_ref)
self.assertEqual(c._input_types, [dtypes.int32, dtypes.int32])
self.assertEqual(c.outputs, [])
self.assertEqual(d._input_types, [dtypes.int32_ref, dtypes.int32])
self.assertEqual(d.outputs, [])
def testResources(self):
# Produce GraphDef containing a ops producing and consuming resources.
graph = ops.Graph()
with graph.as_default():
var = resource_variable_ops.ResourceVariable(1.0)
var_assign = var.assign(2.0)
# Use an op that requires handle shape to be set.
var_shape = resource_variable_ops.variable_shape(var.handle)
init = variables.global_variables_initializer()
graph_def = graph.as_graph_def()
# Import the GraphDef.
with ops.Graph().as_default():
# pylint: disable=unused-variable
imported_var, imported_assign, imported_shape, imported_init = (
importer.import_graph_def(
graph_def,
return_elements=[var.name, var_assign.name, var_shape.name,
init.name]))
# Make sure the handle shape is set on the imported variable.
new_var_shape = resource_variable_ops.variable_shape(imported_var)
# pylint: enable=unused-variable
# Run the imported graph.
# TODO(b/76173421): make this work (currently DCHECKS)
# with self.cached_session() as sess:
# self.evaluate(imported_init)
# self.assertEqual(self.evaluate(imported_var), 1.0)
# self.assertEqual(self.evaluate(imported_assign), 2.0)
# self.assertEqual(list(self.evaluate(imported_shape)), [])
# self.assertEqual(list(self.evaluate(new_var_shape)), [])
def testWhileLoop(self):
# Produce GraphDef containing while loop.
graph = ops.Graph()
with graph.as_default():
r = while_loop.while_loop(lambda i: i < 10, lambda i: i + 1, [0])
# Add an op that consumes the while loop output.
math_ops.add(r, 1)
graph_def = graph.as_graph_def()
# Import the GraphDef and make sure it runs.
with ops.Graph().as_default():
imported_r, = importer.import_graph_def(graph_def,
return_elements=[r.name])
self.assertEqual(imported_r.name, "import/" + r.name)
with self.cached_session() as sess:
self.assertEqual(self.evaluate(imported_r), 10)
def testImportWhileLoopInCond(self):
# Produce GraphDef containing while loop.
graph = ops.Graph()
with graph.as_default():
r = while_loop.while_loop(lambda i: i < 10, lambda i: i + 1, [0])
graph_def = graph.as_graph_def()
# Import the GraphDef inside a cond and make sure it runs.
with ops.Graph().as_default():
def ImportFn():
return importer.import_graph_def(graph_def, return_elements=[r.name])[0]
pred = array_ops.placeholder(dtypes.bool)
out = cond.cond(pred, ImportFn,
lambda: constant_op.constant(1))
with self.cached_session() as sess:
self.assertEqual(sess.run(out, {pred: True}), 10)
self.assertEqual(sess.run(out, {pred: False}), 1)
def testImportWhileLoopInWhileLoop(self):
self.skipTest("b/111757448")
# Produce GraphDef containing while loop.
graph = ops.Graph()
with graph.as_default():
r = while_loop.while_loop(lambda i: i < 10, lambda i: i + 1, [0])
graph_def = graph.as_graph_def()
# Import the GraphDef inside another loop and make sure it runs.
with ops.Graph().as_default():
def ImportFn(_):
return importer.import_graph_def(graph_def, return_elements=[r.name])[0]
out = while_loop.while_loop(
lambda i: i < 2,
ImportFn, [0],
shape_invariants=[tensor_shape.TensorShape(None)])
with self.cached_session() as sess:
self.assertEqual(self.evaluate(out), 10)
def testTypeMismatchInGraphDef(self):
# TODO(skyewm): improve error message
error_msg = ("Input 0 of node import/B was passed int32 from import/A:0 "
"incompatible with expected float.")
with ops.Graph().as_default():
with self.assertRaisesRegex(ValueError, error_msg):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'FloatInput' input: 'A:0' }
"""))
def testShapeAllowlistViolation(self):
# L2 loss produces a scalar shape, but the graph
# has the wrong shape, so raise an error.
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
_ = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'FloatOutput' }
node { name: 'B' op: 'L2Loss'
input: 'A:0'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: '_output_shapes'
value { list { shape { dim { size: 43 } } } } } }
"""),
return_elements=["B"],
name="import")
self.assertTrue(
"Shapes () and (43,) are not compatible" in str(e.exception))
def testInvalidSignatureTooManyInputsInGraphDef(self):
with ops.Graph().as_default():
# TODO(skyewm): improve error message
with self.assertRaisesRegex(
ValueError,
"NodeDef expected inputs '' do not match 1 inputs specified"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'None' input: 'A:0' }
"""))
def testInvalidSignatureNotEnoughInputsInGraphDef(self):
with ops.Graph().as_default():
# TODO(skyewm): improve error message
with self.assertRaisesRegex(
ValueError,
"NodeDef expected inputs 'int32, float' do not match 1 inputs "
"specified"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'IntInputFloatInput' input: 'A:0' }
"""))
def testMissingInputOpInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(ValueError,
"Node 'B': Unknown input node 'A:0'"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'FloatInput' input: 'A:0' }
"""))
def testMissingInputOpInGraphDefButAppearsInInputMap(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(5.0)
b, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'FloatInput' input: 'A:0' }
"""),
input_map={"A:0": feed_a_0},
return_elements=["B"])
self.assertEqual(b.inputs[0], feed_a_0)
def testMissingInputTensorInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(
ValueError,
"Node 'B': Connecting to invalid output 1 of source node A "
"which has 1 outputs"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'FloatOutput' }
node { name: 'B' op: 'FloatInput' input: 'A:1' }
"""))
def testMissingControlInputInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(ValueError,
r"Node 'B': Unknown input node '\^A'"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: '^A' }
"""))
def testInvalidTensorNameOutputIndexInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(ValueError,
"Node 'B': Unknown input node 'A:B'"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: 'A:B' }
"""))
def testInvalidTensorNameInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(ValueError,
"Node 'B': Unknown input node 'A:B:0'"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: 'A:B:0' }
"""))
def testMissingReturnOperation(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(
ValueError, "Requested return node 'B' not found in graph def"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["B"])
def testMissingReturnTensor(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(
ValueError,
r"Invalid return output 1 of node 'A', which has 1 output\(s\)"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
return_elements=["A:1"])
with self.assertRaisesRegex(
ValueError, "Requested return tensor 'B:0' not found in graph def"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
return_elements=["B:0"])
with self.assertRaisesRegex(ValueError,
"Cannot convert 'A:B:0' to a tensor name."):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
return_elements=["A:B:0"])
def testMissingInputMap(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(
ValueError,
r"Attempted to map inputs that were not found in graph_def: \[B:0\]"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
input_map={"B:0": constant_op.constant(5.0)})
def testInputMapUnusedAsInput(self):
with ops.Graph().as_default():
# Mapping an unused node output should succeed.
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
input_map={"A:0": constant_op.constant(5.0)})
# Mapping a non-existent output of an existing node should fail.
with self.assertRaisesRegex(
ValueError,
r"Attempted to map inputs that were not found in graph_def: \[A:2\]"):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
input_map={"A:2": constant_op.constant(5.0)})
def testInputMapTypeMismatch(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(
ValueError, "Input 0 of node import/B was passed float from Const:0 "
"incompatible with expected int32."):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'IntInput' input: 'A:0' }
"""),
input_map={"A:0": constant_op.constant(5.0)})
def testNoReturns(self):
with ops.Graph().as_default() as g:
ret = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""))
self.assertEqual(ret, None)
a = g.get_operation_by_name("import/A")
self.assertEqual(a.type, "None")
def testOverrideNamePrefix(self):
with ops.Graph().as_default():
a, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["A"],
name="imported_graph")
self.assertEqual(a.name, "imported_graph/A")
def testDefaultNamePrefix(self):
with ops.Graph().as_default():
a, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["A"],
name=None)
self.assertEqual(a.name, "import/A")
def testNamePrefixColocationAttrs(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with ops.Graph().as_default():
b, = importer.import_graph_def(
original_graph_def, return_elements=["B"], name="imported_graph")
self.assertTrue("_class" in b.node_def.attr)
self.assertProtoEquals(
"list { s: 'loc:@imported_graph/A' }",
b.node_def.attr["_class"])
def testColocationAndDevice(self):
# A and B are colocated, device set on A.
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' device: '/device:CPU:0' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with ops.Graph().as_default():
a, b = importer.import_graph_def(original_graph_def,
return_elements=["A", "B"],
name="")
self.assertEqual(a.device, "/device:CPU:0")
self.assertEqual(b.device, "/device:CPU:0")
self.assertEqual(a.colocation_groups(), [b"loc:@A"])
self.assertEqual(b.colocation_groups(), [b"loc:@A"])
# A and B are colocated, device set on B.
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }
node { name: 'B' op: 'None' device: '/device:CPU:0' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with ops.Graph().as_default():
a, b = importer.import_graph_def(original_graph_def,
return_elements=["A", "B"],
name="")
# TODO(skyewm): this behavior seems inconsistent with the above. Why is
# B's device ignored?
self.assertEqual(a.device, "")
self.assertEqual(b.device, "")
self.assertEqual(a.colocation_groups(), [b"loc:@A"])
self.assertEqual(b.colocation_groups(), [b"loc:@A"])
def testColocationWithDeviceFn(self):
  """Colocation constraints override device functions on import.

  'B' carries a `_class: loc:@A` colocation attr, so whatever device the
  device function assigns to 'A' must also be applied to 'B', regardless
  of what the device function returns for 'B' itself.
  """
  original_graph_def = self._MakeGraphDef("""
        node { name: 'A' op: 'None' attr {
          key: '_class'
          value { list { s: 'loc:@A' } }
        } }
        node { name: 'B' op: 'None' attr {
          key: '_class'
          value { list { s: 'loc:@A' } }
        } }""")
  # A device function that places "A" on one device and "B" on
  # another device. Because B is colocated with A, we test that B's
  # device function is overridden by A.
  def CustomDeviceFn(op):
    if "A" in op.name:
      return "/device:A:0"
    else:
      return "/device:B:0"
  with ops.Graph().as_default():
    with ops.device(CustomDeviceFn):
      a, b = importer.import_graph_def(original_graph_def,
                                       return_elements=["A", "B"],
                                       name="imported_graph")
  self.assertEqual(a.device, "/device:A:0")
  self.assertEqual(b.device, "/device:A:0")  # B inherits A's device.
  self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
  self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])
  # Test a scenario where 'A' doesn't get a device; 'A' should not have a
  # device, but during runtime will get colocated with 'B' because of the
  # colocation attribute. B's device function is still overridden by A.
  def BDeviceFn(op):
    if "B" in op.name:
      return "/device:B:0"
    return ""
  with ops.Graph().as_default():
    with ops.device(BDeviceFn):
      a, b = importer.import_graph_def(original_graph_def,
                                       return_elements=["A", "B"],
                                       name="imported_graph")
  self.assertEqual(a.device, "")
  self.assertEqual(b.device, "")
  self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
  self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])
  # Only A gets a device, so B inherits it implicitly.
  def ADeviceFn(op):
    if "A" in op.name:
      return "/device:A:0"
    return ""
  with ops.Graph().as_default():
    with ops.device(ADeviceFn):
      a, b = importer.import_graph_def(original_graph_def,
                                       return_elements=["A", "B"],
                                       name="imported_graph")
  self.assertEqual(a.device, "/device:A:0")
  self.assertEqual(b.device, "/device:A:0")
  self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
  self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])
def testMultipleColocationWithDeviceFn(self):
  """A node colocated with several peers keeps all colocation groups.

  'C' is colocated with both 'A' and 'B'; its device follows the one peer
  ('B') that the device function actually places.
  """
  original_graph_def = self._MakeGraphDef("""
        node { name: 'A' op: 'None'}
        node { name: 'B' op: 'None'}
        node { name: 'C' op: 'None' attr {
          key: '_class'
          value { list { s: 'loc:@A' s: 'loc:@B' } }
        } }""")
  # A device function that places "B" on a device, and "A" is empty.
  #
  # B and C should contain "/device:B". A will not right now. But
  # because of the colocation property, at runtime it would be
  # placed with B and C.
  def CustomDeviceFn(op):
    if "B" in op.name:
      return "/device:B:0"
    return ""
  with ops.Graph().as_default():
    with ops.device(CustomDeviceFn):
      a, b, c = importer.import_graph_def(original_graph_def,
                                          return_elements=["A", "B", "C"],
                                          name="imported_graph")
  self.assertEqual(a.device, "")
  self.assertEqual(b.device, "/device:B:0")
  self.assertEqual(c.device, "/device:B:0")  # C follows B's device.
  self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
  self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/B"])
  self.assertEqual(c.colocation_groups(),
                   [b"loc:@imported_graph/A", b"loc:@imported_graph/B"])
def testNamePrefixColocationAttrsMultipleImport(self):
  """Re-importing the same GraphDef uniquifies names and colocation refs."""
  original_graph_def = self._MakeGraphDef("""
        node { name: 'A' op: 'None' }
        node { name: 'B' op: 'None' attr {
          key: '_class'
          value { list { s: 'loc:@A' } }
        } }""")
  with ops.Graph().as_default():
    a, b = importer.import_graph_def(
        original_graph_def, return_elements=["A", "B"], name="")
    a_1, b_1 = importer.import_graph_def(
        original_graph_def, return_elements=["A", "B"], name="")
    self.assertEqual(a.name, "A")
    self.assertEqual(b.name, "B")
    self.assertEqual(b.colocation_groups(), [b"loc:@A"])
    # The second import gets "_1" suffixes, and B_1's colocation attr is
    # rewritten to point at the renamed A_1 rather than the original A.
    self.assertEqual(a_1.name, "A_1")
    self.assertEqual(b_1.name, "B_1")
    self.assertEqual(b_1.colocation_groups(), [b"loc:@A_1"])
def testNamePrefixColocationAttrsNotFound(self):
  """Colocation with a node absent from the GraphDef raises ValueError."""
  # 'B' references 'loc:@A' but no node 'A' exists in the GraphDef.
  original_graph_def = self._MakeGraphDef("""
        node { name: 'B' op: 'None' attr {
          key: '_class'
          value { list { s: 'loc:@A' } }
        } }""")
  with ops.Graph().as_default():
    with self.assertRaisesRegex(
        ValueError, "Node 'B' expects to be colocated with unknown node 'A'"):
      importer.import_graph_def(
          original_graph_def, return_elements=["B"], name="imported_graph")
def testEmptyGraph(self):
  """Importing an empty GraphDef must leave the graph version untouched."""
  with ops.Graph().as_default() as graph:
    version_before = graph.version
    empty_gdef = self._MakeGraphDef("")
    importer.import_graph_def(empty_gdef)
    self.assertEqual(version_before, graph.version)
def testInvalidInputForGraphDef(self):
  """Passing a non-GraphDef (here a str) is rejected with a TypeError."""
  not_a_graph_def = ""
  with ops.Graph().as_default():
    with self.assertRaisesRegex(
        TypeError, r"Argument `graph_def` must be a GraphDef proto."):
      importer.import_graph_def(not_a_graph_def)
def testInvalidInputForInputMap(self):
  """`input_map` must be a dict; non-Tensor values require a non-empty name."""
  # Case 1: input_map that is not a dictionary at all.
  with ops.Graph().as_default():
    with self.assertRaisesRegex(
        TypeError,
        r"Argument `input_map` must be a dictionary. Obtained list"):
      importer.import_graph_def(
          self._MakeGraphDef(""), input_map=[constant_op.constant(5.0)])
  graph_def = self._MakeGraphDef("""
       node { name: 'a' op: 'Placeholder'
              attr { key: 'dtype' value { type: DT_FLOAT } }}
       node { name: 'id' op: 'Identity' input: 'a:0'
              attr { key: 'T' value { type: DT_FLOAT } }}""")
  # Case 2: mapping to a non-Tensor (a Variable) with an empty name is
  # rejected, because non-Tensor values create extra ops that need a scope.
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          graph_def,
          input_map={"a:0": variables.Variable(5.0)},
          name="")
    self.assertStartsWith(str(e.exception),
                          "tf.import_graph_def() requires a non-empty `name` "
                          "if `input_map` contains non-Tensor values.")
  # Case 3: mapping to a plain Tensor works even with an empty name.
  with ops.Graph().as_default():
    t, = importer.import_graph_def(
        graph_def,
        input_map={"a:0": constant_op.constant(5.0)},
        name="",
        return_elements=["id:0"])
    with self.cached_session():
      self.assertEqual(5.0, self.evaluate(t))
def testInvalidInputForReturnOperations(self):
  """`return_elements` entries must be strings holding valid tensor names."""
  with ops.Graph().as_default():
    empty_gdef = self._MakeGraphDef("")
    # A non-string element is a TypeError.
    with self.assertRaisesRegex(
        TypeError, "Argument `return_elements` must be a list of strings."):
      importer.import_graph_def(empty_gdef, return_elements=[7])
    # A string that is not a well-formed tensor name is a ValueError.
    with self.assertRaisesRegex(ValueError,
                                "Cannot convert 'a:b:c' to a tensor name."):
      importer.import_graph_def(empty_gdef, return_elements=["a:b:c"])
def testDuplicateOperationNames(self):
  """A GraphDef containing two nodes named 'A' is rejected."""
  gdef_with_duplicate = self._MakeGraphDef("""
        node { name: 'A' op: 'IntOutput' }
        node { name: 'B' op: 'IntOutput' }
        node { name: 'A' op: 'IntOutput' }
        """)
  with self.assertRaisesRegex(ValueError, "Node 'A' is not unique"):
    importer.import_graph_def(gdef_with_duplicate)
@test_util.run_v1_only("v1 Tensor doesn't have attribute 'numpy'")
def testWithExtensionAndAttr(self):
  """An op with list-valued attrs (Pack) round-trips through export/import."""
  with ops.Graph().as_default() as g:
    c = constant_op.constant(5.0, dtype=dtypes.float32, name="c")
    array_ops_stack.stack([c, c], name="pack")
  gdef = g.as_graph_def()
  with self.cached_session():
    pack, = importer.import_graph_def(gdef, return_elements=["pack"])
    # The imported pack op must produce the same stacked value.
    self.assertAllEqual(pack.outputs[0], [5.0, 5.0])
def testWithDevice(self):
  """Imported ops keep recorded devices, merged with the context device."""
  with ops.Graph().as_default() as g:
    # No device.
    a = constant_op.constant(3.0, name="a")
    with ops.device("/cpu:0"):
      b = constant_op.constant(4.0, name="b")
    with ops.device("/job:worker"):
      c = constant_op.constant(5.0, name="c")
  gdef = g.as_graph_def()
  # Plain import: devices are preserved verbatim.
  with ops.Graph().as_default():
    a2, b2, c2 = importer.import_graph_def(
        gdef, return_elements=["a", "b", "c"])
    self.assertEqual(a.device, a2.device)
    self.assertEqual(b.device, b2.device)
    self.assertEqual(c.device, c2.device)
  # Import under a merged device spec: unset fields are filled in.
  with ops.Graph().as_default():
    with ops.device(device.merge_device("/task:0")):
      a3, b3, c3 = importer.import_graph_def(
          gdef, return_elements=["a", "b", "c"])
      self.assertEqual("/task:0", a3.device)
      self.assertEqual("/task:0/device:CPU:0", b3.device)  # canonicalized.
      self.assertEqual(c.device + "/task:0", c3.device)
  with ops.Graph().as_default():
    with ops.device(device.merge_device("/job:ps")):
      a4, b4, c4 = importer.import_graph_def(
          gdef, return_elements=["a", "b", "c"])
      self.assertEqual("/job:ps", a4.device)
      self.assertEqual("/job:ps/device:CPU:0", b4.device)  # canonicalized.
      self.assertEqual(c.device, c4.device)  # worker overrides ps.
  with ops.Graph().as_default():
    with ops.device(device.merge_device("/device:GPU:0")):
      a5, b5, c5 = importer.import_graph_def(
          gdef, return_elements=["a", "b", "c"])
      self.assertEqual("/device:GPU:0", a5.device)
      self.assertEqual("/device:CPU:0", b5.device)  # cpu overrides gpu.
      self.assertEqual(c.device + "/device:GPU:0", c5.device)
def testWithDeviceFunctionDependingOnInputs(self):
  """Device functions can inspect op inputs during import."""
  with ops.Graph().as_default() as g:
    with ops.device("/job:ps"):
      v1 = constant_op.constant(1.0)
      v2 = constant_op.constant(1.0)
      _ = v1 + v2
      _ = v1 - v2
      _ = array_ops.identity(v1)
  gdef = g.as_graph_def()
  # We'll use the following device function to observe ops with two inputs.
  ops_with_two_inputs = []
  def InputCounter(op):
    if len(op.inputs) == 2:
      ops_with_two_inputs.append(op)
    return ""
  with ops.Graph().as_default() as g:
    with ops.device(InputCounter):
      importer.import_graph_def(gdef)
  # We expect to see the add and subtract, but not identity.
  self.assertEqual(2, len(ops_with_two_inputs))
def testGradient(self):
  """Gradients flow through imported subgraphs remapped via input_map."""
  # Build a tiny relu(matmul + bias) network and export it.
  with ops.Graph().as_default() as g:
    inputs = array_ops.placeholder(
        dtypes.float32, shape=[None, 100], name="input")
    weights = array_ops.placeholder(
        dtypes.float32, shape=[100, 10], name="weights")
    biases = array_ops.placeholder(dtypes.float32, shape=[10], name="biases")
    activations = nn_ops.relu(
        math_ops.matmul(inputs, weights) + biases, name="activations")
    loss = math_ops.reduce_mean(activations, name="loss")
  gdef = g.as_graph_def()
  # Re-import with the placeholders remapped to Variables, then verify the
  # gradient shapes computed through the imported ops.
  with ops.Graph().as_default() as g:
    input_placeholder = array_ops.placeholder(dtypes.float32, shape=[32, 100])
    weights_var = variables.Variable(
        random_ops.truncated_normal([100, 10]), name="weights")
    biases_var = variables.Variable(array_ops.zeros([10]), name="biases")
    activations, loss = importer.import_graph_def(
        gdef,
        input_map={
            "input:0": input_placeholder,
            "weights:0": weights_var,
            "biases:0": biases_var
        },
        return_elements=["activations:0", "loss:0"])
    self.assertEqual([32, 10], activations.get_shape())
    self.assertEqual([], loss.get_shape())
    weights_grad, biases_grad = gradients_impl.gradients(
        loss, [weights_var, biases_var])
    self.assertEqual([100, 10], weights_grad.get_shape())
    self.assertEqual([10], biases_grad.get_shape())
def testLargeGraph(self):
  """A constant past the 512M warning threshold still evaluates cleanly."""
  with self.cached_session():
    # The default message byte limit is 64M. Ours is 2G with a warning at 512.
    # Adding a 130M entries float32 tensor should exceed the warning, but not
    # the hard limit.
    big_shape = [130, 1000, 1000]
    big_values = np.ones(big_shape, dtype=np.float32)
    big_const = constant_op.constant(big_values, shape=big_shape)
    self.evaluate(array_ops.identity(big_const))
def testVersion(self):
  """Importing preserves producer/min_consumer version metadata."""
  v0 = versions.GRAPH_DEF_VERSION_MIN_CONSUMER
  v2 = versions.GRAPH_DEF_VERSION
  v1 = (v0 + v2) // 2  # an arbitrary version strictly between the bounds
  # Exercise every (producer, min_consumer) combination of the three values.
  for producer in v0, v1, v2:
    for min_consumer in v0, v1, v2:
      with ops.Graph().as_default():
        a, = importer.import_graph_def(
            self._MakeGraphDef(
                "node { name: 'A' op: 'TwoIntOutputs' }",
                producer=producer,
                min_consumer=min_consumer),
            return_elements=["A"])
        self.assertEqual(a.graph.graph_def_versions.producer, producer)
        self.assertEqual(a.graph.graph_def_versions.min_consumer,
                         min_consumer)
def testVersionLow(self):
  """A producer version below the supported minimum is rejected."""
  expected_msg = (
      r"GraphDef producer version -1 below min producer %d supported "
      r"by TensorFlow \S+\. Please regenerate your graph.$"
      % versions.GRAPH_DEF_VERSION_MIN_PRODUCER)
  with ops.Graph().as_default():
    with self.assertRaisesRegex(Exception, expected_msg):
      importer.import_graph_def(self._MakeGraphDef("", producer=-1))
def testVersionHigh(self):
  """A min_consumer version above the current version is rejected."""
  too_new = 1 << 30
  expected_msg = (
      r"GraphDef min consumer version %d above current version %d "
      r"for TensorFlow \S+\. Please upgrade TensorFlow\.$"
      % (too_new, versions.GRAPH_DEF_VERSION))
  with ops.Graph().as_default():
    with self.assertRaisesRegex(ValueError, expected_msg):
      importer.import_graph_def(self._MakeGraphDef("", min_consumer=too_new))
def testVersionAppliesToOpConstruction(self):
  """These tests rely on shape fns in test_ops.cc."""
  # The op's shape function only accepts older producer versions, so the
  # GraphDef's version must be in effect while ops are constructed at
  # import time — not just recorded afterwards.
  with ops.Graph().as_default():
    importer.import_graph_def(
        self._MakeGraphDef(
            "node { name: 'A' op: 'RequiresOlderGraphVersion' }",
            producer=versions.GRAPH_DEF_VERSION - 1),
        return_elements=["A"])
  with ops.Graph().as_default():
    with self.assertRaisesWithPredicateMatch(ValueError,
                                             "Wrong graph version.*"):
      importer.import_graph_def(
          self._MakeGraphDef(
              "node { name: 'A' op: 'RequiresOlderGraphVersion' }",
              producer=versions.GRAPH_DEF_VERSION),
          return_elements=["A"])
def testDefaultAttrsAdded(self):
  """Attrs missing from the NodeDef are filled from op-registry defaults."""
  with ops.Graph().as_default():
    imported = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'OpWithDefaultAttr' }
        """),
        return_elements=["A"])
    op_a = imported[0]
    self.assertEqual(123.0, op_a.get_attr("default_float"))
def testDefaultAttrsRemoved(self):
  """Attrs matching a default in `producer_op_list` are stripped on import."""
  producer_op_list = op_def_pb2.OpList()
  text_format.Merge("""
    op {
      name: 'OpWithFutureDefaultAttr'
      attr { name: 'default_int' type: 'int' default_value { i: 456 } }
    }
  """, producer_op_list)
  # Attr only in producer_op_list with default value gets removed.
  with ops.Graph().as_default():
    a = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'OpWithFutureDefaultAttr'
               attr { key: 'default_int' value { i: 456 } } }
        """),
        return_elements=["A"],
        producer_op_list=producer_op_list)
    # After stripping, the attr must no longer exist on the imported op.
    with self.assertRaisesRegex(
        ValueError, "Operation 'import/A' has no attr named 'default_int'."):
      a[0].get_attr("default_int")
def testFunctions(self):
  """Defun functions (grads, external captures, nesting) survive import.

  Builds a graph using four flavors of Defun, exports it, imports it into
  a fresh graph, runs it, then exports and re-imports once more to check
  that imported functions remain exportable.
  """
  dtype = dtypes.float32
  @function.Defun(dtype, dtype, dtype, dtype)
  def Grad(x, y, dout1, dout2):  # pylint: disable=unused-argument
    # Return the inputs for simplicity of testing. The correct return value
    # would be (dout1 + dout2, dout1 - dout2)
    return x, y
  @function.Defun(dtype, dtype, grad_func=Grad)
  def FuncWithGrad(x, y):
    return x + y, x - y
  @function.Defun(dtypes.int32)
  def ExternalTensorFunc(x):
    # c must be defined in the containing graph
    return x + c
  @function.Defun(dtypes.int32, dtypes.int32)
  def OuterFunc(x, y):
    @function.Defun(dtypes.int32)
    def InnerFunc(x):
      return x + x
    return InnerFunc(x) + y
  # Create graph with function calls and export to GraphDef
  with ops.Graph().as_default() as g1:
    p1 = array_ops.placeholder(dtype, name="p1")
    p2 = array_ops.placeholder(dtype, name="p2")
    # pylint: disable=unexpected-keyword-arg
    a, b = FuncWithGrad(p1, p2, name="f")
    c = constant_op.constant(10, dtype=dtypes.int32)
    ExternalTensorFunc(1, name="external")
    OuterFunc(10, 1, name="outer")
    # pylint: enable=unexpected-keyword-arg
  gdef = g1.as_graph_def()
  # Import GraphDef into new graph, add imported gradients, and test that
  # imported functions can be run
  with ops.Graph().as_default() as g2:
    p1, p2, a, b = importer.import_graph_def(
        gdef, return_elements=["p1:0", "p2:0", "f:0", "f:1"], name="")
    grad = gradients_impl.gradients([a], [p1, p2])
    with self.session(graph=g2) as sess:
      feed_dict = {p1: 1, p2: 2}
      a_val, b_val, grad_val = sess.run([a, b, grad], feed_dict=feed_dict)
      self.assertEqual(a_val, 3.0)
      self.assertEqual(b_val, -1.0)
      # Grad function returns inputs values for testing
      self.assertEqual(grad_val, [1.0, 2.0])
      self.assertEqual(sess.run("external:0"), 11)
      self.assertEqual(sess.run("outer:0"), 21)
  # Export the new graph and reimport to test that imported functions can be
  # successfully exported/imported again
  gdef = g2.as_graph_def()
  with ops.Graph().as_default() as g3:
    p1, p2, a, b = importer.import_graph_def(
        gdef, return_elements=["p1:0", "p2:0", "f:0", "f:1"], name="")
    # Create new gradient functions (in additional to the imported gradient
    # functions created in g2).
    grad = gradients_impl.gradients([a], [p1, p2])
    with self.session(graph=g3) as sess:
      feed_dict = {p1: 1, p2: 2}
      a_val, b_val, grad_val = sess.run([a, b, grad], feed_dict=feed_dict)
      self.assertEqual(a_val, 3.0)
      self.assertEqual(b_val, -1.0)
      self.assertEqual(grad_val, [1.0, 2.0])
      self.assertEqual(sess.run("external:0"), 11)
      self.assertEqual(sess.run("outer:0"), 21)
@test_util.run_v1_only("import inside defun not supported when eager "
                       "execution is enabled.")
def testImportInsideDefun(self):
  """import_graph_def can be called from inside a Defun body."""
  g = ops.Graph()
  with g.as_default():
    @function.Defun()
    def Add2(x, y):
      return math_ops.add(x, y)
    x = constant_op.constant(3.0, dtype=dtypes.float32)
    y = constant_op.constant(-5.0, dtype=dtypes.float32)
    z = Add2(x, y, name="z")  # pylint: disable=unexpected-keyword-arg
  gdef = g.as_graph_def()
  # The import happens while tracing TestFunc's body.
  @function.Defun()
  def TestFunc():
    return importer.import_graph_def(gdef, return_elements=["z:0"])[0]
  z = TestFunc()
  with self.cached_session():
    z_val = self.evaluate(z)
    self.assertEqual(z_val, -2.0)
@test_util.run_v1_only("_as_tf_output not supported when eager execution "
                       "is enabled.")
def testImportGraphWithFunctionTwice(self):
  """Importing a function-bearing graph twice yields equivalent results."""
  g = ops.Graph()
  with g.as_default():
    @function.Defun()
    def Add2(x, y):
      return math_ops.add(x, y)
    x = array_ops.placeholder(dtype=dtypes.float32, name="x")
    y = array_ops.placeholder(dtype=dtypes.float32, name="y")
    _ = Add2(x, y, name="z")  # pylint: disable=unexpected-keyword-arg
  gdef = g.as_graph_def()
  # Both imports share the same random inputs, so their outputs must match.
  x = random_ops.random_uniform(dtype=dtypes.float32, shape=())
  y = random_ops.random_uniform(dtype=dtypes.float32, shape=())
  input_map = {"x:0": x, "y:0": y}
  with ops.name_scope("first"):
    z1 = importer.import_graph_def(gdef, return_elements=["z:0"],
                                   input_map=input_map)[0]
  with ops.name_scope("second"):
    z2 = importer.import_graph_def(gdef, return_elements=["z:0"],
                                   input_map=input_map)[0]
  with self.cached_session() as sess:
    z1_val, z2_val = sess.run((z1, z2))
    self.assertAllEqual(z1_val, z2_val)
if __name__ == "__main__":
  # Run all tests in this module when executed as a script.
  test.main()
| ImportGraphDefTest |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 38444,
"end": 38644
} | class ____(Sky2PixProjection, QuadCube):
r"""
Tangential spherical cube projection - sky to pixel.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
| Sky2Pix_TangentialSphericalCube |
python | pypa__setuptools | setuptools/_distutils/tests/test_archive_util.py | {
"start": 1014,
"end": 11761
} | class ____(support.TempdirManager):
@pytest.mark.usefixtures('needs_zlib')
def test_make_tarball(self, name='archive'):
# creating something to tar
tmpdir = self._create_files()
self._make_tarball(tmpdir, name, '.tar.gz')
# trying an uncompressed one
self._make_tarball(tmpdir, name, '.tar', compress=None)
@pytest.mark.usefixtures('needs_zlib')
def test_make_tarball_gzip(self):
tmpdir = self._create_files()
self._make_tarball(tmpdir, 'archive', '.tar.gz', compress='gzip')
def test_make_tarball_bzip2(self):
pytest.importorskip('bz2')
tmpdir = self._create_files()
self._make_tarball(tmpdir, 'archive', '.tar.bz2', compress='bzip2')
def test_make_tarball_xz(self):
pytest.importorskip('lzma')
tmpdir = self._create_files()
self._make_tarball(tmpdir, 'archive', '.tar.xz', compress='xz')
@pytest.mark.skipif("not can_fs_encode('årchiv')")
def test_make_tarball_latin1(self):
"""
Mirror test_make_tarball, except filename contains latin characters.
"""
self.test_make_tarball('årchiv') # note this isn't a real word
@pytest.mark.skipif("not can_fs_encode('のアーカイブ')")
def test_make_tarball_extended(self):
"""
Mirror test_make_tarball, except filename contains extended
characters outside the latin charset.
"""
self.test_make_tarball('のアーカイブ') # japanese for archive
def _make_tarball(self, tmpdir, target_name, suffix, **kwargs):
tmpdir2 = self.mkdtemp()
if same_drive(tmpdir, tmpdir2):
pytest.skip("source and target should be on same drive")
base_name = os.path.join(tmpdir2, target_name)
# working with relative paths to avoid tar warnings
with path.Path(tmpdir):
make_tarball(splitdrive(base_name)[1], 'dist', **kwargs)
# check if the compressed tarball was created
tarball = base_name + suffix
assert os.path.exists(tarball)
assert self._tarinfo(tarball) == self._created_files
def _tarinfo(self, path):
tar = tarfile.open(path)
try:
names = tar.getnames()
names.sort()
return names
finally:
tar.close()
_zip_created_files = [
'dist/',
'dist/file1',
'dist/file2',
'dist/sub/',
'dist/sub/file3',
'dist/sub2/',
]
_created_files = [p.rstrip('/') for p in _zip_created_files]
def _create_files(self):
# creating something to tar
tmpdir = self.mkdtemp()
dist = os.path.join(tmpdir, 'dist')
os.mkdir(dist)
self.write_file([dist, 'file1'], 'xxx')
self.write_file([dist, 'file2'], 'xxx')
os.mkdir(os.path.join(dist, 'sub'))
self.write_file([dist, 'sub', 'file3'], 'xxx')
os.mkdir(os.path.join(dist, 'sub2'))
return tmpdir
@pytest.mark.usefixtures('needs_zlib')
@pytest.mark.skipif("not (shutil.which('tar') and shutil.which('gzip'))")
def test_tarfile_vs_tar(self):
tmpdir = self._create_files()
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(base_name, 'dist')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
assert os.path.exists(tarball)
# now create another tarball using `tar`
tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
gzip_cmd = ['gzip', '-f', '-9', 'archive2.tar']
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
spawn(tar_cmd)
spawn(gzip_cmd)
finally:
os.chdir(old_dir)
assert os.path.exists(tarball2)
# let's compare both tarballs
assert self._tarinfo(tarball) == self._created_files
assert self._tarinfo(tarball2) == self._created_files
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(base_name, 'dist', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
assert os.path.exists(tarball)
# now for a dry_run
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(base_name, 'dist', compress=None, dry_run=True)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
assert os.path.exists(tarball)
@pytest.mark.usefixtures('needs_zlib')
def test_make_zipfile(self):
zipfile = pytest.importorskip('zipfile')
# creating something to tar
tmpdir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
with path.Path(tmpdir):
make_zipfile(base_name, 'dist')
# check if the compressed tarball was created
tarball = base_name + '.zip'
assert os.path.exists(tarball)
with zipfile.ZipFile(tarball) as zf:
assert sorted(zf.namelist()) == self._zip_created_files
def test_make_zipfile_no_zlib(self):
zipfile = pytest.importorskip('zipfile')
patch(self, archive_util.zipfile, 'zlib', None) # force zlib ImportError
called = []
zipfile_class = zipfile.ZipFile
def fake_zipfile(*a, **kw):
if kw.get('compression', None) == zipfile.ZIP_STORED:
called.append((a, kw))
return zipfile_class(*a, **kw)
patch(self, archive_util.zipfile, 'ZipFile', fake_zipfile)
# create something to tar and compress
tmpdir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
with path.Path(tmpdir):
make_zipfile(base_name, 'dist')
tarball = base_name + '.zip'
assert called == [((tarball, "w"), {'compression': zipfile.ZIP_STORED})]
assert os.path.exists(tarball)
with zipfile.ZipFile(tarball) as zf:
assert sorted(zf.namelist()) == self._zip_created_files
def test_check_archive_formats(self):
assert check_archive_formats(['gztar', 'xxx', 'zip']) == 'xxx'
assert (
check_archive_formats(['gztar', 'bztar', 'xztar', 'ztar', 'tar', 'zip'])
is None
)
def test_make_archive(self):
tmpdir = self.mkdtemp()
base_name = os.path.join(tmpdir, 'archive')
with pytest.raises(ValueError):
make_archive(base_name, 'xxx')
def test_make_archive_cwd(self):
current_dir = os.getcwd()
def _breaks(*args, **kw):
raise RuntimeError()
ARCHIVE_FORMATS['xxx'] = (_breaks, [], 'xxx file')
try:
try:
make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
except Exception:
pass
assert os.getcwd() == current_dir
finally:
ARCHIVE_FORMATS.pop('xxx')
def test_make_archive_tar(self):
base_dir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
res = make_archive(base_name, 'tar', base_dir, 'dist')
assert os.path.exists(res)
assert os.path.basename(res) == 'archive.tar'
assert self._tarinfo(res) == self._created_files
@pytest.mark.usefixtures('needs_zlib')
def test_make_archive_gztar(self):
base_dir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
res = make_archive(base_name, 'gztar', base_dir, 'dist')
assert os.path.exists(res)
assert os.path.basename(res) == 'archive.tar.gz'
assert self._tarinfo(res) == self._created_files
def test_make_archive_bztar(self):
pytest.importorskip('bz2')
base_dir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
res = make_archive(base_name, 'bztar', base_dir, 'dist')
assert os.path.exists(res)
assert os.path.basename(res) == 'archive.tar.bz2'
assert self._tarinfo(res) == self._created_files
def test_make_archive_xztar(self):
pytest.importorskip('lzma')
base_dir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
res = make_archive(base_name, 'xztar', base_dir, 'dist')
assert os.path.exists(res)
assert os.path.basename(res) == 'archive.tar.xz'
assert self._tarinfo(res) == self._created_files
def test_make_archive_owner_group(self):
# testing make_archive with owner and group, with various combinations
# this works even if there's not gid/uid support
if UID_0_SUPPORT:
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
else:
group = owner = 'root'
base_dir = self._create_files()
root_dir = self.mkdtemp()
base_name = os.path.join(self.mkdtemp(), 'archive')
res = make_archive(
base_name, 'zip', root_dir, base_dir, owner=owner, group=group
)
assert os.path.exists(res)
res = make_archive(base_name, 'zip', root_dir, base_dir)
assert os.path.exists(res)
res = make_archive(
base_name, 'tar', root_dir, base_dir, owner=owner, group=group
)
assert os.path.exists(res)
res = make_archive(
base_name, 'tar', root_dir, base_dir, owner='kjhkjhkjg', group='oihohoh'
)
assert os.path.exists(res)
@pytest.mark.usefixtures('needs_zlib')
@require_unix_id
@require_uid_0
def test_tarfile_root_owner(self):
tmpdir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
try:
archive_name = make_tarball(
base_name, 'dist', compress=None, owner=owner, group=group
)
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
assert os.path.exists(archive_name)
# now checks the rights
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
assert member.uid == 0
assert member.gid == 0
finally:
archive.close()
| ArchiveUtilTestCase |
python | streamlit__streamlit | lib/tests/streamlit/dataframe_util_test.py | {
"start": 31846,
"end": 35868
} | class ____(DeltaGeneratorTestCase):
"""Test class for the automatic arrow truncation feature."""
@patch_config_options(
{"server.maxMessageSize": 3, "server.enableArrowTruncation": True}
)
def test_truncate_larger_table(self):
"""Test that `_maybe_truncate_table` correctly truncates a table that is
larger than the max message size.
"""
col_data = list(range(200000))
original_df = pd.DataFrame(
{
"col 1": col_data,
"col 2": col_data,
"col 3": col_data,
}
)
original_table = pa.Table.from_pandas(original_df)
truncated_table = dataframe_util._maybe_truncate_table(
pa.Table.from_pandas(original_df)
)
# Should be under the configured 3MB limit:
assert truncated_table.nbytes < 3 * int(1000000.0)
# Test that the table should have been truncated
assert truncated_table.nbytes < original_table.nbytes
assert truncated_table.num_rows < original_table.num_rows
# Test that it prints out a caption test:
el = self.get_delta_from_queue().new_element
assert "due to data size limitations" in el.markdown.body
assert el.markdown.is_caption
@patch_config_options(
{"server.maxMessageSize": 3, "server.enableArrowTruncation": True}
)
def test_dont_truncate_smaller_table(self):
"""Test that `_maybe_truncate_table` doesn't truncate smaller tables."""
col_data = list(range(100))
original_df = pd.DataFrame(
{
"col 1": col_data,
"col 2": col_data,
"col 3": col_data,
}
)
original_table = pa.Table.from_pandas(original_df)
truncated_table = dataframe_util._maybe_truncate_table(
pa.Table.from_pandas(original_df)
)
# Test that the tables are the same:
assert truncated_table.nbytes == original_table.nbytes
assert truncated_table.num_rows == original_table.num_rows
@patch_config_options({"server.enableArrowTruncation": False})
def test_dont_truncate_if_deactivated(self):
"""Test that `_maybe_truncate_table` doesn't do anything
when server.enableArrowTruncation is decatived
"""
col_data = list(range(200000))
original_df = pd.DataFrame(
{
"col 1": col_data,
"col 2": col_data,
"col 3": col_data,
}
)
original_table = pa.Table.from_pandas(original_df)
truncated_table = dataframe_util._maybe_truncate_table(
pa.Table.from_pandas(original_df)
)
# Test that the tables are the same:
assert truncated_table.nbytes == original_table.nbytes
assert truncated_table.num_rows == original_table.num_rows
@patch_config_options(
{"server.maxMessageSize": 3, "server.enableArrowTruncation": True}
)
def test_st_dataframe_truncates_data(self):
"""Test that `st.dataframe` truncates the data if server.enableArrowTruncation==True."""
col_data = list(range(200000))
original_df = pd.DataFrame(
{
"col 1": col_data,
"col 2": col_data,
"col 3": col_data,
}
)
original_table = pa.Table.from_pandas(original_df)
st.dataframe(original_df)
el = self.get_delta_from_queue().new_element
# Test that table bytes should be smaller than the full table
assert len(el.arrow_data_frame.data) < original_table.nbytes
# Should be under the configured 3MB limit:
assert len(el.arrow_data_frame.data) < 3 * int(1000000.0)
# Test that it prints out a caption test:
el = self.get_delta_from_queue(-2).new_element
assert "due to data size limitations" in el.markdown.body
assert el.markdown.is_caption
| TestArrowTruncation |
python | scikit-image__scikit-image | tests/skimage/feature/test_texture.py | {
"start": 265,
"end": 9188
} | class ____:
def setup_method(self):
self.image = np.array(
[[0, 0, 1, 1], [0, 0, 1, 1], [0, 2, 2, 2], [2, 2, 3, 3]], dtype=np.uint8
)
@run_in_parallel()
def test_output_angles(self):
result = graycomatrix(
self.image, [1], [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4], 4
)
assert result.shape == (4, 4, 1, 4)
expected1 = np.array(
[[2, 2, 1, 0], [0, 2, 0, 0], [0, 0, 3, 1], [0, 0, 0, 1]], dtype=np.uint32
)
np.testing.assert_array_equal(result[:, :, 0, 0], expected1)
expected2 = np.array(
[[1, 1, 3, 0], [0, 1, 1, 0], [0, 0, 0, 2], [0, 0, 0, 0]], dtype=np.uint32
)
np.testing.assert_array_equal(result[:, :, 0, 1], expected2)
expected3 = np.array(
[[3, 0, 2, 0], [0, 2, 2, 0], [0, 0, 1, 2], [0, 0, 0, 0]], dtype=np.uint32
)
np.testing.assert_array_equal(result[:, :, 0, 2], expected3)
expected4 = np.array(
[[2, 0, 0, 0], [1, 1, 2, 0], [0, 0, 2, 1], [0, 0, 0, 0]], dtype=np.uint32
)
np.testing.assert_array_equal(result[:, :, 0, 3], expected4)
def test_output_symmetric_1(self):
result = graycomatrix(self.image, [1], [np.pi / 2], 4, symmetric=True)
assert result.shape == (4, 4, 1, 1)
expected = np.array(
[[6, 0, 2, 0], [0, 4, 2, 0], [2, 2, 2, 2], [0, 0, 2, 0]], dtype=np.uint32
)
np.testing.assert_array_equal(result[:, :, 0, 0], expected)
def test_error_raise_float(self):
for dtype in [float, np.double, np.float16, np.float32, np.float64]:
with pytest.raises(ValueError):
graycomatrix(self.image.astype(dtype), [1], [np.pi], 4)
def test_error_raise_int_types(self):
for dtype in [np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64]:
with pytest.raises(ValueError):
graycomatrix(self.image.astype(dtype), [1], [np.pi])
def test_error_raise_negative(self):
with pytest.raises(ValueError):
graycomatrix(self.image.astype(np.int16) - 1, [1], [np.pi], 4)
def test_error_raise_levels_smaller_max(self):
with pytest.raises(ValueError):
graycomatrix(self.image - 1, [1], [np.pi], 3)
def test_image_data_types(self):
for dtype in [np.uint16, np.uint32, np.uint64, np.int16, np.int32, np.int64]:
img = self.image.astype(dtype)
result = graycomatrix(img, [1], [np.pi / 2], 4, symmetric=True)
assert result.shape == (4, 4, 1, 1)
expected = np.array(
[[6, 0, 2, 0], [0, 4, 2, 0], [2, 2, 2, 2], [0, 0, 2, 0]],
dtype=np.uint32,
)
np.testing.assert_array_equal(result[:, :, 0, 0], expected)
return
def test_output_distance(self):
im = np.array(
[[0, 0, 0, 0], [1, 0, 0, 1], [2, 0, 0, 2], [3, 0, 0, 3]], dtype=np.uint8
)
result = graycomatrix(im, [3], [0], 4, symmetric=False)
expected = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=np.uint32
)
np.testing.assert_array_equal(result[:, :, 0, 0], expected)
def test_output_combo(self):
im = np.array([[0], [1], [2], [3]], dtype=np.uint8)
result = graycomatrix(im, [1, 2], [0, np.pi / 2], 4)
assert result.shape == (4, 4, 2, 2)
z = np.zeros((4, 4), dtype=np.uint32)
e1 = np.array(
[[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]], dtype=np.uint32
)
e2 = np.array(
[[0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=np.uint32
)
np.testing.assert_array_equal(result[:, :, 0, 0], z)
np.testing.assert_array_equal(result[:, :, 1, 0], z)
np.testing.assert_array_equal(result[:, :, 0, 1], e1)
np.testing.assert_array_equal(result[:, :, 1, 1], e2)
def test_output_empty(self):
result = graycomatrix(self.image, [10], [0], 4)
np.testing.assert_array_equal(
result[:, :, 0, 0], np.zeros((4, 4), dtype=np.uint32)
)
result = graycomatrix(self.image, [10], [0], 4, normed=True)
np.testing.assert_array_equal(
result[:, :, 0, 0], np.zeros((4, 4), dtype=np.uint32)
)
def test_normed_symmetric(self):
result = graycomatrix(
self.image, [1, 2, 3], [0, np.pi / 2, np.pi], 4, normed=True, symmetric=True
)
for d in range(result.shape[2]):
for a in range(result.shape[3]):
np.testing.assert_almost_equal(result[:, :, d, a].sum(), 1.0)
np.testing.assert_array_equal(
result[:, :, d, a], result[:, :, d, a].transpose()
)
def test_contrast(self):
result = graycomatrix(self.image, [1, 2], [0], 4, normed=True, symmetric=True)
result = np.round(result, 3)
contrast = graycoprops(result, 'contrast')
np.testing.assert_almost_equal(contrast[0, 0], 0.585, decimal=3)
def test_dissimilarity(self):
result = graycomatrix(
self.image, [1], [0, np.pi / 2], 4, normed=True, symmetric=True
)
result = np.round(result, 3)
dissimilarity = graycoprops(result, 'dissimilarity')
np.testing.assert_almost_equal(dissimilarity[0, 0], 0.418, decimal=3)
def test_dissimilarity_2(self):
result = graycomatrix(
self.image, [1, 3], [np.pi / 2], 4, normed=True, symmetric=True
)
result = np.round(result, 3)
dissimilarity = graycoprops(result, 'dissimilarity')[0, 0]
np.testing.assert_almost_equal(dissimilarity, 0.665, decimal=3)
def test_non_normalized_glcm(self):
img = (np.random.random((100, 100)) * 8).astype(np.uint8)
p = graycomatrix(img, [1, 2, 4, 5], [0, 0.25, 1, 1.5], levels=8)
np.testing.assert_(np.max(graycoprops(p, 'correlation')) < 1.0)
def test_invalid_property(self):
result = graycomatrix(self.image, [1], [0], 4)
with pytest.raises(ValueError):
graycoprops(result, 'ABC')
def test_homogeneity(self):
result = graycomatrix(self.image, [1], [0, 6], 4, normed=True, symmetric=True)
homogeneity = graycoprops(result, 'homogeneity')[0, 0]
np.testing.assert_almost_equal(homogeneity, 0.80833333)
def test_energy(self):
result = graycomatrix(self.image, [1], [0, 4], 4, normed=True, symmetric=True)
energy = graycoprops(result, 'energy')[0, 0]
np.testing.assert_almost_equal(energy, 0.38188131)
def test_correlation(self):
result = graycomatrix(self.image, [1, 2], [0], 4, normed=True, symmetric=True)
energy = graycoprops(result, 'correlation')
np.testing.assert_almost_equal(energy[0, 0], 0.71953255)
np.testing.assert_almost_equal(energy[1, 0], 0.41176470)
def test_mean(self):
result = graycomatrix(
self.image, [1], [0, np.pi / 2], 4, normed=True, symmetric=True
)
mean = graycoprops(result, 'mean')[0, 0]
# Reference value was calculated by hand and is close to original source if precision 3 is used.
np.testing.assert_almost_equal(mean, 1.29166667)
def test_variance(self):
result = graycomatrix(
self.image, [1], [0, np.pi / 2], 4, normed=True, symmetric=True
)
variance = graycoprops(result, 'variance')[0, 0]
# Reference value was calculated by hand and is close to original source if precision 3 is used.
np.testing.assert_almost_equal(variance, 1.03993055)
def test_std(self):
result = graycomatrix(
self.image, [1], [0, np.pi / 2], 4, normed=True, symmetric=True
)
std = graycoprops(result, 'std')[0, 0]
# Reference value was calculated by hand and is close to original source if precision 3 is used.
np.testing.assert_almost_equal(std, 1.01976985)
def test_entropy(self):
result = graycomatrix(
self.image, [1], [0, np.pi / 2], 4, normed=True, symmetric=True
)
entropy = graycoprops(result, 'entropy')[0, 0]
# Reference value was calculated by hand and is close to original source if precision 3 is used.
np.testing.assert_almost_equal(entropy, 2.09472904)
def test_uniform_properties(self):
im = np.ones((4, 4), dtype=np.uint8)
result = graycomatrix(
im, [1, 2, 8], [0, np.pi / 2], 4, normed=True, symmetric=True
)
for prop in [
'contrast',
'dissimilarity',
'homogeneity',
'energy',
'correlation',
'ASM',
'mean',
'variance',
'std',
'entropy',
]:
graycoprops(result, prop)
| TestGLCM |
python | Netflix__metaflow | metaflow/metadata_provider/heartbeat.py | {
"start": 423,
"end": 3127
} | class ____(object):
def __init__(self):
self.headers = SERVICE_HEADERS
self.req_thread = Thread(target=self._ping)
self.req_thread.daemon = True
self.default_frequency_secs = 10
self.hb_url = None
def process_message(self, msg):
# type: (Message) -> None
if msg.msg_type == MessageTypes.SHUTDOWN:
self._shutdown()
if not self.req_thread.is_alive():
# set post url
self.hb_url = msg.payload[HB_URL_KEY]
# start thread
self.req_thread.start()
@classmethod
def get_worker(cls):
return cls
def _ping(self):
retry_counter = 0
while True:
try:
frequency_secs = self._heartbeat()
if frequency_secs is None or frequency_secs <= 0:
frequency_secs = self.default_frequency_secs
time.sleep(frequency_secs)
retry_counter = 0
except HeartBeatException as e:
print(e)
retry_counter = retry_counter + 1
time.sleep(1.5**retry_counter)
def _heartbeat(self):
if self.hb_url is not None:
try:
response = requests.post(
url=self.hb_url, data="{}", headers=self.headers.copy()
)
except requests.exceptions.ConnectionError as e:
raise HeartBeatException(
"HeartBeat request (%s) failed" " (ConnectionError)" % (self.hb_url)
)
except requests.exceptions.Timeout as e:
raise HeartBeatException(
"HeartBeat request (%s) failed" " (Timeout)" % (self.hb_url)
)
except requests.exceptions.RequestException as e:
raise HeartBeatException(
"HeartBeat request (%s) failed"
" (RequestException) %s" % (self.hb_url, str(e))
)
# Unfortunately, response.json() returns a string that we need
# to cast to json; however when the request encounters an error
# the return type is a json blob :/
if response.status_code == 200:
return json.loads(response.json()).get("wait_time_in_seconds")
else:
raise HeartBeatException(
"HeartBeat request (%s) failed"
" (code %s): %s"
% (self.hb_url, response.status_code, response.text)
)
return None
def _shutdown(self):
# attempts sending one last heartbeat
self._heartbeat()
| MetadataHeartBeat |
python | pymupdf__PyMuPDF | src/__init__.py | {
"start": 304032,
"end": 305597
} | class ____:
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __init__(self, path, options=''):
if isinstance( path, str):
pass
elif hasattr( path, 'absolute'):
path = str( path)
elif hasattr( path, 'name'):
path = path.name
if isinstance( path, str):
self.this = mupdf.FzDocumentWriter( path, options, mupdf.FzDocumentWriter.PathType_PDF)
else:
# Need to keep the Python JM_new_output_fileptr_Output instance
# alive for the lifetime of this DocumentWriter, otherwise calls
# to virtual methods implemented in Python fail. So we make it a
# member of this DocumentWriter.
#
# Unrelated to this, mupdf.FzDocumentWriter will set
# self._out.m_internal to null because ownership is passed in.
#
out = JM_new_output_fileptr( path)
self.this = mupdf.FzDocumentWriter( out, options, mupdf.FzDocumentWriter.OutputType_PDF)
assert out.m_internal_value() == 0
assert hasattr( self.this, '_out')
def begin_page( self, mediabox):
mediabox2 = JM_rect_from_py(mediabox)
device = mupdf.fz_begin_page( self.this, mediabox2)
device_wrapper = DeviceWrapper( device)
return device_wrapper
def close( self):
mupdf.fz_close_document_writer( self.this)
def end_page( self):
mupdf.fz_end_page( self.this)
| DocumentWriter |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/utils/config.py | {
"start": 14620,
"end": 18125
} | class ____:
"""
Configuration for the default memory resource.
Parameters
----------
qualname
The fully qualified name of the memory resource class to use.
options
This can be either a dictionary representing the options to pass
to the memory resource class, or, a dictionary representing a
nested memory resource configuration. The presence of "qualname"
field indicates a nested memory resource configuration.
Examples
--------
Create a memory resource config for a single memory resource:
>>> MemoryResourceConfig(
... qualname="rmm.mr.CudaAsyncMemoryResource",
... options={"initial_pool_size": 100},
... )
Create a memory resource config for a nested memory resource configuration:
>>> MemoryResourceConfig(
... qualname="rmm.mr.PrefetchResourceAdaptor",
... options={
... "upstream_mr": {
... "qualname": "rmm.mr.PoolMemoryResource",
... "options": {
... "upstream_mr": {
... "qualname": "rmm.mr.ManagedMemoryResource",
... },
... "initial_pool_size": 256,
... },
... }
... },
... )
"""
_env_prefix = "CUDF_POLARS__MEMORY_RESOURCE_CONFIG"
qualname: str = dataclasses.field(
default_factory=_make_default_factory(
f"{_env_prefix}__QUALNAME",
str,
# We shouldn't reach here if qualname isn't set in the environment.
default=None, # type: ignore[assignment]
)
)
options: dict[str, Any] | None = dataclasses.field(
default_factory=_make_default_factory(
f"{_env_prefix}__OPTIONS",
json.loads,
default=None,
)
)
def __post_init__(self) -> None:
if self.qualname.count(".") < 1:
raise ValueError(
f"MemoryResourceConfig.qualname '{self.qualname}' must be a fully qualified name to a class, including the module name."
)
def create_memory_resource(self) -> rmm.mr.DeviceMemoryResource:
"""Create a memory resource from the configuration."""
def create_mr(
qualname: str, options: dict[str, Any] | None
) -> rmm.mr.DeviceMemoryResource:
module_name, class_name = qualname.rsplit(".", 1)
module = importlib.import_module(module_name)
cls = getattr(module, class_name)
return cls(**options or {})
def process_options(opts: dict[str, Any] | None) -> dict[str, Any]:
if opts is None:
return {}
processed = {}
for key, value in opts.items():
if isinstance(value, dict) and "qualname" in value:
# This is a nested memory resource config
nested_qualname = value["qualname"]
nested_options = process_options(value.get("options"))
processed[key] = create_mr(nested_qualname, nested_options)
else:
processed[key] = value
return processed
# Create the top-level memory resource
return create_mr(self.qualname, process_options(self.options))
def __hash__(self) -> int:
return hash((self.qualname, json.dumps(self.options, sort_keys=True)))
@dataclasses.dataclass(frozen=True, eq=True)
| MemoryResourceConfig |
python | realpython__materials | duck-typing-python/vehicles_abc.py | {
"start": 515,
"end": 711
} | class ____(Vehicle):
def start(self):
print("The car is starting")
def stop(self):
print("The car is stopping")
def drive(self):
print("The car is driving")
| Car |
python | google__jax | jax/experimental/colocated_python/serialization.py | {
"start": 1112,
"end": 12301
} | class ____(threading.local):
"""Tracks repeated objects within a single `_serialize()` or `_deserialize()`.
It is common for `_serialize(x)` to be called with `x` being a nested
container or capturing other objects in a closure, with many references
pointing to only a few unique objects. The logic below
(`_make_reduce_func_with_common_obj`) avoids duplicating object serialization
by reducing a reference handle instead of the full object when an equal object
is repeatedly seen.
"""
def __init__(self):
# Map from a common object key to its ID. Any objects with a matching key
# will use the common object ID instead of the full object during
# serialization.
self.common_obj_index: dict[Any, int] | None = None
# Common object that has been reconstructed when their key was seen for the
# first time during deserialization.
self.common_obj: list[Any] | None = None
_common_obj_state = _CommonObjectState()
def _wrapped_unreduce_func_with_new_common_obj(
common_obj_id, unreduce_func, unreduce_args):
"""Unreduces a new common object."""
assert _common_obj_state.common_obj is not None
obj = unreduce_func(*unreduce_args)
assert len(_common_obj_state.common_obj) == common_obj_id, (
f"Expected {common_obj_id} common objects, but got"
f" {len(_common_obj_state.common_obj)}. This can happen if serialization"
" and deserialization of objects happened in different orders."
)
_common_obj_state.common_obj.append(obj)
return obj
def _wrapped_unreduce_func_with_existing_common_obj(common_obj_id):
"""Unreduces a common object that has already appeared."""
assert _common_obj_state.common_obj is not None
return _common_obj_state.common_obj[common_obj_id]
def _make_reduce_func_with_common_obj(
reduce_func: Callable[[Any], tuple[Any, Any]],
) -> Callable[[Any], tuple[Any, Any]]:
"""Wraps a reduce function to serialize a common object once."""
@functools.wraps(reduce_func)
def wrapped_reduce_func(obj):
assert _common_obj_state.common_obj_index is not None
common_obj_id = _common_obj_state.common_obj_index.get(obj)
if common_obj_id is None:
unreduced_func, unreduced_args = reduce_func(obj)
common_obj_id = len(_common_obj_state.common_obj_index)
_common_obj_state.common_obj_index[obj] = common_obj_id
return _wrapped_unreduce_func_with_new_common_obj, (
common_obj_id, unreduced_func, unreduced_args)
else:
return _wrapped_unreduce_func_with_existing_common_obj, (common_obj_id,)
return wrapped_reduce_func
@jax._src.util.cache(max_size=None)
def _get_cpu_device_map() -> dict[int, jax.Device]:
"""Returns a map from a device id to a matching device."""
cpu_device_map: dict[int, jax.Device] = {}
# TODO(hyeontaek): We should look up CPU devices for a specific CPU backend.
# When deserializing a device on the controller, the backend should be the one
# associated with colocated_python. When deserializing on the colocated_python
# executor, it should be the CPU backend visible to the user function running
# under colocated_python.
# Look for CPU devices in the default backend.
for d in xb.local_devices()[0].client._get_all_devices(): # pylint: disable=protected-access
if d.device_kind == "cpu":
if d.id in cpu_device_map:
raise ValueError(
f"Multiple CPU devices with id {d.id} found:"
f" {cpu_device_map[d.id]} and {d}"
)
cpu_device_map[d.id] = d
if cpu_device_map:
return cpu_device_map
# Fall back to searching CPU devices in all backends.
for backend in xb.backends().values():
for d in backend._get_all_devices(): # pylint: disable=protected-access
if d.device_kind == "cpu":
if d.id in cpu_device_map:
raise ValueError(
f"Multiple CPU devices with id {d.id} found:"
f" {cpu_device_map[d.id]} and {d}"
)
cpu_device_map[d.id] = d
return cpu_device_map
def _lookup_cpu_device(
cpu_device_map: dict[int, jax.Device], device_id: int
) -> jax.Device:
"""Returns a CPU device with the given device ID."""
d = cpu_device_map.get(device_id)
if d is None:
raise ValueError(
f"Invalid device ID {device_id}. Device list must contain only CPU"
" devices."
)
return d
@_make_reduce_func_with_common_obj
def _reduce_mesh(
mesh: jax.sharding.Mesh,
) -> tuple[Callable[..., jax.sharding.Mesh], Any]:
mesh_device_ids = np.vectorize(lambda d: d.id, otypes=[int])(mesh.devices)
return _unreduce_mesh, (mesh_device_ids, mesh.axis_names, mesh.axis_types)
def _unreduce_mesh(
mesh_device_ids: np.ndarray, axis_names: Any, axis_types: Any
) -> jax.sharding.Mesh:
cpu_device_map = _get_cpu_device_map()
mesh_devices = np.vectorize(
functools.partial(_lookup_cpu_device, cpu_device_map)
)(mesh_device_ids)
return jax.sharding.Mesh(mesh_devices, axis_names, axis_types)
@_make_reduce_func_with_common_obj
def _reduce_named_sharding(
sharding: jax.sharding.NamedSharding,
) -> tuple[Callable[..., jax.sharding.NamedSharding], Any]:
assert isinstance(sharding.mesh, jax.sharding.Mesh), "Only Mesh is supported"
reduced_mesh = _reduce_mesh(sharding.mesh)
return _unreduce_named_sharding, (
reduced_mesh, sharding.spec, sharding.memory_kind)
def _unreduce_named_sharding(reduced_mesh, spec, memory_kind):
mesh = reduced_mesh[0](*reduced_mesh[1])
return jax.NamedSharding(mesh, spec, memory_kind=memory_kind)
@_make_reduce_func_with_common_obj
def _reduce_device_list(
device_list: DeviceList,
) -> tuple[Callable[..., DeviceList], Any]:
device_ids = [d.id for d in device_list]
return _unreduce_device_list, (device_ids,)
def _unreduce_device_list(device_ids: Sequence[int]) -> DeviceList:
cpu_device_map = _get_cpu_device_map()
devices = np.vectorize(functools.partial(_lookup_cpu_device, cpu_device_map))(
device_ids)
return DeviceList(tuple(devices))
@_make_reduce_func_with_common_obj
def _reduce_single_device_sharding(
sharding: jax.sharding.SingleDeviceSharding,
) -> tuple[Callable[..., jax.sharding.SingleDeviceSharding], Any]:
return _unreduce_single_device_sharding, (
sharding.device_set.pop().id,
sharding.memory_kind)
def _unreduce_single_device_sharding(
device_id: int, memory_kind: str | None
) -> jax.sharding.SingleDeviceSharding:
cpu_device_map = _get_cpu_device_map()
device = _lookup_cpu_device(cpu_device_map, device_id)
return jax.sharding.SingleDeviceSharding(device, memory_kind=memory_kind)
def _serialize(obj: Any) -> bytes:
"""Serializes callables and input/output spec objects.
DO NOT USE THIS FUNCTION EXCEPT FOR THE INTERNAL IMPLEMENTATION OF
colocated_python.
This module contains utility functions used internally for implementiong
`colocated_python` when it ships callables and input/output specs through
IFRT. The pickled data is produced and consumed in an ephermeral fashion
without any persistence, and it does not expect any version compatibility
(which cloudpickle does not guarantee). Furthermore, serialization and
deserialization is expected to be done on machine(s) that are controlled by a
single tenant, which allows unpickling done during deserialization to be
trusted.
Raises:
ModuleNotFoundError: If cloudpickle is not available.
"""
if cloudpickle is None:
raise ModuleNotFoundError('No module named "cloudpickle"')
class _CustomPickler(cloudpickle.Pickler):
dispatch_table = collections.ChainMap(
{jax.sharding.Mesh: _reduce_mesh},
{jax.sharding.NamedSharding: _reduce_named_sharding},
{DeviceList: _reduce_device_list},
{jax.sharding.SingleDeviceSharding: _reduce_single_device_sharding},
cloudpickle.CloudPickler.dispatch_table, # pylint: disable=attribute-error
)
dispatch = dispatch_table
assert _common_obj_state.common_obj_index is None, (
"_serialize() expects no recursive calls")
_common_obj_state.common_obj_index = {}
try:
with io.BytesIO() as file:
_CustomPickler(file).dump(obj)
return file.getvalue()
finally:
_common_obj_state.common_obj_index = None
def _deserialize(serialized: bytes) -> Any:
"""Deserializes callables and input/output spec objects.
DO NOT USE THIS FUNCTION EXCEPT FOR THE INTERNAL IMPLEMENTATION OF
colocated_python. See serialize() for details.
Raises:
ModuleNotFoundError: If cloudpickle is not available.
"""
if cloudpickle is None:
raise ModuleNotFoundError('No module named "cloudpickle"')
assert _common_obj_state.common_obj is None, (
"_deserialize() expects no recursive calls")
_common_obj_state.common_obj = []
try:
return cloudpickle.loads(serialized)
finally:
_common_obj_state.common_obj = None
def _make_specs_for_serialized_specs(
devices: DeviceList,
) -> api.ShapeDtypeStruct:
"""Makes output specs for serialized specs."""
mesh = jax.sharding.Mesh(tuple(devices), ("x",))
replicated_sharding = jax.sharding.NamedSharding(
mesh, jax.sharding.PartitionSpec()
)
return api.ShapeDtypeStruct(
shape=(), dtype=np.dtypes.StringDType(), sharding=replicated_sharding # type: ignore
)
def _serialize_specs(
specs_treedef: tree_util.PyTreeDef,
specs_leaves: tuple[api.ShapeDtypeStruct, ...],
devices: DeviceList,
) -> jax.Array:
"""Serializes the output specs into a jax.Array of string type.
DO NOT USE THIS FUNCTION EXCEPT FOR THE INTERNAL IMPLEMENTATION OF
colocated_python. See serialize() for details.
"""
if not hasattr(np.dtypes, "StringDType"):
raise TypeError(
"Serializing Colocated Python requires StringDType. Please use"
" numpy to 2.0.0 or later, or explicitly provide an output spec"
" function."
)
s_bytes = _serialize((specs_treedef, specs_leaves))
s_str = base64.b64encode(s_bytes).decode("ascii")
s_np_array = np.array(s_str, dtype=np.dtypes.StringDType()) # type: ignore
# TODO(jmudigonda): Revisit this when JAX supports HLO sharding for making
# jax.Array via make_array_from_single_device_arrays. We should then use a
# sharding that spans all the execution devices - not just the addressable
# ones.
addressable_devices = devices.addressable_device_list
mesh = jax.sharding.Mesh(tuple(addressable_devices), ("x",))
replicated_sharding = jax.sharding.NamedSharding(
mesh, jax.sharding.PartitionSpec()
)
out_arrays = [
jax.device_put(s_np_array, device) for device in addressable_devices
]
return jax.make_array_from_single_device_arrays(
arrays=out_arrays,
sharding=replicated_sharding,
shape=(),
)
def _deserialize_specs(
serialized_specs: jax.Array,
) -> tuple[tree_util.PyTreeDef, tuple[api.ShapeDtypeStruct, ...]]:
"""Deserializes the specs from the serialized specs.
DO NOT USE THIS FUNCTION EXCEPT FOR THE INTERNAL IMPLEMENTATION OF
colocated_python. See serialize() for details.
"""
data_array = serialized_specs.addressable_shards[0].data
data = base64.b64decode(data_array.item().encode("ascii"))
return _deserialize(data)
| _CommonObjectState |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 714703,
"end": 715266
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("actor", "created_at", "database_id", "deleted_comment_author")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
deleted_comment_author = sgqlc.types.Field(
Actor, graphql_name="deletedCommentAuthor"
)
| CommentDeletedEvent |
python | coleifer__peewee | playhouse/sqlite_udf.py | {
"start": 10860,
"end": 11317
} | class ____(object):
name = 'range'
def __init__(self):
self._min = self._max = None
def step(self, value):
if self._min is None or value < self._min:
self._min = value
if self._max is None or value > self._max:
self._max = value
def finalize(self):
if self._min is not None and self._max is not None:
return self._max - self._min
return None
@aggregate(MATH)
| _range |
python | pypa__pip | src/pip/_vendor/urllib3/exceptions.py | {
"start": 2194,
"end": 2510
} | class ____(RequestError):
"""Raised when an existing pool gets a request for a foreign host."""
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
| HostChangedError |
python | celery__celery | t/unit/events/test_state.py | {
"start": 356,
"end": 1180
} | class ____:
def __init__(self, state):
self.state = state
self.rewind()
self.setup()
self.current_clock = 0
def setup(self):
pass
def next_event(self):
ev = self.events[next(self.position)]
ev['local_received'] = ev['timestamp']
try:
self.current_clock = ev['clock']
except KeyError:
ev['clock'] = self.current_clock = self.current_clock + 1
return ev
def __iter__(self):
return self
def __next__(self):
try:
self.state.event(self.next_event())
except IndexError:
raise StopIteration()
next = __next__
def rewind(self):
self.position = count(0)
return self
def play(self):
for _ in self:
pass
| replay |
python | jmcnamara__XlsxWriter | xlsxwriter/test/table/test_table06.py | {
"start": 481,
"end": 2114
} | class ____(unittest.TestCase):
"""
Test assembling a complete Table file.
"""
def test_assemble_xml_file(self):
"""Test writing a table"""
self.maxDiff = None
worksheet = Worksheet()
worksheet.worksheet_meta = WorksheetMeta()
worksheet.str_table = SharedStringTable()
# Set the table properties.
worksheet.add_table(
"C3:F13",
{"columns": [{"header": "Foo"}, {"header": ""}, {}, {"header": "Baz"}]},
)
worksheet._prepare_tables(1, {})
fh = StringIO()
table = Table()
table._set_filehandle(fh)
table._set_properties(worksheet.tables[0])
table._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" id="1" name="Table1" displayName="Table1" ref="C3:F13" totalsRowShown="0">
<autoFilter ref="C3:F13"/>
<tableColumns count="4">
<tableColumn id="1" name="Foo"/>
<tableColumn id="2" name="Column2"/>
<tableColumn id="3" name="Column3"/>
<tableColumn id="4" name="Baz"/>
</tableColumns>
<tableStyleInfo name="TableStyleMedium9" showFirstColumn="0" showLastColumn="0" showRowStripes="1" showColumnStripes="0"/>
</table>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleTable |
python | fastapi__sqlmodel | sqlmodel/sql/_expression_select_cls.py | {
"start": 1483,
"end": 1546
} | class ____(SelectBase[_T]):
inherit_cache = True
| SelectOfScalar |
python | django__django | django/core/files/storage/memory.py | {
"start": 977,
"end": 2322
} | class ____(ContentFile, TimingMixin):
"""
Helper class representing an in-memory file node.
Handle unicode/bytes conversion during I/O operations and record creation,
modification, and access times.
"""
def __init__(self, content="", name=None):
super().__init__(content, name)
self._content_type = type(content)
self._initialize_times()
def open(self, mode):
self._convert_stream_content(mode)
self._update_accessed_time()
return super().open(mode)
def write(self, data):
super().write(data)
self._update_modified_time()
def _initialize_stream(self):
"""Initialize underlying stream according to the content type."""
self.file = io.BytesIO() if self._content_type == bytes else io.StringIO()
def _convert_stream_content(self, mode):
"""Convert actual file content according to the opening mode."""
new_content_type = bytes if "b" in mode else str
# No conversion needed.
if self._content_type == new_content_type:
return
content = self.file.getvalue()
content = content.encode() if isinstance(content, str) else content.decode()
self._content_type = new_content_type
self._initialize_stream()
self.file.write(content)
| InMemoryFileNode |
python | tensorflow__tensorflow | tensorflow/python/framework/composite_tensor_test.py | {
"start": 2935,
"end": 2967
} | class ____(CTSpec):
pass
| CTSpec2 |
python | django__django | tests/admin_views/tests.py | {
"start": 80812,
"end": 85510
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
def setUp(self):
self.client.force_login(self.superuser)
def test_save_as_duplication(self):
"""'save as' creates a new person"""
post_data = {"_saveasnew": "", "name": "John M", "gender": 1, "age": 42}
response = self.client.post(
reverse("admin:admin_views_person_change", args=(self.per1.pk,)), post_data
)
self.assertEqual(len(Person.objects.filter(name="John M")), 1)
self.assertEqual(len(Person.objects.filter(id=self.per1.pk)), 1)
new_person = Person.objects.latest("id")
self.assertRedirects(
response, reverse("admin:admin_views_person_change", args=(new_person.pk,))
)
def test_save_as_continue_false(self):
"""
Saving a new object using "Save as new" redirects to the changelist
instead of the change view when ModelAdmin.save_as_continue=False.
"""
post_data = {"_saveasnew": "", "name": "John M", "gender": 1, "age": 42}
url = reverse(
"admin:admin_views_person_change",
args=(self.per1.pk,),
current_app=site2.name,
)
response = self.client.post(url, post_data)
self.assertEqual(len(Person.objects.filter(name="John M")), 1)
self.assertEqual(len(Person.objects.filter(id=self.per1.pk)), 1)
self.assertRedirects(
response,
reverse("admin:admin_views_person_changelist", current_app=site2.name),
)
def test_save_as_new_with_validation_errors(self):
"""
When you click "Save as new" and have a validation error,
you only see the "Save as new" button and not the other save buttons,
and that only the "Save as" button is visible.
"""
response = self.client.post(
reverse("admin:admin_views_person_change", args=(self.per1.pk,)),
{
"_saveasnew": "",
"gender": "invalid",
"_addanother": "fail",
},
)
self.assertContains(response, "Please correct the errors below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
def test_save_as_new_with_validation_errors_with_inlines(self):
parent = Parent.objects.create(name="Father")
child = Child.objects.create(parent=parent, name="Child")
response = self.client.post(
reverse("admin:admin_views_parent_change", args=(parent.pk,)),
{
"_saveasnew": "Save as new",
"child_set-0-parent": parent.pk,
"child_set-0-id": child.pk,
"child_set-0-name": "Child",
"child_set-INITIAL_FORMS": 1,
"child_set-MAX_NUM_FORMS": 1000,
"child_set-MIN_NUM_FORMS": 0,
"child_set-TOTAL_FORMS": 4,
"name": "_invalid",
},
)
self.assertContains(response, "Please correct the error below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
def test_save_as_new_with_inlines_with_validation_errors(self):
parent = Parent.objects.create(name="Father")
child = Child.objects.create(parent=parent, name="Child")
response = self.client.post(
reverse("admin:admin_views_parent_change", args=(parent.pk,)),
{
"_saveasnew": "Save as new",
"child_set-0-parent": parent.pk,
"child_set-0-id": child.pk,
"child_set-0-name": "_invalid",
"child_set-INITIAL_FORMS": 1,
"child_set-MAX_NUM_FORMS": 1000,
"child_set-MIN_NUM_FORMS": 0,
"child_set-TOTAL_FORMS": 4,
"name": "Father",
},
)
self.assertContains(response, "Please correct the error below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
@override_settings(ROOT_URLCONF="admin_views.urls")
| SaveAsTests |
python | huggingface__transformers | src/transformers/models/electra/modeling_electra.py | {
"start": 46824,
"end": 49626
} | class ____(ElectraPreTrainedModel):
config_class = ElectraConfig
base_model_prefix = "electra"
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.electra = ElectraModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
discriminator_hidden_states = self.electra(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
sequence_output = discriminator_hidden_states[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=discriminator_hidden_states.hidden_states,
attentions=discriminator_hidden_states.attentions,
)
@auto_docstring
| ElectraForQuestionAnswering |
python | getsentry__sentry | src/sentry/workflow_engine/typings/notification_action.py | {
"start": 20944,
"end": 21144
} | class ____(DataBlob):
"""
SlackDataBlob is a specific type that represents the data blob for a Slack notification action.
"""
tags: str = ""
notes: str = ""
@dataclass
| SlackDataBlob |
python | facebook__pyre-check | client/commands/statistics.py | {
"start": 3417,
"end": 3569
} | class ____(SuppressionCountCollector):
def __init__(self) -> None:
super().__init__(r".*# *pyre-fixme(\[(\d* *,? *)*\])?")
| FixmeCountCollector |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/natural_language.py | {
"start": 10919,
"end": 13753
} | class ____(GoogleCloudBaseOperator):
"""
Classifies a document into categories.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageClassifyTextOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START natural_language_classify_text_template_fields]
template_fields: Sequence[str] = (
"document",
"gcp_conn_id",
"impersonation_chain",
)
# [END natural_language_classify_text_template_fields]
def __init__(
self,
*,
document: dict | Document,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.document = document
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudNaturalLanguageHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Start text classify")
response = hook.classify_text(
document=self.document, retry=self.retry, timeout=self.timeout, metadata=self.metadata
)
self.log.info("Finished text classify")
return MessageToDict(response._pb)
| CloudNaturalLanguageClassifyTextOperator |
python | keras-team__keras | examples/demo_custom_jax_workflow.py | {
"start": 278,
"end": 858
} | class ____(layers.Layer):
def __init__(self, units, name=None):
super().__init__(name=name)
self.units = units
def build(self, input_shape):
input_dim = input_shape[-1]
w_shape = (input_dim, self.units)
w_value = initializers.GlorotUniform()(w_shape)
self.w = backend.Variable(w_value, name="kernel")
b_shape = (self.units,)
b_value = initializers.Zeros()(b_shape)
self.b = backend.Variable(b_value, name="bias")
def call(self, inputs):
return ops.matmul(inputs, self.w) + self.b
| MyDense |
python | apache__airflow | providers/redis/tests/unit/redis/operators/test_redis_publish.py | {
"start": 1066,
"end": 1854
} | class ____:
def setup_method(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
self.mock_context = MagicMock()
@patch("airflow.providers.redis.hooks.redis.RedisHook.get_conn")
def test_execute_operator(self, mock_redis_conn):
operator = RedisPublishOperator(
task_id="test_task",
dag=self.dag,
channel="test_channel",
message="test_message",
redis_conn_id="redis_default",
)
operator.execute(self.mock_context)
mock_redis_conn.assert_called_once_with()
mock_redis_conn().publish.assert_called_once_with(channel="test_channel", message="test_message")
| TestRedisPublishOperator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict24.py | {
"start": 132,
"end": 199
} | class ____(TypedDict):
a: str
T1 = TypeVar("T1", bound=TD1)
| TD1 |
python | rapidsai__cudf | python/cudf/cudf_pandas_tests/third_party_integration_tests/tests/test_pytorch.py | {
"start": 983,
"end": 1874
} | class ____(torch.utils.data.Dataset):
def __init__(self, x1, x2, y):
self.x1 = x1
self.x2 = x2
self.y = y
def __getitem__(self, idx):
x1 = self.x1[idx]
x2 = self.x2[idx]
y = self.y[idx]
return (x1, x2), y
def __len__(self):
return len(self.x1)
def test_dataloader_auto_batching(data):
x1, x2, y = (pd.Series(i) for i in data)
dataset = Dataset(x1, x2, y)
# default collate_fn
dataloader = torch.utils.data.DataLoader(dataset, batch_size=10)
(x1, x2), y = next(iter(dataloader))
return x1, x2, y
def test_dataloader_manual_batching(data):
x1, x2, y = (pd.Series(i) for i in data)
dataset = Dataset(x1, x2, y)
# default collate_fn
dataloader = torch.utils.data.DataLoader(dataset, batch_size=None)
(x1, x2), y = next(iter(dataloader))
return x1, x2, y
| Dataset |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_flakiness.py | {
"start": 1018,
"end": 4827
} | class ____(Exception):
pass
def test_fails_only_once_is_flaky():
first_call = True
@given(integers())
def rude(x):
nonlocal first_call
if first_call:
first_call = False
raise Nope
with pytest.raises(FlakyFailure, match="Falsified on the first call but") as e:
rude()
exceptions = e.value.exceptions
assert len(exceptions) == 1
assert isinstance(exceptions[0], Nope)
def test_fails_differently_is_flaky():
call_count = 0
class DifferentNope(Exception):
pass
@given(integers())
@settings(database=None)
def rude(x):
nonlocal call_count
if x == 0:
call_count += 1
if call_count > 1:
raise Nope
else:
raise DifferentNope
with pytest.raises(FlakyFailure, match="Inconsistent results from replaying") as e:
rude()
exceptions = e.value.exceptions
assert len(exceptions) == 2
assert set(map(type, exceptions)) == {Nope, DifferentNope}
@skipif_threading # executing into global scope
@pytest.mark.skipif(sys.version_info < (3, 11), reason="except* syntax")
def test_exceptiongroup_wrapped_naked_exception_is_flaky():
# Defer parsing until runtime, as "except*" is syntax error pre 3.11
rude_def = """
first_call = True
def rude_fn(x):
global first_call
if first_call:
first_call = False
try:
raise Nope
except* Nope:
raise
"""
exec(rude_def, globals())
rude = given(integers())(rude_fn) # noqa: F821 # defined by exec()
with pytest.raises(FlakyFailure, match="Falsified on the first call but") as e:
rude()
exceptions = e.value.exceptions
assert list(map(type, exceptions)) == [ExceptionGroup]
assert list(map(type, exceptions[0].exceptions)) == [Nope]
def test_gives_flaky_error_if_assumption_is_flaky():
seen = set()
@given(integers())
@settings(verbosity=Verbosity.quiet, database=None)
def oops(s):
assume(s not in seen)
seen.add(s)
raise AssertionError
with pytest.raises(FlakyFailure, match="Inconsistent results from replaying") as e:
oops()
exceptions = e.value.exceptions
assert len(exceptions) == 2
assert isinstance(exceptions[0], AssertionError)
assert isinstance(exceptions[1], UnsatisfiedAssumption)
def test_flaky_with_context_when_fails_only_under_tracing(monkeypatch):
# make anything fail under tracing
monkeypatch.setattr(Tracer, "can_trace", staticmethod(lambda: True))
monkeypatch.setattr(Tracer, "__enter__", lambda *_: 1 / 0)
# ensure tracing is always entered inside _execute_once_for_engine
monkeypatch.setattr(StateForActualGivenExecution, "_should_trace", lambda _: True)
@given(integers())
def test(x):
pass
with pytest.raises(
FlakyFailure, match="failed on the first run but now succeeds"
) as e:
test()
exceptions = e.value.exceptions
assert len(exceptions) == 1
assert isinstance(exceptions[0], ZeroDivisionError)
@xfail_on_crosshair(Why.symbolic_outside_context)
def test_does_not_attempt_to_shrink_flaky_errors():
values = []
@settings(database=None)
@given(integers())
def test(x):
values.append(x)
assert len(values) != 1
with pytest.raises(FlakyFailure):
test()
# We try a total of ten calls in the generation phase, each usually a
# unique value, looking briefly (and unsuccessfully) for another bug.
assert 1 < len(set(values)) <= MIN_TEST_CALLS
# We don't try any new values while shrinking, just execute the test
# twice more (to check for flakiness and to raise the bug to the user).
assert set(values) == set(values[:-2])
| Nope |
python | getsentry__sentry | src/sentry/uptime/endpoints/serializers.py | {
"start": 1031,
"end": 1275
} | class ____(TypedDict):
url: str
method: str
body: str | None
headers: Sequence[tuple[str, str]]
intervalSeconds: int
timeoutMs: int
traceSampling: bool
@register(UptimeSubscription)
| UptimeSubscriptionSerializerResponse |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/generator1.py | {
"start": 2894,
"end": 3011
} | class ____(Protocol):
def __next__(self, /) -> int: ...
def generator15() -> IntIterator:
yield 0
| IntIterator |
python | PrefectHQ__prefect | tests/server/models/test_block_types.py | {
"start": 453,
"end": 2636
} | class ____:
async def test_create_block_type(self, session):
block_type = await models.block_types.create_block_type(
session=session,
block_type=schemas.actions.BlockTypeCreate(
name="x",
slug="x",
logo_url="http://example.com/logo.png",
documentation_url="http://example.com/documentation.html",
description="A block, verily",
code_example=CODE_EXAMPLE,
),
)
assert block_type.name == "x"
assert block_type.logo_url == "http://example.com/logo.png"
assert block_type.documentation_url == "http://example.com/documentation.html"
assert block_type.description == "A block, verily"
assert block_type.code_example == CODE_EXAMPLE
db_block_type = await models.block_types.read_block_type(
session=session, block_type_id=block_type.id
)
assert db_block_type.name == block_type.name
assert db_block_type.logo_url == block_type.logo_url
assert db_block_type.documentation_url == block_type.documentation_url
assert db_block_type.description == block_type.description
assert db_block_type.code_example == block_type.code_example
async def test_create_block_type_unique_slug(self, session):
await models.block_types.create_block_type(
session=session,
block_type=schemas.actions.BlockTypeCreate(name="x", slug="x"),
)
with pytest.raises(sa.exc.IntegrityError):
await models.block_types.create_block_type(
session=session,
block_type=schemas.actions.BlockTypeCreate(name="x2", slug="x"),
)
async def test_create_block_type_same_name_different_slug(self, session):
await models.block_types.create_block_type(
session=session,
block_type=schemas.actions.BlockTypeCreate(name="x", slug="x"),
)
await models.block_types.create_block_type(
session=session,
block_type=schemas.actions.BlockTypeCreate(name="x", slug="x2"),
)
| TestCreateBlockType |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-weaviate/unit_tests/destination_test.py | {
"start": 325,
"end": 3650
} | class ____(unittest.TestCase):
def setUp(self):
self.config = {
"processing": {"text_fields": ["str_col"], "metadata_fields": [], "chunk_size": 1000},
"embedding": {"mode": "openai", "openai_key": "mykey"},
"indexing": {"host": "https://my-cluster.weaviate.network", "auth": {"mode": "no_auth"}},
}
self.config_model = ConfigModel.parse_obj(self.config)
self.logger = logging.getLogger("airbyte")
@patch("destination_weaviate.destination.WeaviateIndexer")
@patch("destination_weaviate.destination.create_from_config")
def test_check(self, MockedEmbedder, MockedWeaviateIndexer):
mock_embedder = Mock()
mock_indexer = Mock()
MockedEmbedder.return_value = mock_embedder
MockedWeaviateIndexer.return_value = mock_indexer
mock_embedder.check.return_value = None
mock_indexer.check.return_value = None
destination = DestinationWeaviate()
result = destination.check(self.logger, self.config)
self.assertEqual(result.status, Status.SUCCEEDED)
mock_embedder.check.assert_called_once()
mock_indexer.check.assert_called_once()
@patch("destination_weaviate.destination.WeaviateIndexer")
@patch("destination_weaviate.destination.create_from_config")
def test_check_with_errors(self, MockedEmbedder, MockedWeaviateIndexer):
mock_embedder = Mock()
mock_indexer = Mock()
MockedEmbedder.return_value = mock_embedder
MockedWeaviateIndexer.return_value = mock_indexer
embedder_error_message = "Embedder Error"
indexer_error_message = "Indexer Error"
mock_embedder.check.return_value = embedder_error_message
mock_indexer.check.return_value = indexer_error_message
destination = DestinationWeaviate()
result = destination.check(self.logger, self.config)
self.assertEqual(result.status, Status.FAILED)
self.assertEqual(result.message, f"{embedder_error_message}\n{indexer_error_message}")
mock_embedder.check.assert_called_once()
mock_indexer.check.assert_called_once()
@patch("destination_weaviate.destination.Writer")
@patch("destination_weaviate.destination.WeaviateIndexer")
@patch("destination_weaviate.destination.create_from_config")
def test_write(self, MockedEmbedder, MockedWeaviateIndexer, MockedWriter):
mock_embedder = Mock()
mock_indexer = Mock()
MockedEmbedder.return_value = mock_embedder
mock_writer = Mock()
MockedWeaviateIndexer.return_value = mock_indexer
MockedWriter.return_value = mock_writer
mock_writer.write.return_value = []
configured_catalog = MagicMock()
input_messages = []
destination = DestinationWeaviate()
list(destination.write(self.config, configured_catalog, input_messages))
MockedWriter.assert_called_once_with(self.config_model.processing, mock_indexer, mock_embedder, batch_size=128, omit_raw_text=False)
mock_writer.write.assert_called_once_with(configured_catalog, input_messages)
def test_spec(self):
destination = DestinationWeaviate()
result = destination.spec()
self.assertIsInstance(result, ConnectorSpecification)
| TestDestinationWeaviate |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/inputs.py | {
"start": 13867,
"end": 16268
} | class ____(StepInputSource):
"""This step input source is configuration to be passed to a type loader.
A None node_handle implies the inputs were provided at the root graph level.
"""
node_handle: Optional[NodeHandle]
input_name: str
def get_associated_input_def(self, job_def: JobDefinition) -> InputDefinition:
"""Returns the InputDefinition along the potential composition InputMapping chain
that the config was provided at.
"""
if self.node_handle:
return job_def.get_node(self.node_handle).input_def_named(self.input_name)
else:
return job_def.graph.input_def_named(self.input_name)
def get_associated_config(self, resolved_run_config: ResolvedRunConfig):
"""Returns the config specified, potentially specified at any point along graph composition
including the root.
"""
if self.node_handle:
op_config = resolved_run_config.ops.get(str(self.node_handle))
return op_config.inputs.get(self.input_name) if op_config else None
else:
input_config = resolved_run_config.inputs
return input_config.get(self.input_name) if input_config else None
def load_input_object(
self,
step_context: "StepExecutionContext",
input_def: InputDefinition,
) -> Iterator[object]:
with user_code_error_boundary(
DagsterTypeLoadingError,
msg_fn=lambda: f'Error occurred while loading input "{self.input_name}" of step "{step_context.step.key}":',
log_manager=step_context.log,
):
dagster_type = self.get_associated_input_def(step_context.job_def).dagster_type
config_data = self.get_associated_config(step_context.resolved_run_config)
loader = check.not_none(dagster_type.loader)
yield loader.construct_from_config_value(
step_context.get_type_loader_context(), config_data
)
def required_resource_keys(
self, job_def: JobDefinition, op_handle: NodeHandle, op_input_name: str
) -> AbstractSet[str]:
dagster_type = self.get_associated_input_def(job_def).dagster_type
return dagster_type.loader.required_resource_keys() if dagster_type.loader else set()
@whitelist_for_serdes(storage_field_names={"node_handle": "solid_handle"})
@record
| FromConfig |
python | dask__distributed | distributed/diagnostics/plugin.py | {
"start": 25336,
"end": 26253
} | class ____(WorkerPlugin):
"""A WorkerPlugin to upload a local file to workers.
Parameters
----------
filepath: str
A path to the file (.py, egg, or zip) to upload
Examples
--------
>>> from distributed.diagnostics.plugin import UploadFile
>>> client.register_plugin(UploadFile("/path/to/file.py")) # doctest: +SKIP
"""
name = "upload_file"
def __init__(self, filepath: str, load: bool = True):
"""
Initialize the plugin by reading in the data from the given file.
"""
self.filename = os.path.basename(filepath)
self.load = load
with open(filepath, "rb") as f:
self.data = f.read()
async def setup(self, worker):
response = await worker.upload_file(
filename=self.filename, data=self.data, load=self.load
)
assert len(self.data) == response["nbytes"]
| UploadFile |
python | django__django | tests/model_package/models/article.py | {
"start": 103,
"end": 304
} | class ____(models.Model):
sites = models.ManyToManyField(Site)
headline = models.CharField(max_length=100)
publications = models.ManyToManyField("model_package.Publication", blank=True)
| Article |
python | fastai__fastai | fastai/layers.py | {
"start": 3386,
"end": 3565
} | class ____(Module):
"Reshape `x` to `size`"
def __init__(self, *size): self.size = size
def forward(self, x): return x.view(self.size)
# %% ../nbs/01_layers.ipynb 19
| View |
python | kamyu104__LeetCode-Solutions | Python/minimum-removals-to-balance-array.py | {
"start": 54,
"end": 429
} | class ____(object):
def minRemoval(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
nums.sort()
left = 0
for right in xrange(len(nums)):
if nums[left]*k < nums[right]:
left += 1
return left
# Time: O(nlogn)
# Space: O(1)
# sort, two pointers
| Solution |
python | kamyu104__LeetCode-Solutions | Python/remove-nth-node-from-end-of-list.py | {
"start": 280,
"end": 653
} | class ____(object):
# @return a ListNode
def removeNthFromEnd(self, head, n):
dummy = ListNode(-1)
dummy.next = head
slow, fast = dummy, dummy
for i in xrange(n):
fast = fast.next
while fast.next:
slow, fast = slow.next, fast.next
slow.next = slow.next.next
return dummy.next
| Solution |
python | PrefectHQ__prefect | tests/test_task_engine.py | {
"start": 44618,
"end": 56276
} | class ____:
async def test_sync_task_sets_start_time_on_running(
self, prefect_client, events_pipeline
):
@task
def foo():
return TaskRunContext.get().task_run.id
task_run_id = run_task_sync(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(task_run_id)
states = await prefect_client.read_task_run_states(task_run_id)
running = [state for state in states if state.type == StateType.RUNNING][0]
assert run.start_time
assert run.start_time == running.timestamp
async def test_async_task_sets_start_time_on_running(
self, prefect_client, events_pipeline
):
@task
async def foo():
return TaskRunContext.get().task_run.id
task_run_id = await run_task_async(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(task_run_id)
states = await prefect_client.read_task_run_states(task_run_id)
running = [state for state in states if state.type == StateType.RUNNING][0]
assert run.start_time
assert run.start_time == running.timestamp
async def test_sync_task_sets_end_time_on_completed(
self, prefect_client, events_pipeline
):
@task
def foo():
return TaskRunContext.get().task_run.id
task_run_id = run_task_sync(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(task_run_id)
states = await prefect_client.read_task_run_states(task_run_id)
running = [state for state in states if state.type == StateType.RUNNING][0]
completed = [state for state in states if state.type == StateType.COMPLETED][0]
assert run.end_time
assert run.end_time == completed.timestamp
assert run.total_run_time == completed.timestamp - running.timestamp
async def test_async_task_sets_end_time_on_completed(
self, prefect_client, events_pipeline
):
@task
async def foo():
return TaskRunContext.get().task_run.id
task_run_id = await run_task_async(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(task_run_id)
states = await prefect_client.read_task_run_states(task_run_id)
running = [state for state in states if state.type == StateType.RUNNING][0]
completed = [state for state in states if state.type == StateType.COMPLETED][0]
assert run.end_time
assert run.end_time == completed.timestamp
assert run.total_run_time == completed.timestamp - running.timestamp
async def test_sync_task_sets_end_time_on_failed(
self, prefect_client, events_pipeline
):
ID = None
@task
def foo():
nonlocal ID
ID = TaskRunContext.get().task_run.id
raise ValueError("failure!!!")
with pytest.raises(ValueError):
run_task_sync(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(ID)
states = await prefect_client.read_task_run_states(ID)
running = [state for state in states if state.type == StateType.RUNNING][0]
failed = [state for state in states if state.type == StateType.FAILED][0]
assert run.end_time
assert run.end_time == failed.timestamp
assert run.total_run_time == failed.timestamp - running.timestamp
async def test_async_task_sets_end_time_on_failed(
self, prefect_client, events_pipeline
):
ID = None
@task
async def foo():
nonlocal ID
ID = TaskRunContext.get().task_run.id
raise ValueError("failure!!!")
with pytest.raises(ValueError):
await run_task_async(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(ID)
states = await prefect_client.read_task_run_states(ID)
running = [state for state in states if state.type == StateType.RUNNING][0]
failed = [state for state in states if state.type == StateType.FAILED][0]
assert run.end_time
assert run.end_time == failed.timestamp
assert run.total_run_time == failed.timestamp - running.timestamp
async def test_sync_task_sets_end_time_on_failed_timedout(
self, prefect_client, events_pipeline
):
ID = None
@task
def foo():
nonlocal ID
ID = TaskRunContext.get().task_run.id
raise TimeoutError
with pytest.raises(TimeoutError):
run_task_sync(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(ID)
states = await prefect_client.read_task_run_states(ID)
running = [state for state in states if state.type == StateType.RUNNING][0]
failed = [state for state in states if state.type == StateType.FAILED][0]
assert failed.name == "TimedOut"
assert run.end_time
assert run.end_time == failed.timestamp
assert run.total_run_time == failed.timestamp - running.timestamp
async def test_async_task_sets_end_time_on_failed_timedout(
self, prefect_client, events_pipeline
):
ID = None
@task
async def foo():
nonlocal ID
ID = TaskRunContext.get().task_run.id
raise TimeoutError
with pytest.raises(TimeoutError):
await run_task_async(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(ID)
states = await prefect_client.read_task_run_states(ID)
running = [state for state in states if state.type == StateType.RUNNING][0]
failed = [state for state in states if state.type == StateType.FAILED][0]
assert failed.name == "TimedOut"
assert run.end_time
assert run.end_time == failed.timestamp
assert run.total_run_time == failed.timestamp - running.timestamp
async def test_sync_task_sets_end_time_on_crashed(
self, prefect_client, events_pipeline
):
ID = None
@task
def foo():
nonlocal ID
ID = TaskRunContext.get().task_run.id
raise SystemExit
with pytest.raises(SystemExit):
run_task_sync(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(ID)
states = await prefect_client.read_task_run_states(ID)
running = [state for state in states if state.type == StateType.RUNNING][0]
crashed = [state for state in states if state.type == StateType.CRASHED][0]
assert run.end_time
assert run.end_time == crashed.timestamp
assert run.total_run_time == crashed.timestamp - running.timestamp
async def test_async_task_sets_end_time_on_crashed(
self, prefect_client, events_pipeline
):
ID = None
@task
async def foo():
nonlocal ID
ID = TaskRunContext.get().task_run.id
raise SystemExit
with pytest.raises(SystemExit):
await run_task_async(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(ID)
states = await prefect_client.read_task_run_states(ID)
running = [state for state in states if state.type == StateType.RUNNING][0]
crashed = [state for state in states if state.type == StateType.CRASHED][0]
assert run.end_time
assert run.end_time == crashed.timestamp
assert run.total_run_time == crashed.timestamp - running.timestamp
async def test_sync_task_does_not_set_end_time_on_crash_pre_runnning(
self, monkeypatch, prefect_client, events_pipeline
):
monkeypatch.setattr(
SyncTaskRunEngine, "begin_run", MagicMock(side_effect=SystemExit)
)
@task
def my_task():
pass
with pytest.raises(SystemExit):
my_task()
await events_pipeline.process_events()
task_runs = await prefect_client.read_task_runs()
assert len(task_runs) == 1
run = task_runs[0]
assert run.end_time is None
async def test_async_task_does_not_set_end_time_on_crash_pre_running(
self, monkeypatch, prefect_client, events_pipeline
):
monkeypatch.setattr(
AsyncTaskRunEngine, "begin_run", MagicMock(side_effect=SystemExit)
)
@task
async def my_task():
pass
with pytest.raises(SystemExit):
await my_task()
await events_pipeline.process_events()
task_runs = await prefect_client.read_task_runs()
assert len(task_runs) == 1
run = task_runs[0]
assert run.end_time is None
async def test_sync_task_sets_expected_start_time_on_pending(
self, prefect_client, events_pipeline
):
@task
def foo():
return TaskRunContext.get().task_run.id
task_run_id = run_task_sync(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(task_run_id)
states = await prefect_client.read_task_run_states(task_run_id)
pending = [state for state in states if state.type == StateType.PENDING][0]
assert run.expected_start_time
assert run.expected_start_time == pending.timestamp
async def test_async_task_sets_expected_start_time_on_pending(
self, prefect_client, events_pipeline
):
@task
async def foo():
return TaskRunContext.get().task_run.id
task_run_id = await run_task_async(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(task_run_id)
states = await prefect_client.read_task_run_states(task_run_id)
pending = [state for state in states if state.type == StateType.PENDING][0]
assert run.expected_start_time
assert run.expected_start_time == pending.timestamp
async def test_sync_tasks_have_correct_total_run_time_with_retries(
self, prefect_client: PrefectClient, events_pipeline
):
"""
Regression test for https://github.com/PrefectHQ/prefect/issues/18751
"""
attempts = 0
@task(retries=1)
async def foo():
time.sleep(1)
nonlocal attempts
if attempts < 1:
attempts += 1
raise RuntimeError()
return TaskRunContext.get().task_run.id
task_run_id = await foo()
await events_pipeline.process_events()
run = await prefect_client.read_task_run(task_run_id)
assert run.total_run_time > timedelta(seconds=2)
async def test_async_tasks_have_correct_total_run_time_with_retries(
self, prefect_client: PrefectClient, events_pipeline
):
"""
Regression test for https://github.com/PrefectHQ/prefect/issues/18751
"""
attempts = 0
@task(retries=1)
def foo():
time.sleep(1)
nonlocal attempts
if attempts < 1:
attempts += 1
raise RuntimeError()
return TaskRunContext.get().task_run.id
task_run_id = foo()
await events_pipeline.process_events()
run = await prefect_client.read_task_run(task_run_id)
assert run.total_run_time > timedelta(seconds=2)
| TestTaskTimeTracking |
python | huggingface__transformers | src/transformers/models/cvt/modeling_cvt.py | {
"start": 5623,
"end": 6203
} | class ____(nn.Module):
def __init__(self, embed_dim, kernel_size, padding, stride, projection_method="dw_bn"):
super().__init__()
if projection_method == "dw_bn":
self.convolution_projection = CvtSelfAttentionConvProjection(embed_dim, kernel_size, padding, stride)
self.linear_projection = CvtSelfAttentionLinearProjection()
def forward(self, hidden_state):
hidden_state = self.convolution_projection(hidden_state)
hidden_state = self.linear_projection(hidden_state)
return hidden_state
| CvtSelfAttentionProjection |
python | dabeaz-course__practical-python | Solutions/7_11/portfolio.py | {
"start": 47,
"end": 1208
} | class ____:
def __init__(self):
self._holdings = []
@classmethod
def from_csv(cls, lines, **opts):
self = cls()
portdicts = fileparse.parse_csv(lines,
select=['name','shares','price'],
types=[str,int,float],
**opts)
for d in portdicts:
self.append(stock.Stock(**d))
return self
def append(self, holding):
self._holdings.append(holding)
def __iter__(self):
return self._holdings.__iter__()
def __len__(self):
return len(self._holdings)
def __getitem__(self, index):
return self._holdings[index]
def __contains__(self, name):
return any(s.name == name for s in self._holdings)
@property
def total_cost(self):
return sum(s.shares * s.price for s in self._holdings)
def tabulate_shares(self):
from collections import Counter
total_shares = Counter()
for s in self._holdings:
total_shares[s.name] += s.shares
return total_shares
| Portfolio |
python | kamyu104__LeetCode-Solutions | Python/minimum-size-subarray-in-infinite-array.py | {
"start": 801,
"end": 1464
} | class ____(object):
def minSizeSubarray(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
INF = float("inf")
q, target = divmod(target, sum(nums))
if not target:
return q*len(nums)
result = INF
lookup = {0:-1}
prefix = 0
for right in xrange((len(nums)-1)+(len(nums)-1)):
prefix += nums[right%len(nums)]
if prefix-target in lookup:
result = min(result, right-lookup[prefix-target])
lookup[prefix] = right
return result+q*len(nums) if result != INF else -1
| Solution2 |
python | doocs__leetcode | solution/2700-2799/2730.Find the Longest Semi-Repetitive Substring/Solution2.py | {
"start": 0,
"end": 290
} | class ____:
def longestSemiRepetitiveSubstring(self, s: str) -> int:
n = len(s)
cnt = l = 0
for i in range(1, n):
cnt += s[i] == s[i - 1]
if cnt > 1:
cnt -= s[l] == s[l + 1]
l += 1
return n - l
| Solution |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 389422,
"end": 399565
} | class ____:
    """ Tests __array_finalize__ """
    def test_receives_base(self):
        # gh-11237: by the time __array_finalize__ runs, ``base`` must
        # already be set on the freshly created view.
        class SavesBase(np.ndarray):
            def __array_finalize__(self, obj):
                self.saved_base = self.base
        a = np.array(1).view(SavesBase)
        assert_(a.saved_base is a.base)
    def test_bad_finalize1(self):
        # A non-callable __array_finalize__ attribute is rejected before
        # the property getter ever runs.
        class BadAttributeArray(np.ndarray):
            @property
            def __array_finalize__(self):
                raise RuntimeError("boohoo!")
        with pytest.raises(TypeError, match="not callable"):
            np.arange(10).view(BadAttributeArray)
    def test_bad_finalize2(self):
        # Wrong arity (missing the ``obj`` parameter) surfaces as a TypeError.
        class BadAttributeArray(np.ndarray):
            def __array_finalize__(self):
                raise RuntimeError("boohoo!")
        with pytest.raises(TypeError, match="takes 1 positional"):
            np.arange(10).view(BadAttributeArray)
    def test_bad_finalize3(self):
        # An exception raised inside __array_finalize__ propagates unchanged.
        class BadAttributeArray(np.ndarray):
            def __array_finalize__(self, obj):
                raise RuntimeError("boohoo!")
        with pytest.raises(RuntimeError, match="boohoo!"):
            np.arange(10).view(BadAttributeArray)
    @pytest.mark.thread_unsafe(reason="calls gc.collect()")
    def test_lifetime_on_error(self):
        # gh-11237: even when __array_finalize__ raises, reference counts
        # must remain consistent — the half-built subarray must neither leak
        # nor prematurely release objects owned by the source array.
        class RaisesInFinalize(np.ndarray):
            def __array_finalize__(self, obj):
                # crash, but keep this object alive
                raise Exception(self)
        # a plain object can't be weakref'd
        class Dummy:
            pass
        # get a weak reference to an object within an array
        obj_arr = np.array(Dummy())
        obj_ref = weakref.ref(obj_arr[()])
        # get an array that crashed in __array_finalize__
        with assert_raises(Exception) as e:
            obj_arr.view(RaisesInFinalize)
        obj_subarray = e.exception.args[0]
        del e
        assert_(isinstance(obj_subarray, RaisesInFinalize))
        # reference should still be held by obj_arr
        break_cycles()
        assert_(obj_ref() is not None, "object should not already be dead")
        del obj_arr
        break_cycles()
        assert_(obj_ref() is not None, "obj_arr should not hold the last reference")
        del obj_subarray
        break_cycles()
        assert_(obj_ref() is None, "no references should remain")
    def test_can_use_super(self):
        # super().__array_finalize__ must be callable and return None.
        class SuperFinalize(np.ndarray):
            def __array_finalize__(self, obj):
                self.saved_result = super().__array_finalize__(obj)
        a = np.array(1).view(SuperFinalize)
        assert_(a.saved_result is None)
def test_orderconverter_with_nonASCII_unicode_ordering():
    """A non-ASCII ``order`` spec must raise ValueError, not crash (gh-7475)."""
    arr = np.arange(5)
    with assert_raises(ValueError):
        arr.flatten(order='\xe2')
def test_equal_override():
    """gh-9153: structured-array ``==``/``!=`` must respect overrides on the
    other operand, both via ``__array_priority__`` and via
    ``__array_ufunc__ = None``, regardless of operand order."""
    class AlwaysStringy:
        def __eq__(self, other):
            return "eq"

        def __ne__(self, other):
            return "ne"

    class ViaPriority(AlwaysStringy):
        __array_priority__ = 10000

    class ViaUfuncOptOut(AlwaysStringy):
        __array_ufunc__ = None

    structured = np.array([(0, 1), (2, 3)], dtype='i4,i4')
    for override_cls in (ViaPriority, ViaUfuncOptOut):
        other = override_cls()
        assert_equal(other == structured, 'eq')
        assert_equal(structured == other, 'eq')
        assert_equal(other != structured, 'ne')
        assert_equal(structured != other, 'ne')
@pytest.mark.parametrize("op", [operator.eq, operator.ne])
@pytest.mark.parametrize(["dt1", "dt2"], [
    ([("f", "i")], [("f", "i")]),  # structured comparison (successful)
    ("M8", "d"),  # impossible comparison: result is all True or False
    ("d", "d"),  # valid comparison
])
def test_equal_subclass_no_override(op, dt1, dt2):
    """An ndarray subclass without comparison overrides must come back from
    every comparison code-path, with __array_wrap__ invoked once per
    operand ordering."""
    # Test how the three different possible code-paths deal with subclasses
    class MyArr(np.ndarray):
        called_wrap = 0
        def __array_wrap__(self, new, context=None, return_scalar=False):
            type(self).called_wrap += 1
            return super().__array_wrap__(new, context, return_scalar)
    numpy_arr = np.zeros(5, dtype=dt1)
    my_arr = np.zeros(5, dtype=dt2).view(MyArr)
    # The subclass result type wins regardless of which operand is first.
    assert type(op(numpy_arr, my_arr)) is MyArr
    assert type(op(my_arr, numpy_arr)) is MyArr
    # We expect 2 calls (more if there were more fields):
    assert MyArr.called_wrap == 2
@pytest.mark.parametrize(["dt1", "dt2"], [
    ("M8[ns]", "d"),
    ("M8[s]", "l"),
    ("m8[ns]", "d"),
    # Missing: ("m8[ns]", "l") as timedelta currently promotes ints
    ("M8[s]", "m8[s]"),
    ("S5", "U5"),
    # Structured/void dtypes have explicit paths not tested here.
])
def test_no_loop_gives_all_true_or_false(dt1, dt2):
    """When no comparison loop exists for a dtype pair, ``==`` is all-False
    and ``!=`` is all-True, while broadcasting rules still apply to shape."""
    # Make sure they broadcast to test result shape, use random values, since
    # the actual value should be ignored
    arr1 = np.random.randint(5, size=100).astype(dt1)
    arr2 = np.random.randint(5, size=99)[:, np.newaxis].astype(dt2)
    res = arr1 == arr2
    assert res.shape == (99, 100)
    assert res.dtype == bool
    assert not res.any()
    res = arr1 != arr2
    assert res.shape == (99, 100)
    assert res.dtype == bool
    assert res.all()
    # incompatible shapes raise though
    arr2 = np.random.randint(5, size=99).astype(dt2)
    with pytest.raises(ValueError):
        arr1 == arr2
    with pytest.raises(ValueError):
        arr1 != arr2
    # Basic test with another operation:
    with pytest.raises(np._core._exceptions._UFuncNoLoopError):
        arr1 > arr2
@pytest.mark.parametrize("op", [
    operator.eq, operator.ne, operator.le, operator.lt, operator.ge,
    operator.gt])
def test_comparisons_forwards_error(op):
    """An error raised while converting the other operand via ``__array__``
    must propagate out of every rich-comparison operator, in either order."""
    class NotArray:
        def __array__(self, dtype=None, copy=None):
            raise TypeError("run you fools")
    with pytest.raises(TypeError, match="run you fools"):
        op(np.arange(2), NotArray())
    with pytest.raises(TypeError, match="run you fools"):
        op(NotArray(), np.arange(2))
def test_richcompare_scalar_boolean_singleton_return():
# These are currently guaranteed to be the boolean numpy singletons
assert (np.array(0) == "a") is np.bool_(False)
assert (np.array(0) != "a") is np.bool_(True)
assert (np.int16(0) == "a") is np.bool_(False)
assert (np.int16(0) != "a") is np.bool_(True)
@pytest.mark.parametrize("op", [
    operator.eq, operator.ne, operator.le, operator.lt, operator.ge,
    operator.gt])
def test_ragged_comparison_fails(op):
    """Object arrays holding arrays compare elementwise; the inner
    multi-element result cannot be coerced to one bool, so it raises."""
    # This needs to convert the internal array to True/False, which fails:
    a = np.array([1, np.array([1, 2, 3])], dtype=object)
    b = np.array([1, np.array([1, 2, 3])], dtype=object)
    with pytest.raises(ValueError, match="The truth value.*ambiguous"):
        op(a, b)
@pytest.mark.parametrize(
    ["fun", "npfun"],
    [
        (_multiarray_tests.npy_cabs, np.absolute),
        (_multiarray_tests.npy_carg, np.angle)
    ]
)
@pytest.mark.parametrize("x", [1, np.inf, -np.inf, np.nan])
@pytest.mark.parametrize("y", [1, np.inf, -np.inf, np.nan])
@pytest.mark.parametrize("test_dtype", np.complexfloating.__subclasses__())
def test_npymath_complex(fun, npfun, x, y, test_dtype):
    # Smoketest npymath functions: compare the C wrappers against the public
    # numpy equivalents for every complex dtype and every finite/inf/nan
    # component combination.
    z = test_dtype(complex(x, y))
    with np.errstate(invalid='ignore'):
        # Fallback implementations may emit a warning for +-inf (see gh-24876):
        # RuntimeWarning: invalid value encountered in absolute
        got = fun(z)
        expected = npfun(z)
        assert_allclose(got, expected)
def test_npymath_real():
# Smoketest npymath functions
from numpy._core._multiarray_tests import (
npy_cosh,
npy_log10,
npy_sinh,
npy_tan,
npy_tanh,
)
funcs = {npy_log10: np.log10,
npy_cosh: np.cosh,
npy_sinh: np.sinh,
npy_tan: np.tan,
npy_tanh: np.tanh}
vals = (1, np.inf, -np.inf, np.nan)
types = (np.float32, np.float64, np.longdouble)
with np.errstate(all='ignore'):
for fun, npfun in funcs.items():
for x, t in itertools.product(vals, types):
z = t(x)
got = fun(z)
expected = npfun(z)
assert_allclose(got, expected)
def test_uintalignment_and_alignment():
    # alignment code needs to satisfy these requirements:
    #  1. numpy structs match C struct layout
    #  2. ufuncs/casting is safe wrt aligned access
    #  3. copy code is safe wrt "uint aligned" access
    #
    # Complex types are the main problem, whose alignment may not be the same
    # as their "uint alignment".
    #
    # This test might only fail on certain platforms, where uint64 alignment is
    # not equal to complex64 alignment. The second 2 tests will only fail
    # for DEBUG=1.
    d1 = np.dtype('u1,c8', align=True)
    d2 = np.dtype('u4,c8', align=True)
    d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True)
    # With align=True the complex field lands on an aligned offset; the
    # packed 'u1,c8' layout deliberately does not.
    assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True)
    assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True)
    assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False)
    # check that C struct matches numpy struct size
    s = _multiarray_tests.get_struct_alignments()
    for d, (alignment, size) in zip([d1, d2, d3], s):
        assert_equal(d.alignment, alignment)
        assert_equal(d.itemsize, size)
    # check that ufuncs don't complain in debug mode
    # (this is probably OK if the aligned flag is true above)
    src = np.zeros((2, 2), dtype=d1)['f1']  # 4-byte aligned, often
    np.exp(src)  # assert fails?
    # check that copy code doesn't complain in debug mode
    dst = np.zeros((2, 2), dtype='c8')
    dst[:, 1] = src[:, 1]  # assert in lowlevel_strided_loops fails?
| TestArrayFinalize |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 5226,
"end": 6323
} | class ____(Operation):
    """Graph-op wrapper for the soft-shrink activation (see ``soft_shrink``)."""
    def __init__(self, threshold=0.5, *, name=None):
        super().__init__(name=name)
        # Shrinkage threshold: inputs in [-threshold, threshold] map to 0.
        self.threshold = threshold
    def call(self, x):
        # Eager path: delegate directly to the backend kernel.
        return backend.nn.soft_shrink(x, self.threshold)
    def compute_output_spec(self, x):
        # Elementwise op — output spec mirrors the input's shape and dtype.
        return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.soft_shrink", "keras.ops.nn.soft_shrink"])
def soft_shrink(x, threshold=0.5):
    """Soft Shrink activation function.
    It is defined as
    `f(x) = x - threshold` if `x > threshold`,
    `f(x) = x + threshold` if `x < -threshold`,
    `f(x) = 0` otherwise.
    Args:
        x: Input tensor.
        threshold: Threshold value. Defaults to 0.5.
    Returns:
        A tensor with the same shape as `x`.
    Example:
    >>> x = np.array([-1.0, 0.0, 1.0])
    >>> x_soft_shrink = keras.ops.soft_shrink(x)
    >>> print(x_soft_shrink)
    array([-0.5 0. 0.5], shape=(3,), dtype=float64)
    """
    # Symbolic inputs are routed through the Operation wrapper so the call
    # is recorded in the functional graph; eager tensors go straight to the
    # backend kernel.
    if any_symbolic_tensors((x,)):
        return SoftShrink(threshold).symbolic_call(x)
    return backend.nn.soft_shrink(x, threshold)
| SoftShrink |
python | pappasam__jedi-language-server | jedi_language_server/initialization_options.py | {
"start": 591,
"end": 730
} | class ____:
    # Default identifier inserted by the "extract variable" code action.
    name_extract_variable: str = "jls_extract_var"
    # Default identifier inserted by the "extract function" code action.
    name_extract_function: str = "jls_extract_def"
@light_dataclass
| CodeAction |
python | pytorch__pytorch | test/profiler/test_profiler_tree.py | {
"start": 2763,
"end": 8565
} | class ____:
    """Render a profiler's event tree as normalized, comparable text."""
    @staticmethod
    def test(f):
        """Mark unit test that will be using ProfilerTree to test traces.
        This decorator serves two purposes. First, it provides a method name
        that `format` can use to tell where the test runner (which is
        environment specific) ends and the unit test begins. Second, it runs
        the test with replicates and allows `assertTreesMatch` to adjust
        based on which replicate is running.
        """
        @functools.wraps(f)
        def begin_unit_test_marker(self, replicates=3):
            try:
                for i in range(replicates):
                    self.tree_replicate = i
                    out = f(self)
                    # A test may clear tree_replicate to stop replication early.
                    if self.tree_replicate is None:
                        break
                return out
            finally:
                delattr(self, "tree_replicate")
        return begin_unit_test_marker
    @classmethod
    def format(cls, profiler, indent: int = 0):
        # Flatten the event tree to (depth, name) pairs, applying the
        # per-function pruning policy from PRUNE_FUNCTIONS at each node.
        def flatten(nodes, depth=0, out=None):
            if out is None:
                out = []
            for node in nodes:
                cls.validate_node(node)
                name = cls.fmt_name(node.name)
                prune_level = PRUNE_FUNCTIONS.get(name.strip(), None)
                if prune_level is None:
                    out.append((depth, name))
                    flatten(node.children, depth + 1, out)
                elif prune_level == IGNORE:
                    # Drop this frame but keep its children at the same depth.
                    flatten(node.children, depth, out)
                elif prune_level == KEEP_NAME_AND_ELLIPSES:
                    out.append((depth, name))
                    if node.children:
                        out.append((depth + 1, "..."))
                elif prune_level == KEEP_ELLIPSES:
                    out.append((depth, "..."))
                else:
                    assert prune_level == PRUNE_ALL
            return out
        flat_nodes = flatten(profiler.kineto_results.experimental_event_tree())
        # Profiler inserts a `cudaDeviceSynchronize` at the end of profiling.
        # and may also insert 'Context Sync' CUDA synchronization event.
        if flat_nodes and flat_nodes[-2][1] == "cudaDeviceSynchronize":
            flat_nodes = flat_nodes[:-2]
        if flat_nodes and flat_nodes[-1][1] == "cudaDeviceSynchronize":
            flat_nodes = flat_nodes[:-1]
        # Profiler inserts a `hipDeviceSynchronize` at the end of profiling.
        if flat_nodes and flat_nodes[-1][1] == "hipDeviceSynchronize":
            flat_nodes = flat_nodes[:-1]
        # Trim everything above the marker frame added by the `test`
        # decorator so environment-specific runner frames never appear.
        min_depth = min(
            [d + 1 for d, name in flat_nodes if "begin_unit_test_marker" in name] or [0]
        )
        return textwrap.indent(
            "\n".join(
                [
                    f"{' ' * (d - min_depth)}{name.rstrip()}"
                    for d, name in flat_nodes
                    if d >= min_depth
                ]
            ),
            " " * indent,
        )
    @staticmethod
    def fmt_name(name: str) -> str:
        # Normalize source locations and kernel/template names so traces
        # compare equal across checkouts, runs, and PyTorch versions.
        match = re.match(r"^(.*)\.py\(([0-9]+)\): (.*)$", name)
        if match:
            filename, _, fn = match.groups()
            # This test can appear as `test/profiler/test_profiler_tree.py`
            # depending on where it is run from.
            test_file = os.path.splitext(os.path.split(__file__)[1])[0]
            if filename.endswith(test_file):
                filename = test_file
            # We test against a string literal, so all paths have to look like POSIX paths.
            filename = filename.replace(os.sep, "/")
            # We don't want to have to update this test every time PyTorch changes.
            # At some point we should test some line numbers, but for now it's
            # too brittle.
            lineno = "..."
            return f"(unknown).py({lineno}): {fn}"
        for kernel_pattern in (
            "void at::native::elementwise_kernel",
            "void at::native::reduce_kernel",
            "void at::native::vectorized_elementwise_kernel",
            "void at::native::unrolled_elementwise_kernel",
            r"void [a-zA-Z0-9]+_kernel",  # Nvidia kernels.
        ):
            name = re.sub(
                rf"{kernel_pattern}<.+>\(.+\)$",
                f"{kernel_pattern.replace('[a-zA-Z0-9]+', '...')}<...>(...)",
                name,
            )
        # HACK: this patches around the fact that PyBind11 improperly sets the
        # __qualname__ attribute on functions and methods; see
        # https://github.com/pybind/pybind11/issues/5774. This should be removed if
        # that issue is fixed.
        name = re.sub(
            r"pybind11_builtins\.pybind11_detail_function_record_v[^ .]+",
            "PyCapsule",
            name,
        )
        # Object addresses vary run to run; mask them out.
        return re.sub("object at 0x[0-9a-fA-F]+>", "object at 0xXXXXXXXXXXXX>", name)
    @classmethod
    def validate_node(cls, node):
        extra_fields = node.extra_fields
        if isinstance(extra_fields, (_ExtraFields_PyCall, _ExtraFields_PyCCall)):
            # Check that the lineage established by the profiler matches the
            # caller recorded by the Python tracer.
            parent = node.parent
            while parent is not None:
                if isinstance(parent.extra_fields, _ExtraFields_PyCall):
                    break
                parent = parent.parent
            def to_string(frame_state):
                return f"{frame_state.file_name}(...): {frame_state.function_name}"
            if parent:
                parent_name = to_string(parent.extra_fields.callsite)
                caller_name = to_string(extra_fields.caller)
                assert parent_name == caller_name, f"{parent_name} vs. {caller_name}"
@unittest.skipIf(IS_ARM64, "Not working on ARM")
| ProfilerTree |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/AllowedHosts.py | {
"start": 186,
"end": 567
} | class ____(BaseModel):
    # NOTE: generated model (models/generated) — regenerate from the source
    # schema rather than editing by hand.
    class Config:
        # Accept unknown keys so newer metadata files still validate.
        extra = Extra.allow
    hosts: Optional[List[str]] = Field(
        None,
        description="An array of hosts that this connector can connect to. AllowedHosts not being present for the source or destination means that access to all hosts is allowed. An empty list here means that no network access is granted.",
    )
| AllowedHosts |
python | dagster-io__dagster | python_modules/libraries/dagster-azure/dagster_azure/adls2/io_manager.py | {
"start": 8626,
"end": 11707
} | class ____(ADLS2PickleIOManager):
    """Renamed to ADLS2PickleIOManager. See ADLS2PickleIOManager for documentation."""
    # Deprecated alias kept so existing imports keep working.
    pass
@dagster_maintained_io_manager
@io_manager(
    config_schema=ADLS2PickleIOManager.to_config_schema(),
    required_resource_keys={"adls2"},
)
def adls2_pickle_io_manager(init_context: InitResourceContext) -> PickledObjectADLS2IOManager:
    """Persistent IO manager using Azure Data Lake Storage Gen2 for storage.
    Serializes objects via pickling. Suitable for objects storage for distributed executors, so long
    as each execution node has network connectivity and credentials for ADLS and the backing
    container.
    Assigns each op output to a unique filepath containing run ID, step key, and output name.
    Assigns each asset to a single filesystem path, at "<base_dir>/<asset_key>". If the asset key
    has multiple components, the final component is used as the name of the file, and the preceding
    components as parent directories under the base_dir.
    Subsequent materializations of an asset will overwrite previous materializations of that asset.
    With a base directory of "/my/base/path", an asset with key
    `AssetKey(["one", "two", "three"])` would be stored in a file called "three" in a directory
    with path "/my/base/path/one/two/".
    Example usage:
    Attach this IO manager to a set of assets.
    .. code-block:: python
        from dagster import Definitions, asset
        from dagster_azure.adls2 import adls2_pickle_io_manager, adls2_resource
        @asset
        def asset1():
            # create df ...
            return df
        @asset
        def asset2(asset1):
            return df[:5]
        Definitions(
            assets=[asset1, asset2],
            resources={
                "io_manager": adls2_pickle_io_manager.configured(
                    {"adls2_file_system": "my-cool-fs", "adls2_prefix": "my-cool-prefix"}
                ),
                "adls2": adls2_resource,
            },
        )
    Attach this IO manager to your job to make it available to your ops.
    .. code-block:: python
        from dagster import job
        from dagster_azure.adls2 import adls2_pickle_io_manager, adls2_resource
        @job(
            resource_defs={
                "io_manager": adls2_pickle_io_manager.configured(
                    {"adls2_file_system": "my-cool-fs", "adls2_prefix": "my-cool-prefix"}
                ),
                "adls2": adls2_resource,
            },
        )
        def my_job():
            ...
    """
    # Unpack the clients from the configured `adls2` resource. The lease
    # client is a constructor (not an instance) so the IO manager can create
    # one per blob when it acquires leases.
    adls_resource = init_context.resources.adls2
    adls2_client = adls_resource.adls2_client
    blob_client = adls_resource.blob_client
    lease_client = adls_resource.lease_client_constructor
    return PickledObjectADLS2IOManager(
        init_context.resource_config["adls2_file_system"],
        adls2_client,
        blob_client,
        lease_client,
        init_context.resource_config.get("adls2_prefix"),
        init_context.resource_config.get("lease_duration"),
    )
| ConfigurablePickledObjectADLS2IOManager |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-mixpanel/source_mixpanel/streams.py | {
"start": 15200,
"end": 21197
} | class ____(DateSlicesMixin, IncrementalMixpanelStream):
    """Export event data as it is received and stored within Mixpanel, complete with all event properties
    (including distinct_id) and the exact timestamp the event was fired.
    API Docs: https://developer.mixpanel.com/reference/export
    Endpoint: https://data.mixpanel.com/api/2.0/export
    Raw Export API Rate Limit (https://help.mixpanel.com/hc/en-us/articles/115004602563-Rate-Limits-for-API-Endpoints):
    A maximum of 100 concurrent queries,
    3 queries per second and 60 queries per hour.
    """
    # Raw export records carry no stable unique id; dedupe is left to the
    # destination.
    primary_key: str = None
    cursor_field: str = "time"
    transformer = TypeTransformer(TransformConfig.DefaultSchemaNormalization)
    @property
    def url_base(self):
        # The Export API lives on the data.* host (not api.*); EU projects
        # use the region-prefixed host.
        prefix = "-eu" if self.region == "EU" else ""
        return f"https://data{prefix}.mixpanel.com/api/2.0/"
    def path(self, **kwargs) -> str:
        return "export"
    def get_error_handler(self) -> Optional[ErrorHandler]:
        return ExportErrorHandler(logger=self.logger, stream=self)
    def iter_dicts(self, lines):
        """
        The incoming stream has to be JSON lines format.
        From time to time for some reason, the one record can be split into multiple lines.
        We try to combine such split parts into one record only if parts go nearby.
        """
        parts = []
        for record_line in lines:
            if record_line == "terminated early":
                # Server-side truncation marker: stop yielding for this slice.
                self.logger.warning(f"Couldn't fetch data from Export API. Response: {record_line}")
                return
            try:
                yield json.loads(record_line)
            except ValueError:
                # Not valid JSON on its own — treat as a fragment of a record
                # split across lines and buffer it.
                parts.append(record_line)
            else:
                # A complete record interrupts any pending fragment run.
                parts = []
            if len(parts) > 1:
                try:
                    yield json.loads("".join(parts))
                except ValueError:
                    pass
                else:
                    parts = []
    def process_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
        """Export API return response in JSONL format but each line is a valid JSON object
        Raw item example:
            {
                "event": "Viewed E-commerce Page",
                "properties": {
                    "time": 1623860880,
                    "distinct_id": "1d694fd9-31a5-4b99-9eef-ae63112063ed",
                    "$browser": "Chrome",                                           -> will be renamed to "browser"
                    "$browser_version": "91.0.4472.101",
                    "$current_url": "https://unblockdata.com/solutions/e-commerce/",
                    "$insert_id": "c5eed127-c747-59c8-a5ed-d766f48e39a4",
                    "$mp_api_endpoint": "api.mixpanel.com",
                    "mp_lib": "Segment: analytics-wordpress",
                    "mp_processing_time_ms": 1623886083321,
                    "noninteraction": true
                }
            }
        """
        # We prefer response.iter_lines() to response.text.split_lines() as the later can missparse text properties embeding linebreaks
        for record in self.iter_dicts(response.iter_lines(decode_unicode=True)):
            # transform record into flat dict structure
            item = {"event": record["event"]}
            properties = record["properties"]
            for result in transform_property_names(properties.keys()):
                # Convert all values to string (this is default property type)
                # because API does not provide properties type information
                item[result.transformed_name] = str(properties[result.source_name])
            # convert timestamp to datetime string
            item["time"] = pendulum.from_timestamp(int(item["time"]), tz="UTC").to_iso8601_string()
            yield item
    # NOTE(review): @cache on an instance method keys on `self` and keeps the
    # stream instance alive for the cache's lifetime — acceptable for
    # per-sync stream objects, but worth confirming.
    @cache
    def get_json_schema(self) -> Mapping[str, Any]:
        """
        :return: A dict of the JSON schema representing this stream.
        The default implementation of this method looks for a JSONSchema file with the same name as this stream's "name" property.
        Override as needed.
        """
        schema = super().get_json_schema()
        # Set whether to allow additional properties for engage and export endpoints
        # Event and Engage properties are dynamic and depend on the properties provided on upload,
        # when the Event or Engage (user/person) was created.
        schema["additionalProperties"] = self.additional_properties
        # read existing Export schema from API
        schema_properties = ExportSchema(**self.get_stream_params()).read_records(sync_mode=SyncMode.full_refresh)
        for result in transform_property_names(schema_properties):
            # Schema does not provide exact property type
            # string ONLY for event properties (no other datatypes)
            # Reference: https://help.mixpanel.com/hc/en-us/articles/360001355266-Event-Properties#field-size-character-limits-for-event-properties
            schema["properties"][result.transformed_name] = {"type": ["null", "string"]}
        return schema
    def request_params(
        self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
    ) -> MutableMapping[str, Any]:
        params = super().request_params(stream_state, stream_slice, next_page_token)
        # additional filter by timestamp because required start date and end date only allow to filter by date
        cursor_param = stream_slice.get(self.cursor_field)
        if cursor_param:
            timestamp = int(pendulum.parse(cursor_param).timestamp())
            params["where"] = f'properties["$time"]>=datetime({timestamp})'
        return params
    def request_kwargs(
        self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
    ) -> Mapping[str, Any]:
        # Stream the (potentially huge) JSONL body instead of buffering it.
        return {"stream": True}
python | PyCQA__pylint | tests/functional/ext/docparams/parameter/missing_param_doc_required_Numpy.py | {
"start": 3957,
"end": 9359
} | class ____: # [multiple-constructor-doc, missing-param-doc, missing-type-doc]
"""test_constr_params_in_class_and_init_numpy
Example of a class with missing constructor parameter documentation
in both the init docstring and the class docstring
(Numpy style)
Everything is completely analogous to functions.
Parameters
----------
y:
bla
missing constructor parameter documentation
"""
def __init__(self, x, y): # [missing-param-doc, missing-type-doc]
"""docstring foo
Parameters
----------
y:
bla
missing constructor parameter documentation
"""
def test_warns_missing_args_numpy(named_arg, *args): # [missing-param-doc]
"""The docstring
Args
----
named_arg : object
Returned
Returns
-------
object or None
Maybe named_arg
"""
if args:
return named_arg
def test_warns_missing_kwargs_numpy(named_arg, **kwargs): # [missing-param-doc]
"""The docstring
Args
----
named_arg : object
Returned
Returns
-------
object or None
Maybe named_arg
"""
if kwargs:
return named_arg
def test_finds_args_without_type_numpy( # [missing-type-doc]
named_arg, typed_arg: bool, untyped_arg, *args
):
"""The docstring
Args
----
named_arg : object
Returned
typed_arg
Other argument without numpy type annotation
untyped_arg
Other argument without any type annotation
*args :
Optional Arguments
Returns
-------
object or None
Maybe named_arg
"""
if args:
return named_arg
def test_finds_args_with_xref_type_numpy(named_arg, *args):
"""The docstring
Args
----
named_arg : `example.value`
Returned
*args :
Optional Arguments
Returns
-------
`example.value`
Maybe named_arg
"""
if args:
return named_arg
def test_finds_kwargs_without_type_numpy(named_arg, **kwargs):
"""The docstring
Args
----
named_arg : object
Returned
**kwargs :
Keyword arguments
Returns
-------
object or None
Maybe named_arg
"""
if kwargs:
return named_arg
def test_finds_kwargs_with_type_numpy(named_arg, **kwargs: dict[str, str]):
"""The docstring
Args
----
named_arg : object
Returned
**kwargs :
Keyword arguments
Returns
-------
object or None
Maybe named_arg
"""
if kwargs:
return named_arg
def test_finds_kwargs_without_asterisk_numpy(named_arg, **kwargs):
"""The docstring
Args
----
named_arg : object
Returned
kwargs :
Keyword arguments
Returns
-------
object or None
Maybe named_arg
"""
if kwargs:
return named_arg
def my_func(
named_arg_one,
named_arg_two,
named_arg_three,
named_arg_four,
named_arg_five,
named_arg_six,
named_arg_seven,
named_arg_eight,
): # pylint: disable=too-many-positional-arguments
"""The docstring
Args
----
named_arg_one : dict(str,str)
Returned
named_arg_two : dict[str,str]
Returned
named_arg_three : tuple(int)
Returned
named_arg_four : list[tokenize.TokenInfo]
Returned
named_arg_five : int or str
Returned
named_arg_six : tuple(int or str)
Returned
named_arg_seven : tuple(int) or list(int)
Returned
named_arg_eight : tuple(int or str) or list(int or str)
Returned
Returns
-------
dict(str,str)
named_arg_one
dict[str,str]
named_arg_two
tuple(int)
named_arg_three
list[tokenize.TokenInfo]
named_arg_four
int or str
named_arg_five
tuple(int or str)
named_arg_six
tuple(int) or list(int)
named_arg_seven
tuple(int or str) or list(int or str)
named_arg_eight
"""
return (
named_arg_one,
named_arg_two,
named_arg_three,
named_arg_four,
named_arg_five,
named_arg_six,
named_arg_seven,
named_arg_eight,
)
def test_ignores_optional_specifier_numpy(param, param2="all"):
"""Do something.
Parameters
----------
param : str
Description.
param2 : str, optional
Description (the default is 'all').
Returns
-------
int
Description.
"""
return param, param2
def test_with_list_of_default_values(arg, option, option2):
"""Reported in https://github.com/pylint-dev/pylint/issues/4035.
Parameters
----------
arg : int
The number of times to print it.
option : {"y", "n"}
Do I do it?
option2 : {"y", None, "n"}
Do I do it?
"""
return arg, option, option2
def test_with_descriptions_instead_of_typing(arg, axis, option):
"""We choose to accept description in place of typing as well.
See: https://github.com/pylint-dev/pylint/pull/7398.
Parameters
----------
arg : a number type.
axis : int or None
option : {"y", "n"}
Do I do it?
"""
return arg, option
| ClassFoo |
python | pytorch__pytorch | test/dynamo/test_compile.py | {
"start": 236,
"end": 480
} | class ____(torch.nn.Module):
    """Minimal Linear → ReLU module used as a torch.compile test fixture."""
    def __init__(self) -> None:
        super().__init__()
        self.linear = torch.nn.Linear(10, 10)
        self.relu = torch.nn.ReLU()
    def forward(self, x):
        # x: (..., 10) -> (..., 10)
        return self.relu(self.linear(x))
| ToyModel |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_hourly_reports.py | {
"start": 7725,
"end": 15350
} | class ____(HourlyReportsTestWithStateChangesAfterMigration):
    # Fixture wiring for the account-impression hourly report scenario:
    # the stream under test, the canned response/state files, and the
    # expected record count for a full read.
    stream_name = "account_impression_performance_report_hourly"
    report_file = "account_impression_performance_report_hourly"
    records_number = 24
    state_file = "hourly_reports_state"
    incremental_report_file = "account_impression_performance_report_hourly_incremental"
    report_file_with_records_further_start_date = "account_impression_performance_report_hourly_with_records_further_config_start_date"
    state_file_legacy = "hourly_reports_state_legacy"
    state_file_after_migration = "hourly_reports_state_after_migration"
    state_file_after_migration_with_cursor_further_config_start_date = (
        "hourly_reports_state_after_migration_with_cursor_further_config_start_date"
    )
    incremental_report_file_with_records_further_cursor = (
        "account_impression_performance_report_hourly_incremental_with_records_further_cursor"
    )
def mock_report_apis(self):
self.mock_user_query_api(response_template="user_query")
self.mock_accounts_search_api(
response_template="accounts_search_for_report",
body=b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AccountPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "AccountPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CurrencyCode", "AdDistribution", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "AveragePosition", "Conversions", "ConversionRate", "CostPerConversion", "LowQualityClicks", "LowQualityClicksPercent", "LowQualityImpressions", "LowQualityImpressionsPercent", "LowQualityConversions", "LowQualityConversionRate", "DeviceType", "PhoneImpressions", "PhoneCalls", "Ptr", "Network", "Assists", "Revenue", "ReturnOnAdSpend", "CostPerAssist", "RevenuePerConversion", "RevenuePerAssist", "AccountStatus", "LowQualityGeneralClicks", "LowQualitySophisticatedClicks", "TopImpressionRatePercent", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "ViewThroughConversions", "AverageCpm", "ConversionsQualified", "LowQualityConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue", "VideoViews", "ViewThroughRate", "AverageCPV", "VideoViewsAt25Percent", "VideoViewsAt50Percent", "VideoViewsAt75Percent", "CompletedVideoViews", "VideoCompletionRate", "TotalWatchTimeInMS", "AverageWatchTimePerVideoView", "AverageWatchTimePerImpression", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
# for second read
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AccountPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "AccountPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CurrencyCode", "AdDistribution", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "AveragePosition", "Conversions", "ConversionRate", "CostPerConversion", "LowQualityClicks", "LowQualityClicksPercent", "LowQualityImpressions", "LowQualityImpressionsPercent", "LowQualityConversions", "LowQualityConversionRate", "DeviceType", "PhoneImpressions", "PhoneCalls", "Ptr", "Network", "Assists", "Revenue", "ReturnOnAdSpend", "CostPerAssist", "RevenuePerConversion", "RevenuePerAssist", "AccountStatus", "LowQualityGeneralClicks", "LowQualitySophisticatedClicks", "TopImpressionRatePercent", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "ViewThroughConversions", "AverageCpm", "ConversionsQualified", "LowQualityConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue", "VideoViews", "ViewThroughRate", "AverageCPV", "VideoViewsAt25Percent", "VideoViewsAt50Percent", "VideoViewsAt75Percent", "CompletedVideoViews", "VideoCompletionRate", "TotalWatchTimeInMS", "AverageWatchTimePerVideoView", "AverageWatchTimePerImpression", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 6, "Month": 5, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
# for no config start date test
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AccountPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "AccountPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CurrencyCode", "AdDistribution", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "AveragePosition", "Conversions", "ConversionRate", "CostPerConversion", "LowQualityClicks", "LowQualityClicksPercent", "LowQualityImpressions", "LowQualityImpressionsPercent", "LowQualityConversions", "LowQualityConversionRate", "DeviceType", "PhoneImpressions", "PhoneCalls", "Ptr", "Network", "Assists", "Revenue", "ReturnOnAdSpend", "CostPerAssist", "RevenuePerConversion", "RevenuePerAssist", "AccountStatus", "LowQualityGeneralClicks", "LowQualitySophisticatedClicks", "TopImpressionRatePercent", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "ViewThroughConversions", "AverageCpm", "ConversionsQualified", "LowQualityConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue", "VideoViews", "ViewThroughRate", "AverageCPV", "VideoViewsAt25Percent", "VideoViewsAt50Percent", "VideoViewsAt75Percent", "CompletedVideoViews", "VideoCompletionRate", "TotalWatchTimeInMS", "AverageWatchTimePerVideoView", "AverageWatchTimePerImpression", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2023}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Poll", response_template="generate_report_poll", body=b'{"ReportRequestId": "thisisthereport_requestid"}'
)
| TestAccountImpressionPerformanceReportHourlyStream |
python | mlflow__mlflow | tests/pytorch/iris_data_module.py | {
"start": 1040,
"end": 1344
} | class ____(IrisDataModuleBase):
    # Thin DataModule: the base class prepares train/val/test splits; this
    # subclass only wraps them in DataLoaders with a fixed small batch size.
    def train_dataloader(self):
        return DataLoader(self.train_set, batch_size=4)
    def val_dataloader(self):
        return DataLoader(self.val_set, batch_size=4)
    def test_dataloader(self):
        return DataLoader(self.test_set, batch_size=4)
python | sympy__sympy | sympy/polys/puiseux.py | {
"start": 1533,
"end": 2351
} | class ____(Protocol[K, V]):
"""A dict mapping from keys to values."""
def items(self) -> Iterable[tuple[K, V]]: ...
def __iter__(self) -> Iterator[K]: ...
MonI = tuple[int, ...]
MonQ = tuple[MPQ, ...]
def puiseux_ring(
symbols: str | list[Expr],
domain: Domain[Er],
/,
) -> tuple[PuiseuxRing[Er], Unpack[tuple[PuiseuxPoly[Er], ...]]]:
"""Construct a Puiseux ring.
This function constructs a Puiseux ring with the given symbols and domain.
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.puiseux import puiseux_ring
>>> R, x, y = puiseux_ring('x y', QQ)
>>> R
PuiseuxRing((x, y), QQ)
>>> p = 5*x**QQ(1,2) + 7/y
>>> p
7*y**(-1) + 5*x**(1/2)
"""
ring = PuiseuxRing(symbols, domain)
return (ring,) + ring.gens # type: ignore
| Map |
python | django__django | django/contrib/gis/geos/prototypes/io.py | {
"start": 7580,
"end": 10184
} | class ____(IOBase):
_constructor = wkb_writer_create
ptr_type = WKB_WRITE_PTR
destructor = wkb_writer_destroy
geos_version = geos_version_tuple()
def __init__(self, dim=2):
super().__init__()
self.outdim = dim
def _handle_empty_point(self, geom):
from django.contrib.gis.geos import Point
if isinstance(geom, Point) and geom.empty:
if self.srid:
# PostGIS uses POINT(NaN NaN) for WKB representation of empty
# points. Use it for EWKB as it's a PostGIS specific format.
# https://trac.osgeo.org/postgis/ticket/3181
geom = Point(float("NaN"), float("NaN"), srid=geom.srid)
else:
raise ValueError("Empty point is not representable in WKB.")
return geom
def write(self, geom):
"Return the WKB representation of the given geometry."
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t()))
return memoryview(wkb)
def write_hex(self, geom):
"Return the HEXEWKB representation of the given geometry."
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
return wkb
# ### WKBWriter Properties ###
# Property for getting/setting the byteorder.
def _get_byteorder(self):
return wkb_writer_get_byteorder(self.ptr)
def _set_byteorder(self, order):
if order not in (0, 1):
raise ValueError(
"Byte order parameter must be 0 (Big Endian) or 1 (Little Endian)."
)
wkb_writer_set_byteorder(self.ptr, order)
byteorder = property(_get_byteorder, _set_byteorder)
# Property for getting/setting the output dimension.
@property
def outdim(self):
return wkb_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError("WKB output dimension must be 2 or 3")
wkb_writer_set_outdim(self.ptr, new_dim)
# Property for getting/setting the include srid flag.
@property
def srid(self):
return bool(wkb_writer_get_include_srid(self.ptr))
@srid.setter
def srid(self, include):
wkb_writer_set_include_srid(self.ptr, bool(include))
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread. The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
| WKBWriter |
python | huggingface__transformers | src/transformers/models/hubert/modeling_hubert.py | {
"start": 7206,
"end": 8636
} | class ____(nn.Module):
"""Construct the features from raw audio waveform"""
def __init__(self, config):
super().__init__()
if config.feat_extract_norm == "group":
conv_layers = [HubertGroupNormConvLayer(config, layer_id=0)] + [
HubertNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [HubertLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
else:
raise ValueError(
f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
)
self.conv_layers = nn.ModuleList(conv_layers)
self.gradient_checkpointing = False
self._requires_grad = True
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def forward(self, input_values):
hidden_states = input_values[:, None]
# make sure hidden_states require grad for gradient_checkpointing
if self._requires_grad and self.training:
hidden_states.requires_grad = True
for conv_layer in self.conv_layers:
hidden_states = conv_layer(hidden_states)
return hidden_states
| HubertFeatureEncoder |
python | networkx__networkx | networkx/algorithms/centrality/tests/test_reaching.py | {
"start": 120,
"end": 3306
} | class ____:
"""Unit tests for the global reaching centrality function."""
def test_non_positive_weights(self):
with pytest.raises(nx.NetworkXError):
G = nx.DiGraph()
nx.global_reaching_centrality(G, weight="weight")
def test_negatively_weighted(self):
with pytest.raises(nx.NetworkXError):
G = nx.Graph()
G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)])
nx.global_reaching_centrality(G, weight="weight")
def test_directed_star(self):
G = nx.DiGraph()
G.add_weighted_edges_from([(1, 2, 0.5), (1, 3, 0.5)])
grc = nx.global_reaching_centrality
assert grc(G, normalized=False, weight="weight") == 0.5
assert grc(G) == 1
def test_undirected_unweighted_star(self):
G = nx.star_graph(2)
grc = nx.global_reaching_centrality
assert grc(G, normalized=False, weight=None) == 0.25
def test_undirected_weighted_star(self):
G = nx.Graph()
G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)])
grc = nx.global_reaching_centrality
assert grc(G, normalized=False, weight="weight") == 0.375
def test_cycle_directed_unweighted(self):
G = nx.DiGraph()
G.add_edge(1, 2)
G.add_edge(2, 1)
assert nx.global_reaching_centrality(G, weight=None) == 0
def test_cycle_undirected_unweighted(self):
G = nx.Graph()
G.add_edge(1, 2)
assert nx.global_reaching_centrality(G, weight=None) == 0
def test_cycle_directed_weighted(self):
G = nx.DiGraph()
G.add_weighted_edges_from([(1, 2, 1), (2, 1, 1)])
assert nx.global_reaching_centrality(G) == 0
def test_cycle_undirected_weighted(self):
G = nx.Graph()
G.add_edge(1, 2, weight=1)
grc = nx.global_reaching_centrality
assert grc(G, normalized=False) == 0
def test_directed_weighted(self):
G = nx.DiGraph()
G.add_edge("A", "B", weight=5)
G.add_edge("B", "C", weight=1)
G.add_edge("B", "D", weight=0.25)
G.add_edge("D", "E", weight=1)
denom = len(G) - 1
A_local = sum([5, 3, 2.625, 2.0833333333333]) / denom
B_local = sum([1, 0.25, 0.625]) / denom
C_local = 0
D_local = sum([1]) / denom
E_local = 0
local_reach_ctrs = [A_local, C_local, B_local, D_local, E_local]
max_local = max(local_reach_ctrs)
expected = sum(max_local - lrc for lrc in local_reach_ctrs) / denom
grc = nx.global_reaching_centrality
actual = grc(G, normalized=False, weight="weight")
assert expected == pytest.approx(actual, abs=1e-7)
def test_single_node_with_cycle(self):
G = nx.DiGraph([(1, 1)])
with pytest.raises(nx.NetworkXError, match="local_reaching_centrality"):
nx.global_reaching_centrality(G)
def test_single_node_with_weighted_cycle(self):
G = nx.DiGraph()
G.add_weighted_edges_from([(1, 1, 2)])
with pytest.raises(nx.NetworkXError, match="local_reaching_centrality"):
nx.global_reaching_centrality(G, weight="weight")
| TestGlobalReachingCentrality |
python | tensorflow__tensorflow | tensorflow/python/distribute/strategy_test_lib.py | {
"start": 29562,
"end": 32889
} | class ____(DistributionTestBase):
"""Tests for a Remote single worker."""
def _get_num_gpus(self):
pass
def _testNumReplicasInSync(self, distribution):
self.assertEqual(self._get_num_gpus(), distribution.num_replicas_in_sync)
def _testMinimizeLoss(self, distribution):
if context.executing_eagerly():
self._test_minimize_loss_eager(distribution)
else:
self._test_minimize_loss_graph(distribution, learning_rate=0.05)
def _testDeviceScope(self, distribution):
with distribution.scope():
a = array_ops.identity(1.)
with ops.device("/cpu:0"):
b = array_ops.identity(1.)
if context.executing_eagerly():
device = "/job:worker/replica:0/task:0/device:CPU:0"
else:
device = "/job:worker/replica:0/task:0"
self.assertEqual(a.device, device)
self.assertEqual(b.device, "/job:worker/replica:0/task:0/device:CPU:0")
def _testMakeInputFnIteratorWithDataset(self, distribution):
dataset_fn = lambda: dataset_ops.Dataset.range(100)
num_gpus = self._get_num_gpus() # pylint: disable=assignment-from-no-return
num_workers = 1
expected_values = [[i+j for j in range(num_gpus)] * num_workers
for i in range(0, 100, num_gpus)]
# Dummy cached_session is used in Eager
with self.cached_session() as sess:
# `expected_input_pipeline_id` is None because the input_fn will be called
# multiple times, each with a different input_pipeline_id.
input_fn = self._input_fn_to_test_input_context(
dataset_fn,
expected_num_replicas_in_sync=num_workers*num_gpus,
expected_num_input_pipelines=num_workers,
expected_input_pipeline_id=None)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(
iterator, distribution.extended.worker_devices, expected_values, sess)
def _testMakeInputFnIteratorWithCallable(self, distribution):
def fn():
dataset = dataset_ops.Dataset.range(100)
it = dataset_ops.make_one_shot_iterator(dataset)
return it.get_next
num_gpus = self._get_num_gpus() # pylint: disable=assignment-from-no-return
num_workers = 1
expected_values = []
for i in range(0, 100, num_gpus):
expected_values.append([i+j for j in range(num_gpus)] * num_workers)
# Dummy cached_session is used in Eager
with self.cached_session() as sess:
# `expected_input_pipeline_id` is None because the input_fn will be called
# multiple times, each with a different input_pipeline_id.
input_fn = self._input_fn_to_test_input_context(
fn,
expected_num_replicas_in_sync=num_workers*num_gpus,
expected_num_input_pipelines=num_workers,
expected_input_pipeline_id=None)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(
iterator, distribution.extended.worker_devices, expected_values, sess,
test_reinitialize=False, ignore_order=True)
def _all_sum(value):
ctx = distribute_lib.get_replica_context()
return ctx.all_reduce(reduce_util.ReduceOp.SUM, value)
def _all_mean(value):
ctx = distribute_lib.get_replica_context()
return ctx.all_reduce(reduce_util.ReduceOp.MEAN, value)
| RemoteSingleWorkerMirroredStrategyBase |
python | django__django | tests/nested_foreign_keys/tests.py | {
"start": 997,
"end": 7043
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.director = Person.objects.create(name="Terry Gilliam / Terry Jones")
cls.movie = Movie.objects.create(
title="Monty Python and the Holy Grail", director=cls.director
)
# This test failed in #16715 because in some cases INNER JOIN was selected
# for the second foreign key relation instead of LEFT OUTER JOIN.
def test_inheritance(self):
Event.objects.create()
Screening.objects.create(movie=self.movie)
self.assertEqual(len(Event.objects.all()), 2)
self.assertEqual(len(Event.objects.select_related("screening")), 2)
# This failed.
self.assertEqual(len(Event.objects.select_related("screening__movie")), 2)
self.assertEqual(len(Event.objects.values()), 2)
self.assertEqual(len(Event.objects.values("screening__pk")), 2)
self.assertEqual(len(Event.objects.values("screening__movie__pk")), 2)
self.assertEqual(len(Event.objects.values("screening__movie__title")), 2)
# This failed.
self.assertEqual(
len(
Event.objects.values("screening__movie__pk", "screening__movie__title")
),
2,
)
# Simple filter/exclude queries for good measure.
self.assertEqual(Event.objects.filter(screening__movie=self.movie).count(), 1)
self.assertEqual(Event.objects.exclude(screening__movie=self.movie).count(), 1)
# These all work because the second foreign key in the chain has null=True.
def test_inheritance_null_FK(self):
Event.objects.create()
ScreeningNullFK.objects.create(movie=None)
ScreeningNullFK.objects.create(movie=self.movie)
self.assertEqual(len(Event.objects.all()), 3)
self.assertEqual(len(Event.objects.select_related("screeningnullfk")), 3)
self.assertEqual(len(Event.objects.select_related("screeningnullfk__movie")), 3)
self.assertEqual(len(Event.objects.values()), 3)
self.assertEqual(len(Event.objects.values("screeningnullfk__pk")), 3)
self.assertEqual(len(Event.objects.values("screeningnullfk__movie__pk")), 3)
self.assertEqual(len(Event.objects.values("screeningnullfk__movie__title")), 3)
self.assertEqual(
len(
Event.objects.values(
"screeningnullfk__movie__pk", "screeningnullfk__movie__title"
)
),
3,
)
self.assertEqual(
Event.objects.filter(screeningnullfk__movie=self.movie).count(), 1
)
self.assertEqual(
Event.objects.exclude(screeningnullfk__movie=self.movie).count(), 2
)
def test_null_exclude(self):
screening = ScreeningNullFK.objects.create(movie=None)
ScreeningNullFK.objects.create(movie=self.movie)
self.assertEqual(
list(ScreeningNullFK.objects.exclude(movie__id=self.movie.pk)), [screening]
)
# This test failed in #16715 because in some cases INNER JOIN was selected
# for the second foreign key relation instead of LEFT OUTER JOIN.
def test_explicit_ForeignKey(self):
Package.objects.create()
screening = Screening.objects.create(movie=self.movie)
Package.objects.create(screening=screening)
self.assertEqual(len(Package.objects.all()), 2)
self.assertEqual(len(Package.objects.select_related("screening")), 2)
self.assertEqual(len(Package.objects.select_related("screening__movie")), 2)
self.assertEqual(len(Package.objects.values()), 2)
self.assertEqual(len(Package.objects.values("screening__pk")), 2)
self.assertEqual(len(Package.objects.values("screening__movie__pk")), 2)
self.assertEqual(len(Package.objects.values("screening__movie__title")), 2)
# This failed.
self.assertEqual(
len(
Package.objects.values(
"screening__movie__pk", "screening__movie__title"
)
),
2,
)
self.assertEqual(Package.objects.filter(screening__movie=self.movie).count(), 1)
self.assertEqual(
Package.objects.exclude(screening__movie=self.movie).count(), 1
)
# These all work because the second foreign key in the chain has null=True.
def test_explicit_ForeignKey_NullFK(self):
PackageNullFK.objects.create()
screening = ScreeningNullFK.objects.create(movie=None)
screening_with_movie = ScreeningNullFK.objects.create(movie=self.movie)
PackageNullFK.objects.create(screening=screening)
PackageNullFK.objects.create(screening=screening_with_movie)
self.assertEqual(len(PackageNullFK.objects.all()), 3)
self.assertEqual(len(PackageNullFK.objects.select_related("screening")), 3)
self.assertEqual(
len(PackageNullFK.objects.select_related("screening__movie")), 3
)
self.assertEqual(len(PackageNullFK.objects.values()), 3)
self.assertEqual(len(PackageNullFK.objects.values("screening__pk")), 3)
self.assertEqual(len(PackageNullFK.objects.values("screening__movie__pk")), 3)
self.assertEqual(
len(PackageNullFK.objects.values("screening__movie__title")), 3
)
self.assertEqual(
len(
PackageNullFK.objects.values(
"screening__movie__pk", "screening__movie__title"
)
),
3,
)
self.assertEqual(
PackageNullFK.objects.filter(screening__movie=self.movie).count(), 1
)
self.assertEqual(
PackageNullFK.objects.exclude(screening__movie=self.movie).count(), 2
)
# Some additional tests for #16715. The only difference is the depth of the
# nesting as we now use 4 models instead of 3 (and thus 3 relations). This
# checks if promotion of join types works for deeper nesting too.
| NestedForeignKeysTests |
python | pytorch__pytorch | test/dynamo/test_guard_manager.py | {
"start": 34175,
"end": 43557
} | class ____(RecursiveDictTagTests):
def setUp(self):
self._prev = torch._dynamo.config.use_recursive_dict_tags_for_guards
torch._dynamo.config.use_recursive_dict_tags_for_guards = True
def tearDown(self):
torch._dynamo.config.use_recursive_dict_tags_for_guards = self._prev
def test_immutable_tag_safe(self):
class Bar:
pass
class Foo:
def __init__(self):
self.a = Bar()
self.b = torch.randn(4)
self.c = 3
self.d = (3, 4)
self.e = (3, Bar())
foo = Foo()
def fn(x):
if foo.a:
x = torch.sin(x)
x = x * foo.b + foo.c + foo.d[0] + foo.d[1] + foo.e[0]
if foo.e[1]:
x = torch.sin(x)
return x
try:
from .utils import install_guard_manager_testing_hook
except ImportError:
from utils import install_guard_manager_testing_hook
def hook(guard_wrapper, f_locals, builder):
from torch._dynamo.source import AttrSource, LocalSource
foo_source = LocalSource("foo")
foo_mgr = builder.get_guard_manager_from_source(foo_source)
for accessor in foo_mgr.get_accessors():
if isinstance(accessor, GetAttrGuardAccessor):
self.assertTrue(
accessor.get_attr_name() in ("a", "b", "c", "d", "e")
)
# Check types of foo.a
foo_a_source = AttrSource(foo_source, "a")
foo_a_mgr = builder.get_guard_manager_from_source(foo_a_source)
self.assertFalse(foo_a_mgr.is_tag_safe())
self.assertFalse(foo_a_mgr.is_tag_safe_root())
# Check types of foo.b
foo_b_source = AttrSource(foo_source, "b")
foo_b_mgr = builder.get_guard_manager_from_source(foo_b_source)
if torch._dynamo.config.skip_tensor_guards_with_matching_dict_tags:
self.assertTrue(foo_b_mgr.is_tag_safe())
else:
self.assertFalse(foo_b_mgr.is_tag_safe())
self.assertFalse(foo_b_mgr.is_tag_safe_root())
# Check types of foo.c
foo_c_source = AttrSource(foo_source, "c")
foo_c_mgr = builder.get_guard_manager_from_source(foo_c_source)
self.assertTrue(foo_c_mgr.is_tag_safe())
self.assertFalse(foo_c_mgr.is_tag_safe_root())
# Check types of foo.d
foo_d_source = AttrSource(foo_source, "d")
foo_d_mgr = builder.get_guard_manager_from_source(foo_d_source)
self.assertTrue(foo_d_mgr.is_tag_safe())
self.assertFalse(foo_d_mgr.is_tag_safe_root())
# Check types of foo.e
foo_e_source = AttrSource(foo_source, "e")
foo_e_mgr = builder.get_guard_manager_from_source(foo_e_source)
self.assertFalse(foo_e_mgr.is_tag_safe())
self.assertFalse(foo_e_mgr.is_tag_safe_root())
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
with install_guard_manager_testing_hook(hook):
opt_fn(torch.randn(4, 4))
def test_dict_tag_safe(self):
class Foo:
def __init__(self):
self.a = 4
foo = Foo()
terminal_dict = {
"a": 1,
}
tag_safe_dict = {
"const": 1,
"tup": (2, 3),
"nested_dict": terminal_dict,
}
tag_unsafe_dict = {
"const": 1,
"foo": foo,
}
outer_dict = {
"safe": tag_safe_dict,
"unsafe": tag_unsafe_dict,
"terminal_dict": {"a": 1},
}
def fn(x):
x = x + outer_dict["safe"]["const"]
x = x + outer_dict["safe"]["tup"][0]
x = x + outer_dict["safe"]["tup"][1]
x = x + outer_dict["safe"]["nested_dict"]["a"]
x = x + outer_dict["unsafe"]["const"]
x = x + outer_dict["unsafe"]["foo"].a
if outer_dict["terminal_dict"]:
x = torch.sin(x)
return x
try:
from .utils import install_guard_manager_testing_hook
except ImportError:
from utils import install_guard_manager_testing_hook
def hook(guard_wrapper, f_locals, builder):
from torch._dynamo.source import DictGetItemSource, LocalSource
outer_source = LocalSource("outer_dict")
# Check tagness of outer dict
outer_mgr = builder.get_guard_manager_from_source(outer_source)
self.assertFalse(outer_mgr.is_tag_safe())
self.assertFalse(outer_mgr.is_tag_safe_root())
# Check tagness of outer["safe"]
outer_safe_source = DictGetItemSource(outer_source, "safe")
outer_safe_mgr = builder.get_guard_manager_from_source(outer_safe_source)
self.assertTrue(outer_safe_mgr.is_tag_safe())
self.assertFalse(outer_safe_mgr.is_tag_safe_root())
# Check tagness of outer["unsafe"]
outer_unsafe_source = DictGetItemSource(outer_source, "unsafe")
outer_unsafe_mgr = builder.get_guard_manager_from_source(
outer_unsafe_source
)
self.assertFalse(outer_unsafe_mgr.is_tag_safe())
self.assertFalse(outer_unsafe_mgr.is_tag_safe_root())
# Check tagness of outer["terminal_dict"]
outer_terminal_source = DictGetItemSource(outer_source, "terminal_dict")
outer_terminal_mgr = builder.get_guard_manager_from_source(
outer_terminal_source
)
self.assertTrue(outer_terminal_mgr.is_tag_safe())
self.assertFalse(outer_terminal_mgr.is_tag_safe_root())
# Check tagness of outer["safe"]["nested_dict"]
outer_safe_nested_source = DictGetItemSource(
outer_safe_source, "nested_dict"
)
outer_safe_nested_mgr = builder.get_guard_manager_from_source(
outer_safe_nested_source
)
self.assertTrue(outer_safe_nested_mgr.is_tag_safe())
# This should not be marked as a root
self.assertFalse(outer_safe_nested_mgr.is_tag_safe_root())
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
with install_guard_manager_testing_hook(hook):
opt_fn(torch.randn(4, 4))
def test_nn_module_tag_safe(self):
class Foo(torch.nn.Module):
c = 2
def __init__(self):
super().__init__()
self.a = 4
def check(self, x):
return True
def forward(self, x):
inspect.signature(self.check).parameters.items()
return x + self.a + self.c
foo = Foo()
class Env(metaclass=abc.ABCMeta): # noqa: B024
pass
class Baz(torch.nn.Module, Env):
def __init__(self):
super().__init__()
self.foo = foo
def forward(self, x):
if "Foo" in str(type(self).__mro__):
x = torch.sin(x)
return self.foo(x)
baz = Baz()
def fn(x):
x = x + baz(x)
return x
try:
from .utils import install_guard_manager_testing_hook
except ImportError:
from utils import install_guard_manager_testing_hook
def hook(guard_wrapper, f_locals, builder):
from torch._dynamo.source import LocalSource
baz_source = LocalSource("baz")
# Check tagness of baz
baz_mgr = builder.get_guard_manager_from_source(baz_source)
self.assertTrue(baz_mgr.is_tag_safe())
self.assertTrue(baz_mgr.is_tag_safe_root())
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
with install_guard_manager_testing_hook(hook):
opt_fn(torch.randn(4, 4))
def test_nn_module_tag_overridden_getattr_safe(self):
class Baz(torch.nn.Module, metaclass=abc.ABCMeta):
def __init__(self):
super().__init__()
self.norm = 2
def __getattr__(self, key):
if key == "a":
return 5
return super().__getattr__(key)
def forward(self, x):
return x + self.a + self.norm
baz = Baz()
def fn(x):
x = x + baz(x)
return x
try:
from .utils import install_guard_manager_testing_hook
except ImportError:
from utils import install_guard_manager_testing_hook
def hook(guard_wrapper, f_locals, builder):
from torch._dynamo.source import LocalSource
baz_source = LocalSource("baz")
# Check tagness of baz
baz_mgr = builder.get_guard_manager_from_source(baz_source)
self.assertTrue(baz_mgr.is_tag_safe())
self.assertTrue(baz_mgr.is_tag_safe_root())
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
with install_guard_manager_testing_hook(hook):
opt_fn(torch.randn(4, 4))
| TagSafetyChecks |
python | django-haystack__django-haystack | test_haystack/elasticsearch_tests/test_elasticsearch_backend.py | {
"start": 53666,
"end": 58482
} | class ____(TestCase):
fixtures = ["base_data.json", "bulk_data.json"]
def setUp(self):
super().setUp()
# Stow.
self.old_ui = connections["elasticsearch"].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = ElasticsearchAutocompleteMockModelSearchIndex()
self.ui.build(indexes=[self.smmi])
connections["elasticsearch"]._index = self.ui
self.sqs = SearchQuerySet("elasticsearch")
# Wipe it clean.
clear_elasticsearch_index()
# Reboot the schema.
self.sb = connections["elasticsearch"].get_backend()
self.sb.setup()
self.smmi.update(using="elasticsearch")
def tearDown(self):
# Restore.
connections["elasticsearch"]._index = self.old_ui
super().tearDown()
def test_build_schema(self):
self.sb = connections["elasticsearch"].get_backend()
content_name, mapping = self.sb.build_schema(self.ui.all_searchfields())
self.assertEqual(
mapping,
{
"django_id": {
"index": "not_analyzed",
"type": "string",
"include_in_all": False,
},
"django_ct": {
"index": "not_analyzed",
"type": "string",
"include_in_all": False,
},
"name_auto": {"type": "string", "analyzer": "edgengram_analyzer"},
"text": {"type": "string", "analyzer": "snowball"},
"pub_date": {"type": "date"},
"name": {"type": "string", "analyzer": "snowball"},
"text_auto": {"type": "string", "analyzer": "edgengram_analyzer"},
},
)
def test_autocomplete(self):
autocomplete = self.sqs.autocomplete(text_auto="mod")
self.assertEqual(autocomplete.count(), 16)
self.assertEqual(
set([result.pk for result in autocomplete]),
set(
[
"1",
"12",
"6",
"14",
"7",
"4",
"23",
"17",
"13",
"18",
"20",
"22",
"19",
"15",
"10",
"2",
]
),
)
self.assertTrue("mod" in autocomplete[0].text.lower())
self.assertTrue("mod" in autocomplete[1].text.lower())
self.assertTrue("mod" in autocomplete[2].text.lower())
self.assertTrue("mod" in autocomplete[3].text.lower())
self.assertTrue("mod" in autocomplete[4].text.lower())
self.assertEqual(len([result.pk for result in autocomplete]), 16)
# Test multiple words.
autocomplete_2 = self.sqs.autocomplete(text_auto="your mod")
self.assertEqual(autocomplete_2.count(), 13)
self.assertEqual(
set([result.pk for result in autocomplete_2]),
set(
[
"1",
"6",
"2",
"14",
"12",
"13",
"10",
"19",
"4",
"20",
"23",
"22",
"15",
]
),
)
self.assertTrue("your" in autocomplete_2[0].text.lower())
self.assertTrue("mod" in autocomplete_2[0].text.lower())
self.assertTrue("your" in autocomplete_2[1].text.lower())
self.assertTrue("mod" in autocomplete_2[1].text.lower())
self.assertTrue("your" in autocomplete_2[2].text.lower())
self.assertEqual(len([result.pk for result in autocomplete_2]), 13)
# Test multiple fields.
autocomplete_3 = self.sqs.autocomplete(text_auto="Django", name_auto="dan")
self.assertEqual(autocomplete_3.count(), 4)
self.assertEqual(
set([result.pk for result in autocomplete_3]), set(["12", "1", "22", "14"])
)
self.assertEqual(len([result.pk for result in autocomplete_3]), 4)
# Test numbers in phrases
autocomplete_4 = self.sqs.autocomplete(text_auto="Jen 867")
self.assertEqual(autocomplete_4.count(), 1)
self.assertEqual(set([result.pk for result in autocomplete_4]), set(["20"]))
# Test numbers alone
autocomplete_4 = self.sqs.autocomplete(text_auto="867")
self.assertEqual(autocomplete_4.count(), 1)
self.assertEqual(set([result.pk for result in autocomplete_4]), set(["20"]))
| LiveElasticsearchAutocompleteTestCase |
python | pytorch__pytorch | .github/scripts/trymerge_explainer.py | {
"start": 647,
"end": 3259
} | class ____:
force: bool
labels: list[str]
pr_num: int
org: str
project: str
ignore_current: bool
has_trunk_label: bool
has_ciflow_label: bool
def __init__(
self,
force: bool,
labels: list[str],
pr_num: int,
org: str,
project: str,
ignore_current: bool,
):
self.force = force
self.labels = labels
self.pr_num = pr_num
self.org = org
self.project = project
self.ignore_current = ignore_current
def _get_flag_msg(
self,
ignore_current_checks: Optional[
list[tuple[str, Optional[str], Optional[int]]]
] = None,
) -> str:
if self.force:
return (
"Your change will be merged immediately since you used the force (-f) flag, "
+ "**bypassing any CI checks** (ETA: 1-5 minutes). "
+ "Please use `-f` as last resort and instead consider `-i/--ignore-current` "
+ "to continue the merge ignoring current failures. This will allow "
+ "currently pending tests to finish and report signal before the merge."
)
elif self.ignore_current and ignore_current_checks is not None:
msg = f"Your change will be merged while ignoring the following {len(ignore_current_checks)} checks: "
msg += ", ".join(f"[{x[0]}]({x[1]})" for x in ignore_current_checks)
return msg
else:
return "Your change will be merged once all checks pass (ETA 0-4 Hours)."
def get_merge_message(
self,
ignore_current_checks: Optional[
list[tuple[str, Optional[str], Optional[int]]]
] = None,
) -> str:
title = "### Merge started"
main_message = self._get_flag_msg(ignore_current_checks)
advanced_debugging = "\n".join(
(
"<details><summary>Advanced Debugging</summary>",
"Check the merge workflow status ",
f'<a href="{os.getenv("GH_RUN_URL")}">here</a>',
"</details>",
)
)
msg = title + "\n"
msg += main_message + "\n\n"
msg += ALTERNATIVES + "\n\n"
msg += CONTACT_US
msg += advanced_debugging
return msg
def get_revert_message(org: str, project: str, pr_num: int) -> str:
msg = (
"@pytorchbot successfully started a revert job."
+ f" Check the current status [here]({os.getenv('GH_RUN_URL')}).\n"
)
msg += CONTACT_US
return msg
| TryMergeExplainer |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 22497,
"end": 24275
} | class ____(TypedDict, total=False):
type: Required[Literal['int']]
multiple_of: int
le: int
ge: int
lt: int
gt: int
strict: bool
ref: str
metadata: dict[str, Any]
serialization: SerSchema
def int_schema(
*,
multiple_of: int | None = None,
le: int | None = None,
ge: int | None = None,
lt: int | None = None,
gt: int | None = None,
strict: bool | None = None,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> IntSchema:
"""
Returns a schema that matches a int value, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
schema = core_schema.int_schema(multiple_of=2, le=6, ge=2)
v = SchemaValidator(schema)
assert v.validate_python('4') == 4
```
Args:
multiple_of: The value must be a multiple of this number
le: The value must be less than or equal to this number
ge: The value must be greater than or equal to this number
lt: The value must be strictly less than this number
gt: The value must be strictly greater than this number
strict: Whether the value should be a int or a value that can be converted to a int
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
return _dict_not_none(
type='int',
multiple_of=multiple_of,
le=le,
ge=ge,
lt=lt,
gt=gt,
strict=strict,
ref=ref,
metadata=metadata,
serialization=serialization,
)
| IntSchema |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-netsuite/source_netsuite/streams.py | {
"start": 12088,
"end": 12244
} | class ____(IncrementalNetsuiteStream):
@property
def cursor_field(self) -> str:
return CUSTOM_INCREMENTAL_CURSOR
| CustomIncrementalNetsuiteStream |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 915561,
"end": 916265
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for Ref."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("RefEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("Ref"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| RefConnection |
python | davidhalter__jedi | jedi/inference/value/iterable.py | {
"start": 6373,
"end": 6508
} | class ____:
def _get_generics(self):
return tuple(c_set.py__class__() for c_set in self.get_mapping_item_values())
| _DictMixin |
python | lazyprogrammer__machine_learning_examples | airline/ann.py | {
"start": 965,
"end": 5556
} | class ____(object):
def __init__(self, hidden_layer_sizes):
self.hidden_layer_sizes = hidden_layer_sizes
def fit(self, X, Y, activation=T.tanh, learning_rate=1e-3, mu=0.5, reg=0, epochs=5000, batch_sz=None, print_period=100, show_fig=True):
X = X.astype(np.float32)
Y = Y.astype(np.float32)
# initialize hidden layers
N, D = X.shape
self.hidden_layers = []
M1 = D
count = 0
for M2 in self.hidden_layer_sizes:
h = HiddenLayer(M1, M2, activation, count)
self.hidden_layers.append(h)
M1 = M2
count += 1
W = np.random.randn(M1) / np.sqrt(M1)
b = 0.0
self.W = theano.shared(W, 'W_last')
self.b = theano.shared(b, 'b_last')
if batch_sz is None:
batch_sz = N
# collect params for later use
self.params = [self.W, self.b]
for h in self.hidden_layers:
self.params += h.params
# for momentum
dparams = [theano.shared(np.zeros(p.get_value().shape)) for p in self.params]
# set up theano functions and variables
thX = T.matrix('X')
thY = T.vector('Y')
Yhat = self.forward(thX)
rcost = reg*T.mean([(p*p).sum() for p in self.params])
cost = T.mean((thY - Yhat).dot(thY - Yhat)) + rcost
prediction = self.forward(thX)
grads = T.grad(cost, self.params)
# momentum only
updates = [
(p, p + mu*dp - learning_rate*g) for p, dp, g in zip(self.params, dparams, grads)
] + [
(dp, mu*dp - learning_rate*g) for dp, g in zip(dparams, grads)
]
train_op = theano.function(
inputs=[thX, thY],
outputs=[cost, prediction],
updates=updates,
)
self.predict_op = theano.function(
inputs=[thX],
outputs=prediction,
)
n_batches = N / batch_sz
# print "N:", N, "batch_sz:", batch_sz
# print "n_batches:", n_batches
costs = []
for i in xrange(epochs):
X, Y = shuffle(X, Y)
for j in xrange(n_batches):
Xbatch = X[j*batch_sz:(j*batch_sz+batch_sz)]
Ybatch = Y[j*batch_sz:(j*batch_sz+batch_sz)]
c, p = train_op(Xbatch, Ybatch)
costs.append(c)
if (j+1) % print_period == 0:
print "i:", i, "j:", j, "nb:", n_batches, "cost:", c
if show_fig:
plt.plot(costs)
plt.show()
def forward(self, X):
Z = X
for h in self.hidden_layers:
Z = h.forward(Z)
return Z.dot(self.W) + self.b
def score(self, X, Y):
Yhat = self.predict_op(X)
return myr2(Y, Yhat)
def predict(self, X):
return self.predict_op(X)
# we need to skip the 3 footer rows
# skipfooter does not work with the default engine, 'c'
# so we need to explicitly set it to 'python'
df = pd.read_csv('international-airline-passengers.csv', engine='python', skipfooter=3)
# rename the columns because they are ridiculous
df.columns = ['month', 'num_passengers']
# plot the data so we know what it looks like
# plt.plot(df.num_passengers)
# plt.show()
# let's try with only the time series itself
series = df.num_passengers.as_matrix()
# series = (series - series.mean()) / series.std() # normalize the values so they have mean 0 and variance 1
series = series.astype(np.float32)
series = series - series.min()
series = series / series.max()
# let's see if we can use D past values to predict the next value
N = len(series)
for D in (2,3,4,5,6,7):
n = N - D
X = np.zeros((n, D))
for d in xrange(D):
X[:,d] = series[d:d+n]
Y = series[D:D+n]
print "series length:", n
Xtrain = X[:n/2]
Ytrain = Y[:n/2]
Xtest = X[n/2:]
Ytest = Y[n/2:]
model = ANN([200])
model.fit(Xtrain, Ytrain, activation=T.tanh)
print "train score:", model.score(Xtrain, Ytrain)
print "test score:", model.score(Xtest, Ytest)
# plot the prediction with true values
plt.plot(series)
train_series = np.empty(n)
train_series[:n/2] = model.predict(Xtrain)
train_series[n/2:] = np.nan
# prepend d nan's since the train series is only of size N - D
plt.plot(np.concatenate([np.full(d, np.nan), train_series]))
test_series = np.empty(n)
test_series[:n/2] = np.nan
test_series[n/2:] = model.predict(Xtest)
plt.plot(np.concatenate([np.full(d, np.nan), test_series]))
plt.show()
| ANN |
python | joke2k__faker | faker/providers/phone_number/hu_HU/__init__.py | {
"start": 49,
"end": 284
} | class ____(PhoneNumberProvider):
formats = (
"+36 ## ###-####",
"(06)##/###-####",
"(##)/###-####",
"##/###-####",
"##/### ####",
"06-#/### ####",
"06-##/### ####",
)
| Provider |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 328176,
"end": 332532
} | class ____(Response):
"""
Response of tasks.enqueue_many endpoint.
:param succeeded:
:type succeeded: Sequence[dict]
:param failed:
:type failed: Sequence[dict]
:param queue_watched: Returns Trueif there are workers or autscalers working
with the queue
:type queue_watched: bool
"""
_service = "tasks"
_action = "enqueue_many"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"failed": {
"items": {
"properties": {
"error": {
"description": "Error info",
"properties": {
"codes": {
"items": {"type": "integer"},
"type": "array",
},
"data": {
"additionalProperties": True,
"type": "object",
},
"msg": {"type": "string"},
},
"type": "object",
},
"id": {
"description": "ID of the failed entity",
"type": "string",
},
},
"type": "object",
},
"type": ["array", "null"],
},
"queue_watched": {
"description": "Returns Trueif there are workers or autscalers working with the queue",
"type": ["boolean", "null"],
},
"succeeded": {
"items": {
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": "object",
},
"id": {
"description": "ID of the succeeded entity",
"type": "string",
},
"queued": {
"description": "Indicates whether the task was queued",
"type": "boolean",
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": "integer",
},
},
"type": "object",
},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(self, succeeded=None, failed=None, queue_watched=None, **kwargs):
super(EnqueueManyResponse, self).__init__(**kwargs)
self.succeeded = succeeded
self.failed = failed
self.queue_watched = queue_watched
@schema_property("succeeded")
def succeeded(self):
return self._property_succeeded
@succeeded.setter
def succeeded(self, value):
if value is None:
self._property_succeeded = None
return
self.assert_isinstance(value, "succeeded", (list, tuple))
self.assert_isinstance(value, "succeeded", (dict,), is_array=True)
self._property_succeeded = value
@schema_property("failed")
def failed(self):
return self._property_failed
@failed.setter
def failed(self, value):
if value is None:
self._property_failed = None
return
self.assert_isinstance(value, "failed", (list, tuple))
self.assert_isinstance(value, "failed", (dict,), is_array=True)
self._property_failed = value
@schema_property("queue_watched")
def queue_watched(self):
return self._property_queue_watched
@queue_watched.setter
def queue_watched(self, value):
if value is None:
self._property_queue_watched = None
return
self.assert_isinstance(value, "queue_watched", (bool,))
self._property_queue_watched = value
| EnqueueManyResponse |
python | mlflow__mlflow | dev/clint/src/clint/linter.py | {
"start": 4012,
"end": 8682
} | class ____:
code: str
range: Range
def _get_indent(s: str) -> int:
return len(s) - len(s.lstrip())
_CODE_BLOCK_HEADER_REGEX = re.compile(r"^\.\.\s+code-block::\s*py(thon)?")
_CODE_BLOCK_OPTION_REGEX = re.compile(r"^:\w+:")
def _get_header_indent(s: str) -> int | None:
if _CODE_BLOCK_HEADER_REGEX.match(s.lstrip()):
return _get_indent(s)
return None
def _iter_code_blocks(s: str) -> Iterator[CodeBlock]:
code_block_range: Range | None = None
header_indent: int | None = None
code_lines: list[str] = []
line_iter = enumerate(s.splitlines())
while t := next(line_iter, None):
idx, line = t
if code_block_range:
indent = _get_indent(line)
# If we encounter a non-blank line with an indent less than the code block header
# we are done parsing the code block. Here's an example:
#
# .. code-block:: python
#
# print("hello") # indent > header_indent
# # blank
# <non-blank> # non-blank and indent <= header_indent
if line.strip() and (header_indent is not None) and indent <= header_indent:
code = textwrap.dedent("\n".join(code_lines))
yield CodeBlock(code=code, range=code_block_range)
code_block_range = None
code_lines.clear()
# It's possible that another code block follows the current one
header_indent = _get_header_indent(line)
continue
code_lines.append(line)
elif header_indent is not None:
# Advance the iterator to the code body
#
# .. code-block:: python
# :option: # we're here
# :another-option: # skip
# # skip
# import mlflow # stop here
# ...
while True:
if line.strip() and not _CODE_BLOCK_OPTION_REGEX.match(line.lstrip()):
# We are at the first line of the code block
code_lines.append(line)
break
if next_line := next(line_iter, None):
idx, line = next_line
code_block_range = Range(Position(idx, _get_indent(line)))
else:
header_indent = _get_header_indent(line)
# The docstring ends with a code block
if code_lines and code_block_range:
code = textwrap.dedent("\n".join(code_lines))
yield CodeBlock(code=code, range=code_block_range)
_MD_OPENING_FENCE_REGEX = re.compile(r"^(`{3,})\s*python\s*$")
def _iter_md_code_blocks(s: str) -> Iterator[CodeBlock]:
"""
Iterates over code blocks in a Markdown string.
"""
code_block_range: Range | None = None
code_lines: list[str] = []
closing_fence: str | None = None
line_iter = enumerate(s.splitlines())
while t := next(line_iter, None):
idx, line = t
if code_block_range:
if line.strip() == closing_fence:
code = textwrap.dedent("\n".join(code_lines))
yield CodeBlock(code=code, range=code_block_range)
code_block_range = None
code_lines.clear()
closing_fence = None
continue
code_lines.append(line)
elif m := _MD_OPENING_FENCE_REGEX.match(line.lstrip()):
closing_fence = m.group(1)
code_block_range = Range(Position(idx + 1, _get_indent(line)))
# Code block at EOF
if code_lines and code_block_range:
code = textwrap.dedent("\n".join(code_lines))
yield CodeBlock(code=code, range=code_block_range)
def _parse_docstring_args(docstring: str) -> list[str]:
args: list[str] = []
args_header_indent: int | None = None
first_arg_indent: int | None = None
arg_name_regex = re.compile(r"(\w+)")
for line in docstring.split("\n"):
if args_header_indent is not None:
indent = _get_indent(line)
# If we encounter a non-blank line with an indent less than the args header,
# we are done parsing the args section.
if 0 < indent <= args_header_indent:
break
if not args and first_arg_indent is None:
first_arg_indent = indent
if m := arg_name_regex.match(line[first_arg_indent:]):
args.append(m.group(1))
elif line.lstrip().startswith("Args:"):
args_header_indent = _get_indent(line)
return args
| CodeBlock |
python | Pylons__pyramid | src/pyramid/config/views.py | {
"start": 1922,
"end": 7493
} | class ____:
def __init__(self, name):
self.name = name
self.media_views = {}
self.views = []
self.accepts = []
def __discriminator__(self, context, request):
# used by introspection systems like so:
# view = adapters.lookup(....)
# view.__discriminator__(context, request) -> view's discriminator
# so that superdynamic systems can feed the discriminator to
# the introspection system to get info about it
view = self.match(context, request)
return view.__discriminator__(context, request)
def add(self, view, order, phash=None, accept=None, accept_order=None):
if phash is not None:
for i, (s, v, h) in enumerate(list(self.views)):
if phash == h:
self.views[i] = (order, view, phash)
return
if accept is None:
self.views.append((order, view, phash))
self.views.sort(key=operator.itemgetter(0))
else:
subset = self.media_views.setdefault(accept, [])
for i, (s, v, h) in enumerate(list(subset)):
if phash == h:
subset[i] = (order, view, phash)
return
else:
subset.append((order, view, phash))
subset.sort(key=operator.itemgetter(0))
# dedupe accepts and sort appropriately
accepts = set(self.accepts)
accepts.add(accept)
if accept_order:
accept_order = [v for _, v in accept_order.sorted()]
self.accepts = sort_accept_offers(accepts, accept_order)
def get_views(self, request):
if self.accepts and hasattr(request, 'accept'):
views = []
for offer, _ in request.accept.acceptable_offers(self.accepts):
views.extend(self.media_views[offer])
views.extend(self.views)
return views
return self.views
def match(self, context, request):
for order, view, phash in self.get_views(request):
if not hasattr(view, '__predicated__'):
return view
if view.__predicated__(context, request):
return view
raise PredicateMismatch(self.name)
def __permitted__(self, context, request):
view = self.match(context, request)
if hasattr(view, '__permitted__'):
return view.__permitted__(context, request)
return True
def __call_permissive__(self, context, request):
view = self.match(context, request)
view = getattr(view, '__call_permissive__', view)
return view(context, request)
def __call__(self, context, request):
for order, view, phash in self.get_views(request):
try:
return view(context, request)
except PredicateMismatch:
continue
raise PredicateMismatch(self.name)
def attr_wrapped_view(view, info):
accept, order, phash = (
info.options.get('accept', None),
getattr(info, 'order', MAX_ORDER),
getattr(info, 'phash', DEFAULT_PHASH),
)
# this is a little silly but we don't want to decorate the original
# function with attributes that indicate accept, order, and phash,
# so we use a wrapper
if (accept is None) and (order == MAX_ORDER) and (phash == DEFAULT_PHASH):
return view # defaults
def attr_view(context, request):
return view(context, request)
attr_view.__accept__ = accept
attr_view.__order__ = order
attr_view.__phash__ = phash
attr_view.__view_attr__ = info.options.get('attr')
attr_view.__permission__ = info.options.get('permission')
return attr_view
attr_wrapped_view.options = ('accept', 'attr', 'permission')
def predicated_view(view, info):
preds = info.predicates
if not preds:
return view
def predicate_wrapper(context, request):
for predicate in preds:
if not predicate(context, request):
view_name = getattr(view, '__name__', view)
raise PredicateMismatch(
'predicate mismatch for view %s (%s)'
% (view_name, predicate.text())
)
return view(context, request)
def checker(context, request):
return all(predicate(context, request) for predicate in preds)
predicate_wrapper.__predicated__ = checker
predicate_wrapper.__predicates__ = preds
return predicate_wrapper
def viewdefaults(wrapped):
"""Decorator for add_view-like methods which takes into account
__view_defaults__ attached to view it is passed. Not a documented API but
used by some external systems."""
def wrapper(self, *arg, **kw):
defaults = {}
if arg:
view = arg[0]
else:
view = kw.get('view')
view = self.maybe_dotted(view)
if inspect.isclass(view):
defaults = getattr(view, '__view_defaults__', {}).copy()
if '_backframes' not in kw:
kw['_backframes'] = 1 # for action_method
defaults.update(kw)
return wrapped(self, *arg, **defaults)
return functools.wraps(wrapped)(wrapper)
def combine_decorators(*decorators):
def decorated(view_callable):
# reversed() allows a more natural ordering in the api
for decorator in reversed(decorators):
view_callable = decorator(view_callable)
return view_callable
return decorated
| MultiView |
python | python-attrs__attrs | tests/test_slots.py | {
"start": 923,
"end": 9605
} | class ____:
x = attr.ib(validator=attr.validators.instance_of(int))
y = attr.ib()
def method(self):
return self.x
@classmethod
def classmethod(cls):
return "clsmethod"
@staticmethod
def staticmethod():
return "staticmethod"
def my_class(self):
return __class__
def my_super(self):
"""Just to test out the no-arg super."""
return super().__repr__()
def test_slots_being_used():
"""
The class is really using __slots__.
"""
non_slot_instance = C1(x=1, y="test")
slot_instance = C1Slots(x=1, y="test")
assert "__dict__" not in dir(slot_instance)
assert "__slots__" in dir(slot_instance)
assert "__dict__" in dir(non_slot_instance)
assert "__slots__" not in dir(non_slot_instance)
assert {"__weakref__", "x", "y"} == set(slot_instance.__slots__)
if has_pympler:
assert asizeof(slot_instance) < asizeof(non_slot_instance)
non_slot_instance.t = "test"
with pytest.raises(AttributeError):
slot_instance.t = "test"
assert 1 == non_slot_instance.method()
assert 1 == slot_instance.method()
assert attr.fields(C1Slots) == attr.fields(C1)
assert attr.asdict(slot_instance) == attr.asdict(non_slot_instance)
def test_slots_base_of_slotted():
"""
The (hopefully gc'ed) temporary base class of a slotted class contains a
reference to the slotted class.
"""
class Base:
pass
Slotted = attr.s(slots=True)(Base)
assert Slotted is Base.__attrs_base_of_slotted__()
def test_basic_attr_funcs():
"""
Comparison, `__eq__`, `__hash__`, `__repr__`, `attrs.asdict` work.
"""
a = C1Slots(x=1, y=2)
b = C1Slots(x=1, y=3)
a_ = C1Slots(x=1, y=2)
# Comparison.
assert b > a
assert a_ == a
# Hashing.
hash(b) # Just to assert it doesn't raise.
# Repr.
assert "C1Slots(x=1, y=2)" == repr(a)
assert {"x": 1, "y": 2} == attr.asdict(a)
def test_inheritance_from_nonslots():
"""
Inheritance from a non-slotted class works.
Note that a slotted class inheriting from an ordinary class loses most of
the benefits of slotted classes, but it should still work.
"""
@attr.s(slots=True, unsafe_hash=True)
class C2Slots(C1):
z = attr.ib()
c2 = C2Slots(x=1, y=2, z="test")
assert 1 == c2.x
assert 2 == c2.y
assert "test" == c2.z
c2.t = "test" # This will work, using the base class.
assert "test" == c2.t
assert 1 == c2.method()
assert "clsmethod" == c2.classmethod()
assert "staticmethod" == c2.staticmethod()
assert {"z"} == set(C2Slots.__slots__)
c3 = C2Slots(x=1, y=3, z="test")
assert c3 > c2
c2_ = C2Slots(x=1, y=2, z="test")
assert c2 == c2_
assert "C2Slots(x=1, y=2, z='test')" == repr(c2)
hash(c2) # Just to assert it doesn't raise.
assert {"x": 1, "y": 2, "z": "test"} == attr.asdict(c2)
def test_nonslots_these():
"""
Enhancing a dict class using 'these' works.
This will actually *replace* the class with another one, using slots.
"""
class SimpleOrdinaryClass:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def method(self):
return self.x
@classmethod
def classmethod(cls):
return "clsmethod"
@staticmethod
def staticmethod():
return "staticmethod"
C2Slots = attr.s(
these={"x": attr.ib(), "y": attr.ib(), "z": attr.ib()},
init=False,
slots=True,
unsafe_hash=True,
)(SimpleOrdinaryClass)
c2 = C2Slots(x=1, y=2, z="test")
assert 1 == c2.x
assert 2 == c2.y
assert "test" == c2.z
with pytest.raises(AttributeError):
c2.t = "test" # We have slots now.
assert 1 == c2.method()
assert "clsmethod" == c2.classmethod()
assert "staticmethod" == c2.staticmethod()
assert {"__weakref__", "x", "y", "z"} == set(C2Slots.__slots__)
c3 = C2Slots(x=1, y=3, z="test")
assert c3 > c2
c2_ = C2Slots(x=1, y=2, z="test")
assert c2 == c2_
assert "SimpleOrdinaryClass(x=1, y=2, z='test')" == repr(c2)
hash(c2) # Just to assert it doesn't raise.
assert {"x": 1, "y": 2, "z": "test"} == attr.asdict(c2)
def test_inheritance_from_slots():
"""
Inheriting from an attrs slotted class works.
"""
@attr.s(slots=True, unsafe_hash=True)
class C2Slots(C1Slots):
z = attr.ib()
@attr.s(slots=True, unsafe_hash=True)
class C2(C1):
z = attr.ib()
c2 = C2Slots(x=1, y=2, z="test")
assert 1 == c2.x
assert 2 == c2.y
assert "test" == c2.z
assert {"z"} == set(C2Slots.__slots__)
assert 1 == c2.method()
assert "clsmethod" == c2.classmethod()
assert "staticmethod" == c2.staticmethod()
with pytest.raises(AttributeError):
c2.t = "test"
non_slot_instance = C2(x=1, y=2, z="test")
if has_pympler:
assert asizeof(c2) < asizeof(non_slot_instance)
c3 = C2Slots(x=1, y=3, z="test")
assert c3 > c2
c2_ = C2Slots(x=1, y=2, z="test")
assert c2 == c2_
assert "C2Slots(x=1, y=2, z='test')" == repr(c2)
hash(c2) # Just to assert it doesn't raise.
assert {"x": 1, "y": 2, "z": "test"} == attr.asdict(c2)
def test_inheritance_from_slots_with_attribute_override():
"""
Inheriting from a slotted class doesn't re-create existing slots
"""
class HasXSlot:
__slots__ = ("x",)
@attr.s(slots=True, unsafe_hash=True)
class C2Slots(C1Slots):
# y re-defined here but it shouldn't get a slot
y = attr.ib()
z = attr.ib()
@attr.s(slots=True, unsafe_hash=True)
class NonAttrsChild(HasXSlot):
# Parent class has slot for "x" already, so we skip it
x = attr.ib()
y = attr.ib()
z = attr.ib()
c2 = C2Slots(1, 2, "test")
assert 1 == c2.x
assert 2 == c2.y
assert "test" == c2.z
assert {"z"} == set(C2Slots.__slots__)
na = NonAttrsChild(1, 2, "test")
assert 1 == na.x
assert 2 == na.y
assert "test" == na.z
assert {"__weakref__", "y", "z"} == set(NonAttrsChild.__slots__)
def test_inherited_slot_reuses_slot_descriptor():
"""
We reuse slot descriptor for an attr.ib defined in a slotted attr.s
"""
class HasXSlot:
__slots__ = ("x",)
class OverridesX(HasXSlot):
@property
def x(self):
return None
@attr.s(slots=True)
class Child(OverridesX):
x = attr.ib()
assert Child.x is not OverridesX.x
assert Child.x is HasXSlot.x
c = Child(1)
assert 1 == c.x
assert set() == set(Child.__slots__)
ox = OverridesX()
assert ox.x is None
def test_bare_inheritance_from_slots():
"""
Inheriting from a bare attrs slotted class works.
"""
@attr.s(
init=False,
eq=False,
order=False,
unsafe_hash=False,
repr=False,
slots=True,
)
class C1BareSlots:
x = attr.ib(validator=attr.validators.instance_of(int))
y = attr.ib()
def method(self):
return self.x
@classmethod
def classmethod(cls):
return "clsmethod"
@staticmethod
def staticmethod():
return "staticmethod"
@attr.s(init=False, eq=False, order=False, unsafe_hash=False, repr=False)
class C1Bare:
x = attr.ib(validator=attr.validators.instance_of(int))
y = attr.ib()
def method(self):
return self.x
@classmethod
def classmethod(cls):
return "clsmethod"
@staticmethod
def staticmethod():
return "staticmethod"
@attr.s(slots=True, unsafe_hash=True)
class C2Slots(C1BareSlots):
z = attr.ib()
@attr.s(slots=True, unsafe_hash=True)
class C2(C1Bare):
z = attr.ib()
c2 = C2Slots(x=1, y=2, z="test")
assert 1 == c2.x
assert 2 == c2.y
assert "test" == c2.z
assert 1 == c2.method()
assert "clsmethod" == c2.classmethod()
assert "staticmethod" == c2.staticmethod()
with pytest.raises(AttributeError):
c2.t = "test"
non_slot_instance = C2(x=1, y=2, z="test")
if has_pympler:
assert asizeof(c2) < asizeof(non_slot_instance)
c3 = C2Slots(x=1, y=3, z="test")
assert c3 > c2
c2_ = C2Slots(x=1, y=2, z="test")
assert c2 == c2_
assert "C2Slots(x=1, y=2, z='test')" == repr(c2)
hash(c2) # Just to assert it doesn't raise.
assert {"x": 1, "y": 2, "z": "test"} == attr.asdict(c2)
| C1Slots |
python | django__django | tests/m2m_recursive/models.py | {
"start": 1118,
"end": 1312
} | class ____(models.Model):
first = models.ForeignKey(Person, models.CASCADE)
second = models.ForeignKey(Person, models.CASCADE, related_name="+")
first_meet = models.DateField()
| Colleague |
python | django__django | django/db/models/fields/reverse_related.py | {
"start": 578,
"end": 8013
} | class ____(FieldCacheMixin):
"""
Used by ForeignObject to store information about the relation.
``_meta.get_fields()`` returns this class to provide access to the field
flags for the reverse relation.
"""
# Field flags
auto_created = True
concrete = False
editable = False
is_relation = True
# Reverse relations are always nullable (Django can't enforce that a
# foreign key on the related model points to this model).
null = True
empty_strings_allowed = False
def __init__(
self,
field,
to,
related_name=None,
related_query_name=None,
limit_choices_to=None,
parent_link=False,
on_delete=None,
):
self.field = field
self.model = to
self.related_name = related_name
self.related_query_name = related_query_name
self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
self.parent_link = parent_link
self.on_delete = on_delete
self.symmetrical = False
self.multiple = True
# Some of the following cached_properties can't be initialized in
# __init__ as the field doesn't have its model yet. Calling these methods
# before field.contribute_to_class() has been called will result in
# AttributeError
@cached_property
def hidden(self):
"""Should the related object be hidden?"""
return bool(self.related_name) and self.related_name[-1] == "+"
@cached_property
def name(self):
return self.field.related_query_name()
@property
def remote_field(self):
return self.field
@property
def target_field(self):
"""
When filtering against this relation, return the field on the remote
model against which the filtering should happen.
"""
target_fields = self.path_infos[-1].target_fields
if len(target_fields) > 1:
raise exceptions.FieldError(
"Can't use target_field for multicolumn relations."
)
return target_fields[0]
@cached_property
def related_model(self):
if not self.field.model:
raise AttributeError(
"This property can't be accessed before self.field.contribute_to_class "
"has been called."
)
return self.field.model
@cached_property
def many_to_many(self):
return self.field.many_to_many
@cached_property
def many_to_one(self):
return self.field.one_to_many
@cached_property
def one_to_many(self):
return self.field.many_to_one
@cached_property
def one_to_one(self):
return self.field.one_to_one
def get_lookup(self, lookup_name):
return self.field.get_lookup(lookup_name)
def get_lookups(self):
return self.field.get_lookups()
def get_transform(self, name):
return self.field.get_transform(name)
def get_internal_type(self):
return self.field.get_internal_type()
@property
def db_type(self):
return self.field.db_type
def __repr__(self):
return "<%s: %s.%s>" % (
type(self).__name__,
self.related_model._meta.app_label,
self.related_model._meta.model_name,
)
@property
def identity(self):
return (
self.field,
self.model,
self.related_name,
self.related_query_name,
make_hashable(self.limit_choices_to),
self.parent_link,
self.on_delete,
self.symmetrical,
self.multiple,
)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.identity == other.identity
def __hash__(self):
return hash(self.identity)
def __getstate__(self):
state = self.__dict__.copy()
# Delete the path_infos cached property because it can be recalculated
# at first invocation after deserialization. The attribute must be
# removed because subclasses like ManyToOneRel may have a PathInfo
# which contains an intermediate M2M table that's been dynamically
# created and doesn't exist in the .models module.
# This is a reverse relation, so there is no reverse_path_infos to
# delete.
state.pop("path_infos", None)
return state
def get_choices(
self,
include_blank=True,
blank_choice=BLANK_CHOICE_DASH,
limit_choices_to=None,
ordering=(),
):
"""
Return choices with a default blank choices included, for use
as <select> choices for this field.
Analog of django.db.models.fields.Field.get_choices(), provided
initially for utilization by RelatedFieldListFilter.
"""
limit_choices_to = limit_choices_to or self.limit_choices_to
qs = self.related_model._default_manager.complex_filter(limit_choices_to)
if ordering:
qs = qs.order_by(*ordering)
return (blank_choice if include_blank else []) + [(x.pk, str(x)) for x in qs]
def get_joining_fields(self):
return self.field.get_reverse_joining_fields()
def get_extra_restriction(self, alias, related_alias):
return self.field.get_extra_restriction(related_alias, alias)
def set_field_name(self):
"""
Set the related field's name, this is not available until later stages
of app loading, so set_field_name is called from
set_attributes_from_rel()
"""
# By default foreign object doesn't relate to any remote field (for
# example custom multicolumn joins currently have no remote field).
self.field_name = None
@cached_property
def accessor_name(self):
return self.get_accessor_name()
def get_accessor_name(self, model=None):
# This method encapsulates the logic that decides what name to give an
# accessor descriptor that retrieves related many-to-one or
# many-to-many objects. It uses the lowercased object_name + "_set",
# but this can be overridden with the "related_name" option. Due to
# backwards compatibility ModelForms need to be able to provide an
# alternate model. See BaseInlineFormSet.get_default_prefix().
opts = model._meta if model else self.related_model._meta
model = model or self.related_model
if self.multiple:
# If this is a symmetrical m2m relation on self, there is no
# reverse accessor.
if self.symmetrical and model == self.model:
return None
if self.related_name:
return self.related_name
return opts.model_name + ("_set" if self.multiple else "")
def get_path_info(self, filtered_relation=None):
if filtered_relation:
return self.field.get_reverse_path_info(filtered_relation)
else:
return self.field.reverse_path_infos
@cached_property
def path_infos(self):
return self.get_path_info()
@cached_property
def cache_name(self):
"""
Return the name of the cache key to use for storing an instance of the
forward model on the reverse model.
"""
return self.accessor_name
| ForeignObjectRel |
python | prabhupant__python-ds | data_structures/graphs/topological_sort.py | {
"start": 38,
"end": 671
} | class ____:
def __init__(self, vertices):
self.V = vertices
self.graph = defaultdict(list)
def add_edge(self, u, v):
self.graph[u].append(v)
def topo_sort_util(self, v, visited, stack):
visited[v] = True
for i in self.graph[v]:
if visited[i] == False:
self.topo_sort_util(i, visited, stack)
stack.insert(0, v)
def topo_sort(self):
visited = [False] * self.V
stack = []
for i in range(self.V):
if visited[i] == False:
self.topo_sort_util(i, visited, stack)
print(stack)
| Graph |
python | kamyu104__LeetCode-Solutions | Python/minimum-edge-weight-equilibrium-queries-in-a-tree.py | {
"start": 6160,
"end": 6892
} | class ____(object):
def minOperationsQueries(self, n, edges, queries):
"""
:type n: int
:type edges: List[List[int]]
:type queries: List[List[int]]
:rtype: List[int]
"""
adj = [[] for _ in xrange(n)]
for u, v, w in edges:
w -= 1
adj[u].append((v, w))
adj[v].append((u, w))
tree_infos = TreeInfos2(adj)
result = [0]*len(queries)
for i, (a, b) in enumerate(queries):
lca = tree_infos.lca(a, b)
result[i] = (tree_infos.D[a]+tree_infos.D[b]-2*tree_infos.D[lca])-max(tree_infos.CNT[a][w]+tree_infos.CNT[b][w]-2*tree_infos.CNT[lca][w] for w in xrange(MAX_W))
return result
| Solution2 |
python | falconry__falcon | tests/test_request_media.py | {
"start": 831,
"end": 1071
} | class ____:
def on_post(self, req, resp, **kwargs):
self.captured_req_media = req.media
# NOTE(kgriffs): Ensure that the media object is cached
assert self.captured_req_media is req.get_media()
| ResourceCachedMedia |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper.py | {
"start": 13162,
"end": 13353
} | class ____:
inner: sympy.Symbol
# the original symbolic expression represented by inner
inner_expr: sympy.Expr
def __str__(self):
return str(self.inner)
| SymbolicCallArg |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.