language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/event_manager/grouping/test_group_creation_lock.py | {
"start": 303,
"end": 3840
} | class ____:
@staticmethod
@contextlib.contextmanager
def atomic(*args, **kwds):
yield
def save_event(project_id: int, return_values: list[GroupInfo]) -> None:
event = Event(
project_id,
"11212012123120120415201309082013",
data={"timestamp": time.time()},
)
group_info = assign_event_to_group(
event=event,
job={"event_metadata": {}, "release": "dogpark", "event": event, "data": {}},
metric_tags={},
)
assert group_info is not None
return_values.append(group_info)
@django_db_all(transaction=True)
@pytest.mark.parametrize(
"lock_disabled",
[
# Group creation with transaction isolation (which is what powers the lock) disabled, to
# show that without it, multiple groups are created when there's a race condition while
# ingesting events with the same data. This variant exists so that we can ensure the test
# would detect a malfunctioning lock in principle, and does not just always pass because of
# low parallelism. In a sense this variant tests the efficacy of this test, not actual
# business logic.
#
# If this variant fails, CONCURRENCY needs to be increased or e.g. thread barriers need to
# be used to ensure data races. This does not seem to be necessary so far.
True,
# Regular group creation, in which the lock should be working
False,
],
ids=(" lock_disabled: True ", " lock_disabled: False "),
)
def test_group_creation_race(default_project, lock_disabled) -> None:
with contextlib.ExitStack() as ctx:
if lock_disabled:
# Disable transaction isolation just within event manager, but not in
# GroupHash.objects.create_or_update
ctx.enter_context(patch("sentry.event_manager.transaction", FakeTransactionModule))
# `select_for_update` cannot be used outside of transactions
ctx.enter_context(
patch("django.db.models.QuerySet.select_for_update", lambda self: self)
)
with (
patch(
"sentry.grouping.ingest.hashing._calculate_event_grouping",
return_value=(["pound sign", "octothorpe"], {}),
),
patch(
"sentry.event_manager._get_group_processing_kwargs",
return_value={"level": 10, "culprit": "", "data": {}},
),
patch("sentry.event_manager._materialize_metadata_many"),
):
return_values: list[GroupInfo] = []
threads = []
# Save the same event data in multiple threads. If the lock is working, only one new group
# should be created
for _ in range(CONCURRENCY):
thread = Thread(target=save_event, args=[default_project.id, return_values])
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
if not lock_disabled:
# assert only one new group was created
assert len({group_info.group.id for group_info in return_values}) == 1
assert sum(group_info.is_new for group_info in return_values) == 1
else:
# assert multiple new groups were created
assert 1 < len({group_info.group.id for group_info in return_values}) <= CONCURRENCY
assert 1 < sum(group_info.is_new for group_info in return_values) <= CONCURRENCY
| FakeTransactionModule |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 687054,
"end": 687624
} | class ____(sgqlc.types.Type):
"""Describes a License's conditions, permissions, and limitations"""
__schema__ = github_schema
__field_names__ = ("description", "key", "label")
description = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="description")
"""A description of the rule"""
key = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="key")
"""The machine-readable rule key"""
label = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="label")
"""The human-readable rule label"""
| LicenseRule |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_trace_item_attributes.py | {
"start": 67753,
"end": 69006
} | class ____(
OrganizationTraceItemAttributeValuesEndpointBaseTest, TraceMetricsTestCase
):
feature_flags = {"organizations:tracemetrics-enabled": True}
item_type = SupportedTraceItemType.TRACEMETRICS
def test_no_feature(self) -> None:
response = self.do_request(features={}, key="test.attribute")
assert response.status_code == 404, response.content
def test_attribute_values(self) -> None:
metrics = [
self.create_trace_metric(
metric_name="http.request.duration",
metric_value=123.45,
metric_type="distribution",
attributes={"http.method": "GET"},
),
self.create_trace_metric(
metric_name="http.request.duration",
metric_value=234.56,
metric_type="distribution",
attributes={"http.method": "POST"},
),
]
self.store_trace_metrics(metrics)
response = self.do_request(key="http.method")
assert response.status_code == 200
values = {item["value"] for item in response.data}
assert "GET" in values
assert "POST" in values
| OrganizationTraceItemAttributeValuesEndpointTraceMetricsTest |
python | huggingface__transformers | tests/models/owlvit/test_modeling_owlvit.py | {
"start": 22418,
"end": 33430
} | class ____(unittest.TestCase):
@slow
def test_inference(self):
model_name = "google/owlvit-base-patch32"
model = OwlViTModel.from_pretrained(model_name).to(torch_device)
processor = OwlViTProcessor.from_pretrained(model_name)
image = prepare_img()
inputs = processor(
text=[["a photo of a cat", "a photo of a dog"]],
images=image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
self.assertEqual(
outputs.logits_per_image.shape,
torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
)
self.assertEqual(
outputs.logits_per_text.shape,
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.tensor([[3.4613, 0.9403]], device=torch_device)
torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
@slow
def test_inference_interpolate_pos_encoding(self):
model_name = "google/owlvit-base-patch32"
model = OwlViTModel.from_pretrained(model_name).to(torch_device)
processor = OwlViTProcessor.from_pretrained(model_name)
processor.image_processor.size = {"height": 800, "width": 800}
image = prepare_img()
inputs = processor(
text=[["a photo of a cat", "a photo of a dog"]],
images=image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs, interpolate_pos_encoding=True)
# verify the logits
self.assertEqual(
outputs.logits_per_image.shape,
torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
)
self.assertEqual(
outputs.logits_per_text.shape,
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.tensor([[3.6278, 0.8861]], device=torch_device)
torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
expected_shape = torch.Size((1, 626, 768))
self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape)
# OwlViTForObjectDetection part.
model = OwlViTForObjectDetection.from_pretrained(model_name).to(torch_device)
with torch.no_grad():
outputs = model(**inputs, interpolate_pos_encoding=True)
num_queries = int((inputs.pixel_values.shape[-1] // model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4)))
expected_slice_boxes = torch.tensor(
[[0.0680, 0.0422, 0.1347], [0.2071, 0.0450, 0.4146], [0.2000, 0.0418, 0.3476]]
).to(torch_device)
torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
model = OwlViTForObjectDetection.from_pretrained(model_name).to(torch_device)
query_image = prepare_img()
inputs = processor(
images=image,
query_images=query_image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model.image_guided_detection(**inputs, interpolate_pos_encoding=True)
# No need to check the logits, we just check inference runs fine.
num_queries = int((inputs.pixel_values.shape[-1] / model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4)))
# Deactivate interpolate_pos_encoding on same model, and use default image size.
# Verify the dynamic change caused by the activation/deactivation of interpolate_pos_encoding of variables: (self.sqrt_num_patch_h, self.sqrt_num_patch_w), self.box_bias from (OwlViTForObjectDetection).
processor = OwlViTProcessor.from_pretrained(model_name)
image = prepare_img()
inputs = processor(
text=[["a photo of a cat", "a photo of a dog"]],
images=image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model(**inputs, interpolate_pos_encoding=False)
num_queries = int((inputs.pixel_values.shape[-1] // model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4)))
expected_default_box_bias = torch.tensor(
[
[-3.1332, -3.1332, -3.1332, -3.1332],
[-2.3968, -3.1332, -3.1332, -3.1332],
[-1.9452, -3.1332, -3.1332, -3.1332],
]
)
torch.testing.assert_close(model.box_bias[:3, :4], expected_default_box_bias, rtol=1e-4, atol=1e-4)
# Interpolate with any resolution size.
processor.image_processor.size = {"height": 1264, "width": 1024}
image = prepare_img()
inputs = processor(
text=[["a photo of a cat", "a photo of a dog"]],
images=image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model(**inputs, interpolate_pos_encoding=True)
num_queries = int(
(inputs.pixel_values.shape[-2] // model.config.vision_config.patch_size)
* (inputs.pixel_values.shape[-1] // model.config.vision_config.patch_size)
)
self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4)))
expected_slice_boxes = torch.tensor(
[[0.0499, 0.0301, 0.0983], [0.2244, 0.0365, 0.4663], [0.1387, 0.0314, 0.1859]]
).to(torch_device)
torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
query_image = prepare_img()
inputs = processor(
images=image,
query_images=query_image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model.image_guided_detection(**inputs, interpolate_pos_encoding=True)
# No need to check the logits, we just check inference runs fine.
num_queries = int(
(inputs.pixel_values.shape[-2] // model.config.vision_config.patch_size)
* (inputs.pixel_values.shape[-1] // model.config.vision_config.patch_size)
)
self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4)))
@slow
def test_inference_object_detection(self):
model_name = "google/owlvit-base-patch32"
model = OwlViTForObjectDetection.from_pretrained(model_name).to(torch_device)
processor = OwlViTProcessor.from_pretrained(model_name)
image = prepare_img()
text_labels = [["a photo of a cat", "a photo of a dog"]]
inputs = processor(
text=text_labels,
images=image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4)))
expected_slice_boxes = torch.tensor(
[[0.0691, 0.0445, 0.1373], [0.1592, 0.0456, 0.3192], [0.1632, 0.0423, 0.2478]]
).to(torch_device)
torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
# test post-processing
post_processed_output = processor.post_process_grounded_object_detection(outputs)
self.assertIsNone(post_processed_output[0]["text_labels"])
post_processed_output_with_text_labels = processor.post_process_grounded_object_detection(
outputs, text_labels=text_labels
)
objects_labels = post_processed_output_with_text_labels[0]["labels"].tolist()
self.assertListEqual(objects_labels, [0, 0])
objects_text_labels = post_processed_output_with_text_labels[0]["text_labels"]
self.assertIsNotNone(objects_text_labels)
self.assertListEqual(objects_text_labels, ["a photo of a cat", "a photo of a cat"])
@slow
def test_inference_one_shot_object_detection(self):
model_name = "google/owlvit-base-patch32"
model = OwlViTForObjectDetection.from_pretrained(model_name).to(torch_device)
processor = OwlViTProcessor.from_pretrained(model_name)
image = prepare_img()
query_image = prepare_img()
inputs = processor(
images=image,
query_images=query_image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model.image_guided_detection(**inputs)
num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4)))
expected_slice_boxes = torch.tensor(
[[0.0691, 0.0445, 0.1373], [0.1592, 0.0456, 0.3192], [0.1632, 0.0423, 0.2478]]
).to(torch_device)
torch.testing.assert_close(outputs.target_pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
@slow
@require_torch_accelerator
@require_torch_fp16
def test_inference_one_shot_object_detection_fp16(self):
model_name = "google/owlvit-base-patch32"
model = OwlViTForObjectDetection.from_pretrained(model_name, dtype=torch.float16).to(torch_device)
processor = OwlViTProcessor.from_pretrained(model_name)
image = prepare_img()
query_image = prepare_img()
inputs = processor(
images=image,
query_images=query_image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model.image_guided_detection(**inputs)
# No need to check the logits, we just check inference runs fine.
num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4)))
| OwlViTModelIntegrationTest |
python | kamyu104__LeetCode-Solutions | Python/confusing-number.py | {
"start": 35,
"end": 498
} | class ____(object):
def confusingNumber(self, N):
"""
:type N: int
:rtype: bool
"""
lookup = {"0":"0", "1":"1", "6":"9", "8":"8", "9":"6"}
S = str(N)
result = []
for i in xrange(len(S)):
if S[i] not in lookup:
return False
for i in xrange((len(S)+1)//2):
if S[i] != lookup[S[-(i+1)]]:
return True
return False
| Solution |
python | pypa__setuptools | setuptools/config/setupcfg.py | {
"start": 25836,
"end": 26588
} | class ____(SetuptoolsDeprecationWarning):
_SUMMARY = "Ambiguous requirement marker."
_DETAILS = """
One of the parsed requirements in `{field}` looks like a valid environment marker:
{req!r}
Please make sure that the configuration file is correct.
You can use dangling lines to avoid this problem.
"""
_SEE_DOCS = "userguide/declarative_config.html#opt-2"
# TODO: should we include due_date here? Initially introduced in 6 Aug 2022.
# Does this make sense with latest version of packaging?
@classmethod
def message(cls, **kw):
docs = f"https://setuptools.pypa.io/en/latest/{cls._SEE_DOCS}"
return cls._format(cls._SUMMARY, cls._DETAILS, see_url=docs, format_args=kw)
| _AmbiguousMarker |
python | langchain-ai__langchain | libs/text-splitters/langchain_text_splitters/konlpy.py | {
"start": 238,
"end": 1049
} | class ____(TextSplitter):
"""Splitting text using Konlpy package.
It is good for splitting Korean text.
"""
def __init__(
self,
separator: str = "\n\n",
**kwargs: Any,
) -> None:
"""Initialize the Konlpy text splitter."""
super().__init__(**kwargs)
self._separator = separator
if not _HAS_KONLPY:
msg = """
Konlpy is not installed, please install it with
`pip install konlpy`
"""
raise ImportError(msg)
self.kkma = konlpy.tag.Kkma()
def split_text(self, text: str) -> list[str]:
"""Split incoming text and return chunks."""
splits = self.kkma.sentences(text)
return self._merge_splits(splits, self._separator)
| KonlpyTextSplitter |
python | getsentry__sentry | src/sentry/integrations/msteams/client.py | {
"start": 7102,
"end": 7603
} | class ____(ApiClient):
integration_name = IntegrationProviderSlug.MSTEAMS.value
# 24 hour cache is recommended: https://docs.microsoft.com/en-us/azure/bot-service/rest-api/bot-framework-rest-connector-authentication?view=azure-bot-service-4.0#connector-to-bot-step-3
cache_time = 60 * 60 * 24
OPEN_ID_CONFIG_URL = "https://login.botframework.com/v1/.well-known/openidconfiguration"
def get_open_id_config(self):
return self.get_cached(self.OPEN_ID_CONFIG_URL)
| MsTeamsJwtClient |
python | scipy__scipy | scipy/special/tests/test_kolmogorov.py | {
"start": 4219,
"end": 8757
} | class ____:
def test_nan(self):
assert_(np.isnan(smirnovi(1, np.nan)))
def test_basic(self):
dataset = [(1, 0.4, 0.6),
(1, 0.6, 0.4),
(1, 0.99, 0.01),
(1, 0.01, 0.99),
(2, 0.125 * 0.125, 0.875),
(3, 0.125 * 0.125 * 0.125, 0.875),
(10, 1.0 / 16 ** 10, 1 - 1.0 / 16)]
dataset = np.asarray(dataset)
FuncData(
smirnovi, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
dataset[:, 1] = 1 - dataset[:, 1]
FuncData(
_smirnovci, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
def test_x_equals_0(self):
dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))]
dataset = np.asarray(dataset)
FuncData(
smirnovi, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
dataset[:, 1] = 1 - dataset[:, 1]
FuncData(
_smirnovci, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
def test_x_equals_1(self):
dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))]
dataset = np.asarray(dataset)
FuncData(
smirnovi, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
dataset[:, 1] = 1 - dataset[:, 1]
FuncData(
_smirnovci, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
def test_n_equals_1(self):
pp = np.linspace(0, 1, 101, endpoint=True)
# dataset = np.array([(1, p, 1-p) for p in pp])
dataset = np.column_stack([[1]*len(pp), pp, 1-pp])
FuncData(
smirnovi, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
dataset[:, 1] = 1 - dataset[:, 1]
FuncData(
_smirnovci, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
def test_n_equals_2(self):
x = np.linspace(0.5, 1, 101, endpoint=True)
p = np.power(1-x, 2)
n = np.array([2] * len(x))
dataset = np.column_stack([n, p, x])
FuncData(
smirnovi, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
dataset[:, 1] = 1 - dataset[:, 1]
FuncData(
_smirnovci, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
def test_n_equals_3(self):
x = np.linspace(0.7, 1, 31, endpoint=True)
p = np.power(1-x, 3)
n = np.array([3] * len(x))
dataset = np.column_stack([n, p, x])
FuncData(
smirnovi, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
dataset[:, 1] = 1 - dataset[:, 1]
FuncData(
_smirnovci, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
def test_round_trip(self):
def _sm_smi(n, p):
return smirnov(n, smirnovi(n, p))
def _smc_smci(n, p):
return _smirnovc(n, _smirnovci(n, p))
dataset = [(1, 0.4, 0.4),
(1, 0.6, 0.6),
(2, 0.875, 0.875),
(3, 0.875, 0.875),
(3, 0.125, 0.125),
(10, 0.999, 0.999),
(10, 0.0001, 0.0001)]
dataset = np.asarray(dataset)
FuncData(
_sm_smi, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
FuncData(
_smc_smci, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
def test_x_equals_0point5(self):
dataset = [(1, 0.5, 0.5),
(2, 0.5, 0.366025403784),
(2, 0.25, 0.5),
(3, 0.5, 0.297156508177),
(4, 0.5, 0.255520481121),
(5, 0.5, 0.234559536069),
(6, 0.5, 0.21715965898),
(7, 0.5, 0.202722580034),
(8, 0.5, 0.190621765256),
(9, 0.5, 0.180363501362),
(10, 0.5, 0.17157867006)]
dataset = np.asarray(dataset)
FuncData(
smirnovi, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
dataset[:, 1] = 1 - dataset[:, 1]
FuncData(
_smirnovci, dataset, (0, 1), 2, rtol=_rtol
).check(dtypes=[int, float, float])
| TestSmirnovi |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/storage_tests/test_partition_status_cache.py | {
"start": 132,
"end": 321
} | class ____(TestPartitionStatusCache):
@pytest.fixture
def instance(self):
with dg.instance_for_test() as the_instance:
yield the_instance
| TestSqlPartitionStatusCache |
python | pytorch__pytorch | torch/nn/modules/padding.py | {
"start": 30090,
"end": 31660
} | class ____(ConstantPad3d):
r"""Pads the input tensor boundaries with zero.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
padding (int, tuple): the size of the padding. If is `int`, uses the same
padding in all boundaries. If a 6-`tuple`, uses
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
:math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
:math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
:math:`(C, D_{out}, H_{out}, W_{out})`, where
:math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> m = nn.ZeroPad3d(3)
>>> input = torch.randn(16, 3, 10, 20, 30)
>>> output = m(input)
>>> # using different paddings for different sides
>>> m = nn.ZeroPad3d((3, 3, 6, 6, 0, 1))
>>> output = m(input)
"""
padding: tuple[int, int, int, int, int, int]
def __init__(self, padding: _size_6_t) -> None:
super().__init__(padding, 0.0)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
return f"{self.padding}"
| ZeroPad3d |
python | getsentry__sentry | src/sentry/models/grouplink.py | {
"start": 1080,
"end": 2639
} | class ____(Model):
"""
Link a group with an external resource like a commit, issue, or pull request
"""
__relocation_scope__ = RelocationScope.Excluded
class Relationship:
unknown = 0
resolves = 1
references = 2
class LinkedType:
unknown = 0
commit = 1
pull_request = 2
issue = 3
group = FlexibleForeignKey("sentry.Group", db_constraint=False, db_index=False)
project = FlexibleForeignKey("sentry.Project", db_constraint=False, db_index=True)
linked_type = BoundedPositiveIntegerField(
default=LinkedType.commit,
choices=(
(LinkedType.commit, _("Commit")),
(LinkedType.pull_request, _("Pull Request")),
(LinkedType.issue, _("Tracker Issue")),
),
)
linked_id = BoundedBigIntegerField()
relationship = BoundedPositiveIntegerField(
default=Relationship.references,
choices=((Relationship.resolves, _("Resolves")), (Relationship.references, _("Linked"))),
)
data = models.JSONField(default=dict)
datetime = models.DateTimeField(default=timezone.now, db_index=True)
objects: ClassVar[GroupLinkManager] = GroupLinkManager()
class Meta:
app_label = "sentry"
db_table = "sentry_grouplink"
unique_together = (("group", "linked_type", "linked_id"),)
indexes = [models.Index(fields=["project", "linked_id", "linked_type", "group"])]
__repr__ = sane_repr("group_id", "linked_type", "linked_id", "relationship", "datetime")
| GroupLink |
python | getsentry__sentry | src/sentry/codecov/endpoints/repository_tokens/serializers.py | {
"start": 189,
"end": 409
} | class ____(serializers.Serializer):
"""
Serializer for individual repository nodes from GraphQL response
"""
name = serializers.CharField()
token = serializers.CharField()
| RepositoryTokenNodeSerializer |
python | huggingface__transformers | src/transformers/models/clip/modeling_clip.py | {
"start": 2627,
"end": 3327
} | class ____(ModelOutput):
r"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
"""
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for text model's outputs that also contains a pooling of the last hidden states.
"""
)
| CLIPVisionModelOutput |
python | ray-project__ray | python/ray/exceptions.py | {
"start": 22892,
"end": 23158
} | class ____(RayError):
"""Indicates an error in the underlying RPC system."""
def __init__(self, message, rpc_code=None):
self.message = message
self.rpc_code = rpc_code
def __str__(self):
return self.message
@DeveloperAPI
| RpcError |
python | astropy__astropy | astropy/nddata/nduncertainty.py | {
"start": 2802,
"end": 3015
} | class ____(Exception):
"""This exception should be used to indicate that an uncertainty instance
has not been associated with a parent `~astropy.nddata.NDData` object.
"""
| MissingDataAssociationException |
python | tensorflow__tensorflow | tensorflow/python/autograph/core/config_lib.py | {
"start": 1456,
"end": 1731
} | class ____(Rule):
"""Indicates that this module should be converted."""
def __str__(self):
return 'Convert rule for {}'.format(self._prefix)
def get_action(self, module):
if self.matches(module.__name__):
return Action.CONVERT
return Action.NONE
| Convert |
python | sympy__sympy | sympy/stats/frv.py | {
"start": 5342,
"end": 6513
} | class ____(Distribution, NamedArgsMixin):
def __new__(cls, *args):
args = list(map(sympify, args))
return Basic.__new__(cls, *args)
@staticmethod
def check(*args):
pass
@property # type: ignore
@cacheit
def dict(self):
if self.is_symbolic:
return Density(self)
return {k: self.pmf(k) for k in self.set}
def pmf(self, *args): # to be overridden by specific distribution
raise NotImplementedError()
@property
def set(self): # to be overridden by specific distribution
raise NotImplementedError()
values = property(lambda self: self.dict.values)
items = property(lambda self: self.dict.items)
is_symbolic = property(lambda self: False)
__iter__ = property(lambda self: self.dict.__iter__)
__getitem__ = property(lambda self: self.dict.__getitem__)
def __call__(self, *args):
return self.pmf(*args)
def __contains__(self, other):
return other in self.set
#=============================================
#========= Probability Space ===============
#=============================================
| SingleFiniteDistribution |
python | pyca__cryptography | tests/x509/test_ocsp.py | {
"start": 43296,
"end": 57707
} | class ____:
def test_bad_response(self):
with pytest.raises(ValueError):
ocsp.load_der_ocsp_response(b"invalid")
def test_load_response(self):
resp = _load_data(
os.path.join("x509", "ocsp", "resp-sha256.der"),
ocsp.load_der_ocsp_response,
)
issuer = _load_cert(
os.path.join("x509", "letsencryptx3.pem"),
x509.load_pem_x509_certificate,
)
assert isinstance(resp, ocsp.OCSPResponse)
assert resp.response_status == ocsp.OCSPResponseStatus.SUCCESSFUL
assert (
resp.signature_algorithm_oid
== x509.SignatureAlgorithmOID.RSA_WITH_SHA256
)
assert isinstance(resp.signature_hash_algorithm, hashes.SHA256)
assert resp.signature == base64.b64decode(
b"I9KUlyLV/2LbNCVu1BQphxdNlU/jBzXsPYVscPjW5E93pCrSO84GkIWoOJtqsnt"
b"78DLcQPnF3W24NXGzSGKlSWfXIsyoXCxnBm0mIbD5ZMnKyXEnqSR33Z9He/A+ML"
b"A8gbrDUipGNPosesenkKUnOtFIzEGv29hV5E6AMP2ORPVsVlTAZegPJFbbVIWc0"
b"rZGFCXKxijDxtUtgWzBhpBAI50JbPHi+IVuaOe4aDJLYgZ0BIBNa6bDI+rScyoy"
b"5U0DToV7SZn6CoJ3U19X7BHdYn6TLX0xi43eXuzBGzdHnSzmsc7r/DvkAKJm3vb"
b"dVECXqe/gFlXJUBcZ25jhs70MUA=="
)
assert resp.tbs_response_bytes == base64.b64decode(
b"MIHWoUwwSjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUxldCdzIEVuY3J5cHQxIzA"
b"hBgNVBAMTGkxldCdzIEVuY3J5cHQgQXV0aG9yaXR5IFgzGA8yMDE4MDgzMDExMT"
b"UwMFowdTBzMEswCQYFKw4DAhoFAAQUfuZq53Kas/z4oiBkbBahLWBxCF0EFKhKa"
b"mMEfd265tE5t6ZFZe/zqOyhAhIDHHh6fckClQB7xfIiCztSevCAABgPMjAxODA4"
b"MzAxMTAwMDBaoBEYDzIwMTgwOTA2MTEwMDAwWg=="
)
public_key = issuer.public_key()
assert isinstance(public_key, rsa.RSAPublicKey)
public_key.verify(
resp.signature,
resp.tbs_response_bytes,
PKCS1v15(),
resp.signature_hash_algorithm,
)
assert resp.certificates == []
assert resp.responder_key_hash is None
assert resp.responder_name == issuer.subject
with pytest.warns(utils.DeprecatedIn43):
assert resp.produced_at == datetime.datetime(2018, 8, 30, 11, 15)
assert resp.produced_at_utc == datetime.datetime(
2018, 8, 30, 11, 15, tzinfo=datetime.timezone.utc
)
assert resp.certificate_status == ocsp.OCSPCertStatus.GOOD
assert resp.revocation_reason is None
_check_ocsp_response_times(
resp,
this_update=datetime.datetime(2018, 8, 30, 11, 0),
next_update=datetime.datetime(2018, 9, 6, 11, 0),
revocation_time=None,
)
assert resp.issuer_key_hash == (
b"\xa8Jjc\x04}\xdd\xba\xe6\xd19\xb7\xa6Ee\xef\xf3\xa8\xec\xa1"
)
assert resp.issuer_name_hash == (
b"~\xe6j\xe7r\x9a\xb3\xfc\xf8\xa2 dl\x16\xa1-`q\x08]"
)
assert isinstance(resp.hash_algorithm, hashes.SHA1)
assert resp.serial_number == 271024907440004808294641238224534273948400
assert len(resp.extensions) == 0
def test_load_multi_valued_response(self):
resp = _load_data(
os.path.join("x509", "ocsp", "ocsp-army.deps.mil-resp.der"),
ocsp.load_der_ocsp_response,
)
with pytest.raises(ValueError):
resp.serial_number
assert isinstance(next(resp.responses), ocsp.OCSPSingleResponse)
assert len(list(resp.responses)) == 20
def test_multi_valued_responses(self):
req_valid = _load_data(
os.path.join("x509", "ocsp", "ocsp-army.valid-req.der"),
ocsp.load_der_ocsp_request,
)
req_revoked = _load_data(
os.path.join("x509", "ocsp", "ocsp-army.revoked-req.der"),
ocsp.load_der_ocsp_request,
)
req_irrelevant = _load_data(
os.path.join("x509", "ocsp", "ocsp-army.inapplicable-req.der"),
ocsp.load_der_ocsp_request,
)
resp = _load_data(
os.path.join("x509", "ocsp", "ocsp-army.deps.mil-resp.der"),
ocsp.load_der_ocsp_response,
)
for elem in resp.responses:
serial = elem.serial_number
assert req_irrelevant.serial_number != serial
if req_valid.serial_number == serial:
assert elem.issuer_key_hash == req_valid.issuer_key_hash
assert elem.issuer_name_hash == req_valid.issuer_name_hash
assert (
elem.hash_algorithm.name == req_valid.hash_algorithm.name
)
assert elem.certificate_status == ocsp.OCSPCertStatus.GOOD
with pytest.warns(utils.DeprecatedIn43):
assert elem.this_update == datetime.datetime(
2020, 2, 22, 0, 0
)
assert elem.this_update_utc == datetime.datetime(
2020, 2, 22, 0, 0, tzinfo=datetime.timezone.utc
)
with pytest.warns(utils.DeprecatedIn43):
assert elem.next_update == datetime.datetime(
2020, 2, 29, 1, 0
)
assert elem.next_update_utc == datetime.datetime(
2020, 2, 29, 1, 0, tzinfo=datetime.timezone.utc
)
elif req_revoked.serial_number == serial:
assert elem.certificate_status == ocsp.OCSPCertStatus.REVOKED
assert (
elem.revocation_reason
== x509.ReasonFlags.cessation_of_operation
)
with pytest.warns(utils.DeprecatedIn43):
assert elem.revocation_time == datetime.datetime(
2018, 5, 30, 14, 1, 39
)
assert elem.revocation_time_utc == datetime.datetime(
2018, 5, 30, 14, 1, 39, tzinfo=datetime.timezone.utc
)
def test_load_unauthorized(self):
resp = _load_data(
os.path.join("x509", "ocsp", "resp-unauthorized.der"),
ocsp.load_der_ocsp_response,
)
assert resp.response_status == ocsp.OCSPResponseStatus.UNAUTHORIZED
with pytest.raises(ValueError):
resp.signature_algorithm_oid
with pytest.raises(ValueError):
resp.signature_hash_algorithm
with pytest.raises(ValueError):
resp.signature
with pytest.raises(ValueError):
resp.tbs_response_bytes
with pytest.raises(ValueError):
resp.certificates
with pytest.raises(ValueError):
resp.responder_key_hash
with pytest.raises(ValueError):
resp.responder_name
with pytest.raises(ValueError), pytest.warns(utils.DeprecatedIn43):
resp.produced_at
with pytest.raises(ValueError):
resp.produced_at_utc
with pytest.raises(ValueError):
resp.certificate_status
with pytest.raises(ValueError), pytest.warns(utils.DeprecatedIn43):
resp.revocation_time
with pytest.raises(ValueError):
resp.revocation_time_utc
with pytest.raises(ValueError):
resp.revocation_reason
with pytest.raises(ValueError), pytest.warns(utils.DeprecatedIn43):
resp.this_update
with pytest.raises(ValueError):
resp.this_update_utc
with pytest.raises(ValueError), pytest.warns(utils.DeprecatedIn43):
resp.next_update
with pytest.raises(ValueError):
resp.next_update_utc
with pytest.raises(ValueError):
resp.issuer_key_hash
with pytest.raises(ValueError):
resp.issuer_name_hash
with pytest.raises(ValueError):
resp.hash_algorithm
with pytest.raises(ValueError):
resp.serial_number
with pytest.raises(ValueError):
resp.extensions
def test_load_revoked(self):
resp = _load_data(
os.path.join("x509", "ocsp", "resp-revoked.der"),
ocsp.load_der_ocsp_response,
)
assert resp.certificate_status == ocsp.OCSPCertStatus.REVOKED
with pytest.warns(utils.DeprecatedIn43):
assert resp.revocation_time == datetime.datetime(
2016, 9, 2, 21, 28, 48
)
assert resp.revocation_time_utc == datetime.datetime(
2016, 9, 2, 21, 28, 48, tzinfo=datetime.timezone.utc
)
assert resp.revocation_reason is None
def test_load_delegate_unknown_cert(self):
resp = _load_data(
os.path.join("x509", "ocsp", "resp-delegate-unknown-cert.der"),
ocsp.load_der_ocsp_response,
)
assert len(resp.certificates) == 1
assert isinstance(resp.certificates[0], x509.Certificate)
assert resp.certificate_status == ocsp.OCSPCertStatus.UNKNOWN
def test_load_invalid_signature_oid(self):
resp = _load_data(
os.path.join("x509", "ocsp", "resp-invalid-signature-oid.der"),
ocsp.load_der_ocsp_response,
)
assert resp.signature_algorithm_oid == x509.ObjectIdentifier(
"1.2.840.113549.1.1.2"
)
with raises_unsupported_algorithm(None):
resp.signature_hash_algorithm
def test_unknown_hash_algorithm(self):
resp = _load_data(
os.path.join("x509", "ocsp", "resp-unknown-hash-alg.der"),
ocsp.load_der_ocsp_response,
)
with raises_unsupported_algorithm(None):
resp.hash_algorithm
def test_load_responder_key_hash(self):
resp = _load_data(
os.path.join("x509", "ocsp", "resp-responder-key-hash.der"),
ocsp.load_der_ocsp_response,
)
assert resp.responder_name is None
assert resp.responder_key_hash == (
b"\x0f\x80a\x1c\x821a\xd5/(\xe7\x8dF8\xb4,\xe1\xc6\xd9\xe2"
)
def test_load_revoked_reason(self):
resp = _load_data(
os.path.join("x509", "ocsp", "resp-revoked-reason.der"),
ocsp.load_der_ocsp_response,
)
assert resp.revocation_reason is x509.ReasonFlags.superseded
def test_load_revoked_no_next_update(self):
resp = _load_data(
os.path.join("x509", "ocsp", "resp-revoked-no-next-update.der"),
ocsp.load_der_ocsp_response,
)
assert resp.serial_number == 16160
with pytest.warns(utils.DeprecatedIn43):
assert resp.next_update is None
assert resp.next_update_utc is None
def test_response_extensions(self):
resp = _load_data(
os.path.join("x509", "ocsp", "resp-revoked-reason.der"),
ocsp.load_der_ocsp_response,
)
assert len(resp.extensions) == 1
ext = resp.extensions[0]
assert ext.critical is False
assert ext.value == x509.OCSPNonce(
b'5\x957\x9fa\x03\x83\x87\x89rW\x8f\xae\x99\xf7"'
)
def test_response_unknown_extension(self):
resp = _load_data(
os.path.join("x509", "ocsp", "resp-unknown-extension.der"),
ocsp.load_der_ocsp_response,
)
assert len(resp.extensions) == 1
ext = resp.extensions[0]
assert ext.critical is False
assert ext.value == x509.UnrecognizedExtension(
x509.ObjectIdentifier("1.3.6.1.5.5.7.48.1.2.200"),
b'\x04\x105\x957\x9fa\x03\x83\x87\x89rW\x8f\xae\x99\xf7"',
)
def test_serialize_reponse(self):
resp_bytes = load_vectors_from_file(
filename=os.path.join("x509", "ocsp", "resp-revoked.der"),
loader=lambda data: data.read(),
mode="rb",
)
resp = ocsp.load_der_ocsp_response(resp_bytes)
assert resp.public_bytes(serialization.Encoding.DER) == resp_bytes
def test_invalid_serialize_encoding(self):
resp = _load_data(
os.path.join("x509", "ocsp", "resp-revoked.der"),
ocsp.load_der_ocsp_response,
)
with pytest.raises(ValueError):
resp.public_bytes("invalid")
with pytest.raises(ValueError):
resp.public_bytes(serialization.Encoding.PEM)
def test_single_extensions_sct(self, backend):
resp = _load_data(
os.path.join("x509", "ocsp", "resp-sct-extension.der"),
ocsp.load_der_ocsp_response,
)
assert len(resp.single_extensions) == 1
ext = resp.single_extensions[0]
assert ext.oid == x509.ObjectIdentifier("1.3.6.1.4.1.11129.2.4.5")
assert len(ext.value) == 4
log_ids = [base64.b64encode(sct.log_id) for sct in ext.value]
assert log_ids == [
b"RJRlLrDuzq/EQAfYqP4owNrmgr7YyzG1P9MzlrW2gag=",
b"b1N2rDHwMRnYmQCkURX/dxUcEdkCwQApBo2yCJo32RM=",
b"u9nfvB+KcbWTlCOXqpJ7RzhXlQqrUugakJZkNo4e0YU=",
b"7ku9t3XOYLrhQmkfq+GeZqMPfl+wctiDAMR7iXqo/cs=",
]
def test_single_extensions(self, backend):
resp = _load_data(
os.path.join("x509", "ocsp", "resp-single-extension-reason.der"),
ocsp.load_der_ocsp_response,
)
assert len(resp.single_extensions) == 1
ext = resp.single_extensions[0]
assert ext.oid == x509.CRLReason.oid
assert ext.value == x509.CRLReason(x509.ReasonFlags.unspecified)
def test_unknown_response_type(self):
with pytest.raises(ValueError):
_load_data(
os.path.join(
"x509", "ocsp", "resp-response-type-unknown-oid.der"
),
ocsp.load_der_ocsp_response,
)
def test_response_bytes_absent(self):
with pytest.raises(ValueError):
_load_data(
os.path.join(
"x509", "ocsp", "resp-successful-no-response-bytes.der"
),
ocsp.load_der_ocsp_response,
)
def test_unknown_response_status(self):
with pytest.raises(ValueError):
_load_data(
os.path.join(
"x509", "ocsp", "resp-unknown-response-status.der"
),
ocsp.load_der_ocsp_response,
)
| TestOCSPResponse |
python | pandas-dev__pandas | pandas/tests/series/methods/test_argsort.py | {
"start": 125,
"end": 2539
} | class ____:
def test_argsort_axis(self):
# GH#54257
ser = Series(range(3))
msg = "No axis named 2 for object type Series"
with pytest.raises(ValueError, match=msg):
ser.argsort(axis=2)
def test_argsort_numpy(self, datetime_series):
ser = datetime_series
res = np.argsort(ser).values
expected = np.argsort(np.array(ser))
tm.assert_numpy_array_equal(res, expected)
def test_argsort_numpy_missing(self):
data = [0.1, np.nan, 0.2, np.nan, 0.3]
ser = Series(data)
result = np.argsort(ser)
expected = np.argsort(np.array(data))
tm.assert_numpy_array_equal(result.values, expected)
def test_argsort(self, datetime_series):
argsorted = datetime_series.argsort()
assert issubclass(argsorted.dtype.type, np.integer)
def test_argsort_dt64(self, unit):
# GH#2967 (introduced bug in 0.11-dev I think)
ser = Series(
[Timestamp(f"201301{i:02d}") for i in range(1, 6)], dtype=f"M8[{unit}]"
)
assert ser.dtype == f"datetime64[{unit}]"
shifted = ser.shift(-1)
assert shifted.dtype == f"datetime64[{unit}]"
assert isna(shifted[4])
result = ser.argsort()
expected = Series(range(5), dtype=np.intp)
tm.assert_series_equal(result, expected)
result = shifted.argsort()
expected = Series(list(range(4)) + [4], dtype=np.intp)
tm.assert_series_equal(result, expected)
def test_argsort_stable(self):
ser = Series(np.random.default_rng(2).integers(0, 100, size=10000))
mindexer = ser.argsort(kind="mergesort")
qindexer = ser.argsort()
mexpected = np.argsort(ser.values, kind="mergesort")
qexpected = np.argsort(ser.values, kind="quicksort")
tm.assert_series_equal(mindexer.astype(np.intp), Series(mexpected))
tm.assert_series_equal(qindexer.astype(np.intp), Series(qexpected))
msg = (
r"ndarray Expected type <class 'numpy\.ndarray'>, "
r"found <class 'pandas\.Series'> instead"
)
with pytest.raises(AssertionError, match=msg):
tm.assert_numpy_array_equal(qindexer, mindexer)
def test_argsort_preserve_name(self, datetime_series):
result = datetime_series.argsort()
assert result.name == datetime_series.name
| TestSeriesArgsort |
python | run-llama__llama_index | llama-index-core/llama_index/core/llama_dataset/base.py | {
"start": 656,
"end": 824
} | class ____(str, Enum):
"""The kinds of rag data examples."""
HUMAN = "human"
AI = "ai"
def __str__(self) -> str:
return self.value
| CreatedByType |
python | huggingface__transformers | src/transformers/models/dots1/modeling_dots1.py | {
"start": 20704,
"end": 21702
} | class ____(PreTrainedModel):
config: Dots1Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Dots1DecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Dots1DecoderLayer,
"attentions": Dots1Attention,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Dots1TopkRouter):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
elif isinstance(module, Dots1NaiveMoe):
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
@auto_docstring
| Dots1PreTrainedModel |
python | great-expectations__great_expectations | tests/integration/fixtures/partition_and_sample_data/sampler_test_cases_and_fixtures.py | {
"start": 157,
"end": 785
} | class ____:
def __init__(self, test_df: pd.DataFrame, test_column_name: str):
self._test_df = test_df
self._test_column_name = test_column_name
@property
def test_df(self):
return self._test_df
@property
def test_column_name(self):
return self._test_column_name
def first_n_rows(self, n: int) -> pd.DataFrame:
"""Return first n rows of the test df.
Args:
n: Number of rows to include.
Returns:
The first n rows of the loaded test dataframe.
"""
return self.test_df.head(n=n)
@dataclass
| SamplerTaxiTestData |
python | getsentry__sentry | src/sentry/similarity/features.py | {
"start": 920,
"end": 1215
} | class ____:
def __init__(self, function):
self.function = function
def extract(self, event):
try:
interface = event.interfaces["logentry"]
except KeyError:
raise InterfaceDoesNotExist()
return self.function(interface)
| MessageFeature |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 13158,
"end": 13229
} | class ____(nodes.Part, nodes.TextElement):
"""Deprecated."""
| centered |
python | tensorflow__tensorflow | tensorflow/compiler/tests/image_ops_test.py | {
"start": 26334,
"end": 29872
} | class ____(parameterized.TestCase, xla_test.XLATestCase):
def _assertForwardOpMatchesExpected(self,
image_np,
target_shape,
expected=None,
large_tolerance=False,
align_corners=True):
if expected is None:
self.fail("expected must be specified")
with self.session() as sess, self.test_scope():
image = array_ops.placeholder(image_np.dtype)
resized = gen_image_ops.resize_bilinear(
image, target_shape, align_corners=align_corners)
out = sess.run(resized, {image: image_np[np.newaxis, :, :, np.newaxis]})
if large_tolerance:
self.assertAllClose(
expected[np.newaxis, :, :, np.newaxis], out, rtol=0.1, atol=0.01)
else:
self.assertAllClose(expected[np.newaxis, :, :, np.newaxis], out)
@parameterized.named_parameters(
[("1x2To3x3", 1, 2, 3, 3), ("2x2To1x1", 2, 2, 1, 1),
("2x2To3x3", 2, 2, 3, 3), ("3x3To2x2", 3, 3, 2, 2),
("4x4To3x3", 4, 4, 3, 3), ("3x3To9x9", 3, 3, 9, 9),
("4x4To8x8", 4, 4, 8, 8), ("8x8To16x16", 8, 8, 16, 16),
("64x64To512x512", 64, 64, 512, 512),
("80x80To512x512", 80, 80, 512, 512),
("96x96To512x512", 96, 96, 512, 512),
("112x112To512x512", 112, 112, 512, 512),
("256x48To2048x384", 256, 48, 2048, 384),
("320x60To2048x384", 320, 60, 2048, 384),
("448x84To2048x384", 448, 84, 2048, 384),
("69x69To545x545", 69, 69, 545, 545),
("86x86To545x545", 86, 86, 545, 545),
("103x103To545x545", 103, 103, 545, 545),
("120x120To545x545", 120, 120, 545, 545),
("57x57To456x456", 57, 57, 456, 456),
("72x72To456x456", 72, 72, 456, 456),
("86x86To456x456", 86, 86, 456, 456),
("100x100To456x456", 100, 100, 456, 456),
("64x64To224x224", 64, 64, 224, 224),
("128x128To224x224", 128, 128, 224, 224),
("256x256To224x224", 256, 256, 224, 224),
("512x512To224x224", 512, 512, 224, 224),
("64x64To299x299", 64, 64, 299, 299),
("128x128To299x299", 128, 128, 299, 299),
("256x256To299x299", 256, 256, 299, 299),
("512x512To299x299", 512, 512, 299, 299),
("224x224To224x224", 224, 224, 224, 224)] +
# On windows, initialization of the following or any larger np.arrays
# where we set the dtype explicitly fails with:
# TypeError: expected number, got int
([] if os.name == "nt" else [("224x224To224x224-bfloat", 224, 224, 224,
224, dtypes.bfloat16.as_numpy_dtype)]),
# This test is disabled because it is very slow. It is slow because
# 383 is prime, 383 and 2047 are coprime, and 2048 is large.
# ("Disabled_384x72To2048x384", 384, 72, 2048, 384),
)
def test(self, src_y, src_x, dst_y, dst_x, dtype=np.float32):
max_y = max(src_y - 1, 1) * (dst_y - 1) + 1
max_x = max(src_x - 1, 1) * (dst_x - 1) + 1
input_data = [
range(y * max_x, (y + 1) * max_x, max(dst_x - 1, 1))
for y in range(0, max_y, max(dst_y - 1, 1))
]
result = [
range(y * max_x, (y + 1) * max_x, max(src_x - 1, 1))
for y in range(0, max_y, max(src_y - 1, 1))
]
self._assertForwardOpMatchesExpected(
np.array(input_data, dtype=dtype), [dst_y, dst_x],
expected=np.array(result, dtype=np.float32),
large_tolerance=True)
| ResizeBilinearTest |
python | django__django | django/views/generic/dates.py | {
"start": 12852,
"end": 13354
} | class ____(BaseDateListView):
"""
Base view for archives of date-based items.
This requires subclassing to provide a response mixin.
"""
context_object_name = "latest"
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
qs = self.get_dated_queryset()
date_list = self.get_date_list(qs, ordering="DESC")
if not date_list:
qs = qs.none()
return (date_list, qs, {})
| BaseArchiveIndexView |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_organization_repositories.py | {
"start": 9718,
"end": 11880
} | class ____(APITestCase):
def test_simple(self) -> None:
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name="baz")
with patch.object(DummyRepositoryProvider, "needs_auth", return_value=False):
url = reverse("sentry-api-0-organization-repositories", args=[org.slug])
response = self.client.post(url, data={"provider": "dummy", "name": "getsentry/sentry"})
assert response.status_code == 201, (response.status_code, response.content)
assert response.data["id"]
repo = Repository.objects.get(id=response.data["id"])
assert repo.provider == "dummy"
assert repo.name == "getsentry/sentry"
def test_admin_ok(self) -> None:
org = self.create_organization(owner=self.user, name="baz")
team = self.create_team(name="people", organization=org)
user = self.create_user(email="admin@example.org")
self.create_member(organization=org, user=user, teams=[team], role="admin")
self.login_as(user=user)
with patch.object(DummyRepositoryProvider, "needs_auth", return_value=False):
url = reverse("sentry-api-0-organization-repositories", args=[org.slug])
response = self.client.post(url, data={"provider": "dummy", "name": "getsentry/sentry"})
assert response.status_code == 201, (response.status_code, response.content)
def test_member_ok(self) -> None:
org = self.create_organization(owner=self.user, name="baz")
team = self.create_team(name="people", organization=org)
user = self.create_user(email="member@example.org")
self.create_member(organization=org, user=user, teams=[team], role="member")
self.login_as(user=user)
with patch.object(DummyRepositoryProvider, "needs_auth", return_value=False):
url = reverse("sentry-api-0-organization-repositories", args=[org.slug])
response = self.client.post(url, data={"provider": "dummy", "name": "getsentry/sentry"})
assert response.status_code == 201, (response.status_code, response.content)
| OrganizationRepositoriesCreateTest |
python | milvus-io__pymilvus | tests/test_schema.py | {
"start": 203,
"end": 2200
} | class ____:
@pytest.fixture(scope="function")
def raw_dict(self):
return {
"description": "TestCollectionSchema_description",
"enable_dynamic_field": True,
"fields": [
{
"name": "vec1",
"description": "desc1",
"type": DataType.FLOAT_VECTOR,
"params": {"dim": 128},
},
{
"name": "vec2",
"description": "desc2",
"type": DataType.BINARY_VECTOR,
"params": {"dim": 128},
},
{
"name": "ID",
"description": "ID",
"type": DataType.INT64,
"is_primary": True,
"auto_id": False
},
]
}
def test_constructor_from_dict(self, raw_dict):
schema = CollectionSchema.construct_from_dict(raw_dict)
assert schema.enable_dynamic_field == raw_dict.get("enable_dynamic_field", False)
assert schema.description, raw_dict['description']
assert len(schema.fields) == len(raw_dict['fields'])
f = schema.primary_field
assert isinstance(f, FieldSchema)
assert f.name == raw_dict['fields'][2]['name']
def test_to_dict(self, raw_dict):
schema = CollectionSchema.construct_from_dict(raw_dict)
target = schema.to_dict()
target.pop("auto_id", None)
assert target == raw_dict
assert target is not raw_dict
def test_init_with_functions(self, raw_dict):
functions = [
Function("func1", FunctionType.BM25, ["field1"], ["field2"])
]
schema = CollectionSchema.construct_from_dict(raw_dict)
schema_with_func = CollectionSchema(schema.fields, schema.description, functions=functions)
assert schema_with_func.functions == functions
| TestCollectionSchema |
python | astropy__astropy | astropy/time/tests/test_ut1.py | {
"start": 2558,
"end": 4560
} | class ____:
"""Test Time.ut1 using IERS tables"""
def test_ut1_to_utc(self):
"""Also test the reverse, around the leap second
(round-trip test closes #2077)"""
with iers_conf.set_temp("auto_download", False):
t = Time(
[
"2012-06-30 12:00:00",
"2012-06-30 23:59:59",
"2012-07-01 00:00:00",
"2012-07-01 00:00:01",
"2012-07-01 12:00:00",
],
scale="ut1",
)
t_utc_jd = t.utc.jd
t_comp = np.array(
[
2456109.0000010049,
2456109.4999836441,
2456109.4999952177,
2456109.5000067917,
2456109.9999952167,
]
)
assert allclose_jd(t_utc_jd, t_comp)
t_back = t.utc.ut1
assert allclose_jd(t.jd, t_back.jd)
def test_empty_ut1(self):
"""Testing for a zero-length Time object from UTC to UT1
when an empty array is passed"""
from astropy import units as u
with iers_conf.set_temp("auto_download", False):
t = Time(["2012-06-30 12:00:00"]) + np.arange(24) * u.hour
t_empty = t[[]].ut1
assert isinstance(t_empty, Time)
assert t_empty.scale == "ut1"
assert t_empty.size == 0
def test_delta_ut1_utc(self):
"""Accessing delta_ut1_utc should try to get it from IERS
(closes #1924 partially)"""
with iers_conf.set_temp("auto_download", False):
t = Time("2012-06-30 12:00:00", scale="utc")
assert not hasattr(t, "_delta_ut1_utc")
# accessing delta_ut1_utc calculates it
assert allclose_sec(t.delta_ut1_utc, -0.58682110003124965)
# and keeps it around
assert allclose_sec(t._delta_ut1_utc, -0.58682110003124965)
| TestTimeUT1 |
python | django-extensions__django-extensions | django_extensions/management/commands/show_template_tags.py | {
"start": 2112,
"end": 3915
} | class ____(BaseCommand):
help = "Displays template tags and filters available in the current project."
results = ""
def add_result(self, s, depth=0):
self.results += "%s\n" % s.rjust(depth * 4 + len(s))
@signalcommand
def handle(self, *args, **options):
if options["no_color"]:
style = no_style()
else:
style = color_style()
for app_config in apps.get_app_configs():
app = app_config.name
try:
templatetag_mod = __import__(app + ".templatetags", {}, {}, [""])
except ImportError:
continue
mod_path = inspect.getabsfile(templatetag_mod)
mod_files = os.listdir(os.path.dirname(mod_path))
tag_files = [
i.rstrip(".py") for i in mod_files if i.endswith(".py") and i[0] != "_"
]
app_labeled = False
for taglib in tag_files:
lib = load_tag_library(taglib)
if lib is None:
continue
if not app_labeled:
self.add_result("App: %s" % style.MODULE_NAME(app))
app_labeled = True
self.add_result("load: %s" % style.TAGLIB(taglib), 1)
libstuff = [
(lib.tags, "Tag:", style.TAG),
(lib.filters, "Filter:", style.FILTER),
]
for items, label, style_func in libstuff:
for item in items:
self.add_result("%s %s" % (label, style_func(item)), 2)
doc = inspect.getdoc(items[item])
if doc:
self.add_result(format_block(doc, 12))
return self.results
| Command |
python | huggingface__transformers | src/transformers/models/exaone4/modular_exaone4.py | {
"start": 14252,
"end": 14291
} | class ____(Olmo2MLP):
pass
| Exaone4MLP |
python | pypa__hatch | tests/project/test_core.py | {
"start": 3011,
"end": 3280
} | class ____:
def test_selected(self, temp_dir):
project = Project(temp_dir, name="foo")
assert project.chosen_name == "foo"
def test_cwd(self, temp_dir):
project = Project(temp_dir)
assert project.chosen_name is None
| TestChosenName |
python | anthropics__anthropic-sdk-python | src/anthropic/types/model_info.py | {
"start": 215,
"end": 646
} | class ____(BaseModel):
id: str
"""Unique model identifier."""
created_at: datetime
"""RFC 3339 datetime string representing the time at which the model was released.
May be set to an epoch value if the release date is unknown.
"""
display_name: str
"""A human-readable name for the model."""
type: Literal["model"]
"""Object type.
For Models, this is always `"model"`.
"""
| ModelInfo |
python | google__pytype | pytype/tools/xref/callgraph.py | {
"start": 240,
"end": 348
} | class ____:
name: str
node_type: str
type: Any
attrib: str
location: str
@dataclasses.dataclass
| Attr |
python | python__mypy | mypy/types.py | {
"start": 50697,
"end": 52831
} | class ____:
"""Summary of module attributes and types.
This is used for instances of types.ModuleType, because they can have different
attributes per instance, and for type narrowing with hasattr() checks.
"""
def __init__(
self,
attrs: dict[str, Type],
immutable: set[str] | None = None,
mod_name: str | None = None,
) -> None:
self.attrs = attrs
if immutable is None:
immutable = set()
self.immutable = immutable
self.mod_name = mod_name
def __hash__(self) -> int:
return hash((tuple(self.attrs.items()), tuple(sorted(self.immutable))))
def __eq__(self, other: object) -> bool:
if not isinstance(other, ExtraAttrs):
return NotImplemented
return self.attrs == other.attrs and self.immutable == other.immutable
def copy(self) -> ExtraAttrs:
return ExtraAttrs(self.attrs.copy(), self.immutable.copy(), self.mod_name)
def __repr__(self) -> str:
return f"ExtraAttrs({self.attrs!r}, {self.immutable!r}, {self.mod_name!r})"
def serialize(self) -> JsonDict:
return {
".class": "ExtraAttrs",
"attrs": {k: v.serialize() for k, v in self.attrs.items()},
"immutable": sorted(self.immutable),
"mod_name": self.mod_name,
}
@classmethod
def deserialize(cls, data: JsonDict) -> ExtraAttrs:
assert data[".class"] == "ExtraAttrs"
return ExtraAttrs(
{k: deserialize_type(v) for k, v in data["attrs"].items()},
set(data["immutable"]),
data["mod_name"],
)
def write(self, data: WriteBuffer) -> None:
write_tag(data, EXTRA_ATTRS)
write_type_map(data, self.attrs)
write_str_list(data, sorted(self.immutable))
write_str_opt(data, self.mod_name)
write_tag(data, END_TAG)
@classmethod
def read(cls, data: ReadBuffer) -> ExtraAttrs:
ret = ExtraAttrs(read_type_map(data), set(read_str_list(data)), read_str_opt(data))
assert read_tag(data) == END_TAG
return ret
| ExtraAttrs |
python | getsentry__sentry | src/sentry/replays/endpoints/project_replay_details.py | {
"start": 1185,
"end": 3613
} | class ____(ProjectEndpoint):
owner = ApiOwner.REPLAY
publish_status = {
"DELETE": ApiPublishStatus.PUBLIC,
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = (ReplayDetailsPermission,)
def get(self, request: Request, project: Project, replay_id: str) -> Response:
if not features.has(
"organizations:session-replay", project.organization, actor=request.user
):
return Response(status=404)
filter_params = self.get_filter_params(request, project)
try:
replay_id = str(uuid.UUID(replay_id))
except ValueError:
return Response(status=404)
snuba_response = query_replay_instance(
project_id=project.id,
replay_id=replay_id,
start=filter_params["start"],
end=filter_params["end"],
organization=project.organization,
request_user_id=request.user.id,
)
response = process_raw_response(
snuba_response,
fields=request.query_params.getlist("field"),
)
if len(response) == 0:
return Response(status=404)
else:
return Response({"data": response[0]}, status=200)
@extend_schema(
operation_id="Delete a Replay Instance",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
ReplayParams.REPLAY_ID,
],
responses={
204: RESPONSE_NO_CONTENT,
404: RESPONSE_NOT_FOUND,
},
examples=None,
)
def delete(self, request: Request, project: Project, replay_id: str) -> Response:
"""
Delete a replay.
"""
if not features.has(
"organizations:session-replay", project.organization, actor=request.user
):
return Response(status=404)
if has_archived_segment(project.id, replay_id):
return Response(status=404)
# We don't check Seer features because an org may have previously had them on, then turned them off.
has_seer_data = features.has("organizations:replay-ai-summaries", project.organization)
delete_replay.delay(
project_id=project.id,
replay_id=replay_id,
has_seer_data=has_seer_data,
)
return Response(status=204)
| ProjectReplayDetailsEndpoint |
python | redis__redis-py | tests/test_cache.py | {
"start": 43995,
"end": 48440
} | class ____:
def test_type(self):
policy = LRUPolicy()
assert policy.type == EvictionPolicyType.time_based
def test_evict_next(self, mock_connection):
cache = DefaultCache(
CacheConfig(max_size=5, eviction_policy=EvictionPolicy.LRU)
)
policy = cache.eviction_policy
cache_key1 = CacheKey(
command="GET", redis_keys=("foo",), redis_args=("GET", "foo")
)
cache_key2 = CacheKey(
command="GET", redis_keys=("bar",), redis_args=("GET", "bar")
)
assert cache.set(
CacheEntry(
cache_key=cache_key1,
cache_value=b"bar",
status=CacheEntryStatus.VALID,
connection_ref=mock_connection,
)
)
assert cache.set(
CacheEntry(
cache_key=cache_key2,
cache_value=b"foo",
status=CacheEntryStatus.VALID,
connection_ref=mock_connection,
)
)
assert policy.evict_next() == cache_key1
assert cache.get(cache_key1) is None
def test_evict_many(self, mock_connection):
cache = DefaultCache(
CacheConfig(max_size=5, eviction_policy=EvictionPolicy.LRU)
)
policy = cache.eviction_policy
cache_key1 = CacheKey(
command="GET", redis_keys=("foo",), redis_args=("GET", "foo")
)
cache_key2 = CacheKey(
command="GET", redis_keys=("bar",), redis_args=("GET", "bar")
)
cache_key3 = CacheKey(
command="GET", redis_keys=("baz",), redis_args=("GET", "baz")
)
assert cache.set(
CacheEntry(
cache_key=cache_key1,
cache_value=b"bar",
status=CacheEntryStatus.VALID,
connection_ref=mock_connection,
)
)
assert cache.set(
CacheEntry(
cache_key=cache_key2,
cache_value=b"foo",
status=CacheEntryStatus.VALID,
connection_ref=mock_connection,
)
)
assert cache.set(
CacheEntry(
cache_key=cache_key3,
cache_value=b"baz",
status=CacheEntryStatus.VALID,
connection_ref=mock_connection,
)
)
assert policy.evict_many(2) == [cache_key1, cache_key2]
assert cache.get(cache_key1) is None
assert cache.get(cache_key2) is None
with pytest.raises(ValueError, match="Evictions count is above cache size"):
policy.evict_many(99)
def test_touch(self, mock_connection):
cache = DefaultCache(
CacheConfig(max_size=5, eviction_policy=EvictionPolicy.LRU)
)
policy = cache.eviction_policy
cache_key1 = CacheKey(
command="GET", redis_keys=("foo",), redis_args=("GET", "foo")
)
cache_key2 = CacheKey(
command="GET", redis_keys=("bar",), redis_args=("GET", "bar")
)
cache.set(
CacheEntry(
cache_key=cache_key1,
cache_value=b"bar",
status=CacheEntryStatus.VALID,
connection_ref=mock_connection,
)
)
cache.set(
CacheEntry(
cache_key=cache_key2,
cache_value=b"foo",
status=CacheEntryStatus.VALID,
connection_ref=mock_connection,
)
)
assert cache.collection.popitem(last=True)[0] == cache_key2
cache.set(
CacheEntry(
cache_key=cache_key2,
cache_value=b"foo",
status=CacheEntryStatus.VALID,
connection_ref=mock_connection,
)
)
policy.touch(cache_key1)
assert cache.collection.popitem(last=True)[0] == cache_key1
def test_throws_error_on_invalid_cache(self):
policy = LRUPolicy()
with pytest.raises(
ValueError, match="Eviction policy should be associated with valid cache."
):
policy.evict_next()
policy.cache = "wrong_type"
with pytest.raises(
ValueError, match="Eviction policy should be associated with valid cache."
):
policy.evict_next()
| TestUnitLRUPolicy |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchClass6.py | {
"start": 1298,
"end": 1601
} | class ____(Protocol):
def __call__(self) -> None:
pass
def func7(obj: CallableProto):
match obj:
case Callable():
reveal_type(obj, expected_text="CallableProto")
return obj()
case x:
reveal_type(obj, expected_text="Never")
| CallableProto |
python | tensorflow__tensorflow | tensorflow/python/training/saving/saveable_object_util_test.py | {
"start": 3964,
"end": 4449
} | class ____(saveable_object.SaveableObject):
def __init__(self, obj, name):
self.obj = obj
specs = [
saveable_object.SaveSpec(obj.a, "", name + "-a"),
saveable_object.SaveSpec(obj.b, "", name + "-b")]
super(_MultiSpecSaveable, self).__init__(None, specs, name)
def restore(self, restored_tensors, restored_shapes):
del restored_shapes # Unused.
self.obj.a.assign(restored_tensors[0])
self.obj.b.assign(restored_tensors[1])
| _MultiSpecSaveable |
python | getsentry__sentry | src/sentry/backup/services/import_export/model.py | {
"start": 7075,
"end": 7573
} | class ____(str, Enum):
"""
Scope values are rendered as strings for JSON interchange, but can easily be mapped back to
their set-based values when necessary.
"""
User = "User"
Organization = "Organization"
Config = "Config"
Global = "Global"
def from_rpc(self) -> ExportScope:
return ExportScope[self.name]
@classmethod
def into_rpc(cls, base_scope: ExportScope) -> "RpcExportScope":
return RpcExportScope[base_scope.name]
| RpcExportScope |
python | uqfoundation__dill | dill/_dill.py | {
"start": 23023,
"end": 36074
} | class ____:
"""
Make avaialable a limited structural pattern matching-like syntax for Python < 3.10
Patterns can be only tuples (without types) currently.
Inspired by the package pattern-matching-PEP634.
Usage:
>>> with match(args) as m:
>>> if m.case(('x', 'y')):
>>> # use m.x and m.y
>>> elif m.case(('x', 'y', 'z')):
>>> # use m.x, m.y and m.z
Equivalent native code for Python >= 3.10:
>>> match args:
>>> case (x, y):
>>> # use x and y
>>> case (x, y, z):
>>> # use x, y and z
"""
def __init__(self, value):
self.value = value
self._fields = None
def __enter__(self):
return self
def __exit__(self, *exc_info):
return False
def case(self, args): # *args, **kwargs):
"""just handles tuple patterns"""
if len(self.value) != len(args): # + len(kwargs):
return False
#if not all(isinstance(arg, pat) for arg, pat in zip(self.value[len(args):], kwargs.values())):
# return False
self.args = args # (*args, *kwargs)
return True
@property
def fields(self):
# Only bind names to values if necessary.
if self._fields is None:
self._fields = dict(zip(self.args, self.value))
return self._fields
def __getattr__(self, item):
return self.fields[item]
ALL_CODE_PARAMS = [
# Version New attribute CodeType parameters
((3,11,'a'), 'co_endlinetable', 'argcount posonlyargcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name qualname firstlineno linetable endlinetable columntable exceptiontable freevars cellvars'),
((3,11), 'co_exceptiontable', 'argcount posonlyargcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name qualname firstlineno linetable exceptiontable freevars cellvars'),
((3,11,'p'), 'co_qualname', 'argcount posonlyargcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name qualname firstlineno linetable freevars cellvars'),
((3,10), 'co_linetable', 'argcount posonlyargcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name firstlineno linetable freevars cellvars'),
((3,8), 'co_posonlyargcount', 'argcount posonlyargcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name firstlineno lnotab freevars cellvars'),
((3,7), 'co_kwonlyargcount', 'argcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name firstlineno lnotab freevars cellvars'),
]
for version, new_attr, params in ALL_CODE_PARAMS:
if hasattr(CodeType, new_attr):
CODE_VERSION = version
CODE_PARAMS = params.split()
break
ENCODE_PARAMS = set(CODE_PARAMS).intersection(
['code', 'lnotab', 'linetable', 'endlinetable', 'columntable', 'exceptiontable'])
def _create_code(*args):
if not isinstance(args[0], int): # co_lnotab stored from >= 3.10
LNOTAB, *args = args
else: # from < 3.10 (or pre-LNOTAB storage)
LNOTAB = b''
with match(args) as m:
# Python 3.11/3.12a (18 members)
if m.case((
'argcount', 'posonlyargcount', 'kwonlyargcount', 'nlocals', 'stacksize', 'flags', # args[0:6]
'code', 'consts', 'names', 'varnames', 'filename', 'name', 'qualname', 'firstlineno', # args[6:14]
'linetable', 'exceptiontable', 'freevars', 'cellvars' # args[14:]
)):
if CODE_VERSION == (3,11):
return CodeType(
*args[:6],
args[6].encode() if hasattr(args[6], 'encode') else args[6], # code
*args[7:14],
args[14].encode() if hasattr(args[14], 'encode') else args[14], # linetable
args[15].encode() if hasattr(args[15], 'encode') else args[15], # exceptiontable
args[16],
args[17],
)
fields = m.fields
# PyPy 3.11 7.3.19+ (17 members)
elif m.case((
'argcount', 'posonlyargcount', 'kwonlyargcount', 'nlocals', 'stacksize', 'flags', # args[0:6]
'code', 'consts', 'names', 'varnames', 'filename', 'name', 'qualname', # args[6:13]
'firstlineno', 'linetable', 'freevars', 'cellvars' # args[13:]
)):
if CODE_VERSION == (3,11,'p'):
return CodeType(
*args[:6],
args[6].encode() if hasattr(args[6], 'encode') else args[6], # code
*args[7:14],
args[14].encode() if hasattr(args[14], 'encode') else args[14], # linetable
args[15],
args[16],
)
fields = m.fields
# Python 3.10 or 3.8/3.9 (16 members)
elif m.case((
'argcount', 'posonlyargcount', 'kwonlyargcount', 'nlocals', 'stacksize', 'flags', # args[0:6]
'code', 'consts', 'names', 'varnames', 'filename', 'name', 'firstlineno', # args[6:13]
'LNOTAB_OR_LINETABLE', 'freevars', 'cellvars' # args[13:]
)):
if CODE_VERSION == (3,10) or CODE_VERSION == (3,8):
return CodeType(
*args[:6],
args[6].encode() if hasattr(args[6], 'encode') else args[6], # code
*args[7:13],
args[13].encode() if hasattr(args[13], 'encode') else args[13], # lnotab/linetable
args[14],
args[15],
)
fields = m.fields
if CODE_VERSION >= (3,10):
fields['linetable'] = m.LNOTAB_OR_LINETABLE
else:
fields['lnotab'] = LNOTAB if LNOTAB else m.LNOTAB_OR_LINETABLE
# Python 3.7 (15 args)
elif m.case((
'argcount', 'kwonlyargcount', 'nlocals', 'stacksize', 'flags', # args[0:5]
'code', 'consts', 'names', 'varnames', 'filename', 'name', 'firstlineno', # args[5:12]
'lnotab', 'freevars', 'cellvars' # args[12:]
)):
if CODE_VERSION == (3,7):
return CodeType(
*args[:5],
args[5].encode() if hasattr(args[5], 'encode') else args[5], # code
*args[6:12],
args[12].encode() if hasattr(args[12], 'encode') else args[12], # lnotab
args[13],
args[14],
)
fields = m.fields
# Python 3.11a (20 members)
elif m.case((
'argcount', 'posonlyargcount', 'kwonlyargcount', 'nlocals', 'stacksize', 'flags', # args[0:6]
'code', 'consts', 'names', 'varnames', 'filename', 'name', 'qualname', 'firstlineno', # args[6:14]
'linetable', 'endlinetable', 'columntable', 'exceptiontable', 'freevars', 'cellvars' # args[14:]
)):
if CODE_VERSION == (3,11,'a'):
return CodeType(
*args[:6],
args[6].encode() if hasattr(args[6], 'encode') else args[6], # code
*args[7:14],
*(a.encode() if hasattr(a, 'encode') else a for a in args[14:18]), # linetable-exceptiontable
args[18],
args[19],
)
fields = m.fields
else:
raise UnpicklingError("pattern match for code object failed")
# The args format doesn't match this version.
fields.setdefault('posonlyargcount', 0) # from python <= 3.7
fields.setdefault('lnotab', LNOTAB) # from python >= 3.10
fields.setdefault('linetable', b'') # from python <= 3.9
fields.setdefault('qualname', fields['name']) # from python <= 3.10
fields.setdefault('exceptiontable', b'') # from python <= 3.10
fields.setdefault('endlinetable', None) # from python != 3.11a
fields.setdefault('columntable', None) # from python != 3.11a
args = (fields[k].encode() if k in ENCODE_PARAMS and hasattr(fields[k], 'encode') else fields[k]
for k in CODE_PARAMS)
return CodeType(*args)
def _create_ftype(ftypeobj, func, args, kwds):
if kwds is None:
kwds = {}
if args is None:
args = ()
return ftypeobj(func, *args, **kwds)
def _create_typing_tuple(argz, *args): #NOTE: workaround python/cpython#94245
if not argz:
return typing.Tuple[()].copy_with(())
if argz == ((),):
return typing.Tuple[()]
return typing.Tuple[argz]
if ThreadHandleType:
def _create_thread_handle(ident, done, *args): #XXX: ignores 'blocking'
from threading import _make_thread_handle
handle = _make_thread_handle(ident)
if done:
handle._set_done()
return handle
def _create_lock(locked, *args): #XXX: ignores 'blocking'
from threading import Lock
lock = Lock()
if locked:
if not lock.acquire(False):
raise UnpicklingError("Cannot acquire lock")
return lock
def _create_rlock(count, owner, *args): #XXX: ignores 'blocking'
lock = RLockType()
if owner is not None:
lock._acquire_restore((count, owner))
if owner and not lock._is_owned():
raise UnpicklingError("Cannot acquire lock")
return lock
# thanks to matsjoyce for adding all the different file modes
def _create_filehandle(name, mode, position, closed, open, strictio, fmode, fdata): # buffering=0
# only pickles the handle, not the file contents... good? or StringIO(data)?
# (for file contents see: http://effbot.org/librarybook/copy-reg.htm)
# NOTE: handle special cases first (are there more special cases?)
names = {'<stdin>':sys.__stdin__, '<stdout>':sys.__stdout__,
'<stderr>':sys.__stderr__} #XXX: better fileno=(0,1,2) ?
if name in list(names.keys()):
f = names[name] #XXX: safer "f=sys.stdin"
elif name == '<tmpfile>':
f = os.tmpfile()
elif name == '<fdopen>':
import tempfile
f = tempfile.TemporaryFile(mode)
else:
try:
exists = os.path.exists(name)
except Exception:
exists = False
if not exists:
if strictio:
raise FileNotFoundError("[Errno 2] No such file or directory: '%s'" % name)
elif "r" in mode and fmode != FILE_FMODE:
name = '<fdopen>' # or os.devnull?
current_size = 0 # or maintain position?
else:
current_size = os.path.getsize(name)
if position > current_size:
if strictio:
raise ValueError("invalid buffer size")
elif fmode == CONTENTS_FMODE:
position = current_size
# try to open the file by name
# NOTE: has different fileno
try:
#FIXME: missing: *buffering*, encoding, softspace
if fmode == FILE_FMODE:
f = open(name, mode if "w" in mode else "w")
f.write(fdata)
if "w" not in mode:
f.close()
f = open(name, mode)
elif name == '<fdopen>': # file did not exist
import tempfile
f = tempfile.TemporaryFile(mode)
# treat x mode as w mode
elif fmode == CONTENTS_FMODE \
and ("w" in mode or "x" in mode):
# stop truncation when opening
flags = os.O_CREAT
if "+" in mode:
flags |= os.O_RDWR
else:
flags |= os.O_WRONLY
f = os.fdopen(os.open(name, flags), mode)
# set name to the correct value
r = getattr(f, "buffer", f)
r = getattr(r, "raw", r)
r.name = name
assert f.name == name
else:
f = open(name, mode)
except (IOError, FileNotFoundError):
err = sys.exc_info()[1]
raise UnpicklingError(err)
if closed:
f.close()
elif position >= 0 and fmode != HANDLE_FMODE:
f.seek(position)
return f
def _create_stringi(value, position, closed):
f = StringIO(value)
if closed: f.close()
else: f.seek(position)
return f
def _create_stringo(value, position, closed):
f = StringIO()
if closed: f.close()
else:
f.write(value)
f.seek(position)
return f
| match |
python | kamyu104__LeetCode-Solutions | Python/remove-adjacent-almost-equal-characters.py | {
"start": 38,
"end": 420
} | class ____(object):
def removeAlmostEqualCharacters(self, word):
"""
:type word: str
:rtype: int
"""
result = 0
for i in xrange(len(word)-1):
if (i+1)+result >= len(word):
break
if abs(ord(word[(i+1)+result])-ord(word[i+result])) <= 1:
result += 1
return result
| Solution |
python | sqlalchemy__sqlalchemy | test/orm/test_subquery_relations.py | {
"start": 121175,
"end": 123637
} | class ____(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class SubItem(Base):
__tablename__ = "sub_items"
id = Column(Integer, primary_key=True, autoincrement=True)
item_id = Column(Integer, ForeignKey("items.id"))
name = Column(String(50))
number = Column(Integer)
class Item(Base):
__tablename__ = "items"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(50))
number = Column(Integer)
sub_items = relationship("SubItem", backref="item")
@classmethod
def insert_data(cls, connection):
Item, SubItem = cls.classes("Item", "SubItem")
with Session(connection) as sess:
number_of_items = 50
number_of_sub_items = 5
items = [
Item(name=f"Item:{i}", number=i)
for i in range(number_of_items)
]
sess.add_all(items)
for item in items:
item.sub_items = [
SubItem(name=f"SubItem:{item.id}:{i}", number=i)
for i in range(number_of_sub_items)
]
sess.commit()
@testing.variation("use_in", [True, False])
def test_multiple_queries(self, use_in):
Item, SubItem = self.classes("Item", "SubItem")
for sub_item_number in (1, 2, 3):
s = fixture_session()
base_query = s.query(Item)
base_query = base_query.filter(Item.number > 5, Item.number <= 10)
if use_in:
base_query = base_query.options(
subqueryload(
Item.sub_items.and_(
SubItem.number.in_([sub_item_number, 18, 12])
)
)
)
else:
base_query = base_query.options(
subqueryload(
Item.sub_items.and_(SubItem.number == sub_item_number)
)
)
items = list(base_query)
eq_(len(items), 5)
for item in items:
sub_items = list(item.sub_items)
eq_(len(sub_items), 1)
for sub_item in sub_items:
eq_(sub_item.number, sub_item_number)
| Issue11173Test |
python | pypa__warehouse | tests/unit/admin/views/test_sponsors.py | {
"start": 3210,
"end": 5099
} | class ____:
def test_serialize_form_to_create_sponsor(self, db_request):
result = views.create_sponsor(db_request)
assert len(result) == 1
assert isinstance(result["form"], views.SponsorForm)
def test_serialize_form_errors_if_invalid_post(self, db_request):
db_request.method = "POST"
db_request.POST["name"] = ""
db_request.POST["link_url"] = ""
db_request.POST = MultiDict(db_request.POST)
result = views.create_sponsor(db_request)
assert len(result) == 1
assert isinstance(result["form"], views.SponsorForm)
assert result["form"].errors
def test_create_sponsor(self, db_request):
db_request.method = "POST"
db_request.POST["name"] = "Sponsor"
db_request.POST["link_url"] = "https://newsponsor.com"
db_request.POST["color_logo"] = COLOR_LOGO_FILE
db_request.POST = MultiDict(db_request.POST)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_url = pretend.call_recorder(lambda r: "/admin/sponsors/")
storage_service = pretend.stub(
store=pretend.call_recorder(
lambda path, file_path, ct: f"http://files/sponsorlogos/{path}"
)
)
db_request.find_service = pretend.call_recorder(
lambda svc, name=None, context=None: {
ISponsorLogoStorage: storage_service,
}.get(svc)
)
resp = views.create_sponsor(db_request)
assert resp.status_code == 303
assert resp.location == "/admin/sponsors/"
assert db_request.session.flash.calls == [
pretend.call("Added new sponsor 'Sponsor'", queue="success")
]
assert db_request.route_url.calls == [pretend.call("admin.sponsor.list")]
| TestCreateSponsor |
python | django__django | tests/syndication_tests/feeds.py | {
"start": 2917,
"end": 3004
} | class ____(TestRss2Feed):
feed_type = feedgenerator.RssUserland091Feed
| TestRss091Feed |
python | pallets__jinja | src/jinja2/lexer.py | {
"start": 13395,
"end": 13527
} | class ____(t.NamedTuple):
pattern: t.Pattern[str]
tokens: str | tuple[str, ...] | tuple[Failure]
command: str | None
| _Rule |
python | fastapi__sqlmodel | docs_src/tutorial/many_to_many/tutorial002_py39.py | {
"start": 610,
"end": 3197
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
teams: list[Team] = Relationship(back_populates="heroes", link_model=HeroTeamLink)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
hero_deadpond = Hero(
name="Deadpond",
secret_name="Dive Wilson",
teams=[team_z_force, team_preventers],
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
teams=[team_preventers],
)
hero_spider_boy = Hero(
name="Spider-Boy", secret_name="Pedro Parqueador", teams=[team_preventers]
)
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Deadpond:", hero_deadpond)
print("Deadpond teams:", hero_deadpond.teams)
print("Rusty-Man:", hero_rusty_man)
print("Rusty-Man Teams:", hero_rusty_man.teams)
print("Spider-Boy:", hero_spider_boy)
print("Spider-Boy Teams:", hero_spider_boy.teams)
def update_heroes():
with Session(engine) as session:
hero_spider_boy = session.exec(
select(Hero).where(Hero.name == "Spider-Boy")
).one()
team_z_force = session.exec(select(Team).where(Team.name == "Z-Force")).one()
team_z_force.heroes.append(hero_spider_boy)
session.add(team_z_force)
session.commit()
print("Updated Spider-Boy's Teams:", hero_spider_boy.teams)
print("Z-Force heroes:", team_z_force.heroes)
hero_spider_boy.teams.remove(team_z_force)
session.add(team_z_force)
session.commit()
print("Reverted Z-Force's heroes:", team_z_force.heroes)
print("Reverted Spider-Boy's teams:", hero_spider_boy.teams)
def main():
create_db_and_tables()
create_heroes()
update_heroes()
if __name__ == "__main__":
main()
| Hero |
python | getsentry__sentry | src/sentry/notifications/notification_action/types.py | {
"start": 14664,
"end": 20071
} | class ____(ABC):
ACTIVITIES_TO_INVOKE_ON = [ActivityType.SET_RESOLVED.value]
@classmethod
def build_notification_context(cls, action: Action) -> NotificationContext:
return NotificationContext.from_action_model(action)
@classmethod
def build_alert_context(
cls,
detector: Detector,
evidence_data: MetricIssueEvidenceData,
group_status: GroupStatus,
detector_priority_level: DetectorPriorityLevel,
) -> AlertContext:
return AlertContext.from_workflow_engine_models(
detector, evidence_data, group_status, detector_priority_level
)
@classmethod
def build_metric_issue_context(
cls,
group: Group,
evidence_data: MetricIssueEvidenceData,
detector_priority_level: DetectorPriorityLevel,
) -> MetricIssueContext:
return MetricIssueContext.from_group_event(group, evidence_data, detector_priority_level)
@classmethod
def build_open_period_context(cls, group: Group) -> OpenPeriodContext:
return OpenPeriodContext.from_group(group)
@classmethod
def get_trigger_status(cls, group: Group) -> TriggerStatus:
if group.status == GroupStatus.RESOLVED or group.status == GroupStatus.IGNORED:
return TriggerStatus.RESOLVED
return TriggerStatus.ACTIVE
@classmethod
def send_alert(
cls,
notification_context: NotificationContext,
alert_context: AlertContext,
metric_issue_context: MetricIssueContext,
open_period_context: OpenPeriodContext,
trigger_status: TriggerStatus,
notification_uuid: str,
organization: Organization,
project: Project,
) -> None:
raise NotImplementedError
@staticmethod
def _extract_from_group_event(
event: GroupEvent,
) -> tuple[MetricIssueEvidenceData, DetectorPriorityLevel]:
"""
Extract evidence data and priority from a GroupEvent
"""
if event.occurrence is None:
raise ValueError("Event occurrence is required for alert context")
if event.occurrence.priority is None:
raise ValueError("Event occurrence priority is required for alert context")
evidence_data = MetricIssueEvidenceData(**event.occurrence.evidence_data)
priority = DetectorPriorityLevel(event.occurrence.priority)
return evidence_data, priority
@staticmethod
def _extract_from_activity(
event: Activity,
) -> tuple[MetricIssueEvidenceData, DetectorPriorityLevel]:
"""
Extract evidence data and priority from an Activity event
"""
if event.type != ActivityType.SET_RESOLVED.value:
raise ValueError(
"Activity type must be SET_RESOLVED to invoke metric alert legacy registry"
)
if event.data is None or not event.data:
raise ValueError("Activity data is required for alert context")
evidence_data_dict = dict(event.data)
priority = DetectorPriorityLevel.OK
evidence_data = MetricIssueEvidenceData(**evidence_data_dict)
return evidence_data, priority
@classmethod
def invoke_legacy_registry(
cls,
event_data: WorkflowEventData,
action: Action,
detector: Detector,
) -> None:
event = event_data.event
# Extract evidence data and priority based on event type
if isinstance(event, GroupEvent):
evidence_data, priority = cls._extract_from_group_event(event)
elif isinstance(event, Activity):
evidence_data, priority = cls._extract_from_activity(event)
else:
raise ValueError(
"WorkflowEventData.event must be a GroupEvent or Activity to invoke metric alert legacy registry"
)
notification_context = cls.build_notification_context(action)
alert_context = cls.build_alert_context(
detector, evidence_data, event_data.group.status, priority
)
metric_issue_context = cls.build_metric_issue_context(
event_data.group, evidence_data, priority
)
open_period_context = cls.build_open_period_context(event_data.group)
trigger_status = cls.get_trigger_status(event_data.group)
notification_uuid = str(uuid.uuid4())
logger.info(
"notification_action.execute_via_metric_alert_handler",
extra={
"action_id": action.id,
"detector_id": detector.id,
"event_data": asdict(event_data),
"notification_context": asdict(notification_context),
"alert_context": asdict(alert_context),
"metric_issue_context": asdict(metric_issue_context),
"open_period_context": asdict(open_period_context),
"trigger_status": trigger_status,
},
)
cls.send_alert(
notification_context=notification_context,
alert_context=alert_context,
metric_issue_context=metric_issue_context,
open_period_context=open_period_context,
trigger_status=trigger_status,
notification_uuid=notification_uuid,
organization=detector.project.organization,
project=detector.project,
)
| BaseMetricAlertHandler |
python | pandas-dev__pandas | pandas/tests/libs/test_libalgos.py | {
"start": 1399,
"end": 3474
} | class ____:
def test_backfill(self):
old = np.array([1, 5, 10], dtype=np.int64)
new = np.array(list(range(12)), dtype=np.int64)
filler = libalgos.backfill["int64_t"](old, new)
expect_filler = np.array([0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1], dtype=np.intp)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = np.array([1, 4], dtype=np.int64)
new = np.array(list(range(5, 10)), dtype=np.int64)
filler = libalgos.backfill["int64_t"](old, new)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_pad(self):
old = np.array([1, 5, 10], dtype=np.int64)
new = np.array(list(range(12)), dtype=np.int64)
filler = libalgos.pad["int64_t"](old, new)
expect_filler = np.array([-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = np.array([5, 10], dtype=np.int64)
new = np.arange(5, dtype=np.int64)
filler = libalgos.pad["int64_t"](old, new)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_pad_backfill_object_segfault(self):
old = np.array([], dtype="O")
new = np.array([datetime(2010, 12, 31)], dtype="O")
result = libalgos.pad["object"](old, new)
expected = np.array([-1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.pad["object"](new, old)
expected = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill["object"](old, new)
expected = np.array([-1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill["object"](new, old)
expected = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
| TestPadBackfill |
python | plotly__plotly.py | plotly/graph_objs/mesh3d/colorbar/title/_font.py | {
"start": 233,
"end": 9908
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "mesh3d.colorbar.title"
_path_str = "mesh3d.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.mesh3d.colorbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.mesh3d.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.mesh3d.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | encode__starlette | starlette/responses.py | {
"start": 817,
"end": 5807
} | class ____:
media_type = None
charset = "utf-8"
def __init__(
self,
content: Any = None,
status_code: int = 200,
headers: Mapping[str, str] | None = None,
media_type: str | None = None,
background: BackgroundTask | None = None,
) -> None:
self.status_code = status_code
if media_type is not None:
self.media_type = media_type
self.background = background
self.body = self.render(content)
self.init_headers(headers)
def render(self, content: Any) -> bytes | memoryview:
if content is None:
return b""
if isinstance(content, bytes | memoryview):
return content
return content.encode(self.charset) # type: ignore
def init_headers(self, headers: Mapping[str, str] | None = None) -> None:
if headers is None:
raw_headers: list[tuple[bytes, bytes]] = []
populate_content_length = True
populate_content_type = True
else:
raw_headers = [(k.lower().encode("latin-1"), v.encode("latin-1")) for k, v in headers.items()]
keys = [h[0] for h in raw_headers]
populate_content_length = b"content-length" not in keys
populate_content_type = b"content-type" not in keys
body = getattr(self, "body", None)
if (
body is not None
and populate_content_length
and not (self.status_code < 200 or self.status_code in (204, 304))
):
content_length = str(len(body))
raw_headers.append((b"content-length", content_length.encode("latin-1")))
content_type = self.media_type
if content_type is not None and populate_content_type:
if content_type.startswith("text/") and "charset=" not in content_type.lower():
content_type += "; charset=" + self.charset
raw_headers.append((b"content-type", content_type.encode("latin-1")))
self.raw_headers = raw_headers
@property
def headers(self) -> MutableHeaders:
if not hasattr(self, "_headers"):
self._headers = MutableHeaders(raw=self.raw_headers)
return self._headers
def set_cookie(
self,
key: str,
value: str = "",
max_age: int | None = None,
expires: datetime | str | int | None = None,
path: str | None = "/",
domain: str | None = None,
secure: bool = False,
httponly: bool = False,
samesite: Literal["lax", "strict", "none"] | None = "lax",
partitioned: bool = False,
) -> None:
cookie: http.cookies.BaseCookie[str] = http.cookies.SimpleCookie()
cookie[key] = value
if max_age is not None:
cookie[key]["max-age"] = max_age
if expires is not None:
if isinstance(expires, datetime):
cookie[key]["expires"] = format_datetime(expires, usegmt=True)
else:
cookie[key]["expires"] = expires
if path is not None:
cookie[key]["path"] = path
if domain is not None:
cookie[key]["domain"] = domain
if secure:
cookie[key]["secure"] = True
if httponly:
cookie[key]["httponly"] = True
if samesite is not None:
assert samesite.lower() in [
"strict",
"lax",
"none",
], "samesite must be either 'strict', 'lax' or 'none'"
cookie[key]["samesite"] = samesite
if partitioned:
if sys.version_info < (3, 14):
raise ValueError("Partitioned cookies are only supported in Python 3.14 and above.") # pragma: no cover
cookie[key]["partitioned"] = True # pragma: no cover
cookie_val = cookie.output(header="").strip()
self.raw_headers.append((b"set-cookie", cookie_val.encode("latin-1")))
def delete_cookie(
self,
key: str,
path: str = "/",
domain: str | None = None,
secure: bool = False,
httponly: bool = False,
samesite: Literal["lax", "strict", "none"] | None = "lax",
) -> None:
self.set_cookie(
key,
max_age=0,
expires=0,
path=path,
domain=domain,
secure=secure,
httponly=httponly,
samesite=samesite,
)
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
prefix = "websocket." if scope["type"] == "websocket" else ""
await send(
{
"type": prefix + "http.response.start",
"status": self.status_code,
"headers": self.raw_headers,
}
)
await send({"type": prefix + "http.response.body", "body": self.body})
if self.background is not None:
await self.background()
| Response |
python | PyCQA__pylint | doc/data/messages/b/bad-classmethod-argument/good.py | {
"start": 0,
"end": 78
} | class ____:
@classmethod
def get_instance(cls):
return cls()
| Klass |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels38.py | {
"start": 315,
"end": 1659
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels38.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [48498944, 48508928]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {
"value": True,
"border": {"color": "red", "width": 1, "dash_type": "dash"},
"pattern": {"pattern": "light_vertical", "fg_color": "#00B050"},
},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | spack__spack | lib/spack/spack/test/error_messages.py | {
"start": 1285,
"end": 1473
} | class ____(Package):
version("2.1")
version("2.0")
variant("v1", default=True)
depends_on("y4@4.1", when="+v1")
depends_on("y4")
""",
)
_pkgy3 = (
"y3",
"""\
| Y2 |
python | pytest-dev__pytest | src/_pytest/logging.py | {
"start": 1801,
"end": 2746
} | class ____(logging.Formatter):
"""A logging formatter which formats record with
:func:`datetime.datetime.strftime` formatter instead of
:func:`time.strftime` in case of microseconds in format string.
"""
def formatTime(self, record: LogRecord, datefmt: str | None = None) -> str:
if datefmt and "%f" in datefmt:
ct = self.converter(record.created)
tz = timezone(timedelta(seconds=ct.tm_gmtoff), ct.tm_zone)
# Construct `datetime.datetime` object from `struct_time`
# and msecs information from `record`
# Using int() instead of round() to avoid it exceeding 1_000_000 and causing a ValueError (#11861).
dt = datetime(*ct[0:6], microsecond=int(record.msecs * 1000), tzinfo=tz)
return dt.strftime(datefmt)
# Use `logging.Formatter` for non-microsecond formats
return super().formatTime(record, datefmt)
| DatetimeFormatter |
python | django__django | tests/model_forms/models.py | {
"start": 6474,
"end": 6534
} | class ____(models.Model):
url = models.URLField()
| Homepage |
python | django__django | tests/model_enums/tests.py | {
"start": 9896,
"end": 10846
} | class ____(SimpleTestCase):
def test_labels_valid(self):
enums = (
Separator,
Constants,
Set,
MoonLandings,
DateAndTime,
MealTimes,
Frequency,
Number,
IPv4Address,
IPv6Address,
IPv4Network,
IPv6Network,
)
for choice_enum in enums:
with self.subTest(choice_enum.__name__):
self.assertNotIn(None, choice_enum.labels)
def test_bool_unsupported(self):
msg = "type 'bool' is not an acceptable base type"
with self.assertRaisesMessage(TypeError, msg):
class Boolean(bool, models.Choices):
pass
def test_uuid_unsupported(self):
with self.assertRaises(TypeError):
class Identifier(uuid.UUID, models.Choices):
A = "972ce4eb-a95f-4a56-9339-68c208a76f18"
| CustomChoicesTests |
python | fastai__fastai | fastai/torch_core.py | {
"start": 23962,
"end": 24171
} | class ____(Str, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
| TitledStr |
python | keon__algorithms | algorithms/linkedlist/remove_duplicates.py | {
"start": 0,
"end": 1264
} | class ____():
def __init__(self, val = None):
self.val = val
self.next = None
def remove_dups(head):
"""
Time Complexity: O(N)
Space Complexity: O(N)
"""
hashset = set()
prev = Node()
while head:
if head.val in hashset:
prev.next = head.next
else:
hashset.add(head.val)
prev = head
head = head.next
def remove_dups_wothout_set(head):
"""
Time Complexity: O(N^2)
Space Complexity: O(1)
"""
current = head
while current:
runner = current
while runner.next:
if runner.next.val == current.val:
runner.next = runner.next.next
else:
runner = runner.next
current = current.next
def print_linked_list(head):
string = ""
while head.next:
string += head.val + " -> "
head = head.next
string += head.val
print(string)
# A A B C D C F G
a1 = Node("A")
a2 = Node("A")
b = Node("B")
c1 = Node("C")
d = Node("D")
c2 = Node("C")
f = Node("F")
g = Node("G")
a1.next = a2
a2.next = b
b.next = c1
c1.next = d
d.next = c2
c2.next = f
f.next = g
remove_dups(a1)
print_linked_list(a1)
remove_dups_wothout_set(a1)
print_linked_list(a1)
| Node |
python | huggingface__transformers | src/transformers/models/informer/modeling_informer.py | {
"start": 8792,
"end": 10382
} | class ____(nn.Embedding):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
super().__init__(num_positions, embedding_dim, _freeze=True)
def create_weight(self):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = self.weight.shape
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
out = torch.empty(n_pos, dim, dtype=self.weight.dtype, requires_grad=False)
sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
return out
@torch.no_grad()
def forward(
self, input_ids_shape: torch.Size, past_key_values_length: int = 0, position_ids: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
if position_ids is None:
bsz, seq_len = input_ids_shape[:2]
position_ids = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(position_ids)
| InformerSinusoidalPositionalEmbedding |
python | dagster-io__dagster | python_modules/dagster/dagster/_utils/caching_instance_queryer.py | {
"start": 2276,
"end": 49375
} | class ____(DynamicPartitionsStore):
"""Provides utility functions for querying for asset-materialization related data from the
instance which will attempt to limit redundant expensive calls. Intended for use within the
scope of a single "request" (e.g. GQL request, sensor tick).
Args:
instance (DagsterInstance): The instance to query.
"""
def __init__(
self,
instance: DagsterInstance,
asset_graph: BaseAssetGraph,
loading_context: LoadingContext,
evaluation_time: Optional[datetime] = None,
logger: Optional[logging.Logger] = None,
):
self._instance = instance
self._loading_context = loading_context
self._asset_graph = asset_graph
self._logger = logger or logging.getLogger("dagster")
self._asset_partitions_cache: dict[Optional[int], dict[AssetKey, set[str]]] = defaultdict(
dict
)
self._dynamic_partitions_cache: dict[str, Sequence[str]] = {}
self._evaluation_time = evaluation_time if evaluation_time else get_current_datetime()
self._respect_materialization_data_versions = (
self._instance.auto_materialize_respect_materialization_data_versions
)
@property
def instance(self) -> DagsterInstance:
return self._instance
@property
def asset_graph(self) -> BaseAssetGraph:
return self._asset_graph
@property
def evaluation_time(self) -> datetime:
return self._evaluation_time
####################
# QUERY BATCHING
####################
def prefetch_asset_records(self, asset_keys: Iterable[AssetKey]):
"""For performance, batches together queries for selected assets."""
from dagster._core.storage.event_log.base import AssetRecord
AssetRecord.blocking_get_many(self._loading_context, asset_keys)
####################
# ASSET STATUS CACHE
####################
@cached_method
def _get_updated_cache_value(self, *, asset_key: AssetKey) -> Optional["AssetStatusCacheValue"]:
from dagster._core.storage.partition_status_cache import (
get_and_update_asset_status_cache_value,
)
partitions_def = check.not_none(self.asset_graph.get(asset_key).partitions_def)
return get_and_update_asset_status_cache_value(
instance=self.instance,
asset_key=asset_key,
partitions_def=partitions_def,
dynamic_partitions_loader=self,
loading_context=self._loading_context,
)
@cached_method
def get_failed_or_in_progress_subset(self, *, asset_key: AssetKey) -> PartitionsSubset:
"""Returns a PartitionsSubset representing the set of partitions that are either in progress
or whose last materialization attempt failed.
"""
partitions_def = check.not_none(self.asset_graph.get(asset_key).partitions_def)
cache_value = self._get_updated_cache_value(asset_key=asset_key)
if cache_value is None:
return partitions_def.empty_subset()
return cache_value.deserialize_failed_partition_subsets(
partitions_def
) | cache_value.deserialize_in_progress_partition_subsets(partitions_def)
@cached_method
def get_materialized_asset_subset(
self, *, asset_key: AssetKey
) -> SerializableEntitySubset[AssetKey]:
"""Returns an AssetSubset representing the subset of the asset that has been materialized."""
partitions_def = self.asset_graph.get(asset_key).partitions_def
if partitions_def:
cache_value = self._get_updated_cache_value(asset_key=asset_key)
if cache_value is None:
value = partitions_def.empty_subset()
else:
value = cache_value.deserialize_materialized_partition_subsets(partitions_def)
else:
value = self.asset_partition_has_materialization_or_observation(
AssetKeyPartitionKey(asset_key)
)
return SerializableEntitySubset(key=asset_key, value=value)
@cached_method
def get_in_progress_asset_subset(
self, *, asset_key: AssetKey
) -> SerializableEntitySubset[AssetKey]:
"""Returns an AssetSubset representing the subset of the asset that is currently in progress."""
partitions_def = self.asset_graph.get(asset_key).partitions_def
if partitions_def:
cache_value = self._get_updated_cache_value(asset_key=asset_key)
if cache_value is None:
value = partitions_def.empty_subset()
else:
value = cache_value.deserialize_in_progress_partition_subsets(partitions_def)
else:
# NOTE: this computation is not correct in all cases for unpartitioned assets. it is
# possible (though rare) for run A to be launched targeting an asset, then later run B
# be launched, and then run B completes before run A. In these cases, the computation
# below will consider the asset to not be in progress, as the latest planned event
# will be associated with a completed run.
asset_record = self.get_asset_record(asset_key)
last_materialized_run_id = (
asset_record.asset_entry.last_materialization_record.run_id
if asset_record and asset_record.asset_entry.last_materialization_record
else None
)
planned_materialization_run_id = None
if self.instance.event_log_storage.asset_records_have_last_planned_and_failed_materializations:
planned_materialization_run_id = (
asset_record.asset_entry.last_planned_materialization_run_id
if asset_record
else None
)
else:
planned_materialization_info = (
self.instance.event_log_storage.get_latest_planned_materialization_info(
asset_key
)
)
planned_materialization_run_id = (
planned_materialization_info.run_id if planned_materialization_info else None
)
if (
not planned_materialization_run_id
# if the latest materialization happened in the same run as the latest planned materialization,
# it's no longer in progress
or last_materialized_run_id == planned_materialization_run_id
):
value = False
else:
dagster_run = self.instance.get_run_by_id(planned_materialization_run_id)
value = dagster_run is not None and dagster_run.status in [
*IN_PROGRESS_RUN_STATUSES,
# an asset is considered to be "in progress" if there is planned work for it that has not
# yet completed, which is not identical to the "in progress" status of the run
DagsterRunStatus.QUEUED,
]
return SerializableEntitySubset(key=asset_key, value=value)
@cached_method
def get_failed_asset_subset(self, *, asset_key: AssetKey) -> SerializableEntitySubset[AssetKey]:
"""Returns an AssetSubset representing the subset of the asset that failed to be
materialized its most recent run.
"""
partitions_def = self.asset_graph.get(asset_key).partitions_def
if partitions_def:
cache_value = self._get_updated_cache_value(asset_key=asset_key)
if cache_value is None:
value = partitions_def.empty_subset()
else:
value = cache_value.deserialize_failed_partition_subsets(partitions_def)
else:
# ideally, unpartitioned assets would also be handled by the asset status cache
planned_materialization_info = (
self.instance.event_log_storage.get_latest_planned_materialization_info(asset_key)
)
if not planned_materialization_info:
value = False
else:
dagster_run = self.instance.get_run_by_id(planned_materialization_info.run_id)
value = dagster_run is not None and dagster_run.status == DagsterRunStatus.FAILURE
return SerializableEntitySubset(key=asset_key, value=value)
####################
# ASSET RECORDS / STORAGE IDS
####################
def get_asset_record(self, asset_key: AssetKey) -> Optional["AssetRecord"]:
from dagster._core.storage.event_log.base import AssetRecord
return AssetRecord.blocking_get(self._loading_context, asset_key)
def _event_type_for_key(self, asset_key: AssetKey) -> DagsterEventType:
if self.asset_graph.get(asset_key).is_observable:
return DagsterEventType.ASSET_OBSERVATION
else:
return DagsterEventType.ASSET_MATERIALIZATION
@cached_method
def _get_latest_materialization_or_observation_record(
self, *, asset_partition: AssetKeyPartitionKey, before_cursor: Optional[int] = None
) -> Optional["EventLogRecord"]:
"""Returns the latest event log record for the given asset partition of an asset. For
observable source assets, this will be an AssetObservation, otherwise it will be an
AssetMaterialization.
"""
# in the simple case, just use the asset record
if (
before_cursor is None
and asset_partition.partition_key is None
and not (
self.asset_graph.has(asset_partition.asset_key)
and self.asset_graph.get(asset_partition.asset_key).is_observable
)
):
asset_record = self.get_asset_record(asset_partition.asset_key)
if asset_record is None:
return None
return asset_record.asset_entry.last_materialization_record
records_filter = AssetRecordsFilter(
asset_key=asset_partition.asset_key,
asset_partitions=(
[asset_partition.partition_key] if asset_partition.partition_key else None
),
before_storage_id=before_cursor,
)
# For observable assets, we fetch the most recent observation and materialization and return
# whichever is more recent. For non-observable assets, we just fetch the most recent
# materialization.
materialization_records = self.instance.fetch_materializations(
records_filter, ascending=False, limit=1
).records
if self.asset_graph.get(asset_partition.asset_key).is_observable:
observation_records = self.instance.fetch_observations(
records_filter, ascending=False, limit=1
).records
all_records = sorted(
[*materialization_records, *observation_records],
key=lambda x: x.timestamp,
reverse=True,
)
else:
all_records = materialization_records
return next(iter(all_records), None)
@cached_method
def _get_latest_materialization_or_observation_storage_ids_by_asset_partition(
self, *, asset_key: AssetKey
) -> Mapping[AssetKeyPartitionKey, Optional[int]]:
"""Returns a mapping from asset partition to the latest storage id for that asset partition
for all asset partitions associated with the given asset key.
Note that for partitioned assets, an asset partition with a None partition key will be
present in the mapping, representing the latest storage id for the asset as a whole.
"""
asset_partition = AssetKeyPartitionKey(asset_key)
latest_record = self._get_latest_materialization_or_observation_record(
asset_partition=asset_partition
)
latest_storage_ids = {
asset_partition: latest_record.storage_id if latest_record is not None else None
}
if self.asset_graph.get(asset_key).is_partitioned:
latest_storage_ids.update(
{
AssetKeyPartitionKey(asset_key, partition_key): storage_id
for partition_key, storage_id in self.instance.get_latest_storage_id_by_partition(
asset_key, event_type=self._event_type_for_key(asset_key)
).items()
}
)
return latest_storage_ids
def get_latest_materialization_or_observation_storage_id(
self, asset_partition: AssetKeyPartitionKey
) -> Optional[int]:
"""Returns the latest storage id for the given asset partition. If the asset has never been
materialized, returns None.
Args:
asset_partition (AssetKeyPartitionKey): The asset partition to query.
"""
if asset_partition.partition_key is None:
record = self._get_latest_materialization_or_observation_record(
asset_partition=asset_partition
)
return record.storage_id if record else None
return self._get_latest_materialization_or_observation_storage_ids_by_asset_partition(
asset_key=asset_partition.asset_key
).get(asset_partition)
def asset_partition_has_materialization_or_observation(
self,
asset_partition: AssetKeyPartitionKey,
after_cursor: Optional[int] = None,
) -> bool:
"""Returns True if there is a materialization record for the given asset partition after
the specified cursor.
Args:
asset_partition (AssetKeyPartitionKey): The asset partition to query.
after_cursor (Optional[int]): Filter parameter such that only records with a storage_id
greater than this value will be considered.
"""
asset_key = asset_partition.asset_key
if self.asset_graph.has(asset_key) and self.asset_graph.get(asset_key).is_materializable:
asset_record = self.get_asset_record(asset_key)
if (
asset_record is None
or asset_record.asset_entry.last_materialization_record is None
or (
after_cursor
and asset_record.asset_entry.last_materialization_record.storage_id
<= after_cursor
)
):
return False
return (self.get_latest_materialization_or_observation_storage_id(asset_partition) or 0) > (
after_cursor or 0
)
def get_latest_materialization_or_observation_record(
self,
asset_partition: AssetKeyPartitionKey,
after_cursor: Optional[int] = None,
before_cursor: Optional[int] = None,
) -> Optional["EventLogRecord"]:
"""Returns the latest record for the given asset partition given the specified cursors.
Args:
asset_partition (AssetKeyPartitionKey): The asset partition to query.
after_cursor (Optional[int]): Filter parameter such that only records with a storage_id
greater than this value will be considered.
before_cursor (Optional[int]): Filter parameter such that only records with a storage_id
less than this value will be considered.
"""
check.param_invariant(
not (after_cursor and before_cursor),
"before_cursor",
"Cannot set both before_cursor and after_cursor",
)
# first, do a quick check to eliminate the case where we know there is no record
if not self.asset_partition_has_materialization_or_observation(
asset_partition, after_cursor
):
return None
# then, if the before_cursor is after our latest record's storage id, we can just return
# the latest record
elif (before_cursor or 0) > (
self.get_latest_materialization_or_observation_storage_id(asset_partition) or 0
):
return self._get_latest_materialization_or_observation_record(
asset_partition=asset_partition
)
# otherwise, do the explicit query
return self._get_latest_materialization_or_observation_record(
asset_partition=asset_partition, before_cursor=before_cursor
)
####################
# OBSERVATIONS
####################
@cached_method
def next_version_record(
self,
*,
asset_key: AssetKey,
after_cursor: Optional[int],
data_version: Optional[DataVersion],
) -> Optional["EventLogRecord"]:
has_more = True
cursor = None
while has_more:
result = self.instance.fetch_observations(
AssetRecordsFilter(asset_key=asset_key, after_storage_id=after_cursor),
limit=RECORD_BATCH_SIZE,
cursor=cursor,
ascending=True,
)
has_more = result.has_more
cursor = result.cursor
for record in result.records:
record_version = extract_data_version_from_entry(record.event_log_entry)
if record_version is not None and record_version != data_version:
return record
# no records found with a new data version
return None
####################
# RUNS
####################
@cached_method
def _get_run_record_by_id(self, *, run_id: str) -> Optional[RunRecord]:
return self.instance.get_run_record_by_id(run_id)
def _get_run_by_id(self, run_id: str) -> Optional[DagsterRun]:
run_record = self._get_run_record_by_id(run_id=run_id)
if run_record is not None:
return run_record.dagster_run
return None
@cached_method
def _get_planned_materializations_for_run_from_snapshot(
self, *, snapshot_id: str
) -> AbstractSet[AssetKey]:
execution_plan_snapshot = check.not_none(
self._instance.get_execution_plan_snapshot(snapshot_id)
)
return execution_plan_snapshot.asset_selection
@cached_method
def _get_planned_materializations_for_run_from_events(
self, *, run_id: str
) -> AbstractSet[AssetKey]:
"""Provides a fallback for fetching the planned materializations for a run from
the ASSET_MATERIALIZATION_PLANNED events in the event log, in cases where this information
is not available on the DagsterRun object.
Args:
run_id (str): The run id
"""
materializations_planned = self.instance.get_records_for_run(
run_id=run_id, of_type=DagsterEventType.ASSET_MATERIALIZATION_PLANNED
).records
return set(cast("AssetKey", record.asset_key) for record in materializations_planned)
def get_planned_materializations_for_run(self, run_id: str) -> AbstractSet[AssetKey]:
"""Returns the set of asset keys that are planned to be materialized by the run.
Args:
run_id (str): The run id
"""
run = self._get_run_by_id(run_id=run_id)
if run is None:
return set()
elif run.asset_selection:
return run.asset_selection
elif run.execution_plan_snapshot_id:
return self._get_planned_materializations_for_run_from_snapshot(
snapshot_id=check.not_none(run.execution_plan_snapshot_id)
)
else:
# must resort to querying the event log
return self._get_planned_materializations_for_run_from_events(run_id=run_id)
def is_asset_planned_for_run(
self, run_id: str, asset: Union[AssetKey, AssetKeyPartitionKey]
) -> bool:
"""Returns True if the asset is planned to be materialized by the run."""
run = self._get_run_by_id(run_id=run_id)
if not run:
return False
if isinstance(asset, AssetKeyPartitionKey):
asset_key = asset.asset_key
if run.tags.get(PARTITION_NAME_TAG) != asset.partition_key:
return False
else:
asset_key = asset
return asset_key in self.get_planned_materializations_for_run(run_id=run_id)
@cached_method
def get_current_materializations_for_run(self, *, run_id: str) -> AbstractSet[AssetKey]:
"""Returns the set of asset keys that have been materialized by a given run.
Args:
run_id (str): The run id
"""
materializations = self.instance.get_records_for_run(
run_id=run_id,
of_type=DagsterEventType.ASSET_MATERIALIZATION,
).records
return set(cast("AssetKey", record.asset_key) for record in materializations)
####################
# BACKFILLS
####################
@cached_method
def get_active_backfill_datas(self) -> Sequence["AssetBackfillData"]:
from dagster._core.execution.backfill import BulkActionsFilter, BulkActionStatus
active_backfills = [
backfill
for backfill in self.instance.get_backfills(
filters=BulkActionsFilter(statuses=[BulkActionStatus.REQUESTED])
)
if backfill.is_asset_backfill
]
backfill_datas = []
for backfill in active_backfills:
try:
backfill_datas.append(backfill.get_asset_backfill_data(self.asset_graph))
except DagsterDefinitionChangedDeserializationError:
self._logger.warning(
f"Not considering assets in backfill {backfill.backfill_id} since its"
" data could not be deserialized"
)
# Backfill can't be loaded, so no risk of the assets interfering
continue
return backfill_datas
@cached_method
def get_active_backfill_target_asset_graph_subset(self) -> AssetGraphSubset:
"""Returns an AssetGraphSubset representing the set of assets that are currently targeted by
an active asset backfill.
"""
result = AssetGraphSubset()
for data in self.get_active_backfill_datas():
result |= data.target_subset
return result
@cached_method
def get_active_backfill_in_progress_asset_graph_subset(self) -> AssetGraphSubset:
"""Returns an AssetGraphSubset representing the set of assets that are currently targeted by
an active asset backfill and have not yet been materialized or failed.
"""
result = AssetGraphSubset()
for data in self.get_active_backfill_datas():
in_progress_subset = (
data.target_subset - data.materialized_subset - data.failed_and_downstream_subset
)
result |= in_progress_subset
return result
####################
# PARTITIONS
####################
def get_materialized_partitions(
self, asset_key: AssetKey, before_cursor: Optional[int] = None
) -> set[str]:
"""Returns a list of the partitions that have been materialized for the given asset key.
Args:
asset_key (AssetKey): The asset key.
before_cursor (Optional[int]): The cursor before which to look for materialized
partitions. If not provided, will look at all materializations.
"""
if (
before_cursor not in self._asset_partitions_cache
or asset_key not in self._asset_partitions_cache[before_cursor]
):
self._asset_partitions_cache[before_cursor][asset_key] = (
self.instance.get_materialized_partitions(
asset_key=asset_key, before_cursor=before_cursor
)
)
return self._asset_partitions_cache[before_cursor][asset_key]
####################
# DYNAMIC PARTITIONS
####################
def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]:
"""Returns a list of partitions for a partitions definition."""
if partitions_def_name not in self._dynamic_partitions_cache:
self._dynamic_partitions_cache[partitions_def_name] = (
self.instance.get_dynamic_partitions(partitions_def_name)
)
return self._dynamic_partitions_cache[partitions_def_name]
def get_paginated_dynamic_partitions(
self, partitions_def_name: str, limit: int, ascending: bool, cursor: Optional[str] = None
) -> PaginatedResults[str]:
if partitions_def_name not in self._dynamic_partitions_cache:
return self.instance.get_paginated_dynamic_partitions(
partitions_def_name=partitions_def_name,
limit=limit,
ascending=ascending,
cursor=cursor,
)
# the full set of partition keys are cached... create a sequence connection from the cached keys
partition_keys = self._dynamic_partitions_cache[partitions_def_name]
return PaginatedResults.create_from_sequence(
seq=partition_keys, limit=limit, ascending=ascending, cursor=cursor
)
def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool:
return partition_key in self.get_dynamic_partitions(partitions_def_name)
def get_dynamic_partitions_definition_id(self, partitions_def_name: str) -> str:
return self.instance.get_dynamic_partitions_definition_id(partitions_def_name)
@cached_method
def asset_partitions_with_newly_updated_parents_and_new_cursor(
self,
*,
latest_storage_id: Optional[int],
child_asset_key: AssetKey,
map_old_time_partitions: bool = True,
max_child_partitions: Optional[int] = None,
) -> tuple[AbstractSet[AssetKeyPartitionKey], Optional[int]]:
"""Finds asset partitions of the given child whose parents have been materialized since
latest_storage_id.
"""
max_storage_ids = [
self.get_latest_materialization_or_observation_storage_id(
AssetKeyPartitionKey(child_asset_key)
)
]
child_asset = self.asset_graph.get(child_asset_key)
if not child_asset.parent_keys:
return set(), max(filter(None, [latest_storage_id, *max_storage_ids]), default=None)
child_time_partitions_def = get_time_partitions_def(child_asset.partitions_def)
child_asset_partitions_with_updated_parents = set()
for parent_asset_key in self.asset_graph.get(child_asset_key).parent_keys:
# ignore non-existent parents
if not self.asset_graph.has(parent_asset_key):
continue
# if the parent has not been updated at all since the latest_storage_id, then skip
if not self.get_asset_partitions_updated_after_cursor(
asset_key=parent_asset_key,
asset_partitions=None,
after_cursor=latest_storage_id,
respect_materialization_data_versions=False,
):
continue
# keep track of the maximum storage id that we've seen for a given parent
max_storage_ids.append(
self.get_latest_materialization_or_observation_storage_id(
AssetKeyPartitionKey(parent_asset_key)
)
)
parent_partitions_def: PartitionsDefinition = self.asset_graph.get(
parent_asset_key
).partitions_def
if parent_partitions_def is None:
latest_parent_record = check.not_none(
self.get_latest_materialization_or_observation_record(
AssetKeyPartitionKey(parent_asset_key), after_cursor=latest_storage_id
)
)
for child_partition_key in (
self.asset_graph.get_child_partition_keys_of_parent(
parent_partition_key=None,
parent_asset_key=parent_asset_key,
child_asset_key=child_asset_key,
)
if child_asset.partitions_def
else [None]
):
if not (
# when mapping from unpartitioned assets to time partitioned assets, we ignore
# historical time partitions
not map_old_time_partitions
and child_time_partitions_def is not None
and get_time_partition_key(child_asset.partitions_def, child_partition_key)
!= child_time_partitions_def.get_last_partition_key()
) and not self.is_asset_planned_for_run(
latest_parent_record.run_id, child_asset_key
):
child_asset_partitions_with_updated_parents.add(
AssetKeyPartitionKey(child_asset_key, child_partition_key)
)
else:
# we know a parent updated, and because the parent has a partitions def and the
# child does not, the child could not have been materialized in the same run
if child_asset.partitions_def is None:
child_asset_partitions_with_updated_parents = {
AssetKeyPartitionKey(child_asset_key)
}
break
# the set of asset partitions which have been updated since the latest storage id
parent_partitions_subset = self.get_asset_subset_updated_after_cursor(
asset_key=parent_asset_key,
after_cursor=latest_storage_id,
require_data_version_update=False,
).subset_value
# we are mapping from the partitions of the parent asset to the partitions of
# the child asset
partition_mapping = self.asset_graph.get_partition_mapping(
key=child_asset_key, parent_asset_key=parent_asset_key
)
try:
child_partitions_subset = (
partition_mapping.get_downstream_partitions_for_partitions(
parent_partitions_subset,
upstream_partitions_def=parent_partitions_def,
downstream_partitions_def=child_asset.partitions_def,
)
)
except DagsterInvalidDefinitionError as e:
# add a more helpful error message to the stack
raise DagsterInvalidDefinitionError(
f"Could not map partitions between parent {parent_asset_key.to_string()} "
f"and child {child_asset_key.to_string()}."
) from e
# Prefer more recent time-based partitions, particularly if we end up filtering
# using max_child_partitions (not a strict guarantee that this will always return
# the most recent partitions in time though)
child_partitions = sorted(
child_partitions_subset.get_partition_keys(), reverse=True
)
if max_child_partitions is not None:
child_partitions = child_partitions[:max_child_partitions]
for child_partition in child_partitions:
# we need to see if the child is planned for the same run, but this is
# expensive, so we try to avoid doing so in as many situations as possible
child_asset_partition = AssetKeyPartitionKey(child_asset_key, child_partition)
if (
# if child has a different partitions def than the parent, then it must
# have been executed in a different run, so it's a valid candidate
child_asset.partitions_def != parent_partitions_def
# if child partition key is not the same as any newly materialized
# parent key, then it could not have been executed in the same run as
# its parent
or child_partition not in parent_partitions_subset
# if child partition is not failed or in progress, then if it was
# executed in the same run as its parent, then it must have been
# materialized more recently than its parent
or child_partition
not in self.get_failed_or_in_progress_subset(asset_key=child_asset_key)
):
child_asset_partitions_with_updated_parents.add(child_asset_partition)
else:
# manually query to see if this asset partition was intended to be
# executed in the same run as its parent
latest_partition_record = check.not_none(
self.get_latest_materialization_or_observation_record(
AssetKeyPartitionKey(parent_asset_key, child_partition),
after_cursor=latest_storage_id,
)
)
if not self.is_asset_planned_for_run(
latest_partition_record.run_id, child_asset_key
):
child_asset_partitions_with_updated_parents.add(child_asset_partition)
# the new latest storage id will be the greatest observed storage id among this asset and
# its parents
new_latest_storage_id = max(
filter(None, [latest_storage_id, *max_storage_ids]), default=None
)
return (child_asset_partitions_with_updated_parents, new_latest_storage_id)
####################
# RECONCILIATION
####################
    def _asset_partition_versions_updated_after_cursor(
        self,
        asset_key: AssetKey,
        asset_partitions: AbstractSet[AssetKeyPartitionKey],
        after_cursor: int,
    ) -> AbstractSet[AssetKeyPartitionKey]:
        """Return the subset of *asset_partitions* whose data version changed after *after_cursor*.

        Unpartitioned assets are handled locally by comparing the data version of
        the newest record after the cursor against the newest record at or before
        it; partitioned assets delegate the comparison to the event log storage.
        """
        if not self.asset_graph.get(asset_key).is_partitioned:
            asset_partition = AssetKeyPartitionKey(asset_key)
            # Newest record strictly after the cursor (None if nothing newer).
            latest_record = self.get_latest_materialization_or_observation_record(
                asset_partition, after_cursor=after_cursor
            )
            latest_data_version = (
                extract_data_version_from_entry(latest_record.event_log_entry)
                if latest_record
                else None
            )
            # before_cursor appears to be exclusive, so after_cursor + 1 includes the
            # record stored exactly at after_cursor — TODO confirm against the API.
            previous_record = self.get_latest_materialization_or_observation_record(
                asset_partition, before_cursor=after_cursor + 1
            )
            previous_data_version = (
                extract_data_version_from_entry(previous_record.event_log_entry)
                if previous_record
                else None
            )
            # Only report an update when the data version actually changed.
            return set([asset_partition]) if latest_data_version != previous_data_version else set()
        # Partitioned asset: ask the event log storage which of the given
        # partition keys have updated data versions since the cursor.
        partition_keys = [
            asset_partition.partition_key
            for asset_partition in asset_partitions
            if asset_partition.partition_key
        ]
        updated_partition_keys = (
            self.instance.event_log_storage.get_updated_data_version_partitions(
                asset_key=asset_key,
                partitions=partition_keys,
                since_storage_id=after_cursor,
            )
        )
        return set(
            AssetKeyPartitionKey(asset_key, partition_key)
            for partition_key in updated_partition_keys
        )
    @cached_method
    def get_asset_materializations_updated_after_cursor(
        self,
        asset_key: AssetKey,
        after_cursor: int,
    ) -> Sequence["EventLogRecord"]:
        """Return all materialization records for *asset_key* with storage id > *after_cursor*.

        Records are fetched in chunks to bound the size of each storage query;
        results are cached per (asset_key, after_cursor) via ``@cached_method``.
        """
        from dagster._utils.storage import get_materialization_chunk_size
        has_more = True
        cursor = None
        new_materializations = []
        # Page through the event log until the storage reports no more records.
        while has_more:
            result = self.instance.fetch_materializations(
                AssetRecordsFilter(asset_key=asset_key, after_storage_id=after_cursor),
                cursor=cursor,
                limit=get_materialization_chunk_size(),
            )
            cursor = result.cursor
            has_more = result.has_more
            new_materializations.extend(result.records)
        return new_materializations
def get_asset_partitions_updated_after_cursor(
self,
asset_key: AssetKey,
asset_partitions: Optional[AbstractSet[AssetKeyPartitionKey]],
after_cursor: Optional[int],
respect_materialization_data_versions: bool,
) -> AbstractSet[AssetKeyPartitionKey]:
unvalidated_asset_partitions = self._get_unvalidated_asset_partitions_updated_after_cursor(
asset_key, asset_partitions, after_cursor, respect_materialization_data_versions
)
partitions_def = self.asset_graph.get(asset_key).partitions_def
if partitions_def is None:
return {ap for ap in unvalidated_asset_partitions if ap.partition_key is None}
else:
return {
ap
for ap in unvalidated_asset_partitions
if ap.partition_key is not None
and partitions_def.has_partition_key(partition_key=ap.partition_key)
}
    def _get_unvalidated_asset_partitions_updated_after_cursor(
        self,
        asset_key: AssetKey,
        asset_partitions: Optional[AbstractSet[AssetKeyPartitionKey]],
        after_cursor: Optional[int],
        respect_materialization_data_versions: bool,
    ) -> AbstractSet[AssetKeyPartitionKey]:
        """Returns the set of asset partitions that have been updated after the given cursor.
        Args:
            asset_key (AssetKey): The asset key to check.
            asset_partitions (Optional[Sequence[AssetKeyPartitionKey]]): If supplied, will filter
                the set of checked partitions to the given partitions.
            after_cursor (Optional[int]): The cursor after which to look for updates.
            respect_materialization_data_versions (bool): If True, will use data versions to filter
                out asset partitions which were materialized, but have not had their data
                versions changed since the given cursor.
            NOTE: This boolean has been temporarily disabled
        """
        # Fast exit: no record at all after the cursor means nothing updated.
        if not self.asset_partition_has_materialization_or_observation(
            AssetKeyPartitionKey(asset_key), after_cursor=after_cursor
        ):
            return set()
        last_storage_id_by_asset_partition = (
            self._get_latest_materialization_or_observation_storage_ids_by_asset_partition(
                asset_key=asset_key
            )
        )
        if asset_partitions is None:
            # No filter: keep every partition whose latest record is newer than
            # the cursor (None cursors/ids are treated as 0).
            updated_after_cursor = {
                asset_partition
                for asset_partition, latest_storage_id in last_storage_id_by_asset_partition.items()
                if (latest_storage_id or 0) > (after_cursor or 0)
            }
        else:
            # Optimized for the case where there are many partitions and last_storage_id_by_asset_partition
            # is large, but we're only looking for the result for a small number of partitions
            updated_after_cursor = set()
            for asset_partition in asset_partitions:
                latest_storage_id = last_storage_id_by_asset_partition.get(asset_partition)
                if latest_storage_id is not None and latest_storage_id > (after_cursor or 0):
                    updated_after_cursor.add(asset_partition)
        if not updated_after_cursor:
            return set()
        # Skip the (expensive) data-version comparison unless it is required.
        if after_cursor is None or (
            not self.asset_graph.get(asset_key).is_observable
            and not respect_materialization_data_versions
        ):
            return updated_after_cursor
        return self._asset_partition_versions_updated_after_cursor(
            asset_key, updated_after_cursor, after_cursor
        )
@cached_method
def get_asset_subset_updated_after_cursor(
self, *, asset_key: AssetKey, after_cursor: Optional[int], require_data_version_update: bool
) -> SerializableEntitySubset[AssetKey]:
"""Returns the AssetSubset of the given asset that has been updated after the given cursor."""
partitions_def = self.asset_graph.get(asset_key).partitions_def
validated_asset_partitions = self.get_asset_partitions_updated_after_cursor(
asset_key,
asset_partitions=None,
after_cursor=after_cursor,
respect_materialization_data_versions=require_data_version_update,
)
# TODO: replace this return value with EntitySubset
return ValidAssetSubset.from_asset_partitions_set(
asset_key, partitions_def, validated_asset_partitions
)
    @cached_method
    def get_asset_subset_updated_after_time(
        self, *, asset_key: AssetKey, after_time: datetime
    ) -> SerializableEntitySubset[AssetKey]:
        """Returns the AssetSubset of the given asset that has been updated after the given time."""
        partitions_def = self.asset_graph.get(asset_key).partitions_def
        # Pick the fetch API matching the asset's event type (materialization
        # vs. observation).
        method = (
            self.instance.fetch_materializations
            if self._event_type_for_key(asset_key) == DagsterEventType.ASSET_MATERIALIZATION
            else self.instance.fetch_observations
        )
        # Earliest record with a timestamp after `after_time`, or None.
        first_event_after_time = next(
            iter(
                method(
                    AssetRecordsFilter(asset_key=asset_key, after_timestamp=after_time.timestamp()),
                    limit=1,
                    ascending=True,
                ).records
            ),
            None,
        )
        if not first_event_after_time:
            # TODO: replace this return value with EntitySubset
            return ValidAssetSubset.empty(asset_key, partitions_def=partitions_def)
        else:
            # storage_id - 1 so the cursor-based lookup (exclusive) includes
            # this first event itself.
            return self.get_asset_subset_updated_after_cursor(
                asset_key=asset_key,
                after_cursor=first_event_after_time.storage_id - 1,
                require_data_version_update=False,
            )
    def get_parent_asset_partitions_updated_after_child(
        self,
        *,
        asset_partition: AssetKeyPartitionKey,
        parent_asset_partitions: AbstractSet[AssetKeyPartitionKey],
        respect_materialization_data_versions: bool,
        ignored_parent_keys: AbstractSet[AssetKey],
    ) -> AbstractSet[AssetKeyPartitionKey]:
        """Returns values inside parent_asset_partitions that correspond to asset partitions that
        have been updated since the latest materialization of asset_partition.
        """
        # Group the parent partitions by asset key so each parent asset can be
        # queried once.
        parent_asset_partitions_by_key: dict[AssetKey, set[AssetKeyPartitionKey]] = defaultdict(set)
        for parent in parent_asset_partitions:
            parent_asset_partitions_by_key[parent.asset_key].add(parent)
        partitions_def = self.asset_graph.get(asset_partition.asset_key).partitions_def
        updated_parents = set()
        for parent_key, parent_asset_partitions in parent_asset_partitions_by_key.items():
            # ignore updates to particular parents
            if parent_key in ignored_parent_keys:
                continue
            # ignore non-existent parents
            if not self.asset_graph.has(parent_key):
                continue
            # when mapping from unpartitioned assets to time partitioned assets, we ignore
            # historical time partitions
            if (
                isinstance(partitions_def, TimeWindowPartitionsDefinition)
                and not self.asset_graph.get(parent_key).is_partitioned
                and asset_partition.partition_key != partitions_def.get_last_partition_key()
            ):
                continue
            # Cursor is the child's latest materialization/observation, so only
            # parent updates newer than the child are collected.
            updated_parents.update(
                self.get_asset_partitions_updated_after_cursor(
                    asset_key=parent_key,
                    asset_partitions=parent_asset_partitions,
                    after_cursor=self.get_latest_materialization_or_observation_storage_id(
                        asset_partition
                    ),
                    respect_materialization_data_versions=respect_materialization_data_versions,
                )
            )
        return updated_parents
def have_ignorable_partition_mapping_for_outdated(
self, asset_key: AssetKey, upstream_asset_key: AssetKey
) -> bool:
"""Returns whether the given assets have a partition mapping between them which can be
ignored in the context of calculating if an asset is outdated or not.
These mappings are ignored in cases where respecting them would require an unrealistic
number of upstream partitions to be in a 'good' state before allowing a downstream asset
to be considered up to date.
"""
# Self partition mappings impose constraints on all historical partitions
return asset_key == upstream_asset_key
    @cached_method
    def get_outdated_ancestors(
        self, *, asset_partition: AssetKeyPartitionKey
    ) -> AbstractSet[AssetKey]:
        """Return the keys of all ancestor assets (possibly including this
        asset itself) that make this asset partition outdated.

        An asset partition is considered outdated at the root when some parent
        partition was updated after this partition's latest materialization;
        otherwise outdatedness is inherited recursively from parents.
        """
        asset_key = asset_partition.asset_key
        partition_key = asset_partition.partition_key
        # Unknown or non-materializable assets can never be outdated.
        if not (
            self.asset_graph.has(asset_key) and self.asset_graph.get(asset_key).is_materializable
        ):
            return set()
        parent_asset_partitions = self.asset_graph.get_parents_partitions(
            asset_key=asset_key, partition_key=partition_key
        ).parent_partitions
        # the set of parent keys which we don't need to check
        ignored_parent_keys = {
            parent
            for parent in self.asset_graph.get(asset_key).parent_keys
            if self.have_ignorable_partition_mapping_for_outdated(asset_key, parent)
        }
        updated_parents = self.get_parent_asset_partitions_updated_after_child(
            asset_partition=asset_partition,
            parent_asset_partitions=parent_asset_partitions,
            respect_materialization_data_versions=self._respect_materialization_data_versions,
            ignored_parent_keys=ignored_parent_keys,
        )
        # Any newer parent makes this asset itself an outdated root.
        root_unreconciled_ancestors = {asset_key} if updated_parents else set()
        # recurse over parents
        for parent in set(parent_asset_partitions) - updated_parents:
            if parent.asset_key in ignored_parent_keys:
                continue
            root_unreconciled_ancestors.update(self.get_outdated_ancestors(asset_partition=parent))
        return root_unreconciled_ancestors
| CachingInstanceQueryer |
python | django__django | django/test/client.py | {
"start": 22068,
"end": 25780
} | class ____(RequestFactory):
"""
Class that lets you create mock ASGI-like Request objects for use in
testing. Usage:
rf = AsyncRequestFactory()
get_request = rf.get("/hello/")
post_request = rf.post("/submit/", {"foo": "bar"})
Once you have a request object you can pass it to any view function,
including synchronous ones. The reason we have a separate class here is:
a) this makes ASGIRequest subclasses, and
b) AsyncClient can subclass it.
"""
    def _base_scope(self, **request):
        """The base scope for a request.

        Returns a minimal valid ASGI HTTP scope dict; entries in ``request``
        override the built-in defaults and ``self.defaults``.
        """
        # This is a minimal valid ASGI scope, plus:
        # - headers['cookie'] for cookie support,
        # - 'client' often useful, see #8551.
        scope = {
            "asgi": {"version": "3.0"},
            "type": "http",
            "http_version": "1.1",
            "client": ["127.0.0.1", 0],
            "server": ("testserver", "80"),
            "scheme": "http",
            "method": "GET",
            "headers": [],
            **self.defaults,
            **request,
        }
        # Serialize the factory's cookie jar into a single ASGI cookie header;
        # sorting keeps the header byte-for-byte deterministic.
        scope["headers"].append(
            (
                b"cookie",
                b"; ".join(
                    sorted(
                        ("%s=%s" % (morsel.key, morsel.coded_value)).encode("ascii")
                        for morsel in self.cookies.values()
                    )
                ),
            )
        )
        return scope
def request(self, **request):
"""Construct a generic request object."""
# This is synchronous, which means all methods on this class are.
# AsyncClient, however, has an async request function, which makes all
# its methods async.
if "_body_file" in request:
body_file = request.pop("_body_file")
else:
body_file = FakePayload("")
# Wrap FakePayload body_file to allow large read() in test environment.
return ASGIRequest(
self._base_scope(**request), LimitedStream(body_file, len(body_file))
)
    def generic(
        self,
        method,
        path,
        data="",
        content_type="application/octet-stream",
        secure=False,
        *,
        headers=None,
        query_params=None,
        **extra,
    ):
        """Construct an arbitrary HTTP request.

        Extra keyword arguments are merged over ``self.defaults`` and appended
        to the ASGI headers list (lower-cased names, latin1-encoded values).
        """
        parsed = urlsplit(str(path))  # path can be lazy.
        data = force_bytes(data, settings.DEFAULT_CHARSET)
        # Start from the pieces of the ASGI scope that depend on this call.
        s = {
            "method": method,
            "path": self._get_path(parsed),
            "server": ("127.0.0.1", "443" if secure else "80"),
            "scheme": "https" if secure else "http",
            "headers": [(b"host", b"testserver")],
        }
        if self.defaults:
            extra = {**self.defaults, **extra}
        if data:
            # A body implies content-length/content-type headers and a payload.
            s["headers"].extend(
                [
                    (b"content-length", str(len(data)).encode("ascii")),
                    (b"content-type", content_type.encode("ascii")),
                ]
            )
            s["_body_file"] = FakePayload(data)
        # Query string priority: explicit query_params, then QUERY_STRING from
        # extra, then whatever was embedded in the URL.
        if query_params:
            s["query_string"] = urlencode(query_params, doseq=True)
        elif query_string := extra.pop("QUERY_STRING", None):
            s["query_string"] = query_string
        else:
            # If QUERY_STRING is absent or empty, we want to extract it from
            # the URL.
            s["query_string"] = parsed.query
        if headers:
            extra.update(HttpHeaders.to_asgi_names(headers))
        s["headers"] += [
            (key.lower().encode("ascii"), value.encode("latin1"))
            for key, value in extra.items()
        ]
        return self.request(**s)
| AsyncRequestFactory |
python | pytorch__pytorch | test/distributed/tensor/test_convolution_ops.py | {
"start": 1045,
"end": 12894
} | class ____(DTensorTestBase):
    @property
    def world_size(self) -> int:
        """Number of processes this test suite runs with."""
        # hard code world size to 2
        return 2
    @with_comms
    def test_downsampling_convolution(self):
        """Train a strided Conv2d on width-sharded DTensor inputs and verify
        the final weights/biases match identical plain-tensor training."""
        device_mesh = self.build_device_mesh()
        shard_spec = [Shard(3)]  # shard along dim 3 (width of NCHW input)
        input_list = torch.rand(ITER_TIME, 7, 3, 512, 1024)
        grad_output_list = torch.rand(ITER_TIME, 7, 256, 128, 256) * 1e-3
        model = nn.Conv2d(3, 256, kernel_size=4, stride=4, padding=0).to(
            self.device_type
        )
        # Deterministic init so both training runs start from the same params.
        nn.init.ones_(model.weight)
        nn.init.zeros_(model.bias)
        model_gt = copy.deepcopy(model).to(self.device_type)
        # training with dtensor
        model = distribute_module(
            model, device_mesh, _conv_fn, input_fn=None, output_fn=None
        )
        optimizer = torch.optim.SGD(model.parameters(), lr=LR)
        for i in range(ITER_TIME):
            optimizer.zero_grad()
            inp = input_list[i].to(self.device_type).requires_grad_()
            inp_dtensor = distribute_tensor(inp, device_mesh, shard_spec)
            output = model(inp_dtensor)
            grad_output = grad_output_list[i].to(self.device_type)
            grad_output_dtensor = distribute_tensor(
                grad_output, device_mesh, shard_spec
            )
            output.backward(grad_output_dtensor)
            optimizer.step()
        # training with plain tensor
        optimizer_gt = torch.optim.SGD(model_gt.parameters(), lr=LR)
        for i in range(ITER_TIME):
            optimizer_gt.zero_grad()
            inp = input_list[i].to(self.device_type).requires_grad_()
            output = model_gt(inp)
            grad_output = grad_output_list[i].to(self.device_type)
            output.backward(grad_output)
            optimizer_gt.step()
        # Compare final parameters via absolute and relative mean squared error.
        weight_diff_abs = model.weight.to_local() - model_gt.weight
        bias_diff_abs = model.bias.to_local() - model_gt.bias
        weight_diff_rel = weight_diff_abs / (torch.abs(model_gt.weight) + 1e-8)
        bias_diff_rel = bias_diff_abs / (torch.abs(model_gt.bias) + 1e-8)
        weight_mse_abs = torch.mean(weight_diff_abs * weight_diff_abs).item()
        bias_mse_abs = torch.mean(bias_diff_abs * bias_diff_abs).item()
        weight_mse_rel = torch.mean(weight_diff_rel * weight_diff_rel).item()
        bias_mse_rel = torch.mean(bias_diff_rel * bias_diff_rel).item()
        self.assertTrue(
            weight_mse_abs <= 1e-6,
            f"Too large absolute mse for weight tensor, expected less equal 1e-6, got {weight_mse_abs}",
        )
        self.assertTrue(
            bias_mse_abs <= 1e-6,
            f"Too large absolute mse for bias tensor, expected less equal 1e-6, got {bias_mse_abs}",
        )
        self.assertTrue(
            weight_mse_rel <= 1e-6,
            f"Too large relative mse for weight tensor, expected less equal 1e-6, got {weight_mse_rel}",
        )
        self.assertTrue(
            bias_mse_rel <= 1e-6,
            f"Too large relative mse for bias tensor, expected less equal 1e-6, got {bias_mse_rel}",
        )
    # TODO: test_depthwise_convolution is broken in CI with gloo backend.
    # Temporarily disable it to unblock CI.
    @with_comms
    @skip_if_lt_x_gpu(2)
    def test_depthwise_convolution(self):
        """Train a depthwise Conv2d (groups == channels) on width-sharded
        DTensor inputs and verify parameters match plain-tensor training."""
        device_mesh = self.build_device_mesh()
        shard_spec = [Shard(3)]  # shard along dim 3 (width of NCHW input)
        input_list = torch.rand(ITER_TIME, 7, 256, 128, 256)
        grad_output_list = torch.rand(ITER_TIME, 7, 256, 128, 256) * 1e-3
        model = nn.Conv2d(256, 256, kernel_size=7, padding=3, groups=256).to(
            self.device_type
        )
        # Deterministic init so both training runs start from the same params.
        nn.init.ones_(model.weight)
        nn.init.zeros_(model.bias)
        model_gt = copy.deepcopy(model).to(self.device_type)
        # training with dtensor
        model = distribute_module(
            model, device_mesh, _conv_fn, input_fn=None, output_fn=None
        )
        optimizer = torch.optim.SGD(model.parameters(), lr=LR)
        for i in range(ITER_TIME):
            optimizer.zero_grad()
            inp = input_list[i].to(self.device_type).requires_grad_()
            inp_dtensor = distribute_tensor(inp, device_mesh, shard_spec)
            output = model(inp_dtensor)
            grad_output = grad_output_list[i].to(self.device_type)
            grad_output_dtensor = distribute_tensor(
                grad_output, device_mesh, shard_spec
            )
            output.backward(grad_output_dtensor)
            optimizer.step()
        # training with plain tensor
        optimizer_gt = torch.optim.SGD(model_gt.parameters(), lr=LR)
        for i in range(ITER_TIME):
            optimizer_gt.zero_grad()
            inp = input_list[i].to(self.device_type).requires_grad_()
            output = model_gt(inp)
            grad_output = grad_output_list[i].to(self.device_type)
            output.backward(grad_output)
            optimizer_gt.step()
        # Compare final parameters via absolute and relative mean squared error.
        weight_diff_abs = model.weight.to_local() - model_gt.weight
        bias_diff_abs = model.bias.to_local() - model_gt.bias
        weight_diff_rel = weight_diff_abs / (torch.abs(model_gt.weight) + 1e-8)
        bias_diff_rel = bias_diff_abs / (torch.abs(model_gt.bias) + 1e-8)
        weight_mse_abs = torch.mean(weight_diff_abs * weight_diff_abs).item()
        bias_mse_abs = torch.mean(bias_diff_abs * bias_diff_abs).item()
        weight_mse_rel = torch.mean(weight_diff_rel * weight_diff_rel).item()
        bias_mse_rel = torch.mean(bias_diff_rel * bias_diff_rel).item()
        self.assertTrue(
            weight_mse_abs <= 1e-6,
            f"Too large absolute mse for weight tensor, expected less equal 1e-6, got {weight_mse_abs}",
        )
        self.assertTrue(
            bias_mse_abs <= 1e-6,
            f"Too large absolute mse for bias tensor, expected less equal 1e-6, got {bias_mse_abs}",
        )
        self.assertTrue(
            weight_mse_rel <= 1e-6,
            f"Too large relative mse for weight tensor, expected less equal 1e-6, got {weight_mse_rel}",
        )
        self.assertTrue(
            bias_mse_rel <= 1e-6,
            f"Too large relative mse for bias tensor, expected less equal 1e-6, got {bias_mse_rel}",
        )
    @with_comms
    @skip_if_lt_x_gpu(2)
    def test_conv_backward_none_grad_inp(self):
        """Backward through F.conv2d on replicated DTensors: weight and bias
        (Parameters) receive gradients while the input, which was never marked
        requires_grad, does not."""
        device_mesh = self.build_device_mesh()
        conv = nn.Conv2d(64, 64, 3, padding=1).train()
        x = torch.randn(1, 64, 32, 32)
        x_dt = DTensor.from_local(x, device_mesh, [Replicate()])
        w = conv.weight
        w_dt = torch.nn.Parameter(DTensor.from_local(w, device_mesh, [Replicate()]))
        b = conv.bias
        b_dt = torch.nn.Parameter(DTensor.from_local(b, device_mesh, [Replicate()]))
        res = F.conv2d(x_dt, w_dt, b_dt, padding=1)
        dres = torch.rand_like(res)
        res.backward(dres)
        # Parameters accumulate grads; x_dt is a non-requires-grad leaf.
        self.assertTrue(w_dt.grad is not None)
        self.assertTrue(b_dt.grad is not None)
        self.assertTrue(x_dt.grad is None)
    def _run_single_arg_fwd(
        self, model, arg, placements=None
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Given model and arg, runs fwd model local and distbuted given device_mesh.

        Returns (distributed_output_as_full_tensor, local_output) for comparison.
        """
        device_mesh = self.build_device_mesh()
        # Copy before distribute_module mutates the original model in place.
        model_copy = copy.deepcopy(model).to(device=self.device_type)
        dist_model = distribute_module(model, device_mesh, _conv_fn)
        arg_dt = DTensor.from_local(arg, device_mesh, placements)
        out_dt = dist_model(arg_dt.to(device=self.device_type))
        out = model_copy(arg_dt.full_tensor())
        return (out_dt.full_tensor(), out)
    @with_comms
    def test_conv1d(self):
        """Distributed Conv1d forward matches the local forward."""
        model = nn.Conv1d(64, 64, 3, padding=1)
        x = torch.randn(1, 64, 8, device=self.device_type)
        out_dt, out = self._run_single_arg_fwd(model, x)
        self.assertEqual(out_dt, out)
    @with_comms
    def test_conv3d(self):
        """Distributed Conv3d forward (batch-sharded input) matches local."""
        model = nn.Conv3d(64, 64, 3, padding=1)
        x = torch.randn(1, 64, 8, 8, 8, device=self.device_type)
        out_dt, out = self._run_single_arg_fwd(model, x, [Shard(0)])
        self.assertEqual(out_dt, out)
    @with_tf32_off
    @with_comms
    def test_conv2d_no_bias_compile(self):
        """Test Conv2d with bias=False in compile mode (Issue #167091)
        Regression test: Previously this would fail during torch.compile
        tracing with AssertionError when bias_spec was None.
        """
        device_mesh = self.build_device_mesh()
        def conv_fn(x, w):
            # bias=None is the case that used to break compile tracing.
            return F.conv2d(x, w, bias=None, padding=1)
        compiled_fn = torch.compile(conv_fn)
        # Create tensors
        x = torch.randn(1, 4, 5, 5, device=self.device_type)
        w = torch.randn(8, 4, 3, 3, device=self.device_type)
        # Distribute tensors
        x_dt = distribute_tensor(x, device_mesh, [Replicate()])
        w_dt = distribute_tensor(w, device_mesh, [Replicate()])
        # Test eager mode for comparison
        result_eager = conv_fn(x_dt, w_dt)
        # Test compiled mode - this should not crash
        result_compiled = compiled_fn(x_dt, w_dt)
        # Verify shape is correct (the key regression test)
        self.assertEqual(result_compiled.shape, torch.Size([1, 8, 5, 5]))
        # Verify numerical correctness
        self.assertEqual(result_compiled.to_local(), result_eager.to_local())
    @with_comms
    def test_conv2d_no_bias_backward(self):
        """Test Conv2d backward pass with bias=False (Issue #167091)
        Regression test: Previously backward pass would fail when
        grad_bias_spec was None.
        """
        device_mesh = self.build_device_mesh()
        # Create tensors with requires_grad
        x = torch.randn(1, 4, 5, 5, device=self.device_type)
        w = torch.randn(8, 4, 3, 3, device=self.device_type, requires_grad=True)
        # Distribute tensors
        x_dt = distribute_tensor(x, device_mesh, [Replicate()])
        w_dt = torch.nn.Parameter(distribute_tensor(w, device_mesh, [Replicate()]))
        # Forward pass
        result = F.conv2d(x_dt, w_dt, bias=None, padding=1)
        # Backward pass - this should not crash
        grad_output = torch.randn_like(result)
        result.backward(grad_output)
        # Check weight gradient exists (the key regression test)
        self.assertIsNotNone(w_dt.grad)
        self.assertEqual(w_dt.grad.shape, torch.Size([8, 4, 3, 3]))
    @with_comms
    def test_conv2d_module_no_bias(self):
        """Test nn.Conv2d module with bias=False (Issue #167091)
        Regression test: Ensures nn.Conv2d with bias=False works with DTensor.
        """
        device_mesh = self.build_device_mesh()
        # Create model with bias=False
        model = nn.Conv2d(4, 8, kernel_size=3, padding=1, bias=False).to(
            self.device_type
        )
        nn.init.ones_(model.weight)
        # Distribute model
        model_dt = distribute_module(model, device_mesh, _conv_fn)
        # Create input
        x = torch.randn(1, 4, 5, 5, device=self.device_type)
        x_dt = distribute_tensor(x, device_mesh, [Replicate()])
        # Forward pass - this should not crash
        output_dt = model_dt(x_dt)
        # Check outputs shape is correct
        self.assertEqual(output_dt.shape, torch.Size([1, 8, 5, 5]))
        # Check that model.bias is None
        self.assertIsNone(model.bias)
# Variant of the suite generated to run against local tensors; tests that
# depend on unsupported send/recv collectives are skipped.
DistConvolutionOpsTestWithLocalTensor = create_local_tensor_test_class(
    DistConvolutionOpsTest,
    # Send / recv ops are not supported
    skipped_tests=[
        "test_conv_backward_none_grad_inp",
        "test_depthwise_convolution",
        "test_downsampling_convolution",
        # New tests for Issue #167091 - use send/recv via tp_convolution
        "test_conv2d_no_bias_compile",
        "test_conv2d_no_bias_backward",
        "test_conv2d_module_no_bias",
    ],
)
if __name__ == "__main__":
    run_tests()
| DistConvolutionOpsTest |
python | pypa__pipenv | pipenv/patched/pip/_internal/cli/cmdoptions.py | {
"start": 3904,
"end": 32044
} | class ____(Option):
TYPES = Option.TYPES + ("path", "package_name")
TYPE_CHECKER = Option.TYPE_CHECKER.copy()
TYPE_CHECKER["package_name"] = _package_name_option_check
TYPE_CHECKER["path"] = _path_option_check
###########
# options #
###########
help_: Callable[..., Option] = partial(
Option,
"-h",
"--help",
dest="help",
action="help",
help="Show help.",
)
debug_mode: Callable[..., Option] = partial(
Option,
"--debug",
dest="debug_mode",
action="store_true",
default=False,
help=(
"Let unhandled exceptions propagate outside the main subroutine, "
"instead of logging them to stderr."
),
)
isolated_mode: Callable[..., Option] = partial(
Option,
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
)
require_virtualenv: Callable[..., Option] = partial(
Option,
"--require-virtualenv",
"--require-venv",
dest="require_venv",
action="store_true",
default=False,
help=(
"Allow pip to only run in a virtual environment; "
"exit with an error otherwise."
),
)
override_externally_managed: Callable[..., Option] = partial(
Option,
"--break-system-packages",
dest="override_externally_managed",
action="store_true",
help="Allow pip to modify an EXTERNALLY-MANAGED Python installation",
)
python: Callable[..., Option] = partial(
Option,
"--python",
dest="python",
help="Run pip with the specified Python interpreter.",
)
verbose: Callable[..., Option] = partial(
Option,
"-v",
"--verbose",
dest="verbose",
action="count",
default=0,
help="Give more output. Option is additive, and can be used up to 3 times.",
)
no_color: Callable[..., Option] = partial(
Option,
"--no-color",
dest="no_color",
action="store_true",
default=False,
help="Suppress colored output.",
)
version: Callable[..., Option] = partial(
Option,
"-V",
"--version",
dest="version",
action="store_true",
help="Show version and exit.",
)
quiet: Callable[..., Option] = partial(
Option,
"-q",
"--quiet",
dest="quiet",
action="count",
default=0,
help=(
"Give less output. Option is additive, and can be used up to 3"
" times (corresponding to WARNING, ERROR, and CRITICAL logging"
" levels)."
),
)
progress_bar: Callable[..., Option] = partial(
Option,
"--progress-bar",
dest="progress_bar",
type="choice",
choices=["on", "off", "raw"],
default="on",
help="Specify whether the progress bar should be used [on, off, raw] (default: on)",
)
log: Callable[..., Option] = partial(
PipOption,
"--log",
"--log-file",
"--local-log",
dest="log",
metavar="path",
type="path",
help="Path to a verbose appending log.",
)
no_input: Callable[..., Option] = partial(
Option,
# Don't ask for input
"--no-input",
dest="no_input",
action="store_true",
default=False,
help="Disable prompting for input.",
)
keyring_provider: Callable[..., Option] = partial(
Option,
"--keyring-provider",
dest="keyring_provider",
choices=["auto", "disabled", "import", "subprocess"],
default="auto",
help=(
"Enable the credential lookup via the keyring library if user input is allowed."
" Specify which mechanism to use [auto, disabled, import, subprocess]."
" (default: %default)"
),
)
proxy: Callable[..., Option] = partial(
Option,
"--proxy",
dest="proxy",
type="str",
default="",
help="Specify a proxy in the form scheme://[user:passwd@]proxy.server:port.",
)
retries: Callable[..., Option] = partial(
Option,
"--retries",
dest="retries",
type="int",
default=5,
help="Maximum attempts to establish a new HTTP connection. (default: %default)",
)
resume_retries: Callable[..., Option] = partial(
Option,
"--resume-retries",
dest="resume_retries",
type="int",
default=0,
help="Maximum attempts to resume or restart an incomplete download. "
"(default: %default)",
)
timeout: Callable[..., Option] = partial(
Option,
"--timeout",
"--default-timeout",
metavar="sec",
dest="timeout",
type="float",
default=15,
help="Set the socket timeout (default %default seconds).",
)
def exists_action() -> Option:
    """Build the ``--exists-action`` option (a fresh list default per call)."""
    return Option(
        # Option when path already exist
        "--exists-action",
        dest="exists_action",
        type="choice",
        choices=["s", "i", "w", "b", "a"],
        default=[],
        action="append",
        metavar="action",
        help="Default action when a path already exists: "
        "(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort.",
    )
cert: Callable[..., Option] = partial(
PipOption,
"--cert",
dest="cert",
type="path",
metavar="path",
help=(
"Path to PEM-encoded CA certificate bundle. "
"If provided, overrides the default. "
"See 'SSL Certificate Verification' in pip documentation "
"for more information."
),
)
client_cert: Callable[..., Option] = partial(
PipOption,
"--client-cert",
dest="client_cert",
type="path",
default=None,
metavar="path",
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.",
)
index_url: Callable[..., Option] = partial(
Option,
"-i",
"--index-url",
"--pypi-url",
dest="index_url",
metavar="URL",
default=PyPI.simple_url,
help="Base URL of the Python Package Index (default %default). "
"This should point to a repository compliant with PEP 503 "
"(the simple repository API) or a local directory laid out "
"in the same format.",
)
def extra_index_url() -> Option:
    """Build the ``--extra-index-url`` option (a fresh list default per call)."""
    return Option(
        "--extra-index-url",
        dest="extra_index_urls",
        metavar="URL",
        action="append",
        default=[],
        help="Extra URLs of package indexes to use in addition to "
        "--index-url. Should follow the same rules as "
        "--index-url.",
    )
no_index: Callable[..., Option] = partial(
Option,
"--no-index",
dest="no_index",
action="store_true",
default=False,
help="Ignore package index (only looking at --find-links URLs instead).",
)
def find_links() -> Option:
    """Build the ``-f/--find-links`` option (a fresh list default per call)."""
    return Option(
        "-f",
        "--find-links",
        dest="find_links",
        action="append",
        default=[],
        metavar="url",
        help="If a URL or path to an html file, then parse for links to "
        "archives such as sdist (.tar.gz) or wheel (.whl) files. "
        "If a local path or file:// URL that's a directory, "
        "then look for archives in the directory listing. "
        "Links to VCS project URLs are not supported.",
    )
def trusted_host() -> Option:
    """Build the ``--trusted-host`` option (a fresh list default per call)."""
    return Option(
        "--trusted-host",
        dest="trusted_hosts",
        action="append",
        metavar="HOSTNAME",
        default=[],
        help="Mark this host or host:port pair as trusted, even though it "
        "does not have valid or any HTTPS.",
    )
def constraints() -> Option:
    """Build the ``-c/--constraint`` option (a fresh list default per call)."""
    return Option(
        "-c",
        "--constraint",
        dest="constraints",
        action="append",
        default=[],
        metavar="file",
        help="Constrain versions using the given constraints file. "
        "This option can be used multiple times.",
    )
def requirements() -> Option:
    """Build the ``-r/--requirement`` option (a fresh list default per call)."""
    return Option(
        "-r",
        "--requirement",
        dest="requirements",
        action="append",
        default=[],
        metavar="file",
        help="Install from the given requirements file. "
        "This option can be used multiple times.",
    )
def editable() -> Option:
    """Build the ``-e/--editable`` option (a fresh list default per call)."""
    return Option(
        "-e",
        "--editable",
        dest="editables",
        action="append",
        default=[],
        metavar="path/url",
        help=(
            "Install a project in editable mode (i.e. setuptools "
            '"develop mode") from a local project path or a VCS url.'
        ),
    )
def _handle_src(option: Option, opt_str: str, value: str, parser: OptionParser) -> None:
    """optparse callback: store the --src directory as an absolute path."""
    value = os.path.abspath(value)
    setattr(parser.values, option.dest, value)
src: Callable[..., Option] = partial(
PipOption,
"--src",
"--source",
"--source-dir",
"--source-directory",
dest="src_dir",
type="path",
metavar="dir",
default=get_src_prefix(),
action="callback",
callback=_handle_src,
help="Directory to check out editable projects into. "
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".',
)
def _get_format_control(values: Values, option: Option) -> Any:
"""Get a format_control object."""
return getattr(values, option.dest)
def _handle_no_binary(
    option: Option, opt_str: str, value: str, parser: OptionParser
) -> None:
    """optparse callback for --no-binary: fold *value* into the shared
    FormatControl via FormatControl.handle_mutual_excludes."""
    existing = _get_format_control(parser.values, option)
    FormatControl.handle_mutual_excludes(
        value,
        existing.no_binary,
        existing.only_binary,
    )
def _handle_only_binary(
    option: Option, opt_str: str, value: str, parser: OptionParser
) -> None:
    """optparse callback for --only-binary: mirror of _handle_no_binary with
    the no_binary/only_binary argument order swapped."""
    existing = _get_format_control(parser.values, option)
    FormatControl.handle_mutual_excludes(
        value,
        existing.only_binary,
        existing.no_binary,
    )
def no_binary() -> Option:
    """Build the ``--no-binary`` option with its own FormatControl default."""
    format_control = FormatControl(set(), set())
    return Option(
        "--no-binary",
        dest="format_control",
        action="callback",
        callback=_handle_no_binary,
        type="str",
        default=format_control,
        help="Do not use binary packages. Can be supplied multiple times, and "
        'each time adds to the existing value. Accepts either ":all:" to '
        'disable all binary packages, ":none:" to empty the set (notice '
        "the colons), or one or more package names with commas between "
        "them (no colons). Note that some packages are tricky to compile "
        "and may fail to install when this option is used on them.",
    )
def only_binary() -> Option:
format_control = FormatControl(set(), set())
return Option(
"--only-binary",
dest="format_control",
action="callback",
callback=_handle_only_binary,
type="str",
default=format_control,
help="Do not use source packages. Can be supplied multiple times, and "
'each time adds to the existing value. Accepts either ":all:" to '
'disable all source packages, ":none:" to empty the set, or one '
"or more package names with commas between them. Packages "
"without binary distributions will fail to install when this "
"option is used on them.",
)
platforms: Callable[..., Option] = partial(
Option,
"--platform",
dest="platforms",
metavar="platform",
action="append",
default=None,
help=(
"Only use wheels compatible with <platform>. Defaults to the "
"platform of the running system. Use this option multiple times to "
"specify multiple platforms supported by the target interpreter."
),
)
# This was made a separate function for unit-testing purposes.
def _convert_python_version(value: str) -> Tuple[Tuple[int, ...], Optional[str]]:
"""
Convert a version string like "3", "37", or "3.7.3" into a tuple of ints.
:return: A 2-tuple (version_info, error_msg), where `error_msg` is
non-None if and only if there was a parsing error.
"""
if not value:
# The empty string is the same as not providing a value.
return (None, None)
parts = value.split(".")
if len(parts) > 3:
return ((), "at most three version parts are allowed")
if len(parts) == 1:
# Then we are in the case of "3" or "37".
value = parts[0]
if len(value) > 1:
parts = [value[0], value[1:]]
try:
version_info = tuple(int(part) for part in parts)
except ValueError:
return ((), "each version part must be an integer")
return (version_info, None)
def _handle_python_version(
option: Option, opt_str: str, value: str, parser: OptionParser
) -> None:
"""
Handle a provided --python-version value.
"""
version_info, error_msg = _convert_python_version(value)
if error_msg is not None:
msg = f"invalid --python-version value: {value!r}: {error_msg}"
raise_option_error(parser, option=option, msg=msg)
parser.values.python_version = version_info
python_version: Callable[..., Option] = partial(
Option,
"--python-version",
dest="python_version",
metavar="python_version",
action="callback",
callback=_handle_python_version,
type="str",
default=None,
help=dedent(
"""\
The Python interpreter version to use for wheel and "Requires-Python"
compatibility checks. Defaults to a version derived from the running
interpreter. The version can be specified using up to three dot-separated
integers (e.g. "3" for 3.0.0, "3.7" for 3.7.0, or "3.7.3"). A major-minor
version can also be given as a string without dots (e.g. "37" for 3.7.0).
"""
),
)
implementation: Callable[..., Option] = partial(
Option,
"--implementation",
dest="implementation",
metavar="implementation",
default=None,
help=(
"Only use wheels compatible with Python "
"implementation <implementation>, e.g. 'pp', 'jy', 'cp', "
" or 'ip'. If not specified, then the current "
"interpreter implementation is used. Use 'py' to force "
"implementation-agnostic wheels."
),
)
abis: Callable[..., Option] = partial(
Option,
"--abi",
dest="abis",
metavar="abi",
action="append",
default=None,
help=(
"Only use wheels compatible with Python abi <abi>, e.g. 'pypy_41'. "
"If not specified, then the current interpreter abi tag is used. "
"Use this option multiple times to specify multiple abis supported "
"by the target interpreter. Generally you will need to specify "
"--implementation, --platform, and --python-version when using this "
"option."
),
)
def add_target_python_options(cmd_opts: OptionGroup) -> None:
cmd_opts.add_option(platforms())
cmd_opts.add_option(python_version())
cmd_opts.add_option(implementation())
cmd_opts.add_option(abis())
def make_target_python(options: Values) -> TargetPython:
target_python = TargetPython(
platforms=options.platforms,
py_version_info=options.python_version,
abis=options.abis,
implementation=options.implementation,
)
return target_python
def prefer_binary() -> Option:
return Option(
"--prefer-binary",
dest="prefer_binary",
action="store_true",
default=False,
help=(
"Prefer binary packages over source packages, even if the "
"source packages are newer."
),
)
cache_dir: Callable[..., Option] = partial(
PipOption,
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
type="path",
help="Store the cache data in <dir>.",
)
def _handle_no_cache_dir(
option: Option, opt: str, value: str, parser: OptionParser
) -> None:
"""
Process a value provided for the --no-cache-dir option.
This is an optparse.Option callback for the --no-cache-dir option.
"""
# The value argument will be None if --no-cache-dir is passed via the
# command-line, since the option doesn't accept arguments. However,
# the value can be non-None if the option is triggered e.g. by an
# environment variable, like PIP_NO_CACHE_DIR=true.
if value is not None:
# Then parse the string value to get argument error-checking.
try:
strtobool(value)
except ValueError as exc:
raise_option_error(parser, option=option, msg=str(exc))
# Originally, setting PIP_NO_CACHE_DIR to a value that strtobool()
# converted to 0 (like "false" or "no") caused cache_dir to be disabled
# rather than enabled (logic would say the latter). Thus, we disable
# the cache directory not just on values that parse to True, but (for
# backwards compatibility reasons) also on values that parse to False.
# In other words, always set it to False if the option is provided in
# some (valid) form.
parser.values.cache_dir = False
no_cache: Callable[..., Option] = partial(
Option,
"--no-cache-dir",
dest="cache_dir",
action="callback",
callback=_handle_no_cache_dir,
help="Disable the cache.",
)
no_deps: Callable[..., Option] = partial(
Option,
"--no-deps",
"--no-dependencies",
dest="ignore_dependencies",
action="store_true",
default=False,
help="Don't install package dependencies.",
)
def _handle_dependency_group(
option: Option, opt: str, value: str, parser: OptionParser
) -> None:
"""
Process a value provided for the --group option.
Splits on the rightmost ":", and validates that the path (if present) ends
in `pyproject.toml`. Defaults the path to `pyproject.toml` when one is not given.
`:` cannot appear in dependency group names, so this is a safe and simple parse.
This is an optparse.Option callback for the dependency_groups option.
"""
path, sep, groupname = value.rpartition(":")
if not sep:
path = "pyproject.toml"
else:
# check for 'pyproject.toml' filenames using pathlib
if pathlib.PurePath(path).name != "pyproject.toml":
msg = "group paths use 'pyproject.toml' filenames"
raise_option_error(parser, option=option, msg=msg)
parser.values.dependency_groups.append((path, groupname))
dependency_groups: Callable[..., Option] = partial(
Option,
"--group",
dest="dependency_groups",
default=[],
type=str,
action="callback",
callback=_handle_dependency_group,
metavar="[path:]group",
help='Install a named dependency-group from a "pyproject.toml" file. '
'If a path is given, the name of the file must be "pyproject.toml". '
'Defaults to using "pyproject.toml" in the current directory.',
)
ignore_requires_python: Callable[..., Option] = partial(
Option,
"--ignore-requires-python",
dest="ignore_requires_python",
action="store_true",
help="Ignore the Requires-Python information.",
)
no_build_isolation: Callable[..., Option] = partial(
Option,
"--no-build-isolation",
dest="build_isolation",
action="store_false",
default=True,
help="Disable isolation when building a modern source distribution. "
"Build dependencies specified by PEP 518 must be already installed "
"if this option is used.",
)
check_build_deps: Callable[..., Option] = partial(
Option,
"--check-build-dependencies",
dest="check_build_deps",
action="store_true",
default=False,
help="Check the build dependencies when PEP517 is used.",
)
def _handle_no_use_pep517(
option: Option, opt: str, value: str, parser: OptionParser
) -> None:
"""
Process a value provided for the --no-use-pep517 option.
This is an optparse.Option callback for the no_use_pep517 option.
"""
# Since --no-use-pep517 doesn't accept arguments, the value argument
# will be None if --no-use-pep517 is passed via the command-line.
# However, the value can be non-None if the option is triggered e.g.
# by an environment variable, for example "PIP_NO_USE_PEP517=true".
if value is not None:
msg = """A value was passed for --no-use-pep517,
probably using either the PIP_NO_USE_PEP517 environment variable
or the "no-use-pep517" config file option. Use an appropriate value
of the PIP_USE_PEP517 environment variable or the "use-pep517"
config file option instead.
"""
raise_option_error(parser, option=option, msg=msg)
# If user doesn't wish to use pep517, we check if setuptools is installed
# and raise error if it is not.
packages = ("setuptools",)
if not all(importlib.util.find_spec(package) for package in packages):
msg = (
f"It is not possible to use --no-use-pep517 "
f"without {' and '.join(packages)} installed."
)
raise_option_error(parser, option=option, msg=msg)
# Otherwise, --no-use-pep517 was passed via the command-line.
parser.values.use_pep517 = False
use_pep517: Any = partial(
Option,
"--use-pep517",
dest="use_pep517",
action="store_true",
default=None,
help="Use PEP 517 for building source distributions "
"(use --no-use-pep517 to force legacy behaviour).",
)
no_use_pep517: Any = partial(
Option,
"--no-use-pep517",
dest="use_pep517",
action="callback",
callback=_handle_no_use_pep517,
default=None,
help=SUPPRESS_HELP,
)
def _handle_config_settings(
option: Option, opt_str: str, value: str, parser: OptionParser
) -> None:
key, sep, val = value.partition("=")
if sep != "=":
parser.error(f"Arguments to {opt_str} must be of the form KEY=VAL")
dest = getattr(parser.values, option.dest)
if dest is None:
dest = {}
setattr(parser.values, option.dest, dest)
if key in dest:
if isinstance(dest[key], list):
dest[key].append(val)
else:
dest[key] = [dest[key], val]
else:
dest[key] = val
config_settings: Callable[..., Option] = partial(
Option,
"-C",
"--config-settings",
dest="config_settings",
type=str,
action="callback",
callback=_handle_config_settings,
metavar="settings",
help="Configuration settings to be passed to the PEP 517 build backend. "
"Settings take the form KEY=VALUE. Use multiple --config-settings options "
"to pass multiple keys to the backend.",
)
build_options: Callable[..., Option] = partial(
Option,
"--build-option",
dest="build_options",
metavar="options",
action="append",
help="Extra arguments to be supplied to 'setup.py bdist_wheel'.",
)
global_options: Callable[..., Option] = partial(
Option,
"--global-option",
dest="global_options",
action="append",
metavar="options",
help="Extra global options to be supplied to the setup.py "
"call before the install or bdist_wheel command.",
)
no_clean: Callable[..., Option] = partial(
Option,
"--no-clean",
action="store_true",
default=False,
help="Don't clean up build directories.",
)
pre: Callable[..., Option] = partial(
Option,
"--pre",
action="store_true",
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.",
)
json: Callable[..., Option] = partial(
Option,
"--json",
action="store_true",
default=False,
help="Output data in a machine-readable JSON format.",
)
disable_pip_version_check: Callable[..., Option] = partial(
Option,
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download. Implied with --no-index.",
)
root_user_action: Callable[..., Option] = partial(
Option,
"--root-user-action",
dest="root_user_action",
default="warn",
choices=["warn", "ignore"],
help="Action if pip is run as a root user [warn, ignore] (default: warn)",
)
def _handle_merge_hash(
option: Option, opt_str: str, value: str, parser: OptionParser
) -> None:
"""Given a value spelled "algo:digest", append the digest to a list
pointed to in a dict by the algo name."""
if not parser.values.hashes:
parser.values.hashes = {}
try:
algo, digest = value.split(":", 1)
except ValueError:
parser.error(
f"Arguments to {opt_str} must be a hash name "
"followed by a value, like --hash=sha256:"
"abcde..."
)
if algo not in STRONG_HASHES:
parser.error(
"Allowed hash algorithms for {} are {}.".format(
opt_str, ", ".join(STRONG_HASHES)
)
)
parser.values.hashes.setdefault(algo, []).append(digest)
hash: Callable[..., Option] = partial(
Option,
"--hash",
# Hash values eventually end up in InstallRequirement.hashes due to
# __dict__ copying in process_line().
dest="hashes",
action="callback",
callback=_handle_merge_hash,
type="string",
help="Verify that the package's archive matches this "
"hash before installing. Example: --hash=sha256:abcdef...",
)
require_hashes: Callable[..., Option] = partial(
Option,
"--require-hashes",
dest="require_hashes",
action="store_true",
default=False,
help="Require a hash to check each requirement against, for "
"repeatable installs. This option is implied when any package in a "
"requirements file has a --hash option.",
)
list_path: Callable[..., Option] = partial(
PipOption,
"--path",
dest="path",
type="path",
action="append",
help="Restrict to the specified installation path for listing "
"packages (can be used multiple times).",
)
def check_list_path_option(options: Values) -> None:
if options.path and (options.user or options.local):
raise CommandError("Cannot combine '--path' with '--user' or '--local'")
list_exclude: Callable[..., Option] = partial(
PipOption,
"--exclude",
dest="excludes",
action="append",
metavar="package",
type="package_name",
help="Exclude specified package from the output",
)
no_python_version_warning: Callable[..., Option] = partial(
Option,
"--no-python-version-warning",
dest="no_python_version_warning",
action="store_true",
default=False,
help=SUPPRESS_HELP, # No-op, a hold-over from the Python 2->3 transition.
)
# Features that are now always on. A warning is printed if they are used.
ALWAYS_ENABLED_FEATURES = [
"truststore", # always on since 24.2
"no-binary-enable-wheel-cache", # always on since 23.1
]
use_new_feature: Callable[..., Option] = partial(
Option,
"--use-feature",
dest="features_enabled",
metavar="feature",
action="append",
default=[],
choices=[
"fast-deps",
]
+ ALWAYS_ENABLED_FEATURES,
help="Enable new functionality, that may be backward incompatible.",
)
use_deprecated_feature: Callable[..., Option] = partial(
Option,
"--use-deprecated",
dest="deprecated_features_enabled",
metavar="feature",
action="append",
default=[],
choices=[
"legacy-resolver",
"legacy-certs",
],
help=("Enable deprecated functionality, that will be removed in the future."),
)
##########
# groups #
##########
general_group: Dict[str, Any] = {
"name": "General Options",
"options": [
help_,
debug_mode,
isolated_mode,
require_virtualenv,
python,
verbose,
version,
quiet,
log,
no_input,
keyring_provider,
proxy,
retries,
timeout,
exists_action,
trusted_host,
cert,
client_cert,
cache_dir,
no_cache,
disable_pip_version_check,
no_color,
no_python_version_warning,
use_new_feature,
use_deprecated_feature,
resume_retries,
],
}
index_group: Dict[str, Any] = {
"name": "Package Index Options",
"options": [
index_url,
extra_index_url,
no_index,
find_links,
],
}
| PipOption |
python | langchain-ai__langchain | libs/core/langchain_core/language_models/fake.py | {
"start": 465,
"end": 2057
} | class ____(LLM):
"""Fake LLM for testing purposes."""
responses: list[str]
"""List of responses to return in order."""
# This parameter should be removed from FakeListLLM since
# it's only used by sub-classes.
sleep: float | None = None
"""Sleep time in seconds between responses.
Ignored by FakeListLLM, but used by sub-classes.
"""
i: int = 0
"""Internally incremented after every model invocation.
Useful primarily for testing purposes.
"""
@property
@override
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake-list"
@override
def _call(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> str:
"""Return next response."""
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
return response
@override
async def _acall(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> str:
"""Return next response."""
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
return response
@property
@override
def _identifying_params(self) -> Mapping[str, Any]:
return {"responses": self.responses}
| FakeListLLM |
python | ray-project__ray | python/ray/_private/external_storage.py | {
"start": 2276,
"end": 9311
} | class ____(metaclass=abc.ABCMeta):
"""The base class for external storage.
This class provides some useful functions for zero-copy object
put/get from plasma store. Also it specifies the interface for
object spilling.
When inheriting this class, please make sure to implement validation
logic inside __init__ method. When ray instance starts, it will
instantiating external storage to validate the config.
Raises:
ValueError: when given configuration for
the external storage is invalid.
"""
HEADER_LENGTH = 24
CORE_WORKER_INIT_GRACE_PERIOD_S = 1
def __init__(self):
# NOTE(edoakes): do not access this field directly. Use the `core_worker`
# property instead to handle initialization race conditions.
self._core_worker: Optional["ray._raylet.CoreWorker"] = None
@property
def core_worker(self) -> "ray._raylet.CoreWorker":
"""Get the core_worker initialized in this process.
In rare cases, the core worker may not be fully initialized by the time an I/O
worker begins to execute an operation because there is no explicit flag set to
indicate that the Python layer is ready to execute tasks.
"""
if self._core_worker is None:
worker = ray._private.worker.global_worker
start = time.time()
while not worker.connected:
time.sleep(0.001)
if time.time() - start > self.CORE_WORKER_INIT_GRACE_PERIOD_S:
raise RuntimeError(
"CoreWorker didn't initialize within grace period of "
f"{self.CORE_WORKER_INIT_GRACE_PERIOD_S}s."
)
self._core_worker = worker.core_worker
return self._core_worker
def _get_objects_from_store(self, object_refs):
# Since the object should always exist in the plasma store before
# spilling, it can directly get the object from the local plasma
# store.
# issue: https://github.com/ray-project/ray/pull/13831
return self.core_worker.get_if_local(object_refs)
def _put_object_to_store(
self, metadata, data_size, file_like, object_ref, owner_address
):
self.core_worker.put_file_like_object(
metadata, data_size, file_like, object_ref, owner_address
)
def _write_multiple_objects(
self, f: IO, object_refs: List[ObjectRef], owner_addresses: List[str], url: str
) -> List[str]:
"""Fuse all given objects into a given file handle.
Args:
f: File handle to fusion all given object refs.
object_refs: Object references to fusion to a single file.
owner_addresses: Owner addresses for the provided objects.
url: url where the object ref is stored
in the external storage.
Return:
List of urls_with_offset of fused objects.
The order of returned keys are equivalent to the one
with given object_refs.
"""
keys = []
offset = 0
ray_object_pairs = self._get_objects_from_store(object_refs)
for ref, (buf, metadata, _), owner_address in zip(
object_refs, ray_object_pairs, owner_addresses
):
address_len = len(owner_address)
metadata_len = len(metadata)
if buf is None and len(metadata) == 0:
error = f"Object {ref.hex()} does not exist."
raise ValueError(error)
buf_len = 0 if buf is None else len(buf)
payload = (
address_len.to_bytes(8, byteorder="little")
+ metadata_len.to_bytes(8, byteorder="little")
+ buf_len.to_bytes(8, byteorder="little")
+ owner_address
+ metadata
+ (memoryview(buf) if buf_len else b"")
)
# 24 bytes to store owner address, metadata, and buffer lengths.
payload_len = len(payload)
assert (
self.HEADER_LENGTH + address_len + metadata_len + buf_len == payload_len
)
written_bytes = f.write(payload)
assert written_bytes == payload_len
url_with_offset = create_url_with_offset(
url=url, offset=offset, size=written_bytes
)
keys.append(url_with_offset.encode())
offset += written_bytes
# Necessary because pyarrow.io.NativeFile does not flush() on close().
f.flush()
return keys
def _size_check(self, address_len, metadata_len, buffer_len, obtained_data_size):
"""Check whether or not the obtained_data_size is as expected.
Args:
address_len: Length of the address.
metadata_len: Actual metadata length of the object.
buffer_len: Actual buffer length of the object.
obtained_data_size: Data size specified in the url_with_offset.
Raises:
ValueError: If obtained_data_size is different from
address_len + metadata_len + buffer_len + 24 (first 8 bytes to store length).
"""
data_size_in_bytes = (
address_len + metadata_len + buffer_len + self.HEADER_LENGTH
)
if data_size_in_bytes != obtained_data_size:
raise ValueError(
f"Obtained data has a size of {data_size_in_bytes}, "
"although it is supposed to have the "
f"size of {obtained_data_size}."
)
@abc.abstractmethod
def spill_objects(self, object_refs, owner_addresses) -> List[str]:
"""Spill objects to the external storage. Objects are specified
by their object refs.
Args:
object_refs: The list of the refs of the objects to be spilled.
owner_addresses: Owner addresses for the provided objects.
Returns:
A list of internal URLs with object offset.
"""
@abc.abstractmethod
def restore_spilled_objects(
self, object_refs: List[ObjectRef], url_with_offset_list: List[str]
) -> int:
"""Restore objects from the external storage.
Args:
object_refs: List of object IDs (note that it is not ref).
url_with_offset_list: List of url_with_offset.
Returns:
The total number of bytes restored.
"""
@abc.abstractmethod
def delete_spilled_objects(self, urls: List[str]):
"""Delete objects that are spilled to the external storage.
Args:
urls: URLs that store spilled object files.
NOTE: This function should not fail if some of the urls
do not exist.
"""
@abc.abstractmethod
def destroy_external_storage(self):
"""Destroy external storage when a head node is down.
NOTE: This is currently working when the cluster is
started by ray.init
"""
| ExternalStorage |
python | huggingface__transformers | utils/test_module/custom_pipeline.py | {
"start": 239,
"end": 1100
} | class ____(Pipeline):
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "second_text" in kwargs:
preprocess_kwargs["second_text"] = kwargs["second_text"]
return preprocess_kwargs, {}, {}
def preprocess(self, text, second_text=None):
return self.tokenizer(text, text_pair=second_text, return_tensors="pt")
def _forward(self, model_inputs):
return self.model(**model_inputs)
def postprocess(self, model_outputs):
logits = model_outputs.logits[0].numpy()
probabilities = softmax(logits)
best_class = np.argmax(probabilities)
label = self.model.config.id2label[best_class]
score = probabilities[best_class].item()
logits = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| PairClassificationPipeline |
python | getsentry__sentry | tests/sentry/middleware/test_access_log_middleware.py | {
"start": 6461,
"end": 7846
} | class ____(LogCaptureAPITestCase):
endpoint = "snuba-ratelimit-endpoint"
def test_access_log_snuba_rate_limited(self) -> None:
"""Test that Snuba rate limits are properly logged by access log middleware."""
self._caplog.set_level(logging.INFO, logger="sentry")
self.get_error_response(status_code=429)
self.assert_access_log_recorded()
assert self.captured_logs[0].rate_limit_type == "snuba"
assert self.captured_logs[0].rate_limited == "True"
# All the types from the standard rate limit metadata should be set
assert self.captured_logs[0].remaining == "122"
assert self.captured_logs[0].concurrent_limit == "123"
assert self.captured_logs[0].concurrent_requests == "1"
assert self.captured_logs[0].limit == "123"
assert self.captured_logs[0].reset_time == "123"
# Snuba rate limit specific fields should be set
assert self.captured_logs[0].snuba_policy == "ConcurrentRateLimitAllocationPolicy"
assert self.captured_logs[0].snuba_quota_unit == "no_units"
assert self.captured_logs[0].snuba_storage_key == "test_storage_key"
assert self.captured_logs[0].snuba_quota_used == "41"
assert self.captured_logs[0].snuba_rejection_threshold == "40"
@all_silo_test
@override_settings(SENTRY_SELF_HOSTED=False)
| TestAccessLogSnubaRateLimited |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 69149,
"end": 69464
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("headline", "body")
headline = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="headline")
body = sgqlc.types.Field(String, graphql_name="body")
| CommitMessage |
python | spack__spack | var/spack/test_repos/spack_repo/duplicates_test/packages/hdf5/package.py | {
"start": 216,
"end": 554
} | class ____(Package):
"""Requires gmake at a version that doesn't match that of its dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/tdep-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
depends_on("pinned-gmake", type="link")
depends_on("gmake@4", type="build")
| Hdf5 |
python | spyder-ide__spyder | spyder/plugins/pylint/main_widget.py | {
"start": 2565,
"end": 3992
} | class ____(QTreeWidgetItem):
"""
Category item for results.
Notes
-----
Possible categories are Convention, Refactor, Warning and Error.
"""
CATEGORIES = {
"Convention": {
'translation_string': _("Convention"),
'icon': ima.icon("convention")
},
"Refactor": {
'translation_string': _("Refactor"),
'icon': ima.icon("refactor")
},
"Warning": {
'translation_string': _("Warning"),
'icon': ima.icon("warning")
},
"Error": {
'translation_string': _("Error"),
'icon': ima.icon("error")
}
}
def __init__(self, parent, category, number_of_messages):
# Messages string to append to category.
if number_of_messages > 1 or number_of_messages == 0:
messages = _('messages')
else:
messages = _('message')
# Category title.
title = self.CATEGORIES[category]['translation_string']
title += f" ({number_of_messages} {messages})"
super().__init__(parent, [title], QTreeWidgetItem.Type)
# Set icon
icon = self.CATEGORIES[category]['icon']
self.setIcon(0, icon)
# ---- Widgets
# ----------------------------------------------------------------------------
# TODO: display results on 3 columns instead of 1: msg_id, lineno, message
| CategoryItem |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_textbox11.py | {
"start": 315,
"end": 868
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox11.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox("E9", "This is some text", {"fill": {"color": "red"}})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | walkccc__LeetCode | solutions/65. Valid Number/65.py | {
"start": 0,
"end": 641
} | class ____:
def isNumber(self, s: str) -> bool:
s = s.strip()
if not s:
return False
seenNum = False
seenDot = False
seenE = False
for i, c in enumerate(s):
if c == '.':
if seenDot or seenE:
return False
seenDot = True
elif c == 'e' or c == 'E':
if seenE or not seenNum:
return False
seenE = True
seenNum = False
elif c in '+-':
if i > 0 and s[i - 1] not in 'eE':
return False
seenNum = False
else:
if not c.isdigit():
return False
seenNum = True
return seenNum
| Solution |
python | great-expectations__great_expectations | great_expectations/data_context/data_context_variables.py | {
"start": 8055,
"end": 8546
} | class ____(DataContextVariables):
@override
def _init_store(self) -> DataContextStore:
from great_expectations.data_context.store.data_context_store import (
DataContextStore,
)
store = DataContextStore(
store_name="ephemeral_data_context_store",
store_backend=None, # Defaults to InMemoryStoreBackend
runtime_environment=None,
)
return store
@dataclass(repr=False)
| EphemeralDataContextVariables |
python | django-haystack__django-haystack | haystack/fields.py | {
"start": 7565,
"end": 8673
} | class ____(SearchField):
field_type = "location"
def prepare(self, obj):
from haystack.utils.geo import ensure_point
value = super().prepare(obj)
if value is None:
return None
pnt = ensure_point(value)
pnt_lng, pnt_lat = pnt.coords
return "%s,%s" % (pnt_lat, pnt_lng)
def convert(self, value):
from django.contrib.gis.geos import Point
from haystack.utils.geo import ensure_point
if value is None:
return None
if hasattr(value, "geom_type"):
value = ensure_point(value)
return value
if isinstance(value, str):
lat, lng = value.split(",")
elif isinstance(value, (list, tuple)):
# GeoJSON-alike
lat, lng = value[1], value[0]
elif isinstance(value, dict):
lat = value.get("lat", 0)
lng = value.get("lon", 0)
else:
raise TypeError("Unable to extract coordinates from %r" % value)
value = Point(float(lng), float(lat))
return value
| LocationField |
python | google__jax | docs/autodidax.py | {
"start": 5458,
"end": 6979
} | class ____(NamedTuple):
level: int
trace_type: type['Trace']
global_data: Any | None
trace_stack: list[MainTrace] = []
dynamic_trace: MainTrace | None = None # to be employed in Part 3
@contextmanager
def new_main(trace_type: type['Trace'], global_data=None):
level = len(trace_stack)
main = MainTrace(level, trace_type, global_data)
trace_stack.append(main)
try:
yield main
finally:
trace_stack.pop()
# -
# When we're about to apply a transformation, we'll push another interpreter
# onto the stack using `new_main`. Then, as we apply primitives in the function,
# we can think of the `bind` first being interpreted by the trace at the top of
# the stack (i.e. with the highest level). If that first interpreter itself
# binds other primitives in its interpretation rule for the primitive, like how
# the JVP rule of `sin_p` might bind `cos_p` and `mul_p`, then those `bind`
# calls will be handled by the interpreter at the next level down.
#
# What goes at the bottom of the interpreter stack? At the bottom, we know all
# the transformation interpreters are finished, and we just want to do standard
# evaluation. So at the bottom we'll put an evaluation interpreter.
#
# Let's sketch out the interface for interpreters, which is based on the `Trace`
# and `Tracer` base classes. A `Tracer` represents a boxed-up value, perhaps
# carrying some extra context data used by the interpreter. A `Trace` handles
# boxing up values into `Tracers` and also handles primitive application.
| MainTrace |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/common_transformers/anf.py | {
"start": 1758,
"end": 2803
} | class ____(collections.namedtuple(
'ASTEdgePattern', ['parent', 'field', 'child'])):
"""A pattern defining a type of AST edge.
This consists of three components:
- The type of the parent node, checked with isinstance,
- The name of the field, checked with string equality, and
- The type of the child node, also checked with isinstance.
If all three match, the whole pattern is considered to match.
In all three slots, the special value `anf.ANY` is treated as "match
anything". The internal nodes are produced from the `gast` library rather
than the standard `ast` module, which may affect `isinstance` checks.
"""
__slots__ = ()
def matches(self, parent, field, child):
"""Computes whether this pattern matches the given edge."""
if self.parent is ANY or isinstance(parent, self.parent):
pass # OK
else:
return False
if self.field is ANY or field == self.field:
pass # OK
else:
return False
return self.child is ANY or isinstance(child, self.child)
| ASTEdgePattern |
python | neetcode-gh__leetcode | python/0303-range-sum-query-immutable.py | {
"start": 0,
"end": 357
} | class ____:
def __init__(self, nums: List[int]):
self.prefix = []
cur = 0
for n in nums:
cur += n
self.prefix.append(cur)
def sumRange(self, left: int, right: int) -> int:
r = self.prefix[right]
l = self.prefix[left - 1] if left > 0 else 0
return r - l
| NumArray |
python | keras-team__keras | keras/src/distillation/distiller_test.py | {
"start": 1064,
"end": 18187
} | class ____(TestCase):
"""Essential test cases for the Distiller class."""
def setUp(self):
"""Set up test fixtures."""
super().setUp()
# Create test data
self.x = np.random.random((20, 5)).astype(np.float32)
self.y = np.random.randint(0, 10, (20,)).astype(np.int32)
# Create teacher and student models
self.teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
self.student = SimpleStudent(vocab_size=10, hidden_dim=16)
# Build models
dummy_input = self.x[:2]
self.teacher(dummy_input)
self.student(dummy_input)
# Create distillation distillation_loss
self.distillation_loss = LogitsDistillation(temperature=2.0)
# Create distiller
self.distiller = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=self.distillation_loss,
student_loss_weight=0.5,
)
# Compile distiller
self.distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
def test_distiller_initialization(self):
"""Test Distiller initialization."""
# Check that teacher is frozen
self.assertFalse(self.teacher.trainable)
# Check that student is trainable
self.assertTrue(self.student.trainable)
# Check student_loss_weight
self.assertEqual(self.distiller.student_loss_weight, 0.5)
# Check distillation_loss (should be a list with one distillation_loss)
self.assertIsInstance(self.distiller.distillation_losses, list)
self.assertEqual(len(self.distiller.distillation_losses), 1)
self.assertIsInstance(
self.distiller.distillation_losses[0], LogitsDistillation
)
# Check that distillation_loss has the correct temperature
self.assertEqual(self.distiller.distillation_losses[0].temperature, 2.0)
# Check that model is compiled
self.assertIsNotNone(self.distiller.optimizer)
# Check if the model has been compiled (different backends may handle
# this differently)
self.assertTrue(
hasattr(self.distiller, "_compile_config")
or hasattr(self.distiller, "compiled_loss"),
"Model should be compiled",
)
def test_distiller_call(self):
"""Test Distiller call method (inference)."""
# Call should return student outputs
outputs = self.distiller(self.x)
# Check output shape
expected_shape = (20, 10) # batch_size, vocab_size
self.assertEqual(outputs.shape, expected_shape)
# Check that outputs are from student, not teacher
student_outputs = self.student(self.x)
self.assertAllClose(outputs, student_outputs)
def test_teacher_freezing(self):
"""Test that teacher is properly frozen."""
# Teacher should be frozen
self.assertFalse(self.teacher.trainable)
# Student should be trainable
self.assertTrue(self.student.trainable)
# Create a new teacher that is trainable and verify it gets frozen
new_teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
self.assertTrue(new_teacher.trainable) # Should be trainable initially
# Create distiller - should freeze the teacher
Distiller(
teacher=new_teacher,
student=self.student,
distillation_losses=self.distillation_loss,
student_loss_weight=0.5,
)
# Teacher should now be frozen
self.assertFalse(new_teacher.trainable)
def test_model_compatibility_validation(self):
"""Test model compatibility validation."""
# Test with non-Keras objects
with self.assertRaises(ValueError):
Distiller(
teacher="not_a_model",
student=self.student,
distillation_losses=self.distillation_loss,
)
with self.assertRaises(ValueError):
Distiller(
teacher=self.teacher,
student="not_a_model",
distillation_losses=self.distillation_loss,
)
def test_multi_distillation_loss_functionality(self):
"""Test multi-distillation_loss functionality."""
# Create multiple distillation_loss
distillation_loss = [
LogitsDistillation(temperature=3.0),
LogitsDistillation(temperature=2.0),
]
distillation_loss_weights = [0.7, 0.3]
# Create distiller with multiple distillation_loss
distiller = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=distillation_loss,
distillation_loss_weights=distillation_loss_weights,
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Test that distillation_loss are stored correctly
self.assertEqual(len(distiller.distillation_losses), 2)
self.assertEqual(distiller.distillation_loss_weights, [0.7, 0.3])
# Test training
x = np.random.random((10, 5)).astype(np.float32)
y = np.random.randint(0, 10, (10,))
history = distiller.fit(x, y, epochs=1, verbose=0)
# Check metrics
self.assertIn("total_loss", history.history)
self.assertIn("student_loss", history.history)
self.assertIn("distillation_loss", history.history)
def test_multi_distillation_loss_validation(self):
"""Test multi-distillation_loss validation."""
distillation_loss = [
LogitsDistillation(temperature=3.0),
LogitsDistillation(temperature=2.0),
]
# Test that validation passes for valid configurations
distiller = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=distillation_loss,
student_loss_weight=0.5,
)
self.assertEqual(len(distiller.distillation_losses), 2)
# Test invalid distillation_loss weights length
with self.assertRaises(ValueError):
Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=distillation_loss,
distillation_loss_weights=[1.0], # Wrong length
student_loss_weight=0.5,
)
def test_student_loss_weighting(self):
"""Test student loss weighting functionality."""
# Test with student_loss_weight = 0.0 (only distillation loss)
distiller_0 = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=self.distillation_loss,
student_loss_weight=0.0,
)
# Test with student_loss_weight = 1.0 (only student loss)
distiller_1 = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=self.distillation_loss,
student_loss_weight=1.0,
)
# Compile both distillers
distiller_0.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
distiller_1.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Test that they can be used for training without errors
small_x = self.x[:5]
small_y = self.y[:5]
# Both should train without errors
history_0 = distiller_0.fit(small_x, small_y, epochs=1, verbose=0)
history_1 = distiller_1.fit(small_x, small_y, epochs=1, verbose=0)
# Check that training completed
self.assertIn("total_loss", history_0.history)
self.assertIn("total_loss", history_1.history)
def test_full_training_workflow(self):
"""Test complete training workflow with model.fit() - MOST IMPORTANT."""
# Create larger dataset for training
np.random.seed(42)
x_train = np.random.random((100, 5)).astype(np.float32)
y_train = np.random.randint(0, 10, (100,)).astype(np.int32)
x_val = np.random.random((20, 5)).astype(np.float32)
y_val = np.random.randint(0, 10, (20,)).astype(np.int32)
# Create fresh models for training
teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
student = SimpleStudent(vocab_size=10, hidden_dim=16)
# Build models to avoid JAX tracer issues
dummy_input = x_train[:2]
teacher(dummy_input)
student(dummy_input)
# Create distiller
distiller = Distiller(
teacher=teacher,
student=student,
distillation_losses=self.distillation_loss,
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Train the model
history = distiller.fit(
x_train,
y_train,
validation_data=(x_val, y_val),
epochs=3,
batch_size=16,
verbose=0,
)
# Check that training completed
self.assertIn("total_loss", history.history)
self.assertIn("val_total_loss", history.history)
self.assertIn("student_loss", history.history)
self.assertIn("distillation_loss", history.history)
# Check that losses are finite
for loss_name in ["total_loss", "student_loss", "distillation_loss"]:
losses = history.history[loss_name]
self.assertGreater(len(losses), 0)
for loss in losses:
self.assertTrue(np.isfinite(loss))
# Check that the model can make predictions
predictions = distiller.predict(x_val[:5], verbose=0)
self.assertEqual(predictions.shape, (5, 10)) # batch_size, vocab_size
# Check that student weights have changed (indicating learning)
initial_weights = [w.numpy().copy() for w in student.trainable_weights]
# Train a bit more
distiller.fit(x_train[:10], y_train[:10], epochs=1, verbose=0)
final_weights = [w.numpy() for w in student.trainable_weights]
# At least some weights should have changed
weights_changed = any(
not np.allclose(initial, final, atol=1e-6)
for initial, final in zip(initial_weights, final_weights)
)
self.assertTrue(
weights_changed, "Student weights should change during training"
)
def test_evaluation_workflow(self):
"""Test evaluation workflow with model.evaluate()."""
# Create dataset
np.random.seed(42)
x_test = np.random.random((30, 5)).astype(np.float32)
y_test = np.random.randint(0, 10, (30,)).astype(np.int32)
# Create fresh models
teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
student = SimpleStudent(vocab_size=10, hidden_dim=16)
# Build models to avoid JAX tracer issues
dummy_input = x_test[:2]
teacher(dummy_input)
student(dummy_input)
# Create distiller
distiller = Distiller(
teacher=teacher,
student=student,
distillation_losses=self.distillation_loss,
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Train briefly
distiller.fit(x_test[:10], y_test[:10], epochs=1, verbose=0)
# Evaluate the model
results = distiller.evaluate(x_test, y_test, verbose=0)
# Check that evaluation returns expected metrics
self.assertIsInstance(results, list)
self.assertGreater(len(results), 0)
# All results should be finite
for result in results:
self.assertTrue(np.isfinite(result))
def test_prediction_workflow(self):
"""Test prediction workflow with model.predict()."""
# Create dataset
np.random.seed(42)
x_test = np.random.random((20, 5)).astype(np.float32)
# Create fresh models
teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
student = SimpleStudent(vocab_size=10, hidden_dim=16)
# Build models to avoid JAX tracer issues
dummy_input = x_test[:2]
teacher(dummy_input)
student(dummy_input)
# Create distiller
distiller = Distiller(
teacher=teacher,
student=student,
distillation_losses=self.distillation_loss,
student_loss_weight=0.5,
)
# Make predictions
predictions = distiller.predict(x_test, verbose=0)
# Check prediction shape
self.assertEqual(predictions.shape, (20, 10)) # batch_size, vocab_size
# Check that predictions are finite
self.assertTrue(np.all(np.isfinite(predictions)))
# Check predictions sum to reasonable values (not zeros/infinities)
prediction_sums = np.sum(predictions, axis=1)
self.assertTrue(np.all(np.isfinite(prediction_sums)))
def test_distiller_serialization_and_saving(self):
"""Test Distiller serialization, saving, and loading."""
# Use standard Sequential models for serialization testing
teacher = keras.Sequential(
[
keras.layers.Dense(
32, activation="relu", name="teacher_dense_1"
),
keras.layers.Dense(
16, activation="relu", name="teacher_dense_2"
),
keras.layers.Dense(10, name="teacher_output"),
]
)
student = keras.Sequential(
[
keras.layers.Dense(
16, activation="relu", name="student_dense_1"
),
keras.layers.Dense(
8, activation="relu", name="student_dense_2"
),
keras.layers.Dense(10, name="student_output"),
]
)
# Create distiller with single distillation_loss
distillation_loss = LogitsDistillation(
temperature=3.0, loss="kl_divergence"
)
original_distiller = Distiller(
teacher=teacher,
student=student,
distillation_losses=distillation_loss,
student_loss_weight=0.7,
)
# Build the models by calling them
x_test = np.random.random((2, 20)).astype(np.float32)
_ = original_distiller(x_test)
# Test get_config
config = original_distiller.get_config()
# Verify all components are in config
required_keys = [
"teacher",
"student",
"distillation_losses",
"distillation_loss_weights",
"student_loss_weight",
]
for key in required_keys:
self.assertIn(key, config, f"Missing key: {key}")
# Test JSON serialization
json_str = json.dumps(config)
self.assertIsInstance(json_str, str)
# Test from_config reconstruction
reconstructed_distiller = Distiller.from_config(config)
# Verify reconstruction
self.assertEqual(reconstructed_distiller.student_loss_weight, 0.7)
self.assertIsInstance(
reconstructed_distiller.distillation_losses[0], LogitsDistillation
)
# Verify distillation_loss parameters
self.assertEqual(
reconstructed_distiller.distillation_losses[0].temperature, 3.0
)
# Test that reconstructed distiller can be used for inference
reconstructed_output = reconstructed_distiller(x_test)
self.assertEqual(reconstructed_output.shape, (2, 10))
# Test model saving and loading (full integration test)
temp_dir = self.get_temp_dir()
model_path = os.path.join(temp_dir, "distiller_model.keras")
# Compile original distiller
original_distiller.compile(
loss="sparse_categorical_crossentropy",
)
# Save the model
original_distiller.save(model_path)
# Load the model
loaded_distiller = keras.models.load_model(model_path)
# Verify loaded model works
loaded_output = loaded_distiller(x_test)
self.assertEqual(loaded_output.shape, (2, 10))
# Verify parameters are preserved
self.assertEqual(loaded_distiller.student_loss_weight, 0.7)
# The core serialization functionality is working
self.assertTrue(True, "Distiller serialization test passed")
| TestDistiller |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 227882,
"end": 232625
} | class ____(TestCase):
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes["AllInteger"]:
assert_(1 > np.array(0, dtype=dt1), f"type {dt1} failed")
assert_(not 1 < np.array(0, dtype=dt1), f"type {dt1} failed")
for dt2 in np.typecodes["AllInteger"]:
assert_(
np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
f"type {dt1} and {dt2} failed",
)
assert_(
not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
f"type {dt1} and {dt2} failed",
)
# Unsigned integers
for dt1 in "B":
assert_(-1 < np.array(1, dtype=dt1), f"type {dt1} failed")
assert_(not -1 > np.array(1, dtype=dt1), f"type {dt1} failed")
assert_(-1 != np.array(1, dtype=dt1), f"type {dt1} failed")
# Unsigned vs signed
for dt2 in "bhil":
assert_(
np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
f"type {dt1} and {dt2} failed",
)
assert_(
not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
f"type {dt1} and {dt2} failed",
)
assert_(
np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
f"type {dt1} and {dt2} failed",
)
# Signed integers and floats
for dt1 in "bhl" + np.typecodes["Float"]:
assert_(1 > np.array(-1, dtype=dt1), f"type {dt1} failed")
assert_(not 1 < np.array(-1, dtype=dt1), f"type {dt1} failed")
assert_(-1 == np.array(-1, dtype=dt1), f"type {dt1} failed")
for dt2 in "bhl" + np.typecodes["Float"]:
assert_(
np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
f"type {dt1} and {dt2} failed",
)
assert_(
not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
f"type {dt1} and {dt2} failed",
)
assert_(
np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
f"type {dt1} and {dt2} failed",
)
@skip(reason="object arrays")
def test_to_bool_scalar(self):
assert_equal(bool(np.array([False])), False)
assert_equal(bool(np.array([True])), True)
assert_equal(bool(np.array([[42]])), True)
assert_raises(ValueError, bool, np.array([1, 2]))
class NotConvertible:
def __bool__(self):
raise NotImplementedError
assert_raises(NotImplementedError, bool, np.array(NotConvertible()))
assert_raises(NotImplementedError, bool, np.array([NotConvertible()]))
if IS_PYSTON:
raise SkipTest("Pyston disables recursion checking")
self_containing = np.array([None])
self_containing[0] = self_containing
Error = RecursionError
assert_raises(Error, bool, self_containing) # previously stack overflow
self_containing[0] = None # resolve circular reference
def test_to_int_scalar(self):
# gh-9972 means that these aren't always the same
int_funcs = (int, lambda x: x.__int__())
for int_func in int_funcs:
assert_equal(int_func(np.array(0)), 0)
assert_equal(int_func(np.array([1])), 1)
assert_equal(int_func(np.array([[42]])), 42)
assert_raises((ValueError, TypeError), int_func, np.array([1, 2]))
@skip(reason="object arrays")
def test_to_int_scalar_2(self):
int_funcs = (int, lambda x: x.__int__())
for int_func in int_funcs:
# gh-9972
assert_equal(4, int_func(np.array("4")))
assert_equal(5, int_func(np.bytes_(b"5")))
assert_equal(6, int_func(np.str_("6")))
# The delegation of int() to __trunc__ was deprecated in
# Python 3.11.
if sys.version_info < (3, 11):
class HasTrunc:
def __trunc__(self):
return 3
assert_equal(3, int_func(np.array(HasTrunc())))
assert_equal(3, int_func(np.array([HasTrunc()])))
else:
pass
class NotConvertible:
def __int__(self):
raise NotImplementedError
assert_raises(NotImplementedError, int_func, np.array(NotConvertible()))
assert_raises(NotImplementedError, int_func, np.array([NotConvertible()]))
| TestConversion |
python | scrapy__scrapy | tests/test_spidermiddleware.py | {
"start": 7883,
"end": 8191
} | class ____:
async def process_spider_exception(self, response, exception):
yield {"foo": 1}
d = defer.Deferred()
call_later(0, d.callback, None)
await maybe_deferred_to_future(d)
yield {"foo": 2}
yield {"foo": 3}
| ProcessSpiderExceptionAsyncIteratorMiddleware |
python | realpython__materials | python-312/typing/accounts.py | {
"start": 345,
"end": 701
} | class ____:
account_number: str
balance: float
@classmethod
def from_balance(cls, balance: float) -> Self:
return cls(generate_account_number(), balance)
def deposit(self, amount: float) -> None:
self.balance += amount
def withdraw(self, amount: float) -> None:
self.balance -= amount
@dataclass
| BankAccount |
python | getsentry__sentry | src/sentry/integrations/jira_server/integration.py | {
"start": 4841,
"end": 4917
} | class ____(TypedDict):
on_resolve: str
on_unresolve: str
| _ColumnLabels |
python | tiangolo__fastapi | docs_src/extra_models/tutorial002.py | {
"start": 264,
"end": 300
} | class ____(UserBase):
pass
| UserOut |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_area05.py | {
"start": 315,
"end": 1536
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_area05.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "area"})
chart.axis_ids = [60957824, 60959360]
data = [
[1, 2, 3, 4, 5],
[8, 7, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet.insert_chart("E9", chart, {"description": "Some alternative text"})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | Lightning-AI__lightning | examples/fabric/build_your_own_trainer/trainer.py | {
"start": 523,
"end": 23333
} | class ____:
def __init__(
self,
accelerator: Union[str, Accelerator] = "auto",
strategy: Union[str, Strategy] = "auto",
devices: Union[list[int], str, int] = "auto",
precision: Union[str, int] = "32-true",
plugins: Optional[Union[str, Any]] = None,
callbacks: Optional[Union[list[Any], Any]] = None,
loggers: Optional[Union[Logger, list[Logger]]] = None,
max_epochs: Optional[int] = 1000,
max_steps: Optional[int] = None,
grad_accum_steps: int = 1,
limit_train_batches: Union[int, float] = float("inf"),
limit_val_batches: Union[int, float] = float("inf"),
validation_frequency: int = 1,
use_distributed_sampler: bool = True,
checkpoint_dir: str = "./checkpoints",
checkpoint_frequency: int = 1,
) -> None:
"""Exemplary Trainer with Fabric. This is a very simple trainer focused on readability but with reduced
featureset. As a trainer with more included features, we recommend using the
:class:`lightning.pytorch.Trainer`.
Args:
accelerator: The hardware to run on. Possible choices are:
``"cpu"``, ``"cuda"``, ``"mps"``, ``"gpu"``, ``"tpu"``, ``"auto"``.
strategy: Strategy for how to run across multiple devices. Possible choices are:
``"dp"``, ``"ddp"``, ``"ddp_spawn"``, ``"deepspeed"``, ``"fsdp"``.
devices: Number of devices to train on (``int``),
which GPUs to train on (``list`` or ``str``), or ``"auto"``.
The value applies per node.
precision: Double precision (``"64"``), full precision (``"32"``), half precision AMP (``"16-mixed"``),
or bfloat16 precision AMP (``"bf16-mixed"``).
plugins: One or several custom plugins
callbacks: A single callback or a list of callbacks. The following hooks are supported:
- on_train_epoch_start
- on train_epoch_end
- on_train_batch_start
- on_train_batch_end
- on_before_backward
- on_after_backward
- on_before_zero_grad
- on_before_optimizer_step
- on_validation_model_eval
- on_validation_model_train
- on_validation_epoch_start
- on_validation_epoch_end
- on_validation_batch_start
- on_validation_batch_end
loggers: A single logger or a list of loggers. See :meth:`~lightning.fabric.fabric.Fabric.log` for more
information.
max_epochs: The maximum number of epochs to train
max_steps: The maximum number of (optimizer) steps to train
grad_accum_steps: How many batches to process before each optimizer step
limit_train_batches: Limits the number of train batches per epoch
If greater than number of batches in the dataloader, this has no effect.
limit_val_batches: Limits the number of validation batches per epoch.
If greater than number of batches in the dataloader, this has no effect.
validation_frequency: How many epochs to run before each validation epoch.
use_distributed_sampler: Wraps the sampler of each dataloader with a respective distributed-aware sampler
in case of distributed training.
checkpoint_dir: Directory to store checkpoints to.
checkpoint_frequency: How many epochs to run before each checkpoint is written.
Warning:
callbacks written for the lightning trainer (especially making assumptions on the trainer), won't work!
"""
self.fabric = L.Fabric(
accelerator=accelerator,
strategy=strategy,
devices=devices,
precision=precision,
plugins=plugins,
callbacks=callbacks,
loggers=loggers,
)
self.global_step = 0
self.grad_accum_steps: int = grad_accum_steps
self.current_epoch = 0
self.max_epochs = max_epochs
self.max_steps = max_steps
self.should_stop = False
# ensures limit_X_batches is either int or inf
if not isinstance(limit_train_batches, int):
assert limit_train_batches == float("inf")
if not isinstance(limit_val_batches, int):
assert limit_val_batches == float("inf")
self.limit_train_batches = limit_train_batches
self.limit_val_batches = limit_val_batches
self.validation_frequency = validation_frequency
self.use_distributed_sampler = use_distributed_sampler
self._current_train_return: Union[torch.Tensor, Mapping[str, Any]] = {}
self._current_val_return: Optional[Union[torch.Tensor, Mapping[str, Any]]] = {}
self.checkpoint_dir = checkpoint_dir
self.checkpoint_frequency = checkpoint_frequency
def fit(
self,
model: L.LightningModule,
train_loader: torch.utils.data.DataLoader,
val_loader: torch.utils.data.DataLoader,
ckpt_path: Optional[str] = None,
):
"""The main entrypoint of the trainer, triggering the actual training.
Args:
model: the LightningModule to train.
Can have the same hooks as :attr:`callbacks` (see :meth:`MyCustomTrainer.__init__`).
train_loader: the training dataloader. Has to be an iterable returning batches.
val_loader: the validation dataloader. Has to be an iterable returning batches.
If not specified, no validation will run.
ckpt_path: Path to previous checkpoints to resume training from.
If specified, will always look for the latest checkpoint within the given directory.
"""
self.fabric.launch()
# setup dataloaders
train_loader = self.fabric.setup_dataloaders(train_loader, use_distributed_sampler=self.use_distributed_sampler)
if val_loader is not None:
val_loader = self.fabric.setup_dataloaders(val_loader, use_distributed_sampler=self.use_distributed_sampler)
# setup model and optimizer
if isinstance(self.fabric.strategy, L.fabric.strategies.fsdp.FSDPStrategy):
# currently, there is no way to support fsdp with model.configure_optimizers in fabric
# as it would require fabric to hold a reference to the model, which we don't want to.
raise NotImplementedError("BYOT currently does not support FSDP")
optimizer, scheduler_cfg = self._parse_optimizers_schedulers(model.configure_optimizers())
assert optimizer is not None
model, optimizer = self.fabric.setup(model, optimizer)
# assemble state (current epoch and global step will be added in save)
state = {"model": model, "optim": optimizer, "scheduler": scheduler_cfg}
# load last checkpoint if available
if ckpt_path is not None and os.path.isdir(ckpt_path):
latest_checkpoint_path = self.get_latest_checkpoint(self.checkpoint_dir)
if latest_checkpoint_path is not None:
self.load(state, latest_checkpoint_path)
# check if we even need to train here
if self.max_epochs is not None and self.current_epoch >= self.max_epochs:
self.should_stop = True
while not self.should_stop:
self.train_loop(
model, optimizer, train_loader, limit_batches=self.limit_train_batches, scheduler_cfg=scheduler_cfg
)
if self.should_validate:
self.val_loop(model, val_loader, limit_batches=self.limit_val_batches)
self.step_scheduler(model, scheduler_cfg, level="epoch", current_value=self.current_epoch)
self.current_epoch += 1
# stopping condition on epoch level
if self.max_epochs is not None and self.current_epoch >= self.max_epochs:
self.should_stop = True
self.save(state)
# reset for next fit call
self.should_stop = False
def train_loop(
self,
model: L.LightningModule,
optimizer: torch.optim.Optimizer,
train_loader: torch.utils.data.DataLoader,
limit_batches: Union[int, float] = float("inf"),
scheduler_cfg: Optional[Mapping[str, Union[L.fabric.utilities.types.LRScheduler, bool, str, int]]] = None,
):
"""The training loop running a single training epoch.
Args:
model: the LightningModule to train
optimizer: the optimizer, optimizing the LightningModule.
train_loader: The dataloader yielding the training batches.
limit_batches: Limits the batches during this training epoch.
If greater than the number of batches in the ``train_loader``, this has no effect.
scheduler_cfg: The learning rate scheduler configuration.
Have a look at :meth:`~lightning.pytorch.core.LightningModule.configure_optimizers`
for supported values.
"""
self.fabric.call("on_train_epoch_start")
iterable = self.progbar_wrapper(
train_loader, total=min(len(train_loader), limit_batches), desc=f"Epoch {self.current_epoch}"
)
for batch_idx, batch in enumerate(iterable):
# end epoch if stopping training completely or max batches for this epoch reached
if self.should_stop or batch_idx >= limit_batches:
break
self.fabric.call("on_train_batch_start", batch, batch_idx)
# check if optimizer should step in gradient accumulation
should_optim_step = self.global_step % self.grad_accum_steps == 0
if should_optim_step:
# currently only supports a single optimizer
self.fabric.call("on_before_optimizer_step", optimizer)
# optimizer step runs train step internally through closure
optimizer.step(partial(self.training_step, model=model, batch=batch, batch_idx=batch_idx))
self.fabric.call("on_before_zero_grad", optimizer)
optimizer.zero_grad()
else:
# gradient accumulation -> no optimizer step
self.training_step(model=model, batch=batch, batch_idx=batch_idx)
self.fabric.call("on_train_batch_end", self._current_train_return, batch, batch_idx)
# this guard ensures, we only step the scheduler once per global step
if should_optim_step:
self.step_scheduler(model, scheduler_cfg, level="step", current_value=self.global_step)
# add output values to progress bar
self._format_iterable(iterable, self._current_train_return, "train")
# only increase global step if optimizer stepped
self.global_step += int(should_optim_step)
# stopping criterion on step level
if self.max_steps is not None and self.global_step >= self.max_steps:
self.should_stop = True
break
self.fabric.call("on_train_epoch_end")
def val_loop(
self,
model: L.LightningModule,
val_loader: Optional[torch.utils.data.DataLoader],
limit_batches: Union[int, float] = float("inf"),
):
"""The validation loop running a single validation epoch.
Args:
model: the LightningModule to evaluate
val_loader: The dataloader yielding the validation batches.
limit_batches: Limits the batches during this validation epoch.
If greater than the number of batches in the ``val_loader``, this has no effect.
"""
# no validation if val_loader wasn't passed
if val_loader is None:
return
# no validation but warning if val_loader was passed, but validation_step not implemented
if val_loader is not None and not is_overridden("validation_step", _unwrap_objects(model)):
L.fabric.utilities.rank_zero_warn(
"Your LightningModule does not have a validation_step implemented, "
"but you passed a validation dataloder. Skipping Validation."
)
return
if not is_overridden("on_validation_model_eval", _unwrap_objects(model)):
model.eval()
else:
self.fabric.call("on_validation_model_eval") # calls `model.eval()`
torch.set_grad_enabled(False)
self.fabric.call("on_validation_epoch_start")
iterable = self.progbar_wrapper(val_loader, total=min(len(val_loader), limit_batches), desc="Validation")
for batch_idx, batch in enumerate(iterable):
# end epoch if stopping training completely or max batches for this epoch reached
if self.should_stop or batch_idx >= limit_batches:
break
self.fabric.call("on_validation_batch_start", batch, batch_idx)
out = model.validation_step(batch, batch_idx)
# avoid gradients in stored/accumulated values -> prevents potential OOM
out = apply_to_collection(out, torch.Tensor, lambda x: x.detach())
self.fabric.call("on_validation_batch_end", out, batch, batch_idx)
self._current_val_return = out
self._format_iterable(iterable, self._current_val_return, "val")
self.fabric.call("on_validation_epoch_end")
if not is_overridden("on_validation_model_train", _unwrap_objects(model)):
model.train()
else:
self.fabric.call("on_validation_model_train")
torch.set_grad_enabled(True)
def training_step(self, model: L.LightningModule, batch: Any, batch_idx: int) -> torch.Tensor:
"""A single training step, running forward and backward. The optimizer step is called separately, as this is
given as a closure to the optimizer step.
Args:
model: the lightning module to train
batch: the batch to run the forward on
batch_idx: index of the current batch w.r.t the current epoch
"""
outputs: Union[torch.Tensor, Mapping[str, Any]] = model.training_step(batch, batch_idx=batch_idx)
loss = outputs if isinstance(outputs, torch.Tensor) else outputs["loss"]
self.fabric.call("on_before_backward", loss)
self.fabric.backward(loss)
self.fabric.call("on_after_backward")
# avoid gradients in stored/accumulated values -> prevents potential OOM
self._current_train_return = apply_to_collection(outputs, dtype=torch.Tensor, function=lambda x: x.detach())
return loss
def step_scheduler(
self,
model: L.LightningModule,
scheduler_cfg: Optional[Mapping[str, Union[L.fabric.utilities.types.LRScheduler, bool, str, int]]],
level: Literal["step", "epoch"],
current_value: int,
) -> None:
"""Steps the learning rate scheduler if necessary.
Args:
model: The LightningModule to train
scheduler_cfg: The learning rate scheduler configuration.
Have a look at :meth:`lightning.pytorch.LightningModule.configure_optimizers` for supported values.
level: whether we are trying to step on epoch- or step-level
current_value: Holds the current_epoch if ``level==epoch``, else holds the ``global_step``
"""
# no scheduler
if scheduler_cfg is None:
return
# wrong interval (step vs. epoch)
if scheduler_cfg["interval"] != level:
return
# right interval, but wrong step wrt frequency
if current_value % cast(int, scheduler_cfg["frequency"]) != 0:
return
# assemble potential monitored values
possible_monitor_vals = {None: None}
if isinstance(self._current_train_return, torch.Tensor):
possible_monitor_vals.update("train_loss", self._current_train_return)
elif isinstance(self._current_train_return, Mapping):
possible_monitor_vals.update({"train_" + k: v for k, v in self._current_train_return.items()})
if isinstance(self._current_val_return, torch.Tensor):
possible_monitor_vals.update("val_loss", self._current_val_return)
elif isinstance(self._current_val_return, Mapping):
possible_monitor_vals.update({"val_" + k: v for k, v in self._current_val_return.items()})
try:
monitor = possible_monitor_vals[cast(Optional[str], scheduler_cfg["monitor"])]
except KeyError as ex:
possible_keys = list(possible_monitor_vals.keys())
raise KeyError(
f"monitor {scheduler_cfg['monitor']} is invalid. Possible values are {possible_keys}."
) from ex
# rely on model hook for actual step
model.lr_scheduler_step(scheduler_cfg["scheduler"], monitor)
@property
def should_validate(self) -> bool:
"""Whether to currently run validation."""
return self.current_epoch % self.validation_frequency == 0
def progbar_wrapper(self, iterable: Iterable, total: int, **kwargs: Any):
"""Wraps the iterable with tqdm for global rank zero.
Args:
iterable: the iterable to wrap with tqdm
total: the total length of the iterable, necessary in case the number of batches was limited.
"""
if self.fabric.is_global_zero:
return tqdm(iterable, total=total, **kwargs)
return iterable
def load(self, state: Optional[Mapping], path: str) -> None:
"""Loads a checkpoint from a given file into state.
Args:
state: a mapping containing model, optimizer and lr scheduler
path: the path to load the checkpoint from
"""
if state is None:
state = {}
remainder = self.fabric.load(path, state)
self.global_step = remainder.pop("global_step")
self.current_epoch = remainder.pop("current_epoch")
if remainder:
raise RuntimeError(f"Unused Checkpoint Values: {remainder}")
def save(self, state: Optional[Mapping]) -> None:
"""Saves a checkpoint to the ``checkpoint_dir``
Args:
state: A mapping containing model, optimizer and lr scheduler.
"""
if state is None:
state = {}
state.update(global_step=self.global_step, current_epoch=self.current_epoch)
self.fabric.save(os.path.join(self.checkpoint_dir, f"epoch-{self.current_epoch:04d}.ckpt"), state)
@staticmethod
def get_latest_checkpoint(checkpoint_dir: str) -> Optional[str]:
"""Returns the latest checkpoint from the ``checkpoint_dir``
Args:
checkpoint_dir: the directory to search for checkpoints
"""
if not os.path.isdir(checkpoint_dir):
return None
items = sorted(os.listdir(checkpoint_dir))
if not items:
return None
return os.path.join(checkpoint_dir, items[-1])
def _parse_optimizers_schedulers(
self, configure_optim_output
) -> tuple[
Optional[L.fabric.utilities.types.Optimizable],
Optional[Mapping[str, Union[L.fabric.utilities.types.LRScheduler, bool, str, int]]],
]:
"""Recursively parses the output of :meth:`lightning.pytorch.LightningModule.configure_optimizers`.
Args:
configure_optim_output: The output of ``configure_optimizers``.
For supported values, please refer to :meth:`lightning.pytorch.LightningModule.configure_optimizers`.
"""
_lr_sched_defaults = {"interval": "epoch", "frequency": 1, "monitor": "val_loss"}
# single optimizer
if isinstance(configure_optim_output, L.fabric.utilities.types.Optimizable):
return configure_optim_output, None
# single lr scheduler
if isinstance(configure_optim_output, L.fabric.utilities.types.LRScheduler):
return None, _lr_sched_defaults.update(scheduler=configure_optim_output)
# single lr scheduler config
if isinstance(configure_optim_output, Mapping):
_lr_sched_defaults.update(configure_optim_output)
return None, _lr_sched_defaults
# list or tuple
if isinstance(configure_optim_output, (list, tuple)):
if all(isinstance(_opt_cand, L.fabric.utilities.types.Optimizable) for _opt_cand in configure_optim_output):
# single optimizer in list
if len(configure_optim_output) == 1:
return configure_optim_output[0][0], None
raise NotImplementedError("BYOT only supports a single optimizer")
if all(
isinstance(_lr_cand, (L.fabric.utilities.types.LRScheduler, Mapping))
for _lr_cand in configure_optim_output
):
# single scheduler in list
if len(configure_optim_output) == 1:
return None, self._parse_optimizers_schedulers(configure_optim_output[0])[1]
# optimizer and lr scheduler
elif len(configure_optim_output) == 2:
opt_cands, lr_cands = (
self._parse_optimizers_schedulers(configure_optim_output[0])[0],
self._parse_optimizers_schedulers(configure_optim_output[1])[1],
)
return opt_cands, lr_cands
return None, None
@staticmethod
def _format_iterable(
prog_bar, candidates: Optional[Union[torch.Tensor, Mapping[str, Union[torch.Tensor, float, int]]]], prefix: str
):
"""Adds values as postfix string to progressbar.
Args:
prog_bar: a progressbar (on global rank zero) or an iterable (every other rank).
candidates: the values to add as postfix strings to the progressbar.
prefix: the prefix to add to each of these values.
"""
if isinstance(prog_bar, tqdm) and candidates is not None:
postfix_str = ""
float_candidates = apply_to_collection(candidates, torch.Tensor, lambda x: x.item())
if isinstance(candidates, torch.Tensor):
postfix_str += f" {prefix}_loss: {float_candidates:.3f}"
elif isinstance(candidates, Mapping):
for k, v in float_candidates.items():
postfix_str += f" {prefix}_{k}: {v:.3f}"
if postfix_str:
prog_bar.set_postfix_str(postfix_str)
| MyCustomTrainer |
python | explosion__spaCy | spacy/lang/fr/__init__.py | {
"start": 741,
"end": 1380
} | class ____(Language):
lang = "fr"
Defaults = FrenchDefaults
@French.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={
"model": None,
"mode": "rule",
"overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
overwrite: bool,
scorer: Optional[Callable],
):
return FrenchLemmatizer(
nlp.vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
__all__ = ["French"]
| French |
python | pytorch__pytorch | test/inductor/test_flex_flash.py | {
"start": 7700,
"end": 24012
} | class ____(InductorTestCase):
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_basic(self, device, dtype):
q, k, v = create_test_tensors(dtype=dtype, device=device)
flash_vs_triton(q, k, v)
@dtypes(torch.float16, torch.bfloat16)
@parametrize("score_mod", [_times_two, _causal, _rel_bias], name_fn=name_fn)
def test_flash_attention_with_score_mod(self, device, dtype, score_mod):
q, k, v = create_test_tensors(dtype=dtype, device=device)
flash_vs_triton(q, k, v, score_mod=score_mod)
@dtypes(torch.float16, torch.bfloat16)
@parametrize("seq_len", [127, 255, 383, 511])
def test_flash_attention_unfriendly_seqlen_with_causal(
self, device, dtype, seq_len
):
"""Test flash attention with unfriendly sequence lengths and causal masking."""
q, k, v = create_test_tensors(seq_len=seq_len, dtype=dtype, device=device)
flash_vs_triton(q, k, v, score_mod=_causal)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_kernel_called(self, device, dtype):
"""Test that flash attention kernel is actually called when BACKEND='FLASH'."""
q, k, v = create_test_tensors(dtype=dtype, device=device)
compiled_fn = torch.compile(flex_attention)
# Test that flash kernel is called with BACKEND='FLASH'
with cuda_kernel_profiler("flash_attncute") as prof_result:
compiled_fn(q, k, v, score_mod=_causal, kernel_options={"BACKEND": "FLASH"})
self.assertTrue(
prof_result["found"],
f"Flash attention kernel not found. Available kernels: {prof_result['kernel_names']}",
)
# Test that flash kernel is NOT called with BACKEND='TRITON'
with cuda_kernel_profiler("flash_attncute") as prof_result:
compiled_fn(
q, k, v, score_mod=_causal, kernel_options={"BACKEND": "TRITON"}
)
self.assertFalse(
prof_result["found"],
f"Flash attention kernel unexpectedly found when BACKEND='TRITON'. Kernels: {prof_result['kernel_names']}",
)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_with_alibi_learned(self, device, dtype):
"""Test flash attention with ALiBi learned slopes (tensor loading)."""
q, k, v = create_test_tensors(dtype=dtype, device=device)
score_mod = create_alibi_learned(num_heads=4, dtype=dtype)
flash_vs_triton(q, k, v, score_mod=score_mod)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_with_pos_bias_table(self, device, dtype):
"""Test flash attention with position bias table (tensor loading)."""
q, k, v = create_test_tensors(dtype=dtype, device=device)
score_mod = create_pos_bias_table(seq_len=512, dtype=dtype)
flash_vs_triton(q, k, v, score_mod=score_mod)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_with_head_scale(self, device, dtype):
"""Test flash attention with head scaling (tensor loading)."""
q, k, v = create_test_tensors(dtype=dtype, device=device)
score_mod = create_head_scale(num_heads=4, dtype=dtype)
flash_vs_triton(q, k, v, score_mod=score_mod)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_with_batch_bias(self, device, dtype):
"""Test flash attention with batch bias (tensor loading)."""
q, k, v = create_test_tensors(dtype=dtype, device=device)
score_mod = create_batch_bias(batch_size=2, dtype=dtype)
flash_vs_triton(q, k, v, score_mod=score_mod)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_with_batch_head_bias(self, device, dtype):
"""Test flash attention with batch-head bias matrix (tensor loading)."""
q, k, v = create_test_tensors(dtype=dtype, device=device)
score_mod = create_batch_head_bias(batch_size=2, num_heads=4, dtype=dtype)
flash_vs_triton(q, k, v, score_mod=score_mod)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_with_dual_buffer_bias(self, device, dtype):
"""Test flash attention with dual buffer loading (tensor loading)."""
q, k, v = create_test_tensors(dtype=dtype, device=device)
score_mod = create_dual_buffer_bias(num_heads=4, seq_len=512, dtype=dtype)
flash_vs_triton(q, k, v, score_mod=score_mod)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_with_score_view_buffer(self, device, dtype):
"""Score modifier should load from a non-contiguous view."""
num_heads = 4
q, k, v = create_test_tensors(num_heads=num_heads, dtype=dtype, device=device)
base_scales = torch.rand(num_heads, 2, device=device, dtype=dtype) + 0.5
scales_view = base_scales[:, 0]
assert not scales_view.is_contiguous()
def score_view_mod(score, b, h, q_idx, kv_idx):
return score + scales_view[h]
flash_vs_triton(q, k, v, score_mod=score_view_mod)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_impl_error_with_requires_grad(self, device, dtype):
"""Test that BACKEND='FLASH' raises error when tensor requires gradients."""
q, k, v = create_test_tensors(dtype=dtype, device=device)
bias = torch.randn(4, device=device, dtype=dtype, requires_grad=True)
def score_mod_with_grad(score, b, h, q_idx, kv_idx):
return score + bias[h]
compiled_fn = torch.compile(flex_attention)
with self.assertRaisesRegex(
RuntimeError,
r"BACKEND='FLASH' but flash attention cannot be used.*require gradients",
):
compiled_fn(
q,
k,
v,
score_mod=score_mod_with_grad,
kernel_options={"BACKEND": "FLASH"},
)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_backward_rejects_mask_mod(self, device, dtype):
q, k, v = create_test_tensors(dtype=dtype, device=device)
def causal_mask(b, h, q_idx, kv_idx):
return q_idx >= kv_idx
block_mask = _create_block_mask_for_device(
causal_mask, 2, 4, 512, 512, device=device
)
q.requires_grad_(True)
compiled_fn = torch.compile(flex_attention)
with self.assertRaisesRegex(
RuntimeError,
r"NYI: Flex Flash Attention doesn't support block_sparsity yet",
):
compiled_fn(
q, k, v, block_mask=block_mask, kernel_options={"BACKEND": "FLASH"}
).sum().backward()
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_backward_rejects_score_mod_capture(self, device, dtype):
q, k, v = create_test_tensors(dtype=dtype, device=device)
bias = torch.randn(4, device=device, dtype=dtype)
def score_mod_with_capture(score, b, h, q_idx, kv_idx):
return score + bias[h]
q.requires_grad_(True)
compiled_fn = torch.compile(flex_attention)
with self.assertRaisesRegex(
RuntimeError,
r"NYI: Flex Flash Attention doesn't support score_mods in bwds yet",
):
compiled_fn(
q,
k,
v,
score_mod=score_mod_with_capture,
kernel_options={"BACKEND": "FLASH"},
).sum().backward()
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_backward_rejects_score_mod(self, device, dtype):
q, k, v = create_test_tensors(dtype=dtype, device=device)
def score_mod_twice(score, b, h, q_idx, kv_idx):
return score * 2
q.requires_grad_(True)
k.requires_grad_(True)
v.requires_grad_(True)
compiled_fn = torch.compile(flex_attention)
with self.assertRaisesRegex(
RuntimeError,
r"NYI: Flex Flash Attention doesn't support score_mods in bwds yet",
):
compiled_fn(
q,
k,
v,
score_mod=score_mod_twice,
kernel_options={"BACKEND": "FLASH"},
).sum().backward()
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_backward_kernel_called(self, device, dtype):
q, k, v = create_test_tensors(dim=128, dtype=dtype, device=device)
q.requires_grad_(True)
k.requires_grad_(True)
v.requires_grad_(True)
flash_vs_triton(q, k, v)
compiled_fn = torch.compile(flex_attention)
def run_for_profile():
q_run, k_run, v_run = (
t.detach().clone().requires_grad_(True) for t in (q, k, v)
)
compiled_fn(
q_run, k_run, v_run, kernel_options={"BACKEND": "FLASH"}
).sum().backward()
with cuda_kernel_profiler("flash_attncuteflash_bwd") as prof_result:
run_for_profile()
self.assertTrue(
prof_result["found"],
f"Flash attention backward kernel not found. Kernels: {prof_result['kernel_names']}",
)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_with_block_mask(self, device, dtype):
"""Test flash attention with block mask and mask_mod."""
q, k, v = create_test_tensors(dtype=dtype, device=device)
def causal_mask(b, h, q_idx, kv_idx):
return q_idx >= kv_idx
block_mask = _create_block_mask_for_device(
causal_mask, 2, 4, 512, 512, device=device
)
flash_vs_triton(q, k, v, block_mask=block_mask)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_block_mask_with_score_mod(self, device, dtype):
"""Test flash attention with both block mask and score_mod."""
q, k, v = create_test_tensors(dtype=dtype, device=device)
def causal_mask(b, h, q_idx, kv_idx):
return q_idx >= kv_idx
block_mask = _create_block_mask_for_device(
causal_mask, 2, 4, 512, 512, device=device
)
flash_vs_triton(q, k, v, score_mod=_times_two, block_mask=block_mask)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_with_mask_mod_buffer(self, device, dtype):
"""Test flash attention with mask_mod that loads from buffer."""
q, k, v = create_test_tensors(
batch_size=2, num_heads=4, dtype=dtype, device=device
)
mask_bias = torch.randn(4, device=device, dtype=dtype) * 0.1
def custom_mask(b, h, q_idx, kv_idx):
bias_value = mask_bias[h]
return (q_idx >= kv_idx) | (bias_value > 0)
block_mask = _create_block_mask_for_device(
custom_mask, 2, 4, 512, 512, device=device
)
flash_vs_triton(q, k, v, block_mask=block_mask)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_with_doc_mask(self, device, dtype):
"""Test flash attention with a document-aware mask_mod."""
# Use shorter sequences to make the document layout explicit.
seq_len = 128
q, k, v = create_test_tensors(
batch_size=2, num_heads=4, seq_len=seq_len, dtype=dtype, device=device
)
lengths_per_batch = (
(16, 31, 25, 56), # batch 0
(40, 9, 23, 56), # batch 1 uses a different document arrangement
)
document_ids = []
for lengths in lengths_per_batch:
assert sum(lengths) == seq_len
doc_tokens = []
for doc_id, length in enumerate(lengths):
doc_tokens.extend([doc_id] * length)
document_ids.append(doc_tokens)
document_ids = torch.tensor(document_ids, device=device, dtype=torch.long)
def document_mask(b, _h, q_idx, kv_idx):
doc_id_q = document_ids[b, q_idx // 2]
doc_id_kv = document_ids[b, kv_idx]
return doc_id_q == doc_id_kv
block_mask = _create_block_mask_for_device(
document_mask, 2, 1, seq_len, seq_len, device=device
)
flash_vs_triton(q, k, v, block_mask=block_mask)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_mask_mod_with_view_buffer(self, device, dtype):
"""Mask modifier should support buffers that are non-contiguous views."""
batch_size, num_heads, seq_len = 2, 4, 512
q, k, v = create_test_tensors(
batch_size=batch_size, num_heads=num_heads, dtype=dtype, device=device
)
base_bias = torch.randn(num_heads, 3, device=device, dtype=dtype)
mask_bias_view = base_bias[:, 1]
assert not mask_bias_view.is_contiguous()
def mask_with_view_buffer(b, h, q_idx, kv_idx):
bias_value = mask_bias_view[h]
double_bias = bias_value * 2
return (q_idx >= kv_idx) | (double_bias > 0)
block_mask = _create_block_mask_for_device(
mask_with_view_buffer,
batch_size,
num_heads,
seq_len,
seq_len,
device=device,
)
flash_vs_triton(q, k, v, block_mask=block_mask)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_mask_mod_with_dual_buffers(self, device, dtype):
"""Mask modifier should support multiple captured buffers."""
batch_size, num_heads, seq_len = 2, 4, 512
q, k, v = create_test_tensors(
batch_size=batch_size, num_heads=num_heads, dtype=dtype, device=device
)
head_bias = torch.randn(num_heads, device=device, dtype=dtype) * 0.2
batch_bias = torch.randn(batch_size, device=device, dtype=dtype) * 0.2
def dual_buffer_mask(b, h, q_idx, kv_idx):
head_term = head_bias[h]
batch_term = batch_bias[b]
causal = q_idx >= kv_idx
bias_cond = (head_term + batch_term).to(torch.float32) > 0
return causal | bias_cond
block_mask = _create_block_mask_for_device(
dual_buffer_mask, batch_size, num_heads, seq_len, seq_len, device=device
)
flash_vs_triton(q, k, v, block_mask=block_mask)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_score_mod_with_many_buffer_indexing(self, device, dtype):
batch_size, num_heads, seq_len = 2, 4, 512
q, k, v = create_test_tensors(
batch_size=batch_size, num_heads=num_heads, dtype=dtype, device=device
)
head_bias = torch.randn(num_heads, device=device, dtype=dtype) * 0.15
query_scale = torch.randn(seq_len, device=device, dtype=dtype) * 0.05
kv_scale = torch.randn(seq_len, device=device, dtype=dtype) * 0.05
batch_bias = torch.randn(batch_size, device=device, dtype=dtype) * 0.1
def complex_score(score, b, h, q_idx, kv_idx):
head_term = head_bias[h]
query_term = query_scale[q_idx]
kv_term = kv_scale[kv_idx]
batch_term = batch_bias[b]
return score + head_term + query_term - kv_term + batch_term
flash_vs_triton(q, k, v, score_mod=complex_score)
@dtypes(torch.float16, torch.bfloat16)
def test_flash_attention_with_score_and_mask_buffers(self, device, dtype):
"""Test flash attention with both score_mod and mask_mod using buffers."""
q, k, v = create_test_tensors(
batch_size=2, num_heads=4, dtype=dtype, device=device
)
score_bias = torch.randn(4, device=device, dtype=dtype) * 0.2
mask_bias = torch.randn(4, device=device, dtype=dtype) * 0.1
def score_with_buffer(score, b, h, q_idx, kv_idx):
return score + score_bias[h]
def mask_with_buffer(b, h, q_idx, kv_idx):
bias_value = mask_bias[h]
return (q_idx >= kv_idx) | (bias_value > 0)
block_mask = _create_block_mask_for_device(
mask_with_buffer, 2, 4, 512, 512, device=device
)
flash_vs_triton(q, k, v, score_mod=score_with_buffer, block_mask=block_mask)
instantiate_device_type_tests(TestFlexFlash, globals(), only_for="cuda")
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
run_tests()
| TestFlexFlash |
python | has2k1__plotnine | plotnine/_utils/context.py | {
"start": 2389,
"end": 3906
} | class ____:
"""
Context within which a plot composition is built
Parameters
----------
cmp :
composition object to be built within the context.
show :
Whether to show the plot.
"""
cmp: Compose
show: bool
def __post_init__(self):
import matplotlib as mpl
# The dpi is needed when the figure is created, either as
# a parameter to plt.figure() or an rcParam.
# https://github.com/matplotlib/matplotlib/issues/24644
# When drawing the Composition, the dpi themeable is infective
# because it sets the rcParam after this figure is created.
rcParams = {"figure.dpi": self.cmp.last_plot.theme.getp("dpi")}
self._rc_context = mpl.rc_context(rcParams)
def __enter__(self) -> Self:
"""
Enclose in matplolib & pandas environments
"""
self._rc_context.__enter__()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
import matplotlib.pyplot as plt
if exc_type is None:
if self.show:
if is_closed(self.cmp.figure):
reopen(self.cmp.figure)
plt.show()
else:
plt.close(self.cmp.figure)
else:
# There is an exception, close any figure
if hasattr(self.cmp, "figure"):
plt.close(self.cmp.figure)
self._rc_context.__exit__(exc_type, exc_value, exc_traceback)
| plot_composition_context |
python | mlflow__mlflow | dev/clint/src/clint/linter.py | {
"start": 2924,
"end": 4012
} | class ____:
rule: rules.Rule
path: Path
range: Range
cell: int | None = None
def __str__(self) -> str:
# Use the same format as ruff
cell_loc = f"cell {self.cell}:" if self.cell is not None else ""
return (
# Since `Range` is 0-indexed, lineno and col_offset are incremented by 1
f"{self.path}:{cell_loc}{self.range.shift(Position(1, 1))}: "
f"{self.rule.id}: {self.rule.message} "
f"See dev/clint/README.md for instructions on ignoring this rule ({self.rule.name})."
)
def json(self) -> dict[str, str | int | None]:
return {
"type": "error",
"module": None,
"obj": None,
"line": self.range.start.line,
"column": self.range.start.column,
"endLine": self.range.end.line,
"endColumn": self.range.end.column,
"path": str(self.path),
"symbol": self.rule.name,
"message": self.rule.message,
"message-id": self.rule.id,
}
@dataclass
| Violation |
python | google__pytype | pytype/pyi/parser_test.py | {
"start": 17809,
"end": 18442
} | class ____(parser_test_base.ParserTestBase):
def test_annotation(self):
self.check(
"""
class A: ...
x: "A"
y: "List[A]" = ...
""",
"""
x: A
y: List[A] = ...
class A: ...
""",
)
def test_def(self):
self.check(
"""
def f(x: "int", *args: "float", y: "str", **kwargs: "bool") -> "str": ...
""",
"""
def f(x: int, *args: float, y: str, **kwargs: bool) -> str: ...
""",
)
def test_partial_quotes(self):
self.check(
"""
x: List["A"]
""",
"""
x: List[A]
""",
)
| QuotedTypeTest |
python | getsentry__sentry | src/sentry/dynamic_sampling/rules/utils.py | {
"start": 3393,
"end": 3480
} | class ____(TypedDict):
type: str
decayedValue: NotRequired[str | None]
| DecayingFn |
python | google__jax | jax/_src/cudnn/fused_attention_stablehlo.py | {
"start": 2315,
"end": 2373
} | class ____(enum.Enum):
BTNH = 0
BNTH = 1
| AttentionLayout |
python | getsentry__sentry | tests/sentry/api/serializers/test_apitoken.py | {
"start": 238,
"end": 527
} | class ____(TestCase):
def setUp(self) -> None:
self._user = self.create_user()
self._scopes = ["test_scope"]
self._token = self.create_user_auth_token(user=self._user, scope_list=self._scopes)
self._serializer = ApiTokenSerializer()
| TestApiTokenSerializer |
python | pydata__xarray | xarray/tests/test_merge.py | {
"start": 32679,
"end": 35398
} | class ____:
def test_merge_datasets_false_warning(self):
data = create_test_data(add_attrs=False, use_extension_array=True)
with set_options(use_new_combine_kwarg_defaults=False):
old = xr.merge([data, data])
with set_options(use_new_combine_kwarg_defaults=True):
new = xr.merge([data, data])
assert_identical(old, new)
def test_merge(self):
data = create_test_data()
ds1 = data[["var1"]]
ds2 = data[["var3"]]
expected = data[["var1", "var3"]]
with set_options(use_new_combine_kwarg_defaults=True):
actual = ds1.merge(ds2)
assert_identical(expected, actual)
actual = ds2.merge(ds1)
assert_identical(expected, actual)
actual = data.merge(data)
assert_identical(data, actual)
ds1.merge(ds2.rename({"var3": "var1"}))
with pytest.raises(ValueError, match=r"should be coordinates or not"):
data.reset_coords().merge(data)
with pytest.raises(ValueError, match=r"should be coordinates or not"):
data.merge(data.reset_coords())
def test_merge_broadcast_equals(self):
ds1 = xr.Dataset({"x": 0})
ds2 = xr.Dataset({"x": ("y", [0, 0])})
with set_options(use_new_combine_kwarg_defaults=False):
with pytest.warns(
FutureWarning,
match="will change from compat='no_conflicts' to compat='override'",
):
old = ds1.merge(ds2)
with set_options(use_new_combine_kwarg_defaults=True):
new = ds1.merge(ds2)
assert_identical(ds2, old)
with pytest.raises(AssertionError):
assert_identical(old, new)
def test_merge_auto_align(self):
ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
ds2 = xr.Dataset({"b": ("x", [3, 4]), "x": [1, 2]})
expected = xr.Dataset(
{"a": ("x", [1, 2, np.nan]), "b": ("x", [np.nan, 3, 4])}, {"x": [0, 1, 2]}
)
with set_options(use_new_combine_kwarg_defaults=False):
with pytest.warns(
FutureWarning, match="will change from join='outer' to join='exact'"
):
assert expected.identical(ds1.merge(ds2))
with pytest.warns(
FutureWarning, match="will change from join='outer' to join='exact'"
):
assert expected.identical(ds2.merge(ds1))
with set_options(use_new_combine_kwarg_defaults=True):
with pytest.raises(ValueError, match="might be related to new default"):
expected.identical(ds2.merge(ds1))
| TestNewDefaults |
python | realpython__materials | hangman-pysimplegui/source_code_step_5/hangman.py | {
"start": 112,
"end": 6950
} | class ____:
def __init__(self) -> None:
layout = [
[
self._build_canvas_frame(),
self._build_letters_frame(),
],
[
self._build_guessed_word_frame(),
],
[
self._build_action_buttons_frame(),
],
]
self._window = sg.Window(title="Hangman", layout=layout, finalize=True)
self._canvas = self._window["-CANVAS-"]
self._new_game()
def _build_canvas_frame(self):
return sg.Frame(
"Hangman",
[
[
sg.Graph(
key="-CANVAS-",
canvas_size=(200, 400),
graph_bottom_left=(0, 0),
graph_top_right=(200, 400),
)
]
],
font="Any 20",
)
def _build_letters_frame(self):
letter_groups = [
ascii_uppercase[i : i + 4]
for i in range(0, len(ascii_uppercase), 4)
]
letter_buttons = [
[
sg.Button(
button_text=f" {letter} ",
font="Courier 20",
border_width=0,
button_color=(None, sg.theme_background_color()),
key=f"-letter-{letter}-",
enable_events=True,
)
for letter in letter_group
]
for letter_group in letter_groups
]
return sg.Column(
[
[
sg.Frame(
"Letters",
letter_buttons,
font="Any 20",
),
sg.Sizer(),
]
]
)
def _build_guessed_word_frame(self):
return sg.Frame(
"",
[
[
sg.Text(
key="-DISPLAY-WORD-",
font="Courier 20",
)
]
],
element_justification="center",
)
def _build_action_buttons_frame(self):
return sg.Frame(
"",
[
[
sg.Sizer(h_pixels=90),
sg.Button(
button_text="New",
key="-NEW-",
font="Any 20",
),
sg.Sizer(h_pixels=60),
sg.Button(
button_text="Restart",
key="-RESTART-",
font="Any 20",
),
sg.Sizer(h_pixels=60),
sg.Button(
button_text="Quit",
key="-QUIT-",
font="Any 20",
),
sg.Sizer(h_pixels=90),
]
],
font="Any 20",
)
def _draw_scaffold(self):
lines = [
((40, 55), (180, 55), 10),
((165, 60), (165, 365), 10),
((160, 360), (100, 360), 10),
((100, 365), (100, 330), 10),
((100, 330), (100, 310), 1),
]
for *points, width in lines:
self._canvas.DrawLine(*points, color="black", width=width)
def _draw_hanged_man(self):
head = (100, 290)
torso = [((100, 270), (100, 170))]
left_arm = [
((100, 250), (80, 250)),
((80, 250), (60, 210)),
((60, 210), (60, 190)),
]
right_arm = [
((100, 250), (120, 250)),
((120, 250), (140, 210)),
((140, 210), (140, 190)),
]
left_leg = [
((100, 170), (80, 170)),
((80, 170), (70, 140)),
((70, 140), (70, 80)),
((70, 80), (60, 80)),
]
right_leg = [
((100, 170), (120, 170)),
((120, 170), (130, 140)),
((130, 140), (130, 80)),
((130, 80), (140, 80)),
]
body = [
torso,
left_arm,
right_arm,
left_leg,
right_leg,
]
if self._wrong_guesses == 1:
self._canvas.DrawCircle(head, 20, line_color="red", line_width=2)
elif self._wrong_guesses > 1:
for part in body[self._wrong_guesses - 2]:
self._canvas.DrawLine(*part, color="red", width=2)
def _select_word(self):
with open("words.txt", mode="r", encoding="utf-8") as words:
word_list = words.readlines()
return choice(word_list).strip().upper()
def _build_guessed_word(self):
current_letters = []
for letter in self._target_word:
if letter in self._guessed_letters:
current_letters.append(letter)
else:
current_letters.append("_")
return " ".join(current_letters)
def _new_game(self):
self._target_word = self._select_word()
self._restart_game()
def _restart_game(self):
self._guessed_letters = set()
self._wrong_guesses = 0
self._guessed_word = self._build_guessed_word()
# Restart GUI
self._canvas.erase()
self._draw_scaffold()
for letter in ascii_uppercase:
self._window[f"-letter-{letter}-"].update(disabled=False)
self._window["-DISPLAY-WORD-"].update(self._guessed_word)
def _play(self, letter):
if letter not in self._target_word:
self._wrong_guesses += 1
self._guessed_letters.add(letter)
self._guessed_word = self._build_guessed_word()
# Update GUI
self._window[f"-letter-{letter}-"].update(disabled=True)
self._window["-DISPLAY-WORD-"].update(self._guessed_word)
self._draw_hanged_man()
def read_event(self):
return self._window.read()
def process_event(self, event):
if event[:8] == "-letter-":
self._play(letter=event[8])
elif event == "-RESTART-":
self._restart_game()
elif event == "-NEW-":
self._new_game()
def is_over(self):
return any(
[
self._wrong_guesses == MAX_WRONG_GUESSES,
set(self._target_word) <= self._guessed_letters,
]
)
def close(self):
self._window.close()
if __name__ == "__main__":
game = Hangman()
# Event loop
while not game.is_over():
event, values = game.read_event()
if event in {sg.WIN_CLOSED}:
break
game.process_event(event)
game.close()
| Hangman |
python | arrow-py__arrow | arrow/locales.py | {
"start": 51989,
"end": 52086
} | class ____(PortugueseLocale):
names = ["pt-br"]
past = "faz {0}"
| BrazilianPortugueseLocale |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dlp.py | {
"start": 8904,
"end": 9798
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_delete_deidentify_template(self, mock_hook):
mock_hook.return_value.delete_deidentify_template.return_value = mock.MagicMock()
operator = CloudDLPDeleteDeidentifyTemplateOperator(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID, task_id="id"
)
operator.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.delete_deidentify_template.assert_called_once_with(
template_id=TEMPLATE_ID,
organization_id=ORGANIZATION_ID,
project_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudDLPDeleteDeidentifyTemplateOperator |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 10927,
"end": 11007
} | class ____(Exception):
"""Contract source cannot be parsed."""
| ParserException |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.