language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/service/data_service_ops_test.py | {
"start": 46274,
"end": 48026
} | class ____(
data_service_test_base.TestBase, parameterized.TestCase
):
@combinations.generate(test_base.default_test_combinations())
def testExplicitProtocolFromDatasetId(self):
cluster = self.make_test_cluster(
num_workers=1, data_transfer_protocol="grpc"
)
range_ds = dataset_ops.Dataset.range(10)
dataset_id = data_service_ops.register_dataset(
cluster.dispatcher.target, range_ds
)
ds = data_service_ops.from_dataset_id(
dataset_id=dataset_id,
processing_mode="parallel_epochs",
element_spec=range_ds.element_spec,
service=cluster.dispatcher.target,
data_transfer_protocol="grpc",
)
self.assertDatasetProduces(ds, list(range(10)))
@combinations.generate(test_base.default_test_combinations())
def testDistributeExplicitProtocol(self):
cluster = self.make_test_cluster(
num_workers=1, data_transfer_protocol="grpc"
)
ds = dataset_ops.Dataset.range(10)
ds = ds.apply(
data_service_ops.distribute(
processing_mode="parallel_epochs",
service="grpc://" + cluster.dispatcher_address(),
)
)
self.assertDatasetProduces(ds, list(range(10)))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[None, "AUTO"]),
)
)
def testDistributeCompression(self, compression):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 10
ds = self.make_distributed_range_dataset(
num_elements, cluster, compression=compression
)
self.assertDatasetProduces(ds, list(range(num_elements)))
if __name__ == "__main__":
test.main()
| DataServiceOpsGrpcDataTransferTest |
python | run-llama__llama_index | llama-index-core/llama_index/core/storage/chat_store/simple_chat_store.py | {
"start": 943,
"end": 3696
} | class ____(BaseChatStore):
"""Simple chat store. Async methods provide same functionality as sync methods in this class."""
store: Dict[str, List[AnnotatedChatMessage]] = Field(default_factory=dict)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "SimpleChatStore"
def set_messages(self, key: str, messages: List[ChatMessage]) -> None:
"""Set messages for a key."""
self.store[key] = messages
def get_messages(self, key: str) -> List[ChatMessage]:
"""Get messages for a key."""
return self.store.get(key, [])
def add_message(
self, key: str, message: ChatMessage, idx: Optional[int] = None
) -> None:
"""Add a message for a key."""
if idx is None:
self.store.setdefault(key, []).append(message)
else:
self.store.setdefault(key, []).insert(idx, message)
def delete_messages(self, key: str) -> Optional[List[ChatMessage]]:
"""Delete messages for a key."""
if key not in self.store:
return None
return self.store.pop(key)
def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Delete specific message for a key."""
if key not in self.store:
return None
if idx >= len(self.store[key]):
return None
return self.store[key].pop(idx)
def delete_last_message(self, key: str) -> Optional[ChatMessage]:
"""Delete last message for a key."""
if key not in self.store:
return None
return self.store[key].pop()
def get_keys(self) -> List[str]:
"""Get all keys."""
return list(self.store.keys())
def persist(
self,
persist_path: str = "chat_store.json",
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> None:
"""Persist the docstore to a file."""
fs = fs or fsspec.filesystem("file")
dirpath = os.path.dirname(persist_path)
if not fs.exists(dirpath):
fs.makedirs(dirpath)
with fs.open(persist_path, "w") as f:
f.write(self.json())
@classmethod
def from_persist_path(
cls,
persist_path: str = "chat_store.json",
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "SimpleChatStore":
"""Create a SimpleChatStore from a persist path."""
fs = fs or fsspec.filesystem("file")
if not fs.exists(persist_path):
return cls()
with fs.open(persist_path, "r") as f:
data = json.load(f)
if isinstance(data, str):
return cls.model_validate_json(data)
else:
return cls.model_validate(data)
| SimpleChatStore |
python | pikepdf__pikepdf | tests/test_object.py | {
"start": 17783,
"end": 21703
} | class ____:
@pytest.fixture
def stream_object(self):
with pikepdf.new() as pdf:
yield Stream(pdf, b'abc123xyz')
def test_basic(self, stream_object):
stream_object.write(b'abc')
assert stream_object.read_bytes() == b'abc'
def test_compressed_readback(self, stream_object):
stream_object.write(compress(b'def'), filter=Name.FlateDecode)
assert stream_object.read_bytes() == b'def'
def test_stacked_compression(self, stream_object):
double_compressed = compress(compress(b'pointless'))
stream_object.write(
double_compressed, filter=[Name.FlateDecode, Name.FlateDecode]
)
assert stream_object.read_bytes() == b'pointless'
assert stream_object.read_raw_bytes() == double_compressed
def test_explicit_decodeparms(self, stream_object):
double_compressed = compress(compress(b'pointless'))
stream_object.write(
double_compressed,
filter=[Name.FlateDecode, Name.FlateDecode],
decode_parms=[None, None],
)
assert stream_object.read_bytes() == b'pointless'
assert stream_object.read_raw_bytes() == double_compressed
def test_no_kwargs(self, stream_object):
with pytest.raises(TypeError):
stream_object.write(compress(b'x'), [Name.FlateDecode])
def test_ccitt(self, stream_object):
ccitt = b'\x00' # Not valid data, just for testing decode_parms
stream_object.write(
ccitt,
filter=Name.CCITTFaxDecode,
decode_parms=Dictionary(K=-1, Columns=8, Length=1),
)
def test_stream_bytes(self, stream_object):
stream_object.write(b'pi')
assert bytes(stream_object) == b'pi'
def test_invalid_filter(self, stream_object):
with pytest.raises(TypeError, match="filter must be"):
stream_object.write(b'foo', filter=[42])
def test_invalid_decodeparms(self, stream_object):
with pytest.raises(TypeError, match="decode_parms must be"):
stream_object.write(
compress(b'foo'), filter=Name.FlateDecode, decode_parms=[42]
)
def test_filter_decodeparms_mismatch(self, stream_object):
with pytest.raises(ValueError, match=r"filter.*and decode_parms"):
stream_object.write(
compress(b'foo'),
filter=[Name.FlateDecode],
decode_parms=[Dictionary(), Dictionary()],
)
def test_raw_stream_buffer(self, stream_object):
raw_buffer = stream_object.get_raw_stream_buffer()
assert bytes(raw_buffer) == b'abc123xyz'
def test_copy():
d = Dictionary(
{
'/Boolean': True,
'/Integer': 42,
'/Real': Decimal('42.42'),
'/String': String('hi'),
'/Array': Array([1, 2, 3.14]),
'/Dictionary': Dictionary({'/Color': 'Red'}),
}
)
d2 = copy(d)
assert d2 == d
assert d2 is not d
assert d2['/Dictionary'] == d['/Dictionary']
def test_object_iteration(sandwich):
expected = len(sandwich.objects)
loops = 0
for obj in sandwich.objects:
loops += 1
if isinstance(obj, Dictionary):
assert len(obj.keys()) >= 1
assert expected == loops
def test_object_not_iterable():
with pytest.raises(TypeError, match="__iter__ not available"):
iter(pikepdf.Name.A)
@pytest.mark.parametrize(
'obj', [Array([1]), Dictionary({'/A': 'b'}), Operator('q'), String('s')]
)
def test_object_isinstance(obj):
assert isinstance(obj, Array | Dictionary | Operator | String | Stream)
assert isinstance(obj, type(obj))
assert isinstance(obj, Object)
def test_object_classes():
classes = [Array, Dictionary, Operator, String, Stream]
for cls in classes:
assert issubclass(cls, Object)
| TestStreamReadWrite |
python | huggingface__transformers | tests/models/xcodec/test_modeling_xcodec.py | {
"start": 1321,
"end": 3669
} | class ____:
def __init__(
self,
parent,
batch_size=4,
num_channels=1,
sample_rate=16000,
codebook_size=1024,
num_samples=256,
is_training=False,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.sample_rate = sample_rate
self.codebook_size = codebook_size
self.is_training = is_training
self.num_samples = num_samples
self.acoustic_model_config = DacConfig(
decoder_hidden_size=8, encoder_hidden_size=8, codebook_size=16, downsampling_ratios=[16, 16]
)
self.semantic_model_config = HubertConfig(
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=12,
conv_dim=(4, 4, 4, 4, 4, 4, 4),
)
def prepare_config_and_inputs(self):
config = self.get_config()
inputs_dict = {
"input_values": floats_tensor([self.batch_size, self.num_channels, self.num_samples], scale=1.0)
}
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def prepare_config_and_inputs_for_model_class(self, model_class):
config, inputs_dict = self.prepare_config_and_inputs()
codes_length = math.ceil(self.num_samples / config.hop_length)
inputs_dict["audio_codes"] = ids_tensor(
[self.batch_size, config.num_quantizers, codes_length], config.codebook_size
)
return config, inputs_dict
def get_config(self):
return XcodecConfig(
sample_rate=self.sample_rate,
audio_channels=self.num_channels,
codebook_size=self.codebook_size,
acoustic_model_config=self.acoustic_model_config,
semantic_model_config=self.semantic_model_config,
)
def create_and_check_model_forward(self, config, inputs_dict):
model = XcodecModel(config=config).to(torch_device).eval()
result = model(input_values=inputs_dict["input_values"])
self.parent.assertEqual(result.audio_values.shape, (self.batch_size, self.num_channels, self.num_samples))
@require_torch
| XcodecModelTester |
python | django__django | tests/contenttypes_tests/test_views.py | {
"start": 660,
"end": 5087
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
# Don't use the manager to ensure the site exists with pk=1, regardless
# of whether or not it already exists.
cls.site1 = Site(pk=1, domain="testserver", name="testserver")
cls.site1.save()
cls.author1 = Author.objects.create(name="Boris")
cls.article1 = Article.objects.create(
title="Old Article",
slug="old_article",
author=cls.author1,
date_created=datetime.datetime(2001, 1, 1, 21, 22, 23),
)
cls.article2 = Article.objects.create(
title="Current Article",
slug="current_article",
author=cls.author1,
date_created=datetime.datetime(2007, 9, 17, 21, 22, 23),
)
cls.article3 = Article.objects.create(
title="Future Article",
slug="future_article",
author=cls.author1,
date_created=datetime.datetime(3000, 1, 1, 21, 22, 23),
)
cls.scheme1 = SchemeIncludedURL.objects.create(
url="http://test_scheme_included_http/"
)
cls.scheme2 = SchemeIncludedURL.objects.create(
url="https://test_scheme_included_https/"
)
cls.scheme3 = SchemeIncludedURL.objects.create(
url="//test_default_scheme_kept/"
)
def setUp(self):
Site.objects.clear_cache()
def test_shortcut_with_absolute_url(self):
"""
Can view a shortcut for an Author object that has a get_absolute_url
method
"""
for obj in Author.objects.all():
with self.subTest(obj=obj):
short_url = "/shortcut/%s/%s/" % (
ContentType.objects.get_for_model(Author).id,
obj.pk,
)
response = self.client.get(short_url)
self.assertRedirects(
response,
"http://testserver%s" % obj.get_absolute_url(),
target_status_code=404,
)
def test_shortcut_with_absolute_url_including_scheme(self):
"""
Can view a shortcut when object's get_absolute_url returns a full URL
the tested URLs are: "http://...", "https://..." and "//..."
"""
for obj in SchemeIncludedURL.objects.all():
with self.subTest(obj=obj):
short_url = "/shortcut/%s/%s/" % (
ContentType.objects.get_for_model(SchemeIncludedURL).id,
obj.pk,
)
response = self.client.get(short_url)
self.assertRedirects(
response, obj.get_absolute_url(), fetch_redirect_response=False
)
def test_shortcut_no_absolute_url(self):
"""
Shortcuts for an object that has no get_absolute_url() method raise
404.
"""
for obj in Article.objects.all():
with self.subTest(obj=obj):
short_url = "/shortcut/%s/%s/" % (
ContentType.objects.get_for_model(Article).id,
obj.pk,
)
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_wrong_type_pk(self):
short_url = "/shortcut/%s/%s/" % (
ContentType.objects.get_for_model(Author).id,
"nobody/expects",
)
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_shortcut_bad_pk(self):
short_url = "/shortcut/%s/%s/" % (
ContentType.objects.get_for_model(Author).id,
"42424242",
)
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_nonint_content_type(self):
an_author = Author.objects.all()[0]
short_url = "/shortcut/%s/%s/" % ("spam", an_author.pk)
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_bad_content_type(self):
an_author = Author.objects.all()[0]
short_url = "/shortcut/%s/%s/" % (42424242, an_author.pk)
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
@override_settings(ROOT_URLCONF="contenttypes_tests.urls")
| ContentTypesViewsTests |
python | django__django | tests/staticfiles_tests/test_liveserver.py | {
"start": 674,
"end": 886
} | class ____(StaticLiveServerTestCase):
available_apps = []
@classmethod
def setUpClass(cls):
cls.enterClassContext(override_settings(**TEST_SETTINGS))
super().setUpClass()
| LiveServerBase |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/beta/beta.py | {
"start": 3429,
"end": 4032
} | class ____:
def __init__(self, beta: Beta) -> None:
self._beta = beta
@cached_property
def models(self) -> ModelsWithRawResponse:
return ModelsWithRawResponse(self._beta.models)
@cached_property
def messages(self) -> MessagesWithRawResponse:
return MessagesWithRawResponse(self._beta.messages)
@cached_property
def files(self) -> FilesWithRawResponse:
return FilesWithRawResponse(self._beta.files)
@cached_property
def skills(self) -> SkillsWithRawResponse:
return SkillsWithRawResponse(self._beta.skills)
| BetaWithRawResponse |
python | Pylons__pyramid | src/pyramid/security.py | {
"start": 5572,
"end": 6344
} | class ____(int):
def __new__(cls, s, *args):
"""
Create a new instance.
:param fmt: A format string explaining the reason for denial.
:param args: Arguments are stored and used with the format string
to generate the ``msg``.
"""
inst = int.__new__(cls, cls.boolval)
inst.s = s
inst.args = args
return inst
@property
def msg(self):
"""A string indicating why the result was generated."""
return self.s % self.args
def __str__(self):
return self.msg
def __repr__(self):
return '<{} instance at {} with msg {!r}>'.format(
self.__class__.__name__,
id(self),
self.msg,
)
| PermitsResult |
python | numba__numba | numba/cuda/tests/cudapy/test_vector_type.py | {
"start": 6479,
"end": 10515
} | class ____(CUDATestCase):
def test_basic(self):
"""Basic test that makes sure that vector type and aliases
are available within the cuda module from both device and
simulator mode. This is an important sanity check, since other
tests below tests the vector type objects programmatically.
"""
@cuda.jit("void(float64[:])")
def kernel(arr):
v1 = cuda.float64x4(1.0, 3.0, 5.0, 7.0)
v2 = cuda.short2(10, 11)
arr[0] = v1.x
arr[1] = v1.y
arr[2] = v1.z
arr[3] = v1.w
arr[4] = v2.x
arr[5] = v2.y
res = np.zeros(6, dtype=np.float64)
kernel[1, 1](res)
self.assertTrue(np.allclose(res, [1.0, 3.0, 5.0, 7.0, 10, 11]))
def test_creation_readout(self):
for vty in vector_types.values():
with self.subTest(vty=vty):
arr = np.zeros((vty.num_elements,))
kernel = make_kernel(vty)
kernel[1, 1](arr)
np.testing.assert_almost_equal(
arr, np.array(range(vty.num_elements))
)
def test_fancy_creation_readout(self):
for vty in vector_types.values():
with self.subTest(vty=vty):
kernel = make_fancy_creation_kernel(vty)
expected = np.array([
# 1-component vectors
1,
1,
# 2-component vectors
2, 3,
1, 3,
2, 1,
1, 1,
2, 3,
# 3-component vectors
2, 3, 1,
2, 3, 1,
1, 2, 3,
1, 2, 3,
1, 2, 3,
1, 2, 3,
1, 1, 3,
1, 2, 1,
1, 1, 3,
1, 1, 1,
1, 2, 1,
1, 1, 1,
2, 3, 1,
# 4-component vectors
1, 2, 3, 4,
1, 2, 3, 4,
1, 1, 3, 4,
1, 2, 1, 4,
1, 2, 3, 1,
1, 1, 3, 4,
1, 2, 1, 4,
1, 2, 3, 1,
1, 1, 1, 4,
1, 1, 3, 1,
1, 2, 1, 1,
1, 1, 1, 4,
1, 1, 3, 1,
1, 2, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1,
2, 3, 2, 3,
2, 3, 1, 3,
2, 3, 2, 1,
2, 3, 1, 1,
1, 2, 3, 3,
1, 2, 3, 3,
1, 2, 3, 1,
1, 2, 3, 1,
1, 4, 2, 3,
1, 4, 2, 3,
1, 1, 2, 3,
1, 1, 2, 3,
2, 3, 2, 3,
2, 3, 1, 4,
2, 3, 1, 1,
4, 2, 3, 1,
1, 2, 3, 1,
1, 2, 3, 4
])
arr = np.zeros(expected.shape)
kernel[1, 1](arr)
np.testing.assert_almost_equal(arr, expected)
def test_vector_type_alias(self):
"""Tests that `cuda.<vector_type.alias>` are importable and
that is the same as `cuda.<vector_type.name>`.
`test_fancy_creation_readout` only test vector types imported
with its name. This test makes sure that construction with
objects imported with alias should work the same.
"""
for vty in vector_types.values():
for alias in vty.user_facing_object.aliases:
with self.subTest(vty=vty.name, alias=alias):
self.assertEqual(
id(getattr(cuda, vty.name)), id(getattr(cuda, alias))
)
| TestCudaVectorType |
python | django__django | tests/admin_filters/tests.py | {
"start": 7847,
"end": 7938
} | class ____(ModelAdmin):
list_filter = (NotNinetiesListFilter,)
| NotNinetiesListFilterAdmin |
python | kamyu104__LeetCode-Solutions | Python/widest-vertical-area-between-two-points-containing-no-points.py | {
"start": 52,
"end": 337
} | class ____(object):
def maxWidthOfVerticalArea(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
sorted_x = sorted({x for x, y in points})
return max([b-a for a, b in itertools.izip(sorted_x, sorted_x[1:])] + [0])
| Solution |
python | doocs__leetcode | lcof2/剑指 Offer II 102. 加减的目标值/Solution4.py | {
"start": 0,
"end": 358
} | class ____:
def findTargetSumWays(self, nums: List[int], target: int) -> int:
@cache
def dfs(i, t):
if i == n:
if t == target:
return 1
return 0
return dfs(i + 1, t + nums[i]) + dfs(i + 1, t - nums[i])
ans, n = 0, len(nums)
return dfs(0, 0)
| Solution |
python | joke2k__faker | faker/providers/isbn/isbn.py | {
"start": 148,
"end": 492
} | class ____:
def __init__(
self,
ean: Optional[str] = None,
group: Optional[str] = None,
registrant: Optional[str] = None,
publication: Optional[str] = None,
) -> None:
self.ean = ean
self.group = group
self.registrant = registrant
self.publication = publication
| ISBN |
python | python-excel__xlrd | xlrd/formatting.py | {
"start": 41430,
"end": 41951
} | class ____(BaseObject, EqNeAttrs):
"""
A collection of the background-related attributes of an ``XF`` record.
Items correspond to those in the Excel UI's Format -> Cells -> Patterns tab.
An explanations of "colour index" is given in :ref:`palette`.
.. versionadded:: 0.6.1
"""
#: See section 3.11 of the OOo docs.
fill_pattern = 0
#: See section 3.11 of the OOo docs.
background_colour_index = 0
#: See section 3.11 of the OOo docs.
pattern_colour_index = 0
| XFBackground |
python | jazzband__django-oauth-toolkit | tests/test_auth_backends.py | {
"start": 798,
"end": 1464
} | class ____(TestCase):
"""
Base class for cases in this module
"""
factory = RequestFactory()
@classmethod
def setUpTestData(cls):
cls.user = UserModel.objects.create_user("user", "test@example.com", "123456")
cls.app = ApplicationModel.objects.create(
name="app",
client_type=ApplicationModel.CLIENT_CONFIDENTIAL,
authorization_grant_type=ApplicationModel.GRANT_CLIENT_CREDENTIALS,
user=cls.user,
)
cls.token = AccessTokenModel.objects.create(
user=cls.user, token="tokstr", application=cls.app, expires=now() + timedelta(days=365)
)
| BaseTest |
python | openai__openai-python | src/openai/_models.py | {
"start": 1610,
"end": 24114
} | class ____(pydantic.BaseModel):
if PYDANTIC_V1:
@property
@override
def model_fields_set(self) -> set[str]:
# a forwards-compat shim for pydantic v2
return self.__fields_set__ # type: ignore
class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated]
extra: Any = pydantic.Extra.allow # type: ignore
@override
def __repr_args__(self) -> ReprArgs:
# we don't want these attributes to be included when something like `rich.print` is used
return [arg for arg in super().__repr_args__() if arg[0] not in {"_request_id", "__exclude_fields__"}]
else:
model_config: ClassVar[ConfigDict] = ConfigDict(
extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true"))
)
if TYPE_CHECKING:
_request_id: Optional[str] = None
"""The ID of the request, returned via the X-Request-ID header. Useful for debugging requests and reporting issues to OpenAI.
This will **only** be set for the top-level response object, it will not be defined for nested objects. For example:
```py
completion = await client.chat.completions.create(...)
completion._request_id # req_id_xxx
completion.usage._request_id # raises `AttributeError`
```
Note: unlike other properties that use an `_` prefix, this property
*is* public. Unless documented otherwise, all other `_` prefix properties,
methods and modules are *private*.
"""
def to_dict(
self,
*,
mode: Literal["json", "python"] = "python",
use_api_names: bool = True,
exclude_unset: bool = True,
exclude_defaults: bool = False,
exclude_none: bool = False,
warnings: bool = True,
) -> dict[str, object]:
"""Recursively generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
By default, fields that were not set by the API will not be included,
and keys will match the API response, *not* the property names from the model.
For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property,
the output will use the `"fooBar"` key (unless `use_api_names=False` is passed).
Args:
mode:
If mode is 'json', the dictionary will only contain JSON serializable types. e.g. `datetime` will be turned into a string, `"2024-3-22T18:11:19.117000Z"`.
If mode is 'python', the dictionary may contain any Python objects. e.g. `datetime(2024, 3, 22)`
use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`.
exclude_unset: Whether to exclude fields that have not been explicitly set.
exclude_defaults: Whether to exclude fields that are set to their default value from the output.
exclude_none: Whether to exclude fields that have a value of `None` from the output.
warnings: Whether to log warnings when invalid fields are encountered. This is only supported in Pydantic v2.
"""
return self.model_dump(
mode=mode,
by_alias=use_api_names,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
warnings=warnings,
)
def to_json(
self,
*,
indent: int | None = 2,
use_api_names: bool = True,
exclude_unset: bool = True,
exclude_defaults: bool = False,
exclude_none: bool = False,
warnings: bool = True,
) -> str:
"""Generates a JSON string representing this model as it would be received from or sent to the API (but with indentation).
By default, fields that were not set by the API will not be included,
and keys will match the API response, *not* the property names from the model.
For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property,
the output will use the `"fooBar"` key (unless `use_api_names=False` is passed).
Args:
indent: Indentation to use in the JSON output. If `None` is passed, the output will be compact. Defaults to `2`
use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`.
exclude_unset: Whether to exclude fields that have not been explicitly set.
exclude_defaults: Whether to exclude fields that have the default value.
exclude_none: Whether to exclude fields that have a value of `None`.
warnings: Whether to show any warnings that occurred during serialization. This is only supported in Pydantic v2.
"""
return self.model_dump_json(
indent=indent,
by_alias=use_api_names,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
warnings=warnings,
)
@override
def __str__(self) -> str:
# mypy complains about an invalid self arg
return f"{self.__repr_name__()}({self.__repr_str__(', ')})" # type: ignore[misc]
# Override the 'construct' method in a way that supports recursive parsing without validation.
# Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836.
@classmethod
@override
def construct( # pyright: ignore[reportIncompatibleMethodOverride]
__cls: Type[ModelT],
_fields_set: set[str] | None = None,
**values: object,
) -> ModelT:
m = __cls.__new__(__cls)
fields_values: dict[str, object] = {}
config = get_model_config(__cls)
populate_by_name = (
config.allow_population_by_field_name
if isinstance(config, _ConfigProtocol)
else config.get("populate_by_name")
)
if _fields_set is None:
_fields_set = set()
model_fields = get_model_fields(__cls)
for name, field in model_fields.items():
key = field.alias
if key is None or (key not in values and populate_by_name):
key = name
if key in values:
fields_values[name] = _construct_field(value=values[key], field=field, key=key)
_fields_set.add(name)
else:
fields_values[name] = field_get_default(field)
extra_field_type = _get_extra_fields_type(__cls)
_extra = {}
for key, value in values.items():
if key not in model_fields:
parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value
if PYDANTIC_V1:
_fields_set.add(key)
fields_values[key] = parsed
else:
_extra[key] = parsed
object.__setattr__(m, "__dict__", fields_values)
if PYDANTIC_V1:
# init_private_attributes() does not exist in v2
m._init_private_attributes() # type: ignore
# copied from Pydantic v1's `construct()` method
object.__setattr__(m, "__fields_set__", _fields_set)
else:
# these properties are copied from Pydantic's `model_construct()` method
object.__setattr__(m, "__pydantic_private__", None)
object.__setattr__(m, "__pydantic_extra__", _extra)
object.__setattr__(m, "__pydantic_fields_set__", _fields_set)
return m
if not TYPE_CHECKING:
# type checkers incorrectly complain about this assignment
# because the type signatures are technically different
# although not in practice
model_construct = construct
if PYDANTIC_V1:
# we define aliases for some of the new pydantic v2 methods so
# that we can just document these methods without having to specify
# a specific pydantic version as some users may not know which
# pydantic version they are currently using
@override
def model_dump(
self,
*,
mode: Literal["json", "python"] | str = "python",
include: IncEx | None = None,
exclude: IncEx | None = None,
context: Any | None = None,
by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
exclude_computed_fields: bool = False,
round_trip: bool = False,
warnings: bool | Literal["none", "warn", "error"] = True,
fallback: Callable[[Any], Any] | None = None,
serialize_as_any: bool = False,
) -> dict[str, Any]:
"""Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump
Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
Args:
mode: The mode in which `to_python` should run.
If mode is 'json', the output will only contain JSON serializable types.
If mode is 'python', the output may contain non-JSON-serializable Python objects.
include: A set of fields to include in the output.
exclude: A set of fields to exclude from the output.
context: Additional context to pass to the serializer.
by_alias: Whether to use the field's alias in the dictionary key if defined.
exclude_unset: Whether to exclude fields that have not been explicitly set.
exclude_defaults: Whether to exclude fields that are set to their default value.
exclude_none: Whether to exclude fields that have a value of `None`.
exclude_computed_fields: Whether to exclude computed fields.
While this can be useful for round-tripping, it is usually recommended to use the dedicated
`round_trip` parameter instead.
round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T].
warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors,
"error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
fallback: A function to call when an unknown value is encountered. If not provided,
a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised.
serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
Returns:
A dictionary representation of the model.
"""
if mode not in {"json", "python"}:
raise ValueError("mode must be either 'json' or 'python'")
if round_trip != False:
raise ValueError("round_trip is only supported in Pydantic v2")
if warnings != True:
raise ValueError("warnings is only supported in Pydantic v2")
if context is not None:
raise ValueError("context is only supported in Pydantic v2")
if serialize_as_any != False:
raise ValueError("serialize_as_any is only supported in Pydantic v2")
if fallback is not None:
raise ValueError("fallback is only supported in Pydantic v2")
if exclude_computed_fields != False:
raise ValueError("exclude_computed_fields is only supported in Pydantic v2")
dumped = super().dict( # pyright: ignore[reportDeprecated]
include=include,
exclude=exclude,
by_alias=by_alias if by_alias is not None else False,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
return cast("dict[str, Any]", json_safe(dumped)) if mode == "json" else dumped
@override
def model_dump_json(
self,
*,
indent: int | None = None,
ensure_ascii: bool = False,
include: IncEx | None = None,
exclude: IncEx | None = None,
context: Any | None = None,
by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
exclude_computed_fields: bool = False,
round_trip: bool = False,
warnings: bool | Literal["none", "warn", "error"] = True,
fallback: Callable[[Any], Any] | None = None,
serialize_as_any: bool = False,
) -> str:
"""Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json
Generates a JSON representation of the model using Pydantic's `to_json` method.
Args:
indent: Indentation to use in the JSON output. If None is passed, the output will be compact.
include: Field(s) to include in the JSON output. Can take either a string or set of strings.
exclude: Field(s) to exclude from the JSON output. Can take either a string or set of strings.
by_alias: Whether to serialize using field aliases.
exclude_unset: Whether to exclude fields that have not been explicitly set.
exclude_defaults: Whether to exclude fields that have the default value.
exclude_none: Whether to exclude fields that have a value of `None`.
round_trip: Whether to use serialization/deserialization between JSON and class instance.
warnings: Whether to show any warnings that occurred during serialization.
Returns:
A JSON string representation of the model.
"""
if round_trip != False:
raise ValueError("round_trip is only supported in Pydantic v2")
if warnings != True:
raise ValueError("warnings is only supported in Pydantic v2")
if context is not None:
raise ValueError("context is only supported in Pydantic v2")
if serialize_as_any != False:
raise ValueError("serialize_as_any is only supported in Pydantic v2")
if fallback is not None:
raise ValueError("fallback is only supported in Pydantic v2")
if ensure_ascii != False:
raise ValueError("ensure_ascii is only supported in Pydantic v2")
if exclude_computed_fields != False:
raise ValueError("exclude_computed_fields is only supported in Pydantic v2")
return super().json( # type: ignore[reportDeprecated]
indent=indent,
include=include,
exclude=exclude,
by_alias=by_alias if by_alias is not None else False,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
def _construct_field(value: object, field: FieldInfo, key: str) -> object:
if value is None:
return field_get_default(field)
if PYDANTIC_V1:
type_ = cast(type, field.outer_type_) # type: ignore
else:
type_ = field.annotation # type: ignore
if type_ is None:
raise RuntimeError(f"Unexpected field type is None for {key}")
return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None))
def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None:
    """Return the declared type for extra fields on *cls*, if one exists.

    Only supported on Pydantic v2, where the information lives in the
    core schema; returns ``None`` in every other case.
    """
    if PYDANTIC_V1:
        # TODO: not implemented for Pydantic v1
        return None
    schema = cls.__pydantic_core_schema__
    if schema["type"] != "model":
        return None
    fields = schema["schema"]
    if fields["type"] != "model-fields":
        return None
    extras = fields.get("extras_schema")
    if extras and "cls" in extras:
        # mypy can't narrow the type
        return extras["cls"]  # type: ignore[no-any-return]
    return None
def is_basemodel(type_: type) -> bool:
    """Returns whether or not the given type is either a `BaseModel` or a union of `BaseModel`"""
    if is_union(type_):
        # A union counts if any of its variants is (recursively) a BaseModel.
        return any(is_basemodel(variant) for variant in get_args(type_))
    return is_basemodel_type(type_)
def is_basemodel_type(type_: type) -> TypeGuard[type[BaseModel] | type[GenericModel]]:
    """Narrow *type_* to a `BaseModel`/`GenericModel` subclass, looking through generics."""
    # For subscripted generics the origin is the actual runtime class.
    origin = get_origin(type_) or type_
    return inspect.isclass(origin) and issubclass(origin, (BaseModel, GenericModel))
def build(
    base_model_cls: Callable[P, _BaseModelT],
    *args: P.args,
    **kwargs: P.kwargs,
) -> _BaseModelT:
    """Construct a BaseModel class without validation.
    This is useful for cases where you need to instantiate a `BaseModel`
    from an API response as this provides type-safe params which isn't supported
    by helpers like `construct_type()`.
    ```py
    build(MyModel, my_field_a="foo", my_field_b=123)
    ```
    Raises:
        TypeError: if any positional arguments are passed; fields must be
            given as keyword arguments so they can be mapped to field names.
    """
    if args:
        raise TypeError(
            "Received positional arguments which are not supported; Keyword arguments must be used instead",
        )
    return cast(_BaseModelT, construct_type(type_=base_model_cls, value=kwargs))
def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T:
    """Loose coercion to the expected type with construction of nested values.
    Note: the returned value from this function is not guaranteed to match the
    given type.
    """
    # The cast only satisfies the type checker; `construct_type` performs no
    # runtime validation, hence "unchecked".
    return cast(_T, construct_type(value=value, type_=type_))
def construct_type(*, value: object, type_: object, metadata: Optional[List[Any]] = None) -> object:
    """Loose coercion to the expected type with construction of nested values.
    If the given value does not match the expected type then it is returned as-is.
    """
    # store a reference to the original type we were given before we extract any inner
    # types so that we can properly resolve forward references in `TypeAliasType` annotations
    original_type = None
    # we allow `object` as the input type because otherwise, passing things like
    # `Literal['value']` will be reported as a type error by type checkers
    type_ = cast("type[object]", type_)
    if is_type_alias_type(type_):
        original_type = type_  # type: ignore[unreachable]
        type_ = type_.__value__  # type: ignore[unreachable]
    # unwrap `Annotated[T, ...]` -> `T`
    # (explicitly passed `metadata` takes precedence over Annotated metadata)
    if metadata is not None and len(metadata) > 0:
        meta: tuple[Any, ...] = tuple(metadata)
    elif is_annotated_type(type_):
        meta = get_args(type_)[1:]
        type_ = extract_type_arg(type_, 0)
    else:
        meta = tuple()
    # we need to use the origin class for any types that are subscripted generics
    # e.g. Dict[str, object]
    origin = get_origin(type_) or type_
    args = get_args(type_)
    if is_union(origin):
        try:
            # strict validation first; on any failure fall through to the
            # loose, construction-based strategies below
            return validate_type(type_=cast("type[object]", original_type or type_), value=value)
        except Exception:
            pass
        # if the type is a discriminated union then we want to construct the right variant
        # in the union, even if the data doesn't match exactly, otherwise we'd break code
        # that relies on the constructed class types, e.g.
        #
        # class FooType:
        #   kind: Literal['foo']
        #   value: str
        #
        # class BarType:
        #   kind: Literal['bar']
        #   value: int
        #
        # without this block, if the data we get is something like `{'kind': 'bar', 'value': 'foo'}` then
        # we'd end up constructing `FooType` when it should be `BarType`.
        discriminator = _build_discriminated_union_meta(union=type_, meta_annotations=meta)
        if discriminator and is_mapping(value):
            variant_value = value.get(discriminator.field_alias_from or discriminator.field_name)
            if variant_value and isinstance(variant_value, str):
                variant_type = discriminator.mapping.get(variant_value)
                if variant_type:
                    return construct_type(type_=variant_type, value=value)
        # if the data is not valid, use the first variant that doesn't fail while deserializing
        for variant in args:
            try:
                return construct_type(value=value, type_=variant)
            except Exception:
                continue
        raise RuntimeError(f"Could not convert data into a valid instance of {type_}")
    if origin == dict:
        if not is_mapping(value):
            return value
        _, items_type = get_args(type_)  # Dict[_, items_type]
        return {key: construct_type(value=item, type_=items_type) for key, item in value.items()}
    if (
        not is_literal_type(type_)
        and inspect.isclass(origin)
        and (issubclass(origin, BaseModel) or issubclass(origin, GenericModel))
    ):
        if is_list(value):
            return [cast(Any, type_).construct(**entry) if is_mapping(entry) else entry for entry in value]
        if is_mapping(value):
            if issubclass(type_, BaseModel):
                return type_.construct(**value)  # type: ignore[arg-type]
            return cast(Any, type_).construct(**value)
    if origin == list:
        if not is_list(value):
            return value
        inner_type = args[0]  # List[inner_type]
        return [construct_type(value=entry, type_=inner_type) for entry in value]
    if origin == float:
        if isinstance(value, int):
            coerced = float(value)
            # only keep the coercion when it round-trips exactly; very large
            # ints can lose precision as floats, in which case the original
            # int is returned unchanged
            if coerced != value:
                return value
            return coerced
        return value
    if type_ == datetime:
        # best-effort parse; unparseable input is returned as-is
        try:
            return parse_datetime(value)  # type: ignore
        except Exception:
            return value
    if type_ == date:
        # best-effort parse; unparseable input is returned as-is
        try:
            return parse_date(value)  # type: ignore
        except Exception:
            return value
    return value
@runtime_checkable
| BaseModel |
python | ray-project__ray | python/ray/serve/_private/test_utils.py | {
"start": 16988,
"end": 18139
class ____:
    """Minimal test double for a gRPC servicer context.

    Serves canned values from the read-only accessors and records values
    passed to the setters so tests can assert on them afterwards.
    """

    def __init__(self):
        self._auth_context = {"key": "value"}
        self._peer = "peer"
        self._peer_identities = b"peer_identities"
        self._peer_identity_key = "peer_identity_key"
        self._code = None
        self._details = None
        self._trailing_metadata = []
        # NOTE: the original initialized this attribute twice (first to
        # [("key", "value")], then to []); only the final empty list was ever
        # observable, so the dead first assignment has been removed.
        self._invocation_metadata = []

    def auth_context(self):
        return self._auth_context

    def code(self):
        return self._code

    def details(self):
        return self._details

    def peer(self):
        return self._peer

    def peer_identities(self):
        return self._peer_identities

    def peer_identity_key(self):
        return self._peer_identity_key

    def trailing_metadata(self):
        return self._trailing_metadata

    def set_code(self, code):
        self._code = code

    def set_details(self, details):
        self._details = details

    def set_trailing_metadata(self, trailing_metadata):
        self._trailing_metadata = trailing_metadata

    def invocation_metadata(self):
        return self._invocation_metadata
python | Textualize__textual | src/textual/events.py | {
"start": 8935,
"end": 15979
class ____(InputEvent, bubble=True):
    """Sent in response to a mouse event.
    - [X] Bubbles
    - [ ] Verbose
    Args:
        widget: The widget under the mouse.
        x: The relative x coordinate.
        y: The relative y coordinate.
        delta_x: Change in x since the last message.
        delta_y: Change in y since the last message.
        button: Indexed of the pressed button.
        shift: True if the shift key is pressed.
        meta: True if the meta key is pressed.
        ctrl: True if the ctrl key is pressed.
        screen_x: The absolute x coordinate.
        screen_y: The absolute y coordinate.
        style: The Rich Style under the mouse cursor.
    """
    # __slots__: fixed attribute set, no per-instance __dict__.
    __slots__ = [
        "widget",
        "_x",
        "_y",
        "_delta_x",
        "_delta_y",
        "button",
        "shift",
        "meta",
        "ctrl",
        "_screen_x",
        "_screen_y",
        "_style",
    ]
    def __init__(
        self,
        widget: Widget | None,
        x: float,
        y: float,
        delta_x: int,
        delta_y: int,
        button: int,
        shift: bool,
        meta: bool,
        ctrl: bool,
        screen_x: float | None = None,
        screen_y: float | None = None,
        style: Style | None = None,
    ) -> None:
        super().__init__()
        self.widget: Widget | None = widget
        """The widget under the mouse at the time of a click."""
        self._x = x
        """The relative x coordinate."""
        self._y = y
        """The relative y coordinate."""
        self._delta_x = delta_x
        """Change in x since the last message."""
        self._delta_y = delta_y
        """Change in y since the last message."""
        self.button = button
        """Indexed of the pressed button."""
        self.shift = shift
        """`True` if the shift key is pressed."""
        self.meta = meta
        """`True` if the meta key is pressed."""
        self.ctrl = ctrl
        """`True` if the ctrl key is pressed."""
        # Screen coordinates default to the relative coordinates when omitted.
        self._screen_x = x if screen_x is None else screen_x
        """The absolute x coordinate."""
        self._screen_y = y if screen_y is None else screen_y
        """The absolute y coordinate."""
        self._style = style or Style()
    @property
    def x(self) -> int:
        """The relative X coordinate of the cell under the mouse."""
        return int(self._x)
    @property
    def y(self) -> int:
        """The relative Y coordinate of the cell under the mouse."""
        return int(self._y)
    @property
    def delta_x(self) -> int:
        """Change in `x` since last message."""
        return self._delta_x
    @property
    def delta_y(self) -> int:
        """Change in `y` since the last message."""
        return self._delta_y
    @property
    def screen_x(self) -> int:
        """X coordinate of the cell relative to top left of screen."""
        return int(self._screen_x)
    @property
    def screen_y(self) -> int:
        """Y coordinate of the cell relative to top left of screen."""
        return int(self._screen_y)
    @property
    def pointer_x(self) -> float:
        """The relative X coordinate of the pointer."""
        return self._x
    @property
    def pointer_y(self) -> float:
        """The relative Y coordinate of the pointer."""
        return self._y
    @property
    def pointer_screen_x(self) -> float:
        """The X coordinate of the pointer relative to the screen."""
        return self._screen_x
    @property
    def pointer_screen_y(self) -> float:
        """The Y coordinate of the pointer relative to the screen."""
        return self._screen_y
    @classmethod
    def from_event(
        cls: Type[MouseEventT], widget: Widget, event: MouseEvent
    ) -> MouseEventT:
        """Create a new event of type `cls`, copying the state of `event` onto `widget`."""
        new_event = cls(
            widget,
            event._x,
            event._y,
            event._delta_x,
            event._delta_y,
            event.button,
            event.shift,
            event.meta,
            event.ctrl,
            event._screen_x,
            event._screen_y,
            event._style,
        )
        return new_event
    def __rich_repr__(self) -> rich.repr.Result:
        yield self.widget
        yield "x", self.x
        yield "y", self.y
        yield "pointer_x", self.pointer_x
        yield "pointer_y", self.pointer_y
        yield "delta_x", self.delta_x, 0
        yield "delta_y", self.delta_y, 0
        if self.screen_x != self.x:
            yield "screen_x", self._screen_x
        if self.screen_y != self.y:
            yield "screen_y", self._screen_y
        yield "button", self.button, 0
        yield "shift", self.shift, False
        yield "meta", self.meta, False
        yield "ctrl", self.ctrl, False
        if self.style:
            yield "style", self.style
    @property
    def control(self) -> Widget | None:
        """The widget associated with this event (same object as `widget`)."""
        return self.widget
    @property
    def offset(self) -> Offset:
        """The mouse coordinate as an offset.
        Returns:
            Mouse coordinate.
        """
        return Offset(self.x, self.y)
    @property
    def screen_offset(self) -> Offset:
        """Mouse coordinate relative to the screen."""
        return Offset(self.screen_x, self.screen_y)
    @property
    def delta(self) -> Offset:
        """Mouse coordinate delta (change since last event)."""
        return Offset(self.delta_x, self.delta_y)
    @property
    def style(self) -> Style:
        """The (Rich) Style under the cursor."""
        return self._style or Style()
    @style.setter
    def style(self, style: Style) -> None:
        self._style = style
    def get_content_offset(self, widget: Widget) -> Offset | None:
        """Get offset within a widget's content area, or None if offset is not in content (i.e. padding or border).
        Args:
            widget: Widget receiving the event.
        Returns:
            An offset where the origin is at the top left of the content area.
        """
        if self.screen_offset not in widget.content_region:
            return None
        return self.get_content_offset_capture(widget)
    def get_content_offset_capture(self, widget: Widget) -> Offset:
        """Get offset from a widget's content area.
        This method works even if the offset is outside the widget content region.
        Args:
            widget: Widget receiving the event.
        Returns:
            An offset where the origin is at the top left of the content area.
        """
        return self.offset - widget.gutter.top_left
    def _apply_offset(self, x: int, y: int) -> MouseEvent:
        """Return a copy of this event with the relative coordinates shifted
        by (x, y); screen coordinates and all other state are preserved."""
        return self.__class__(
            self.widget,
            x=self._x + x,
            y=self._y + y,
            delta_x=self._delta_x,
            delta_y=self._delta_y,
            button=self.button,
            shift=self.shift,
            meta=self.meta,
            ctrl=self.ctrl,
            screen_x=self._screen_x,
            screen_y=self._screen_y,
            style=self.style,
        )
@rich.repr.auto
| MouseEvent |
python | kamyu104__LeetCode-Solutions | Python/letter-tile-possibilities.py | {
"start": 52,
"end": 1537
class ____(object):
    def numTilePossibilities(self, tiles):
        """
        :type tiles: str
        :rtype: int
        """
        # fact[i] = i!, stored as floats because the EGF coefficients
        # computed below are fractional
        fact = [0.0]*(len(tiles)+1)
        fact[0] = 1.0
        for i in xrange(1, len(tiles)+1):
            fact[i] = fact[i-1]*i
        count = collections.Counter(tiles)
        # 1. we can represent each alphabet 1..26 as generating functions:
        #    G1(x) = 1 + x^1/1! + x^2/2! + x^3/3! + ... + x^count1/count1!
        #    G2(x) = 1 + x^1/1! + x^2/2! + x^3/3! + ... + x^count2/count2!
        #    ...
        #    G26(x) = 1 + x^1/1! + x^2/2! + x^3/3! + ... + x^count26/count26!
        #
        # 2. let G1(x)*G2(x)*...*G26(x) = c0 + c1*x1 + ... + ck*x^k, k is the max number s.t. ck != 0
        #    => ci (1 <= i <= k) is the number we need to divide when permuting i letters
        #    => the answer will be : c1*1! + c2*2! + ... + ck*k!
        coeff = [0.0]*(len(tiles)+1)
        coeff[0] = 1.0
        for i in count.itervalues():  # Python 2: itervalues()
            # multiply the running EGF product by this letter's EGF
            new_coeff = [0.0]*(len(tiles)+1)
            for j in xrange(len(coeff)):
                for k in xrange(i+1):
                    if k+j >= len(new_coeff):
                        break
                    new_coeff[j+k] += coeff[j]*1.0/fact[k]
            coeff = new_coeff
        result = 0
        for i in xrange(1, len(coeff)):
            # round() absorbs accumulated floating point error before casting
            # back to an integer count of sequences
            result += int(round(coeff[i]*fact[i]))
        return result
# Time: O(r), r is the value of result
# Space: O(n)
| Solution |
python | kamyu104__LeetCode-Solutions | Python/find-all-lonely-numbers-in-the-array.py | {
"start": 42,
"end": 304
class ____(object):
    def findLonely(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        freq = collections.Counter(nums)

        def is_lonely(v):
            # Lonely: occurs exactly once and neither neighbor value exists.
            return freq[v] == 1 and v-1 not in freq and v+1 not in freq

        return [v for v in nums if is_lonely(v)]
| Solution |
python | sqlalchemy__sqlalchemy | test/orm/test_dynamic.py | {
"start": 24172,
"end": 31497
class ____(
    _WriteOnlyFixture, _fixtures.FixtureTest, AssertsCompiledSQL
):
    __dialect__ = "default"
    def test_iteration_error(self, user_address_fixture):
        """A write_only collection refuses in-place iteration."""
        User, Address = user_address_fixture()
        sess = fixture_session()
        u = sess.get(User, 8)
        with expect_raises_message(
            TypeError,
            "WriteOnly collections don't support iteration in-place; to "
            "query for collection items",
        ):
            list(u.addresses)
    def test_order_by(self, user_address_fixture):
        """order_by() applied to the collection's select() is honored."""
        User, Address = user_address_fixture()
        sess = fixture_session()
        u = sess.get(User, 8)
        eq_(
            list(
                sess.scalars(
                    u.addresses.select().order_by(desc(Address.email_address))
                )
            ),
            [
                Address(email_address="ed@wood.com"),
                Address(email_address="ed@lala.com"),
                Address(email_address="ed@bettyboop.com"),
            ],
        )
    def test_configured_order_by(self, user_address_fixture):
        """order_by configured on the relationship applies, and can be
        cancelled with order_by(None) then optionally replaced."""
        addresses = self.tables.addresses
        User, Address = user_address_fixture(
            addresses_args={"order_by": addresses.c.email_address.desc()}
        )
        sess = fixture_session()
        u = sess.get(User, 8)
        eq_(
            list(sess.scalars(u.addresses.select())),
            [
                Address(email_address="ed@wood.com"),
                Address(email_address="ed@lala.com"),
                Address(email_address="ed@bettyboop.com"),
            ],
        )
        # test cancellation of None, replacement with something else
        eq_(
            list(
                sess.scalars(
                    u.addresses.select()
                    .order_by(None)
                    .order_by(Address.email_address)
                )
            ),
            [
                Address(email_address="ed@bettyboop.com"),
                Address(email_address="ed@lala.com"),
                Address(email_address="ed@wood.com"),
            ],
        )
        # test cancellation of None, replacement with nothing
        eq_(
            set(sess.scalars(u.addresses.select().order_by(None))),
            {
                Address(email_address="ed@bettyboop.com"),
                Address(email_address="ed@lala.com"),
                Address(email_address="ed@wood.com"),
            },
        )
    def test_secondary_as_join(self):
        # test [ticket:4349]
        User, users = self.classes.User, self.tables.users
        items, orders, order_items, Item = (
            self.tables.items,
            self.tables.orders,
            self.tables.order_items,
            self.classes.Item,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                "items": relationship(
                    Item, secondary=order_items.join(orders), lazy="write_only"
                )
            },
        )
        item_mapper = self.mapper_registry.map_imperatively(Item, items)
        sess = fixture_session()
        u1 = sess.query(User).first()
        dyn = u1.items.select()
        # test for #7868
        eq_(dyn._from_obj[0]._annotations["parententity"], item_mapper)
        self.assert_compile(
            u1.items.select(),
            "SELECT items.id, "
            "items.description "
            "FROM items, order_items JOIN orders "
            "ON orders.id = order_items.order_id "
            "WHERE :param_1 = orders.user_id "
            "AND items.id = order_items.item_id",
            use_default_dialect=True,
        )
    def test_secondary_as_join_complex_entity(self, decl_base):
        """integration test for #7868"""
        class GrandParent(decl_base):
            __tablename__ = "grandparent"
            id = Column(Integer, primary_key=True)
            grand_children = relationship(
                "Child", secondary="parent", viewonly=True, lazy="write_only"
            )
        class Parent(decl_base):
            __tablename__ = "parent"
            id = Column(Integer, primary_key=True)
            grand_parent_id = Column(
                Integer, ForeignKey("grandparent.id"), nullable=False
            )
        class Child(decl_base):
            __tablename__ = "child"
            id = Column(Integer, primary_key=True)
            type = Column(String)
            parent_id = Column(
                Integer, ForeignKey("parent.id"), nullable=False
            )
            __mapper_args__ = {
                "polymorphic_on": type,
                "polymorphic_identity": "unknown",
                "with_polymorphic": "*",
            }
        class SubChild(Child):
            __tablename__ = "subchild"
            id = Column(Integer, ForeignKey("child.id"), primary_key=True)
            __mapper_args__ = {
                "polymorphic_identity": "sub",
            }
        gp = GrandParent(id=1)
        make_transient_to_detached(gp)
        self.assert_compile(
            gp.grand_children.select().filter_by(id=1),
            "SELECT child.id, child.type, "
            "child.parent_id, subchild.id AS id_1 "
            "FROM child LEFT OUTER JOIN subchild "
            "ON child.id = subchild.id, parent "
            "WHERE :param_1 = parent.grand_parent_id "
            "AND parent.id = child.parent_id AND child.id = :id_2",
            {"id_2": 1},
        )
    def test_secondary_doesnt_interfere_w_join_to_fromlist(self):
        # tests that the "secondary" being added to the FROM
        # as part of [ticket:4349] does not prevent a subsequent join to
        # an entity that does not provide any "left side". Query right now
        # does not know how to join() like this unambiguously if _from_obj is
        # more than one element long.
        Order, orders = self.classes.Order, self.tables.orders
        items, order_items, Item = (
            self.tables.items,
            self.tables.order_items,
            self.classes.Item,
        )
        item_keywords = self.tables.item_keywords
        class ItemKeyword:
            pass
        self.mapper_registry.map_imperatively(
            Order,
            orders,
            properties={
                "items": relationship(
                    Item, secondary=order_items, lazy="write_only"
                )
            },
        )
        self.mapper_registry.map_imperatively(
            ItemKeyword,
            item_keywords,
            primary_key=[item_keywords.c.item_id, item_keywords.c.keyword_id],
        )
        self.mapper_registry.map_imperatively(
            Item,
            items,
            properties={"item_keywords": relationship(ItemKeyword)},
        )
        sess = fixture_session()
        order = sess.query(Order).first()
        self.assert_compile(
            order.items.select().join(ItemKeyword),
            "SELECT items.id, "
            "items.description "
            "FROM items "
            "JOIN item_keywords ON items.id = item_keywords.item_id, "
            "order_items "
            "WHERE :param_1 = order_items.order_id "
            "AND items.id = order_items.item_id",
            use_default_dialect=True,
        )
| WriteOnlyTest |
python | pytorch__pytorch | torch/_dynamo/comptime.py | {
"start": 1699,
"end": 6034
class ____:
    """
    A ComptimeVar represents a Python value, at some particular point
    in time, in the Python code we are symbolically evaluating with
    torchdynamo. This must be distinguished from a runtime value, as
    at compile-time there are some properties of the variable we
    do not know (for example, if the ComptimeVar represents a Tensor,
    we only know metadata about the tensor; we do NOT know what the
    actual data in the Tensor is.)
    """
    def __init__(self, v: VariableTracker) -> None:
        # Double leading underscore triggers name mangling; external access
        # goes through _i_will_not_complain_if_bc_breaks_VariableTracker().
        self.__variable = v
    def as_proxy(self) -> Union[VariableTracker, Sequence[VariableTracker]]:
        """
        Returns an fx.Proxy (or tuple/list of fx.Proxy) representing
        this variable in the FX graph we are assembling to pass
        to the user compiler.
        This method only works for variables we actually track in
        the FX graph, aka Tensors (and ints, if you are compiling
        with dynamic shapes). In particular, if you have a list
        or tuple of tensors, you will get a list/tuple of proxies
        (not a single proxy representing the entire list/tuple).
        """
        return self.__variable.as_proxy()
    def is_proxy(self) -> bool:
        """
        Returns True if as_proxy() would succeed.
        """
        return self.__variable.is_proxy()
    def as_fake(self) -> Union[FakeTensor, torch.SymInt]:
        """
        Returns a "fake" value (either a FakeTensor or a SymInt)
        representing the variable in question. This only works
        for variables that denote Tensor or int. You can use
        this to query metadata; e.g., v.as_fake().size(0) will
        tell you the compile-time known size of the tensor.
        WARNING: Do NOT mutate the returned tensor.
        """
        return self.__variable.as_proxy().node.meta["example_value"]
    def size(self, dim: Optional[int] = None) -> Union[int, torch.SymInt]:
        """
        Returns the size of the tensor (if dim is None) or the size
        at the dimension dim. The returned size may be a SymInt.
        """
        return self.as_fake().size(dim)  # type: ignore[union-attr, return-value]
    def python_type(self) -> type:
        """
        Returns what type(v) would have returned for the variable
        at compile time.
        """
        return self.__variable.python_type()
    def as_python_constant(self) -> Any:
        """
        Returns the Python value this variable would have, but only if it is
        completely known at compile-time (e.g., it is constant).
        WARNING: Do NOT mutate the returned constant. The returned constant
        may or may not correspond to the actual value this variable may take
        on at runtime; for example, if the variable in question is a constant
        list, we may return a copy of that list.
        """
        return self.__variable.as_python_constant()
    def is_python_constant(self) -> bool:
        """
        Returns True if as_python_constant would succeed.
        """
        return self.__variable.is_python_constant()
    def is_dynamic(self) -> bool:
        """
        Returns True if this is a SymNodeVariable whose symbolic expression
        still contains free symbols (i.e. its value is not statically known).
        """
        if isinstance(self.__variable, SymNodeVariable):
            fs = free_symbols(self.__variable.sym_num)
            return bool(fs)
        return False
    def force_static(self) -> None:
        """
        Forces that a value is static, inducing a guard on its specific value
        """
        if isinstance(self.__variable, SymNodeVariable):
            self.__variable.evaluate_expr()
        elif isinstance(self.__variable, ConstantVariable):
            # TODO: Maybe complain if this isn't a int/bool/float variable
            pass
        else:
            raise AssertionError(
                f"cannot force {self.__variable} ({type(self.__variable)}) static"
            )
    def _i_will_not_complain_if_bc_breaks_VariableTracker(self) -> VariableTracker:
        """
        Returns the internal data structure VariableTracker that Dynamo uses
        to represent variables at compile time. There are no BC guarantees on
        this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if you rely on
        it.
        """
        return self.__variable
    def __repr__(self) -> str:
        return self.__variable.debug_repr()
    # TODO: API for adding a custom guard
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 402758,
"end": 413586
class ____:
    """
    Verify that making a view of a non-contiguous array works as expected.
    """
    def test_smaller_dtype_multiple(self):
        # x is non-contiguous
        x = np.arange(10, dtype='<i4')[::2]
        with pytest.raises(ValueError,
                           match='the last axis must be contiguous'):
            x.view('<i2')
        # adding a trailing contiguous axis makes the view legal; each '<i4'
        # element splits into its (low, high) little-endian halves
        expected = [[0, 0], [2, 0], [4, 0], [6, 0], [8, 0]]
        assert_array_equal(x[:, np.newaxis].view('<i2'), expected)
    def test_smaller_dtype_not_multiple(self):
        # x is non-contiguous
        x = np.arange(5, dtype='<i4')[::2]
        with pytest.raises(ValueError,
                           match='the last axis must be contiguous'):
            x.view('S3')
        with pytest.raises(ValueError,
                           match='When changing to a smaller dtype'):
            x[:, np.newaxis].view('S3')
        # Make sure the problem is because of the dtype size
        expected = [[b''], [b'\x02'], [b'\x04']]
        assert_array_equal(x[:, np.newaxis].view('S4'), expected)
    def test_larger_dtype_multiple(self):
        # x is non-contiguous in the first dimension, contiguous in the last
        x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :]
        expected = np.array([[65536], [327684], [589832],
                             [851980], [1114128]], dtype='<i4')
        assert_array_equal(x.view('<i4'), expected)
    def test_larger_dtype_not_multiple(self):
        # x is non-contiguous in the first dimension, contiguous in the last
        x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :]
        with pytest.raises(ValueError,
                           match='When changing to a larger dtype'):
            x.view('S3')
        # Make sure the problem is because of the dtype size
        expected = [[b'\x00\x00\x01'], [b'\x04\x00\x05'], [b'\x08\x00\t'],
                    [b'\x0c\x00\r'], [b'\x10\x00\x11']]
        assert_array_equal(x.view('S4'), expected)
    def test_f_contiguous(self):
        # x is F-contiguous
        x = np.arange(4 * 3, dtype='<i4').reshape(4, 3).T
        with pytest.raises(ValueError,
                           match='the last axis must be contiguous'):
            x.view('<i2')
    def test_non_c_contiguous(self):
        # x is contiguous in axis=-1, but not C-contiguous in other axes
        x = np.arange(2 * 3 * 4, dtype='i1').\
            reshape(2, 3, 4).transpose(1, 0, 2)
        expected = [[[256, 770], [3340, 3854]],
                    [[1284, 1798], [4368, 4882]],
                    [[2312, 2826], [5396, 5910]]]
        assert_array_equal(x.view('<i2'), expected)
@pytest.mark.xfail(check_support_sve(), reason="gh-22982")
# Test various array sizes that hit different code paths in quicksort-avx512
@pytest.mark.parametrize("N", np.arange(1, 512))
@pytest.mark.parametrize("dtype", ['e', 'f', 'd'])
def test_sort_float(N, dtype):
    """Quicksort must agree with heapsort on floats containing nan/inf."""
    # Regular data with nan sprinkled
    np.random.seed(42)
    arr = -0.5 + np.random.sample(N).astype(dtype)
    arr[np.random.choice(arr.shape[0], 3)] = np.nan
    assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap'))
    # (2) with +INF
    infarr = np.inf * np.ones(N, dtype=dtype)
    infarr[np.random.choice(infarr.shape[0], 5)] = -1.0
    assert_equal(np.sort(infarr, kind='quick'), np.sort(infarr, kind='heap'))
    # (3) with -INF
    neginfarr = -np.inf * np.ones(N, dtype=dtype)
    neginfarr[np.random.choice(neginfarr.shape[0], 5)] = 1.0
    assert_equal(np.sort(neginfarr, kind='quick'),
                 np.sort(neginfarr, kind='heap'))
    # (4) with +/-INF
    infarr = np.inf * np.ones(N, dtype=dtype)
    infarr[np.random.choice(infarr.shape[0], (int)(N / 2))] = -np.inf
    assert_equal(np.sort(infarr, kind='quick'), np.sort(infarr, kind='heap'))
def test_sort_float16():
    """Quicksort must agree with heapsort on a shuffle of float16 values."""
    # NOTE(review): stop=65536 exceeds the int16 max; this appears to rely on
    # wraparound so that all 2**16 bit patterns are produced — confirm.
    arr = np.arange(65536, dtype=np.int16)
    # reinterpret the int16 bit patterns as float16 values
    temp = np.frombuffer(arr.tobytes(), dtype=np.float16)
    data = np.copy(temp)
    np.random.shuffle(data)
    # aliasing is fine here: np.sort below is out-of-place
    data_backup = data
    assert_equal(np.sort(data, kind='quick'),
                 np.sort(data_backup, kind='heap'))
@pytest.mark.parametrize("N", np.arange(1, 512))
@pytest.mark.parametrize("dtype", ['h', 'H', 'i', 'I', 'l', 'L'])
def test_sort_int(N, dtype):
    """Quicksort must agree with heapsort on ints with min/max sprinkled in."""
    # Random data with MAX and MIN sprinkled
    minv = np.iinfo(dtype).min
    maxv = np.iinfo(dtype).max
    arr = np.random.randint(low=minv, high=maxv - 1, size=N, dtype=dtype)
    arr[np.random.choice(arr.shape[0], 10)] = minv
    arr[np.random.choice(arr.shape[0], 10)] = maxv
    assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap'))
def test_sort_uint():
    """Quicksort must agree with heapsort on uint32 data containing the max value."""
    # Random data with NPY_MAX_UINT32 sprinkled
    rng = np.random.default_rng(42)
    N = 2047
    maxv = np.iinfo(np.uint32).max
    arr = rng.integers(low=0, high=maxv, size=N).astype('uint32')
    # Use the seeded generator here too (the original mixed in the unseeded
    # global np.random state), so the fixture is fully deterministic.
    arr[rng.choice(arr.shape[0], 10)] = maxv
    assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap'))
def test_private_get_ndarray_c_version():
    """_get_ndarray_c_version returns the C-API version as an int."""
    assert isinstance(_get_ndarray_c_version(), int)
@pytest.mark.parametrize("N", np.arange(1, 512))
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_argsort_float(N, dtype):
    """argsort(kind='quick') must yield a valid sorted order on float data."""
    rnd = np.random.RandomState(116112)
    # (1) Regular data with a few nan: doesn't use vectorized sort
    arr = -0.5 + rnd.random(N).astype(dtype)
    arr[rnd.choice(arr.shape[0], 3)] = np.nan
    assert_arg_sorted(arr, np.argsort(arr, kind='quick'))
    # (2) Random data with inf at the end of array
    # See: https://github.com/intel/x86-simd-sort/pull/39
    arr = -0.5 + rnd.rand(N).astype(dtype)
    arr[N - 1] = np.inf
    assert_arg_sorted(arr, np.argsort(arr, kind='quick'))
@pytest.mark.parametrize("N", np.arange(2, 512))
@pytest.mark.parametrize("dtype", [np.int32, np.uint32, np.int64, np.uint64])
def test_argsort_int(N, dtype):
    """argsort(kind='quick') must yield a valid sorted order on int data."""
    rnd = np.random.RandomState(1100710816)
    # (1) random data with min and max values
    minv = np.iinfo(dtype).min
    maxv = np.iinfo(dtype).max
    arr = rnd.randint(low=minv, high=maxv, size=N, dtype=dtype)
    i, j = rnd.choice(N, 2, replace=False)
    arr[i] = minv
    arr[j] = maxv
    assert_arg_sorted(arr, np.argsort(arr, kind='quick'))
    # (2) random data with max value at the end of array
    # See: https://github.com/intel/x86-simd-sort/pull/39
    arr = rnd.randint(low=minv, high=maxv, size=N, dtype=dtype)
    arr[N - 1] = maxv
    assert_arg_sorted(arr, np.argsort(arr, kind='quick'))
# Test large arrays that leverage openMP implementations from x86-simd-sort:
@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
def test_sort_largearrays(dtype):
    """Quicksort must agree with heapsort on a 1M-element float array."""
    N = 1000000
    rnd = np.random.RandomState(1100710816)
    arr = -0.5 + rnd.random(N).astype(dtype)
    assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap'))
# Test large arrays that leverage openMP implementations from x86-simd-sort:
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_argsort_largearrays(dtype):
    """argsort(kind='quick') must yield a valid order on a 1M-element array."""
    N = 1000000
    rnd = np.random.RandomState(1100710816)
    arr = -0.5 + rnd.random(N).astype(dtype)
    assert_arg_sorted(arr, np.argsort(arr, kind='quick'))
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
@pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object")
def test_gh_22683():
    """np.choose with out= must not leak references to the chosen objects (gh-22683)."""
    b = 777.68760986
    a = np.array([b] * 10000, dtype=object)
    refc_start = sys.getrefcount(b)
    np.choose(np.zeros(10000, dtype=int), [a], out=a)
    np.choose(np.zeros(10000, dtype=int), [a], out=a)
    refc_end = sys.getrefcount(b)
    # a small drift is tolerated; a leak would grow with the 10000 elements
    assert refc_end - refc_start < 10
def test_gh_24459():
    """Regression test for gh-24459: np.choose raises TypeError here."""
    a = np.zeros((50, 3), dtype=np.float64)
    with pytest.raises(TypeError):
        np.choose(a, [3, -1])
def test_gh_28206():
    """np.choose into a float out array must not emit a RuntimeWarning (gh-28206)."""
    a = np.arange(3)
    b = np.ones((3, 3), dtype=np.int64)
    out = np.array([np.nan, np.nan, np.nan])
    # escalate RuntimeWarning to an error so the regression would fail loudly
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        np.choose(a, b, out=out)
@pytest.mark.parametrize("N", np.arange(2, 512))
@pytest.mark.parametrize("dtype", [np.int16, np.uint16,
                                   np.int32, np.uint32, np.int64, np.uint64])
def test_partition_int(N, dtype):
    """partition/argpartition must place the k-th element correctly on int data."""
    rnd = np.random.RandomState(1100710816)
    # (1) random data with min and max values
    minv = np.iinfo(dtype).min
    maxv = np.iinfo(dtype).max
    arr = rnd.randint(low=minv, high=maxv, size=N, dtype=dtype)
    i, j = rnd.choice(N, 2, replace=False)
    arr[i] = minv
    arr[j] = maxv
    k = rnd.choice(N, 1)[0]
    assert_arr_partitioned(np.sort(arr)[k], k,
                           np.partition(arr, k, kind='introselect'))
    assert_arr_partitioned(np.sort(arr)[k], k,
                           arr[np.argpartition(arr, k, kind='introselect')])
    # (2) random data with max value at the end of array
    arr = rnd.randint(low=minv, high=maxv, size=N, dtype=dtype)
    arr[N - 1] = maxv
    assert_arr_partitioned(np.sort(arr)[k], k,
                           np.partition(arr, k, kind='introselect'))
    assert_arr_partitioned(np.sort(arr)[k], k,
                           arr[np.argpartition(arr, k, kind='introselect')])
@pytest.mark.parametrize("N", np.arange(2, 512))
@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
def test_partition_fp(N, dtype):
    """partition/argpartition must place the k-th element correctly on float data."""
    rnd = np.random.RandomState(1100710816)
    arr = -0.5 + rnd.random(N).astype(dtype)
    k = rnd.choice(N, 1)[0]
    assert_arr_partitioned(np.sort(arr)[k], k,
                           np.partition(arr, k, kind='introselect'))
    assert_arr_partitioned(np.sort(arr)[k], k,
                           arr[np.argpartition(arr, k, kind='introselect')])
    # Check that `np.inf < np.nan`
    # This follows np.sort
    arr[0] = np.nan
    arr[1] = np.inf
    o1 = np.partition(arr, -2, kind='introselect')
    o2 = arr[np.argpartition(arr, -2, kind='introselect')]
    for out in [o1, o2]:
        assert_(np.isnan(out[-1]))
        assert_equal(out[-2], np.inf)
def test_cannot_assign_data():
    """ndarray.data is read-only; assigning to it raises AttributeError."""
    a = np.arange(10)
    b = np.linspace(0, 1, 10)
    with pytest.raises(AttributeError):
        a.data = b.data
def test_insufficient_width():
    """
    If a 'width' parameter is passed into ``binary_repr`` that is insufficient
    to represent the number in base 2 (positive) or 2's complement (negative)
    form, the function used to silently ignore the parameter and return a
    representation using the minimal number of bits needed for the form in
    question. Such behavior is now considered unsafe from a user perspective
    and will raise an error.
    """
    # 10 needs 4 bits; width=2 is insufficient
    with pytest.raises(ValueError):
        np.binary_repr(10, width=2)
    # -5 needs 4 bits in two's complement; width=2 is insufficient
    with pytest.raises(ValueError):
        np.binary_repr(-5, width=2)
def test_npy_char_raises():
    """Using the deprecated NPY_CHAR dtype raises ValueError."""
    from numpy._core._multiarray_tests import npy_char_deprecation
    with pytest.raises(ValueError):
        npy_char_deprecation()
| TestViewDtype |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver9.py | {
"start": 503,
"end": 660
} | class ____(Generic[_T1]):
def __init__(self, value: _T1) -> None: ...
@classmethod
def get(cls: type[_T3]) -> type[_T3]:
return cls
| ClassA |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/docstring_code_examples_dynamic_line_width.py | {
"start": 6880,
"end": 8250
} | class ____(Abc, Def, Ghi, Jkl, Mno, Pqr, Stu, Vwx, Yz, A1, A2, A3, A4, A5):
def abcdefghijklmnopqrstuvwxyz(self, abc, ddef, ghi, jkl, mno, pqr, stu, vwx, yz, a1, a2, a3, a4):
def abcdefghijklmnopqrstuvwxyz(abc, ddef, ghi, jkl, mno, pqr, stu, vwx, yz, a1, a2, a3, a4):
# For 4 space indents, this is 89 columns, which is one
# more than the limit. Therefore, it should get wrapped for
# indent_width >= 4.
print(abc, ddef, ghi, jkl, mno, pqr, stu, vwx, yz, a1, a2, a3, a4, a5678)
return 5
self.x = doit( 5 )
```
Done.
"""
pass
# See: https://github.com/astral-sh/ruff/issues/9126
def doctest_extra_indent1():
"""
Docstring example containing a class.
Examples
--------
>>> @pl.api.register_dataframe_namespace("split")
... class SplitFrame:
... def __init__(self, df: pl.DataFrame):
... self._df = df
...
... def by_first_letter_of_column_values(self, col: str) -> list[pl.DataFrame]:
... return [
... self._df.filter(pl.col(col).str.starts_with(c))
... for c in sorted(
... set(df.select(pl.col(col).str.slice(0, 1)).to_series())
... )
... ]
"""
# See: https://github.com/astral-sh/ruff/issues/9126
| Abcdefghijklmopqrstuvwxyz |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/matrix_multiply.py | {
"start": 9787,
"end": 14559
} | class ____(MatrixMultiplyOperator):
"""Operator for general matrix multiplication (torch.matmul)."""
def __init__(self):
super().__init__("matmul")
self.weight = 500.0
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.matmul"
def can_produce(self, output_spec: Spec) -> bool:
"""Matmul can handle various tensor dimensions >= 1."""
if not isinstance(output_spec, TensorSpec):
return False
# Must have at least 1 dimension
if len(output_spec.size) < 1:
return False
# Matrix multiply doesn't work with bool or integer types for gradients
if output_spec.dtype in [
torch.bool,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
]:
return False
return True
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input specs for general matrix multiplication."""
if not isinstance(output_spec, TensorSpec):
raise ValueError("MatmulOperator can only produce TensorSpec outputs")
output_size = output_spec.size
output_dims = len(output_size)
dtypes = self._get_compatible_dtype(output_spec.dtype)
if output_dims == 1:
# Matrix-vector multiplication: (n,) = (k,) @ (k, n) or (n,) = (n, k) @ (k,)
n = output_size[0]
k = random.randint(1, 16)
# Randomly choose between two valid patterns
if random.choice([True, False]):
# Pattern 1: (n,) = (k,) @ (k, n)
input1_spec = TensorSpec(size=(k,), stride=(1,), dtype=dtypes[0])
input2_spec = TensorSpec(
size=(k, n),
stride=(n, 1),
dtype=dtypes[1] if len(dtypes) > 1 else dtypes[0],
)
else:
# Pattern 2: (n,) = (n, k) @ (k,)
input1_spec = TensorSpec(size=(n, k), stride=(k, 1), dtype=dtypes[0])
input2_spec = TensorSpec(
size=(k,),
stride=(1,),
dtype=dtypes[1] if len(dtypes) > 1 else dtypes[0],
)
elif output_dims == 2:
# Matrix multiplication: (m, n) = (m, k) @ (k, n)
m, n = output_size
k = random.randint(1, 16)
input1_spec = TensorSpec(size=(m, k), stride=(k, 1), dtype=dtypes[0])
input2_spec = TensorSpec(
size=(k, n),
stride=(n, 1),
dtype=dtypes[1] if len(dtypes) > 1 else dtypes[0],
)
else:
# Batched matrix multiplication: (..., m, n) = (..., m, k) @ (..., k, n)
*batch_dims, m, n = output_size
k = random.randint(1, 16)
# Calculate strides for contiguous tensors
input1_size = tuple(batch_dims + [m, k])
input2_size = tuple(batch_dims + [k, n])
# Contiguous strides
input1_stride = [1]
for i in reversed(range(len(input1_size) - 1)):
input1_stride.append(input1_stride[-1] * input1_size[i + 1])
input1_stride = tuple(reversed(input1_stride))
input2_stride = [1]
for i in reversed(range(len(input2_size) - 1)):
input2_stride.append(input2_stride[-1] * input2_size[i + 1])
input2_stride = tuple(reversed(input2_stride))
input1_spec = TensorSpec(
size=input1_size, stride=input1_stride, dtype=dtypes[0]
)
input2_spec = TensorSpec(
size=input2_size,
stride=input2_stride,
dtype=dtypes[1] if len(dtypes) > 1 else dtypes[0],
)
return [input1_spec, input2_spec]
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for general matrix multiplication."""
if len(input_names) != 2:
raise ValueError("torch.matmul requires exactly 2 inputs")
# Get target dtype
if isinstance(output_spec, TensorSpec):
target_dtype_str = f"torch.{output_spec.dtype}".replace(
"torch.torch.", "torch."
)
# Cast inputs to ensure compatible types
return (
f"{output_name} = torch.matmul("
f"{input_names[0]}.to({target_dtype_str}), "
f"{input_names[1]}.to({target_dtype_str}))"
)
else:
return f"{output_name} = torch.matmul({input_names[0]}, {input_names[1]})"
| MatmulOperator |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 6444,
"end": 7384
} | class ____:
_encoding_name: str
def to_dict(
self,
validate: bool = True,
ignore: list[str] | None = None,
context: dict[str, Any] | None = None,
) -> dict:
context = context or {}
ignore = ignore or []
condition = self._get("condition", Undefined) # type: ignore[attr-defined]
copy = self # don't copy unless we need to
if condition is not Undefined:
if isinstance(condition, core.SchemaBase):
pass
elif "field" in condition and "type" not in condition:
kwds = parse_shorthand(condition["field"], context.get("data", None))
copy = self.copy(deep=["condition"]) # type: ignore[attr-defined]
copy["condition"].update(kwds)
return super(ValueChannelMixin, copy).to_dict(
validate=validate, ignore=ignore, context=context
)
| ValueChannelMixin |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 1061168,
"end": 1061773
} | class ____(sgqlc.types.Union):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__types__ = (
ProjectV2ItemFieldDateValue,
ProjectV2ItemFieldIterationValue,
ProjectV2ItemFieldLabelValue,
ProjectV2ItemFieldMilestoneValue,
ProjectV2ItemFieldNumberValue,
ProjectV2ItemFieldPullRequestValue,
ProjectV2ItemFieldRepositoryValue,
ProjectV2ItemFieldReviewerValue,
ProjectV2ItemFieldSingleSelectValue,
ProjectV2ItemFieldTextValue,
ProjectV2ItemFieldUserValue,
)
| ProjectV2ItemFieldValue |
python | huggingface__transformers | tests/models/seamless_m4t/test_processing_seamless_m4t.py | {
"start": 967,
"end": 5295
} | class ____(unittest.TestCase):
def setUp(self):
self.checkpoint = "facebook/hf-seamless-m4t-medium"
self.tmpdirname = tempfile.mkdtemp()
def get_tokenizer(self, **kwargs):
return SeamlessM4TTokenizer.from_pretrained(self.checkpoint, **kwargs)
def get_feature_extractor(self, **kwargs):
return SeamlessM4TFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
feature_extractor = self.get_feature_extractor()
processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
processor.save_pretrained(self.tmpdirname)
processor = SeamlessM4TProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
tokenizer_instance = isinstance(processor.tokenizer, (SeamlessM4TTokenizerFast, SeamlessM4TTokenizer))
self.assertTrue(tokenizer_instance)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, SeamlessM4TFeatureExtractor)
def test_save_load_pretrained_additional_features(self):
processor = SeamlessM4TProcessor(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()
)
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
processor = SeamlessM4TProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, SeamlessM4TFeatureExtractor)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
tokenizer_instance = isinstance(processor.tokenizer, (SeamlessM4TTokenizerFast, SeamlessM4TTokenizer))
self.assertTrue(tokenizer_instance)
# Copied from test.models.whisper.test_processing_whisper.WhisperProcessorTest.test_feature_extractor with Whisper->SeamlessM4T
def test_feature_extractor(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
raw_speech = floats_list((3, 1000))
input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
input_processor = processor(audio=raw_speech, return_tensors="np")
for key in input_feat_extract:
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
# Copied from test.models.whisper.test_processing_whisper.WhisperProcessorTest.test_tokenizer with Whisper->SeamlessM4T
def test_tokenizer(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
input_str = "This is a test string"
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok:
self.assertListEqual(encoded_tok[key], encoded_processor[key])
# Copied from test.models.whisper.test_processing_whisper.WhisperProcessorTest.test_tokenizer_decode with Whisper->SeamlessM4T
def test_tokenizer_decode(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
| SeamlessM4TProcessorTest |
python | pytorch__pytorch | torch/distributed/fsdp/_fully_shard/_fsdp_param.py | {
"start": 7037,
"end": 7781
} | class ____(Enum):
"""
- ``SHARDED``: The sharded parameter is registered to the module. It is the
only contributor to parameter memory.
- ``SHARDED_POST_FORWARD``: The unsharded parameter is resharded to a
smaller world size. Since this data should not be used for computation,
we do not register it to the module. Users should reshard the module
before any in-place modifications. Both it and the sharded parameter
contribute to parameter memory.
- ``UNSHARDED``: The unsharded parameter is registered to the module. Both
it and the sharded parameter contribute to parameter memory.
"""
SHARDED = auto()
SHARDED_POST_FORWARD = auto()
UNSHARDED = auto()
@dataclass
| ShardedState |
python | RaRe-Technologies__gensim | gensim/test/test_rpmodel.py | {
"start": 505,
"end": 2374
} | class ____(unittest.TestCase):
def setUp(self):
self.corpus = MmCorpus(datapath('testcorpus.mm'))
def test_transform(self):
# create the transformation model
# HACK; set fixed seed so that we always get the same random matrix (and can compare against expected results)
np.random.seed(13)
model = rpmodel.RpModel(self.corpus, num_topics=2)
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = np.array([-0.70710677, 0.70710677])
self.assertTrue(np.allclose(vec, expected)) # transformed entries must be equal up to sign
def test_persistence(self):
fname = get_tmpfile('gensim_models.tst')
model = rpmodel.RpModel(self.corpus, num_topics=2)
model.save(fname)
model2 = rpmodel.RpModel.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(np.allclose(model.projection, model2.projection))
tstvec = []
self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def test_persistence_compressed(self):
fname = get_tmpfile('gensim_models.tst.gz')
model = rpmodel.RpModel(self.corpus, num_topics=2)
model.save(fname)
model2 = rpmodel.RpModel.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(np.allclose(model.projection, model2.projection))
tstvec = []
self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| TestRpModel |
python | anthropics__anthropic-sdk-python | src/anthropic/_legacy_response.py | {
"start": 1079,
"end": 12270
} | class ____(Generic[R]):
"""This is a legacy class as it will be replaced by `APIResponse`
and `AsyncAPIResponse` in the `_response.py` file in the next major
release.
For the sync client this will mostly be the same with the exception
of `content` & `text` will be methods instead of properties. In the
async client, all methods will be async.
A migration script will be provided & the migration in general should
be smooth.
"""
_cast_to: type[R]
_client: BaseClient[Any, Any]
_parsed_by_type: dict[type[Any], Any]
_stream: bool
_stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None
_options: FinalRequestOptions
http_response: httpx.Response
retries_taken: int
"""The number of retries made. If no retries happened this will be `0`"""
def __init__(
self,
*,
raw: httpx.Response,
cast_to: type[R],
client: BaseClient[Any, Any],
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
options: FinalRequestOptions,
retries_taken: int = 0,
) -> None:
self._cast_to = cast_to
self._client = client
self._parsed_by_type = {}
self._stream = stream
self._stream_cls = stream_cls
self._options = options
self.http_response = raw
self.retries_taken = retries_taken
@property
def request_id(self) -> str | None:
return self.http_response.headers.get("request-id") # type: ignore[no-any-return]
@overload
def parse(self, *, to: type[_T]) -> _T: ...
@overload
def parse(self) -> R: ...
def parse(self, *, to: type[_T] | None = None) -> R | _T:
"""Returns the rich python representation of this response's data.
NOTE: For the async client: this will become a coroutine in the next major version.
For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.
You can customise the type that the response is parsed into through
the `to` argument, e.g.
```py
from anthropic import BaseModel
class MyModel(BaseModel):
foo: str
obj = response.parse(to=MyModel)
print(obj.foo)
```
We support parsing:
- `BaseModel`
- `dict`
- `list`
- `Union`
- `str`
- `int`
- `float`
- `httpx.Response`
"""
cache_key = to if to is not None else self._cast_to
cached = self._parsed_by_type.get(cache_key)
if cached is not None:
return cached # type: ignore[no-any-return]
parsed = self._parse(to=to)
if is_given(self._options.post_parser):
parsed = self._options.post_parser(parsed)
if isinstance(parsed, BaseModel):
add_request_id(parsed, self.request_id)
self._parsed_by_type[cache_key] = parsed
return cast(R, parsed)
@property
def headers(self) -> httpx.Headers:
return self.http_response.headers
@property
def http_request(self) -> httpx.Request:
return self.http_response.request
@property
def status_code(self) -> int:
return self.http_response.status_code
@property
def url(self) -> httpx.URL:
return self.http_response.url
@property
def method(self) -> str:
return self.http_request.method
@property
def content(self) -> bytes:
"""Return the binary response content.
NOTE: this will be removed in favour of `.read()` in the
next major version.
"""
return self.http_response.content
@property
def text(self) -> str:
"""Return the decoded response content.
NOTE: this will be turned into a method in the next major version.
"""
return self.http_response.text
@property
def http_version(self) -> str:
return self.http_response.http_version
@property
def is_closed(self) -> bool:
return self.http_response.is_closed
@property
def elapsed(self) -> datetime.timedelta:
"""The time taken for the complete request/response cycle to complete."""
return self.http_response.elapsed
def _parse(self, *, to: type[_T] | None = None) -> R | _T:
cast_to = to if to is not None else self._cast_to
# unwrap `TypeAlias('Name', T)` -> `T`
if is_type_alias_type(cast_to):
cast_to = cast_to.__value__ # type: ignore[unreachable]
# unwrap `Annotated[T, ...]` -> `T`
if cast_to and is_annotated_type(cast_to):
cast_to = extract_type_arg(cast_to, 0)
origin = get_origin(cast_to) or cast_to
if inspect.isclass(origin):
if issubclass(cast(Any, origin), JSONLDecoder):
return cast(
R,
cast("type[JSONLDecoder[Any]]", cast_to)(
raw_iterator=self.http_response.iter_bytes(chunk_size=64),
line_type=extract_type_arg(cast_to, 0),
http_response=self.http_response,
),
)
if issubclass(cast(Any, origin), AsyncJSONLDecoder):
return cast(
R,
cast("type[AsyncJSONLDecoder[Any]]", cast_to)(
raw_iterator=self.http_response.aiter_bytes(chunk_size=64),
line_type=extract_type_arg(cast_to, 0),
http_response=self.http_response,
),
)
if self._stream:
if to:
if not is_stream_class_type(to):
raise TypeError(f"Expected custom parse type to be a subclass of {Stream} or {AsyncStream}")
return cast(
_T,
to(
cast_to=extract_stream_chunk_type(
to,
failure_message="Expected custom stream type to be passed with a type argument, e.g. Stream[ChunkType]",
),
response=self.http_response,
client=cast(Any, self._client),
),
)
if self._stream_cls:
return cast(
R,
self._stream_cls(
cast_to=extract_stream_chunk_type(self._stream_cls),
response=self.http_response,
client=cast(Any, self._client),
),
)
stream_cls = cast("type[Stream[Any]] | type[AsyncStream[Any]] | None", self._client._default_stream_cls)
if stream_cls is None:
raise MissingStreamClassError()
return cast(
R,
stream_cls(
cast_to=cast_to,
response=self.http_response,
client=cast(Any, self._client),
),
)
if cast_to is NoneType:
return cast(R, None)
response = self.http_response
if cast_to == str:
return cast(R, response.text)
if cast_to == int:
return cast(R, int(response.text))
if cast_to == float:
return cast(R, float(response.text))
if cast_to == bool:
return cast(R, response.text.lower() == "true")
if inspect.isclass(origin) and issubclass(origin, HttpxBinaryResponseContent):
return cast(R, cast_to(response)) # type: ignore
if origin == LegacyAPIResponse:
raise RuntimeError("Unexpected state - cast_to is `APIResponse`")
if inspect.isclass(
origin # pyright: ignore[reportUnknownArgumentType]
) and issubclass(origin, httpx.Response):
# Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response
# and pass that class to our request functions. We cannot change the variance to be either
# covariant or contravariant as that makes our usage of ResponseT illegal. We could construct
# the response class ourselves but that is something that should be supported directly in httpx
# as it would be easy to incorrectly construct the Response object due to the multitude of arguments.
if cast_to != httpx.Response:
raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`")
return cast(R, response)
if (
inspect.isclass(
origin # pyright: ignore[reportUnknownArgumentType]
)
and not issubclass(origin, BaseModel)
and issubclass(origin, pydantic.BaseModel)
):
raise TypeError("Pydantic models must subclass our base model type, e.g. `from anthropic import BaseModel`")
if (
cast_to is not object
and not origin is list
and not origin is dict
and not origin is Union
and not issubclass(origin, BaseModel)
):
raise RuntimeError(
f"Unsupported type, expected {cast_to} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}."
)
# split is required to handle cases where additional information is included
# in the response, e.g. application/json; charset=utf-8
content_type, *_ = response.headers.get("content-type", "*").split(";")
if not content_type.endswith("json"):
if is_basemodel(cast_to):
try:
data = response.json()
except Exception as exc:
log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc)
else:
return self._client._process_response_data(
data=data,
cast_to=cast_to, # type: ignore
response=response,
)
if self._client._strict_response_validation:
raise APIResponseValidationError(
response=response,
message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.",
body=response.text,
)
# If the API responds with content that isn't JSON then we just return
# the (decoded) text without performing any parsing so that you can still
# handle the response however you need to.
return response.text # type: ignore
data = response.json()
return self._client._process_response_data(
data=data,
cast_to=cast_to, # type: ignore
response=response,
)
@override
def __repr__(self) -> str:
return f"<APIResponse [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_to}>"
| LegacyAPIResponse |
python | vyperlang__vyper | vyper/builtins/functions.py | {
"start": 43658,
"end": 44095
} | class ____(BuiltinFunctionT):
_id = "blobhash"
_inputs = [("index", UINT256_T)]
_return_type = BYTES32_T
mutability = StateMutability.VIEW
@process_inputs
def build_IR(self, expr, args, kwargs, contact):
if not version_check(begin="cancun"):
raise EvmVersionException("`blobhash` is not available pre-cancun", expr)
return IRnode.from_list(["blobhash", args[0]], typ=BYTES32_T)
| BlobHash |
python | ray-project__ray | rllib/models/torch/mingpt.py | {
"start": 715,
"end": 1013
} | class ____:
# block size must be provided
block_size: int
# transformer config
n_layer: int = 12
n_head: int = 12
n_embed: int = 768
# dropout config
embed_pdrop: float = 0.1
resid_pdrop: float = 0.1
attn_pdrop: float = 0.1
@Deprecated(error=False)
| GPTConfig |
python | django-extensions__django-extensions | django_extensions/collision_resolvers.py | {
"start": 4399,
"end": 5282
} | class ____(PathBasedCR, metaclass=ABCMeta):
"""
Abstract collision resolver which transform pair (app name, model_name) to alias by changing dots to underscores.
You must define MODIFICATION_STRING which should be string to format with two keyword arguments:
app_name and model_name. For example: "{app_name}_{model_name}".
Model from last application in alphabetical order is selected.
""" # noqa: E501
MODIFICATION_STRING = None # type: Optional[str]
def transform_import(self, module_path):
assert self.MODIFICATION_STRING is not None, (
"You must define MODIFICATION_STRING in your resolver class!"
)
app_name, model_name = self.get_app_name_and_model(module_path)
app_name = app_name.replace(".", "_")
return self.MODIFICATION_STRING.format(app_name=app_name, model_name=model_name)
| AppNameCR |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 234463,
"end": 235241
} | class ____(Operation):
def call(self, x1, x2):
return backend.numpy.logical_xor(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype="bool")
@keras_export(["keras.ops.logical_xor", "keras.ops.numpy.logical_xor"])
def logical_xor(x1, x2):
"""Compute the truth value of `x1 XOR x2`, element-wise.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Output boolean tensor.
"""
if any_symbolic_tensors((x1, x2)):
return LogicalXor().symbolic_call(x1, x2)
return backend.numpy.logical_xor(x1, x2)
| LogicalXor |
python | ipython__ipython | IPython/core/macro.py | {
"start": 541,
"end": 1726
} | class ____:
"""Simple class to store the value of macros as strings.
Macro is just a callable that executes a string of IPython
input when called.
"""
def __init__(self,code):
"""store the macro value, as a single string which can be executed"""
lines = []
enc = None
for line in code.splitlines():
coding_match = coding_declaration.match(line)
if coding_match:
enc = coding_match.group(1)
else:
lines.append(line)
code = "\n".join(lines)
if isinstance(code, bytes):
code = code.decode(enc or DEFAULT_ENCODING)
self.value = code + '\n'
def __str__(self):
return self.value
def __repr__(self):
return 'IPython.macro.Macro(%s)' % repr(self.value)
def __getstate__(self):
""" needed for safe pickling via %store """
return {'value': self.value}
def __add__(self, other):
if isinstance(other, Macro):
return Macro(self.value + other.value)
elif isinstance(other, str):
return Macro(self.value + other)
raise TypeError
| Macro |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor32.py | {
"start": 546,
"end": 650
} | class ____(metaclass=BMeta): ...
def func1(cls: type[B]):
# This should generate an error.
cls()
| B |
python | google__jax | jax/experimental/mosaic/gpu/utils.py | {
"start": 46053,
"end": 48428
} | class ____:
source_bounds: tuple[int, ...]
target_bounds: tuple[int, ...]
partition: tuple[int | None, ...]
base_offset: tuple[ir.Value, ...] | None
def __init__(
self,
elements: tuple[int, ...],
*,
partition: tuple[int | None, ...],
base_offset: tuple[ir.Value, ...] | None = None,
num_chunks: tuple[int, ...] | None = None,
chunk_size: tuple[int, ...] | None = None,
):
self.target_bounds = elements
self.partition = partition
self.base_offset = base_offset
if len(self.target_bounds) != len(self.partition):
raise ValueError
if num_chunks is None == chunk_size is None:
raise ValueError(
"Exactly one of num_chunks and chunk_size must be specified"
)
if num_chunks is not None:
self.source_bounds = num_chunks
else:
assert chunk_size is not None
if len(chunk_size) != len(self.target_bounds):
raise ValueError
source_bounds = []
for els, chunk in zip(elements, chunk_size):
if els % chunk:
raise ValueError("Non-divisible partition", elements, chunk_size)
source_bounds.append(els // chunk)
self.source_bounds = tuple(source_bounds)
seen_dims = set()
for p in self.partition:
if p is None:
continue
if not (0 <= p < len(self.source_bounds)):
raise ValueError
if p in seen_dims:
raise ValueError
seen_dims.add(p)
for tb, p in zip(self.target_bounds, self.partition):
if p is not None and tb % self.source_bounds[p]:
raise ValueError("Non-divisible partitioning")
@property
def num_chunks(self) -> tuple[int, ...]:
return self.source_bounds
@property
def target_block_shape(self):
return tuple(
tb if p is None else tb // self.source_bounds[p]
for tb, p in zip(self.target_bounds, self.partition)
)
def get_base(self, *source_coords: ir.Value | int) -> list[ir.Value]:
coords = []
index = ir.IndexType.get()
for i, (tbs, p) in enumerate(zip(self.target_block_shape, self.partition)):
if p is None:
dim_base = c(0, index)
else:
dim_base = arith.muli(c(tbs, index), source_coords[p])
if self.base_offset is not None:
dim_base = arith.addi(self.base_offset[i], dim_base)
coords.append(dim_base)
return coords
| Partition |
python | openai__openai-python | src/openai/pagination.py | {
"start": 1674,
"end": 2502
} | class ____(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
data: List[_T]
has_more: Optional[bool] = None
@override
def _get_page_items(self) -> List[_T]:
data = self.data
if not data:
return []
return data
@override
def has_next_page(self) -> bool:
has_more = self.has_more
if has_more is not None and has_more is False:
return False
return super().has_next_page()
@override
def next_page_info(self) -> Optional[PageInfo]:
data = self.data
if not data:
return None
item = cast(Any, data[-1])
if not isinstance(item, CursorPageItem) or item.id is None:
# TODO emit warning log
return None
return PageInfo(params={"after": item.id})
| SyncCursorPage |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1520255,
"end": 1521553
} | class ____(Transform):
"""
LoessTransform schema wrapper.
Parameters
----------
loess : str, :class:`FieldName`
The data field of the dependent variable to smooth.
on : str, :class:`FieldName`
The data field of the independent variable to use a predictor.
bandwidth : float
A bandwidth parameter in the range ``[0, 1]`` that determines the amount of
smoothing.
**Default value:** ``0.3``
groupby : Sequence[str, :class:`FieldName`]
The data fields to group by. If not specified, a single group containing all data
objects will be used.
as : Sequence[str, :class:`FieldName`]
The output field names for the smoothed points generated by the loess transform.
**Default value:** The field names of the input x and y values.
"""
_schema = {"$ref": "#/definitions/LoessTransform"}
def __init__(
self,
loess: Optional[str | SchemaBase] = Undefined,
on: Optional[str | SchemaBase] = Undefined,
bandwidth: Optional[float] = Undefined,
groupby: Optional[Sequence[str | SchemaBase]] = Undefined,
**kwds,
):
super().__init__(
loess=loess, on=on, bandwidth=bandwidth, groupby=groupby, **kwds
)
| LoessTransform |
python | keras-team__keras | keras/src/ops/math_test.py | {
"start": 16240,
"end": 35813
} | class ____(testing.TestCase):
def run_segment_reduce_test(
self,
segment_reduce_op,
element_wise_reduce_method,
num_indices,
indices_high,
data_dims=tuple(),
num_segments=None,
add_neg1_to_indices=False,
sorted_indices=False,
):
if num_segments is not None and indices_high >= num_segments:
raise ValueError("Indices high cannot be more than num segments")
indices_dims = (num_indices,)
full_data_dims = indices_dims + data_dims
data = np.random.rand(*full_data_dims).astype(np.float32)
segment_ids = np.concatenate(
[
np.arange(indices_high),
np.random.randint(
low=0,
high=indices_high,
size=(indices_dims[0] - indices_high),
),
]
).astype(np.int32)
if sorted_indices:
segment_ids = np.sort(segment_ids, axis=-1)
if add_neg1_to_indices:
segment_ids[0] = -1
outputs = segment_reduce_op(
data, segment_ids, num_segments, sorted=sorted_indices
)
if num_segments is None:
num_segments = np.max(segment_ids).item() + 1
expected_shape = (num_segments,) + data_dims
if segment_reduce_op == kmath.segment_max:
if backend.backend() == "tensorflow":
empty_fill_value = -np.finfo(np.float32).max
else:
empty_fill_value = -np.inf
expected = np.full(expected_shape, empty_fill_value)
else:
expected = np.zeros(expected_shape)
for idx in range(num_indices):
segment_id = segment_ids[idx]
if segment_id == -1:
continue
expected[segment_id] = element_wise_reduce_method(
expected[segment_id], data[idx]
)
self.assertAllClose(outputs, expected)
@parameterized.product(
(
dict(
segment_reduce_op=kmath.segment_sum,
element_wise_reduce_method=_sum_reduce,
),
dict(
segment_reduce_op=kmath.segment_max,
element_wise_reduce_method=_max_reduce,
),
),
sorted_indices=(True, False),
)
@pytest.mark.skipif(
backend.backend() == "jax",
reason="JAX does not support `num_segments=None`.",
)
def test_segment_reduce(
self,
segment_reduce_op,
element_wise_reduce_method,
sorted_indices,
):
# Test 1D case.
self.run_segment_reduce_test(
segment_reduce_op,
element_wise_reduce_method,
num_indices=9,
indices_high=3,
sorted_indices=sorted_indices,
)
# Test ND data case.
self.run_segment_reduce_test(
segment_reduce_op,
element_wise_reduce_method,
num_indices=9,
indices_high=3,
data_dims=(
3,
3,
),
sorted_indices=sorted_indices,
)
@parameterized.product(
(
dict(
segment_reduce_op=kmath.segment_sum,
element_wise_reduce_method=_sum_reduce,
),
dict(
segment_reduce_op=kmath.segment_max,
element_wise_reduce_method=_max_reduce,
),
),
(
dict(
contains_neg1_in_indices=True,
sorted_indices=False,
),
dict(
contains_neg1_in_indices=False,
sorted_indices=False,
),
dict(
contains_neg1_in_indices=False,
sorted_indices=True,
),
),
)
def test_segment_reduce_explicit_num_segments(
self,
segment_reduce_op,
element_wise_reduce_method,
contains_neg1_in_indices,
sorted_indices,
):
if backend.backend() == "tensorflow" and sorted_indices:
pytest.skip(
"Num segments and sorted_indices=True doesn't work for "
"tensorflow."
)
# Test 1D case.
self.run_segment_reduce_test(
segment_reduce_op,
element_wise_reduce_method,
num_indices=9,
indices_high=3,
num_segments=4,
add_neg1_to_indices=contains_neg1_in_indices,
sorted_indices=sorted_indices,
)
# Test ND data case.
self.run_segment_reduce_test(
segment_reduce_op,
element_wise_reduce_method,
num_indices=9,
indices_high=3,
data_dims=(
3,
3,
),
num_segments=4,
add_neg1_to_indices=contains_neg1_in_indices,
sorted_indices=sorted_indices,
)
def test_top_k(self):
x = np.array([0, 4, 2, 1, 3, -1], dtype=np.float32)
values, indices = kmath.top_k(x, k=2)
self.assertAllClose(values, [4, 3])
self.assertAllClose(indices, [1, 4])
x = np.array([0, 4, 2, 1, 3, -1], dtype=np.float32)
values, indices = kmath.top_k(x, k=2, sorted=False)
# Any order ok when `sorted=False`.
self.assertEqual(set(backend.convert_to_numpy(values)), set([4, 3]))
self.assertEqual(set(backend.convert_to_numpy(indices)), set([1, 4]))
x = np.random.rand(5, 5)
outputs = kmath.top_k(x, k=2)
expected_values = np.zeros((5, 2))
expected_indices = np.zeros((5, 2), dtype=np.int32)
for i in range(x.shape[0]):
top_k_indices = np.argsort(x[i])[-2:][::-1]
expected_values[i] = x[i, top_k_indices]
expected_indices[i] = top_k_indices
self.assertAllClose(outputs[0], expected_values)
self.assertAllClose(outputs[1], expected_indices)
def test_in_top_k(self):
targets = np.array([1, 0, 2])
predictions = np.array(
[
[0.1, 0.9, 0.8, 0.8],
[0.05, 0.95, 0, 1],
[0.1, 0.8, 0.3, 1],
]
)
self.assertAllEqual(
kmath.in_top_k(targets, predictions, k=1), [True, False, False]
)
self.assertAllEqual(
kmath.in_top_k(targets, predictions, k=2), [True, False, False]
)
self.assertAllEqual(
kmath.in_top_k(targets, predictions, k=3), [True, True, True]
)
# Test tie cases.
targets = np.array([1, 0, 2])
predictions = np.array(
[
[0.1, 0.9, 0.8, 0.8],
[0.95, 0.95, 0, 0.95],
[0.1, 0.8, 0.8, 0.95],
]
)
self.assertAllEqual(
kmath.in_top_k(targets, predictions, k=1), [True, True, False]
)
self.assertAllEqual(
kmath.in_top_k(targets, predictions, k=2), [True, True, True]
)
self.assertAllEqual(
kmath.in_top_k(targets, predictions, k=3), [True, True, True]
)
# Test `nan` in predictions
# https://github.com/keras-team/keras/issues/19995
targets = np.array([1, 0])
predictions = np.array([[0.1, np.nan, 0.5], [0.3, 0.2, 0.5]])
self.assertAllEqual(
kmath.in_top_k(targets, predictions, k=2), [False, True]
)
def test_logsumexp(self):
x = np.random.rand(5, 5)
outputs = kmath.logsumexp(x)
expected = np.log(np.sum(np.exp(x)))
self.assertAllClose(outputs, expected)
outputs = kmath.logsumexp(x, axis=1)
expected = np.log(np.sum(np.exp(x), axis=1))
self.assertAllClose(outputs, expected)
def test_extract_sequences(self):
# Test 1D case.
x = np.random.random((10,))
sequence_length = 3
sequence_stride = 2
output = kmath.extract_sequences(x, sequence_length, sequence_stride)
num_sequences = 1 + (x.shape[-1] - sequence_length) // sequence_stride
expected = np.zeros(shape=(num_sequences, sequence_length))
pos = 0
for i in range(num_sequences):
expected[i] = x[pos : pos + sequence_length]
pos += sequence_stride
self.assertAllClose(output, expected)
# Test N-D case.
x = np.random.random((4, 8))
sequence_length = 3
sequence_stride = 2
output = kmath.extract_sequences(x, sequence_length, sequence_stride)
num_sequences = 1 + (x.shape[-1] - sequence_length) // sequence_stride
expected = np.zeros(shape=(4, num_sequences, sequence_length))
pos = 0
for i in range(num_sequences):
expected[:, i] = x[:, pos : pos + sequence_length]
pos += sequence_stride
self.assertAllClose(output, expected)
def test_fft(self):
real = np.random.random((2, 4, 3))
imag = np.random.random((2, 4, 3))
complex_arr = real + 1j * imag
real_output, imag_output = kmath.fft((real, imag))
ref = np.fft.fft(complex_arr)
real_ref = np.real(ref)
imag_ref = np.imag(ref)
self.assertAllClose(real_ref, real_output)
self.assertAllClose(imag_ref, imag_output)
def test_fft2(self):
real = np.random.random((2, 4, 3))
imag = np.random.random((2, 4, 3))
complex_arr = real + 1j * imag
real_output, imag_output = kmath.fft2((real, imag))
ref = np.fft.fft2(complex_arr)
real_ref = np.real(ref)
imag_ref = np.imag(ref)
self.assertAllClose(real_ref, real_output)
self.assertAllClose(imag_ref, imag_output)
def test_ifft2(self):
real = np.random.random((2, 4, 3)).astype(np.float32)
imag = np.random.random((2, 4, 3)).astype(np.float32)
complex_arr = real + 1j * imag
real_output, imag_output = kmath.ifft2((real, imag))
ref = np.fft.ifft2(complex_arr)
real_ref = np.real(ref)
imag_ref = np.imag(ref)
self.assertAllClose(real_ref, real_output)
self.assertAllClose(imag_ref, imag_output)
@parameterized.parameters([(None,), (3,), (15,)])
def test_rfft(self, n):
# Test 1D.
x = np.random.random((10,))
real_output, imag_output = kmath.rfft(x, fft_length=n)
ref = np.fft.rfft(x, n=n)
real_ref = np.real(ref)
imag_ref = np.imag(ref)
self.assertAllClose(real_ref, real_output, atol=1e-5, rtol=1e-5)
self.assertAllClose(imag_ref, imag_output, atol=1e-5, rtol=1e-5)
# Test N-D case.
x = np.random.random((2, 3, 10))
real_output, imag_output = kmath.rfft(x, fft_length=n)
ref = np.fft.rfft(x, n=n)
real_ref = np.real(ref)
imag_ref = np.imag(ref)
self.assertAllClose(real_ref, real_output, atol=1e-5, rtol=1e-5)
self.assertAllClose(imag_ref, imag_output, atol=1e-5, rtol=1e-5)
@parameterized.parameters([(None,), (3,), (15,)])
def test_irfft(self, n):
# Test 1D.
real = np.random.random((10,))
imag = np.random.random((10,))
complex_arr = real + 1j * imag
output = kmath.irfft((real, imag), fft_length=n)
ref = np.fft.irfft(complex_arr, n=n)
self.assertAllClose(output, ref, atol=1e-5, rtol=1e-5)
# Test N-D case.
real = np.random.random((2, 3, 10))
imag = np.random.random((2, 3, 10))
complex_arr = real + 1j * imag
output = kmath.irfft((real, imag), fft_length=n)
ref = np.fft.irfft(complex_arr, n=n)
self.assertAllClose(output, ref, atol=1e-5, rtol=1e-5)
@parameterized.parameters(
[
(32, 8, 32, "hann", True),
(8, 8, 16, "hann", True),
(4, 4, 7, "hann", True),
(32, 8, 32, "hamming", True),
(32, 8, 32, "hann", False),
(32, 8, 32, np.ones((32,)), True),
(32, 8, 32, None, True),
]
)
def test_stft(
self, sequence_length, sequence_stride, fft_length, window, center
):
# Test 1D case.
x = np.random.random((32,))
real_output, imag_output = kmath.stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
real_ref, imag_ref = _stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
self.assertAllClose(real_ref, real_output, atol=1e-5, rtol=1e-5)
self.assertAllClose(imag_ref, imag_output, atol=1e-5, rtol=1e-5)
# Test N-D case.
x = np.random.random((2, 3, 32))
real_output, imag_output = kmath.stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
real_ref, imag_ref = _stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
self.assertAllClose(real_ref, real_output, atol=1e-5, rtol=1e-5)
self.assertAllClose(imag_ref, imag_output, atol=1e-5, rtol=1e-5)
@parameterized.parameters(
[
(32, 8, 32, "hann", True),
(8, 8, 16, "hann", True),
(4, 4, 7, "hann", True),
(32, 8, 32, "hamming", True),
(8, 4, 8, "hann", False),
(32, 8, 32, np.ones((32,)), True),
(32, 8, 32, None, True),
]
)
def test_istft(
self, sequence_length, sequence_stride, fft_length, window, center
):
# sequence_stride must <= x[0].shape[-1]
# sequence_stride must >= fft_length / num_sequences
# Test 1D case.
x = np.random.random((256,))
real_x, imag_x = _stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
output = kmath.istft(
(real_x, imag_x),
sequence_length,
sequence_stride,
fft_length,
window=window,
center=center,
)
ref = _istft(
(real_x, imag_x),
sequence_length,
sequence_stride,
fft_length,
window=window,
center=center,
)
if backend.backend() in ("numpy", "jax", "torch"):
# these backends have different implementation for the boundary of
# the output, so we need to truncate 5% before assertAllClose
truncated_len = int(output.shape[-1] * 0.05)
output = output[..., truncated_len:-truncated_len]
ref = ref[..., truncated_len:-truncated_len]
# Nans are handled differently in different backends, so zero them out.
output = np.nan_to_num(backend.convert_to_numpy(output), nan=0.0)
ref = np.nan_to_num(ref, nan=0.0)
self.assertAllClose(output, ref, atol=1e-5, rtol=1e-5)
# Test N-D case.
x = np.random.random((2, 3, 256))
real_x, imag_x = _stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
output = kmath.istft(
(real_x, imag_x),
sequence_length,
sequence_stride,
fft_length,
window=window,
center=center,
)
ref = _istft(
(real_x, imag_x),
sequence_length,
sequence_stride,
fft_length,
window=window,
center=center,
)
if backend.backend() in ("numpy", "jax", "torch"):
# these backends have different implementation for the boundary of
# the output, so we need to truncate 5% before assertAllClose
truncated_len = int(output.shape[-1] * 0.05)
output = output[..., truncated_len:-truncated_len]
ref = ref[..., truncated_len:-truncated_len]
# Nans are handled differently in different backends, so zero them out.
output = np.nan_to_num(backend.convert_to_numpy(output), nan=0.0)
ref = np.nan_to_num(ref, nan=0.0)
self.assertAllClose(output, ref, atol=1e-5, rtol=1e-5)
def test_rsqrt(self):
x = np.array([[1, 4, 9], [16, 25, 36]], dtype="float32")
self.assertAllClose(kmath.rsqrt(x), 1 / np.sqrt(x))
self.assertAllClose(kmath.Rsqrt()(x), 1 / np.sqrt(x))
def test_erf_operation_basic(self):
# Sample values for testing
sample_values = np.array([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0])
# Expected output using numpy's approximation of the error function
expected_output = scipy.special.erf(sample_values)
# Output from the erf operation in keras_core
output_from_erf_op = kmath.erf(sample_values)
# Assert that the outputs are close
self.assertAllClose(expected_output, output_from_erf_op, atol=1e-4)
def test_erf_operation_dtype(self):
# Test for float32 and float64 data types
for dtype in ("float32", "float64"):
sample_values = np.array(
[-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], dtype=dtype
)
expected_output = scipy.special.erf(sample_values)
output_from_erf_op = kmath.erf(sample_values)
self.assertAllClose(expected_output, output_from_erf_op, atol=1e-4)
def test_erf_operation_edge_cases(self):
# Test for edge cases
edge_values = np.array([1e5, -1e5, 1e-5, -1e-5], dtype=np.float64)
expected_output = scipy.special.erf(edge_values)
output_from_edge_erf_op = kmath.erf(edge_values)
self.assertAllClose(expected_output, output_from_edge_erf_op, atol=1e-4)
def test_erfinv_operation_basic(self):
# Sample values for testing
sample_values = np.array([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0])
# Expected output using numpy's approximation of the error function
expected_output = scipy.special.erfinv(sample_values)
# Output from the erf operation in keras_core
output_from_erfinv_op = kmath.erfinv(sample_values)
# Assert that the outputs are close
self.assertAllClose(expected_output, output_from_erfinv_op, atol=1e-4)
def test_erfinv_operation_dtype(self):
# Test for float32 and float64 data types
for dtype in ("float32", "float64"):
sample_values = np.array(
[-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], dtype=dtype
)
expected_output = scipy.special.erfinv(sample_values)
output_from_erfinv_op = kmath.erfinv(sample_values)
self.assertAllClose(
expected_output, output_from_erfinv_op, atol=1e-4
)
def test_erfinv_operation_edge_cases(self):
# Test for edge cases
edge_values = np.array([1e5, -1e5, 1e-5, -1e-5], dtype=np.float64)
expected_output = scipy.special.erfinv(edge_values)
output_from_edge_erfinv_op = kmath.erfinv(edge_values)
self.assertAllClose(
expected_output, output_from_edge_erfinv_op, atol=1e-4
)
def test_logdet(self):
x = np.array(
[
[4.42, -1.18, 0.06, 0.74],
[-1.18, 1.77, -0.84, -1.16],
[0.06, -0.84, 5.84, 0.55],
[0.74, -1.16, 0.55, 0.77],
],
dtype="float32",
)
out = kmath.logdet(x)
self.assertAllClose(out, -1.1178946, atol=1e-3)
| MathOpsCorrectnessTest |
python | google__pytype | pytype/abstract/_typing.py | {
"start": 21735,
"end": 21891
} | class ____(_TypeVariable):
"""Parameter of a type (typing.TypeVar)."""
_INSTANCE_CLASS: type[TypeParameterInstance] = TypeParameterInstance
| TypeParameter |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-tiktok-marketing/unit_tests/integration/test_reports_hourly.py | {
"start": 8114,
"end": 16238
} | class ____(TestCase):
stream_name = "ad_groups_reports_hourly"
advertiser_id = "872746382648"
cursor = "2024-01-01 10:00:00"
cursor_field = "stat_time_hour"
metrics = [
"campaign_name",
"campaign_id",
"adgroup_name",
"placement_type",
"tt_app_id",
"tt_app_name",
"mobile_app_id",
"promotion_type",
"dpa_target_audience_type",
"conversion",
"cost_per_conversion",
"conversion_rate",
"real_time_conversion",
"real_time_cost_per_conversion",
"real_time_conversion_rate",
"result",
"cost_per_result",
"result_rate",
"real_time_result",
"real_time_cost_per_result",
"real_time_result_rate",
"secondary_goal_result",
"cost_per_secondary_goal_result",
"secondary_goal_result_rate",
"spend",
"cpc",
"cpm",
"impressions",
"clicks",
"ctr",
"reach",
"cost_per_1000_reached",
"frequency",
"video_play_actions",
"video_watched_2s",
"video_watched_6s",
"average_video_play",
"average_video_play_per_user",
"video_views_p25",
"video_views_p50",
"video_views_p75",
"video_views_p100",
"profile_visits",
"likes",
"comments",
"shares",
"follows",
"clicks_on_music_disc",
"real_time_app_install",
"real_time_app_install_cost",
"app_install",
]
def catalog(self, sync_mode: SyncMode = SyncMode.full_refresh):
return CatalogBuilder().with_stream(name=self.stream_name, sync_mode=sync_mode).build()
def config(self, include_deleted: bool = False):
config_to_build = ConfigBuilder().with_end_date("2024-01-02")
if include_deleted:
config_to_build = config_to_build.with_include_deleted()
return config_to_build.build()
def state(self):
return (
StateBuilder()
.with_stream_state(
stream_name=self.stream_name,
state={
"states": [
{"partition": {"advertiser_id": self.advertiser_id, "parent_slice": {}}, "cursor": {self.cursor_field: self.cursor}}
]
},
)
.build()
)
@HttpMocker()
def test_basic_read(self, http_mocker: HttpMocker):
mock_advertisers_slices(http_mocker, self.config())
query_params = {
"service_type": "AUCTION",
"report_type": "BASIC",
"data_level": "AUCTION_ADGROUP",
"dimensions": '["adgroup_id", "stat_time_hour"]',
"metrics": str(self.metrics).replace("'", '"'),
"start_date": self.config()["start_date"],
"end_date": self.config()["start_date"],
"page_size": 1000,
"advertiser_id": self.advertiser_id,
}
http_mocker.get(
HttpRequest(
url=f"https://business-api.tiktok.com/open_api/v1.3/report/integrated/get/",
query_params=query_params,
),
HttpResponse(body=json.dumps(find_template(self.stream_name, __file__)), status_code=200),
)
query_params["start_date"] = query_params["end_date"] = self.config()["end_date"]
http_mocker.get(
HttpRequest(
url=f"https://business-api.tiktok.com/open_api/v1.3/report/integrated/get/",
query_params=query_params,
),
HttpResponse(body=json.dumps(EMPTY_LIST_RESPONSE), status_code=200),
)
output = read(get_source(config=self.config(), state=None), self.config(), self.catalog())
assert len(output.records) == 2
assert output.records[0].record.data.get("adgroup_id") is not None
assert output.records[0].record.data.get("stat_time_hour") is not None
@HttpMocker()
def test_read_with_state(self, http_mocker: HttpMocker):
mock_advertisers_slices(http_mocker, self.config())
http_mocker.get(
HttpRequest(
url=f"https://business-api.tiktok.com/open_api/v1.3/report/integrated/get/",
query_params={
"service_type": "AUCTION",
"report_type": "BASIC",
"data_level": "AUCTION_ADGROUP",
"dimensions": '["adgroup_id", "stat_time_hour"]',
"metrics": str(self.metrics).replace("'", '"'),
"start_date": self.config()["start_date"],
"end_date": self.config()["start_date"],
"page_size": 1000,
"advertiser_id": self.advertiser_id,
},
),
HttpResponse(body=json.dumps(find_template(self.stream_name, __file__)), status_code=200),
)
http_mocker.get(
HttpRequest(
url=f"https://business-api.tiktok.com/open_api/v1.3/report/integrated/get/",
query_params={
"service_type": "AUCTION",
"report_type": "BASIC",
"data_level": "AUCTION_ADGROUP",
"dimensions": '["adgroup_id", "stat_time_hour"]',
"metrics": str(self.metrics).replace("'", '"'),
"start_date": self.config()["end_date"],
"end_date": self.config()["end_date"],
"page_size": 1000,
"advertiser_id": self.advertiser_id,
},
),
HttpResponse(body=json.dumps(EMPTY_LIST_RESPONSE), status_code=200),
)
output = read(
source=get_source(config=self.config(), state=self.state()),
config=self.config(),
catalog=self.catalog(sync_mode=SyncMode.incremental),
state=self.state(),
)
assert len(output.records) == 2
assert output.state_messages[1].state.stream.stream_state.states == [
{"cursor": {"stat_time_hour": self.cursor}, "partition": {"advertiser_id": self.advertiser_id, "parent_slice": {}}}
]
@HttpMocker()
def test_read_with_include_deleted(self, http_mocker: HttpMocker):
mock_advertisers_slices(http_mocker, self.config())
filtering = '[{"field_name": "adgroup_status", "filter_type": "IN", "filter_value": "[\\"STATUS_ALL\\"]"}]'
query_params = {
"service_type": "AUCTION",
"report_type": "BASIC",
"data_level": "AUCTION_ADGROUP",
"dimensions": '["adgroup_id", "stat_time_hour"]',
"metrics": str(self.metrics).replace("'", '"'),
"start_date": self.config()["start_date"],
"end_date": self.config()["start_date"],
"page_size": 1000,
"advertiser_id": self.advertiser_id,
"filtering": filtering,
}
http_mocker.get(
HttpRequest(
url=f"https://business-api.tiktok.com/open_api/v1.3/report/integrated/get/",
query_params=query_params,
),
HttpResponse(body=json.dumps(find_template(self.stream_name, __file__)), status_code=200),
)
query_params["start_date"] = query_params["end_date"] = self.config()["end_date"]
http_mocker.get(
HttpRequest(
url=f"https://business-api.tiktok.com/open_api/v1.3/report/integrated/get/",
query_params=query_params,
),
HttpResponse(body=json.dumps(EMPTY_LIST_RESPONSE), status_code=200),
)
output = read(
get_source(config=self.config(include_deleted=True), state=None),
self.config(include_deleted=True),
self.catalog(),
)
assert len(output.records) == 2
assert output.records[0].record.data.get("adgroup_id") is not None
assert output.records[0].record.data.get("stat_time_hour") is not None
| TestAdGroupsReportsHourly |
python | great-expectations__great_expectations | great_expectations/metrics/query/row_count.py | {
"start": 239,
"end": 350
} | class ____(QueryMetric[QueryRowCountResult]):
name = "query.row_count"
query: NonEmptyString
| QueryRowCount |
python | allegroai__clearml | clearml/backend_api/services/v2_20/projects.py | {
"start": 154445,
"end": 155399
} | class ____(Request):
"""
Validates that the project exists and can be deleted
:param project: Project ID
:type project: str
"""
_service = "projects"
_action = "validate_delete"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {"project": {"description": "Project ID", "type": "string"}},
"required": ["project"],
"type": "object",
}
def __init__(self, project: str, **kwargs: Any) -> None:
super(ValidateDeleteRequest, self).__init__(**kwargs)
self.project = project
@schema_property("project")
def project(self) -> str:
return self._property_project
@project.setter
def project(self, value: str) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
| ValidateDeleteRequest |
python | django__django | tests/sessions_tests/tests.py | {
"start": 37005,
"end": 38664
} | class ____(SessionTestsMixin, SimpleTestCase):
backend = CacheSession
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
def test_default_cache(self):
self.session.save()
self.assertIsNotNone(caches["default"].get(self.session.cache_key))
@override_settings(
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.dummy.DummyCache",
},
"sessions": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "session",
},
},
SESSION_CACHE_ALIAS="sessions",
)
def test_non_default_cache(self):
# Re-initialize the session backend to make use of overridden settings.
self.session = self.backend()
self.session.save()
self.assertIsNone(caches["default"].get(self.session.cache_key))
self.assertIsNotNone(caches["sessions"].get(self.session.cache_key))
def test_create_and_save(self):
self.session = self.backend()
self.session.create()
self.session.save()
self.assertIsNotNone(caches["default"].get(self.session.cache_key))
async def test_create_and_save_async(self):
self.session = self.backend()
await self.session.acreate()
await self.session.asave()
self.assertIsNotNone(caches["default"].get(await self.session.acache_key()))
| CacheSessionTests |
python | pypa__setuptools | setuptools/command/editable_wheel.py | {
"start": 15765,
"end": 16138
} | class ____(Protocol):
def __call__(
self, wheel: WheelFile, files: list[str], mapping: Mapping[str, str]
) -> object: ...
def __enter__(self) -> Self: ...
def __exit__(
self,
_exc_type: type[BaseException] | None,
_exc_value: BaseException | None,
_traceback: TracebackType | None,
) -> object: ...
| EditableStrategy |
python | walkccc__LeetCode | solutions/2907. Maximum Profitable Triplets With Increasing Prices I/2907.py | {
"start": 0,
"end": 446
} | class ____:
def __init__(self, n: int):
self.vals = [0] * (n + 1)
def maximize(self, i: int, val: int) -> None:
while i < len(self.vals):
self.vals[i] = max(self.vals[i], val)
i += FenwickTree.lowbit(i)
def get(self, i: int) -> int:
res = 0
while i > 0:
res = max(res, self.vals[i])
i -= FenwickTree.lowbit(i)
return res
@staticmethod
def lowbit(i: int) -> int:
return i & -i
| FenwickTree |
python | Textualize__textual | tests/test_binding_inheritance.py | {
"start": 18315,
"end": 18888
} | class ____(Screen):
"""A screen that binds keys, including movement keys."""
BINDINGS = AppKeyRecorder.make_bindings("screen_")
async def action_screen_record(self, key: str) -> None:
# Sneaky forward reference. Just for the purposes of testing.
await self.app.action_record(f"screenly_{key}")
def compose(self) -> ComposeResult:
yield FocusableWidgetWithEmptyBindingsNoInherit()
def on_mount(self) -> None:
self.query_one(FocusableWidgetWithEmptyBindingsNoInherit).focus()
| ScreenWithMovementBindingsNoInheritEmptyChild |
python | huggingface__transformers | src/transformers/models/patchtst/modeling_patchtst.py | {
"start": 62941,
"end": 67770
} | class ____(nn.Module):
def __init__(self, config: PatchTSTConfig, num_patches: int, distribution_output=None):
r"""
num_patches (`int`):
The number of patches in the input sequence.
distribution_output (`DistributionOutput`, *optional*):
The distribution output layer for probabilistic forecasting. If None, a linear output layer is used.
"""
super().__init__()
self.share_projection = config.share_projection
self.num_input_channels = config.num_input_channels
self.use_cls_token = config.use_cls_token
self.pooling_type = config.pooling_type
if self.pooling_type or self.use_cls_token:
head_dim = config.d_model
else:
head_dim = config.d_model * num_patches
if not self.share_projection:
# if each channel has its own head
self.projections = nn.ModuleList()
self.dropouts = nn.ModuleList()
self.flattens = nn.ModuleList()
for i in range(self.num_input_channels):
self.flattens.append(nn.Flatten(start_dim=2))
if distribution_output is None:
# use linear head
self.projections.append(nn.Linear(head_dim, config.prediction_length))
else:
# use distribution head
self.projections.append(distribution_output.get_parameter_projection(head_dim))
self.dropouts.append(nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity())
else:
# all the channels share the same head
self.flatten = nn.Flatten(start_dim=2)
if distribution_output is None:
# use linear head
self.projection = nn.Linear(head_dim, config.prediction_length)
else:
# use distribution head
self.projection = distribution_output.get_parameter_projection(head_dim)
self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()
def forward(self, embedding: torch.Tensor):
"""
Parameters:
embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
Embedding from the model
Returns:
`torch.Tensor` of shape `(bs, forecast_len, num_channels)`
"""
if self.use_cls_token:
# pooled_embedding: [bs x num_channels x d_model]
pooled_embedding = embedding[:, :, 0, :]
else:
if self.pooling_type == "mean":
# pooled_embedding: [bs x num_channels x d_model]
pooled_embedding = embedding.mean(dim=2)
elif self.pooling_type == "max":
# pooled_embedding: [bs x num_channels x d_model]
pooled_embedding = embedding.max(dim=2).values
else:
# pooled_embedding: [bs x num_channels x num_patches x d_model]
pooled_embedding = embedding
if not self.share_projection:
output = []
for i in range(self.num_input_channels):
# pooled_embedding: [bs x (d_model * num_patches)] or [bs x d_model)]
pooled_embedding = self.flattens[i](pooled_embedding[:, i, :])
pooled_embedding = self.dropouts[i](pooled_embedding)
# pooled_embedding: [bs x forecast_len]
# or tuple ([bs x forecast_len], [bs x forecast_len]) if using distribution head
pooled_embedding = self.projections[i](pooled_embedding)
output.append(pooled_embedding)
# output: [bs x num_channels x forecast_len]
output = torch.stack(output, dim=1)
else:
# pooled_embedding: [bs x num_channels x (d_model * num_patches)] or [bs x num_channels x d_model)]
pooled_embedding = self.flatten(pooled_embedding)
pooled_embedding = self.dropout(pooled_embedding)
# output: [bs x num_channels x forecast_len] or
# tuple ([bs x num_channels x forecast_len], [bs x num_channels x forecast_len]) if using distribution head
output = self.projection(pooled_embedding)
if isinstance(output, tuple):
# output: ([bs x forecast_len x num_channels], [bs x forecast_len x num_channels])
output = tuple(z.transpose(2, 1) for z in output)
else:
output = output.transpose(2, 1) # [bs x forecast_len x num_channels]
return output
@auto_docstring(
custom_intro="""
The PatchTST for prediction model.
"""
)
| PatchTSTPredictionHead |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 102560,
"end": 102857
} | class ____(sgqlc.types.Enum):
"""Properties by which GitHub Sponsors activity connections can be
ordered.
Enumeration Choices:
* `TIMESTAMP`: Order activities by when they happened.
"""
__schema__ = github_schema
__choices__ = ("TIMESTAMP",)
| SponsorsActivityOrderField |
python | django__django | django/template/defaulttags.py | {
"start": 11203,
"end": 11860
} | class ____(Node):
def __init__(self, count, method, common):
self.count = count
self.method = method
self.common = common
def render(self, context):
try:
count = int(self.count.resolve(context))
except (ValueError, TypeError):
count = 1
if self.method == "w":
return words(count, common=self.common)
else:
paras = paragraphs(count, common=self.common)
if self.method == "p":
paras = ["<p>%s</p>" % p for p in paras]
return "\n\n".join(paras)
GroupedResult = namedtuple("GroupedResult", ["grouper", "list"])
| LoremNode |
python | pypa__warehouse | warehouse/manage/views/oidc_publishers.py | {
"start": 1329,
"end": 29746
} | class ____:
def __init__(self, project, request):
self.request = request
self.project = project
self.metrics = self.request.find_service(IMetricsService, context=None)
self.github_publisher_form = GitHubPublisherForm(
self.request.POST,
api_token=self.request.registry.settings.get("github.token"),
)
_gl_issuers = GitLabPublisher.get_available_issuer_urls(
organization=project.organization
)
self.gitlab_publisher_form = GitLabPublisherForm(
self.request.POST,
issuer_url_choices=_gl_issuers,
)
self.google_publisher_form = GooglePublisherForm(self.request.POST)
self.activestate_publisher_form = ActiveStatePublisherForm(self.request.POST)
self.prefilled_provider = None
@property
def _ratelimiters(self):
return {
"user.oidc": self.request.find_service(
IRateLimiter, name="user_oidc.publisher.register"
),
"ip.oidc": self.request.find_service(
IRateLimiter, name="ip_oidc.publisher.register"
),
}
def _hit_ratelimits(self):
self._ratelimiters["user.oidc"].hit(self.request.user.id)
self._ratelimiters["ip.oidc"].hit(self.request.remote_addr)
def _check_ratelimits(self):
if not self._ratelimiters["user.oidc"].test(self.request.user.id):
raise TooManyOIDCRegistrations(
resets_in=self._ratelimiters["user.oidc"].resets_in(
self.request.user.id
)
)
if not self._ratelimiters["ip.oidc"].test(self.request.remote_addr):
raise TooManyOIDCRegistrations(
resets_in=self._ratelimiters["ip.oidc"].resets_in(
self.request.remote_addr
)
)
@property
def default_response(self):
return {
"project": self.project,
"github_publisher_form": self.github_publisher_form,
"gitlab_publisher_form": self.gitlab_publisher_form,
"google_publisher_form": self.google_publisher_form,
"activestate_publisher_form": self.activestate_publisher_form,
"disabled": {
"GitHub": self.request.flags.disallow_oidc(
AdminFlagValue.DISALLOW_GITHUB_OIDC
),
"GitLab": self.request.flags.disallow_oidc(
AdminFlagValue.DISALLOW_GITLAB_OIDC
),
"Google": self.request.flags.disallow_oidc(
AdminFlagValue.DISALLOW_GOOGLE_OIDC
),
"ActiveState": self.request.flags.disallow_oidc(
AdminFlagValue.DISALLOW_ACTIVESTATE_OIDC
),
},
"prefilled_provider": self.prefilled_provider,
}
@view_config(request_method="GET")
def manage_project_oidc_publishers(self):
if self.request.flags.disallow_oidc():
self.request.session.flash(
self.request._(
"Trusted publishing is temporarily disabled. "
"See https://pypi.org/help#admin-intervention for details."
),
queue="error",
)
return self.default_response
@view_config(request_method="GET", request_param="provider")
def manage_project_oidc_publishers_prefill(self):
provider_mapping = {
"github": self.github_publisher_form,
"gitlab": self.gitlab_publisher_form,
"google": self.google_publisher_form,
"activestate": self.activestate_publisher_form,
}
params = self.request.params
provider = params.get("provider")
provider = provider.lower() if provider else None
if provider in provider_mapping:
# The forms can be pre-filled by passing URL parameters. For example,
# https://(...)//publishing?provider=github&owner=octo&repository=repo
# will pre-fill the GitHub repository fields with `octo/repo`.
provider_mapping[provider].process(params)
self.prefilled_provider = provider
return self.manage_project_oidc_publishers()
@view_config(
request_method="POST",
request_param=ConstrainEnvironmentForm.__params__,
)
def constrain_environment(self):
if self.request.flags.disallow_oidc():
self.request.session.flash(
(
"Trusted publishing is temporarily disabled. "
"See https://pypi.org/help#admin-intervention for details."
),
queue="error",
)
return self.default_response
self.metrics.increment("warehouse.oidc.constrain_publisher_environment.attempt")
form = ConstrainEnvironmentForm(self.request.POST)
if not form.validate():
self.request.session.flash(
self.request._("The trusted publisher could not be constrained"),
queue="error",
)
return self.default_response
publisher = self.request.db.get(
OIDCPublisher, form.constrained_publisher_id.data
)
if publisher is None or publisher not in self.project.oidc_publishers:
self.request.session.flash(
"Invalid publisher for project",
queue="error",
)
return self.default_response
# First we add the new (constrained) trusted publisher
if isinstance(publisher, GitHubPublisher):
constrained_publisher = GitHubPublisher(
repository_name=publisher.repository_name,
repository_owner=publisher.repository_owner,
repository_owner_id=publisher.repository_owner_id,
workflow_filename=publisher.workflow_filename,
environment=form.constrained_environment_name.data,
)
elif isinstance(publisher, GitLabPublisher):
constrained_publisher = GitLabPublisher(
namespace=publisher.namespace,
project=publisher.project,
workflow_filepath=publisher.workflow_filepath,
environment=form.constrained_environment_name.data,
issuer_url=publisher.issuer_url,
)
else:
self.request.session.flash(
"Can only constrain the environment for GitHub and GitLab publishers",
queue="error",
)
return self.default_response
# The user might have already manually created the new constrained publisher
# before clicking the magic link to constrain the existing publisher.
if constrained_publisher.exists(self.request.db):
self.request.session.flash(
self.request._(
f"{publisher} is already registered with {self.project.name}"
),
queue="error",
)
return self.default_response
if publisher.environment != "":
self.request.session.flash(
"Can only constrain the environment for publishers without an "
"environment configured",
queue="error",
)
return self.default_response
self.request.db.add(constrained_publisher)
self.request.db.flush() # ensure constrained_publisher.id is available
self.project.oidc_publishers.append(constrained_publisher)
self.project.record_event(
tag=EventTag.Project.OIDCPublisherAdded,
request=self.request,
additional={
"publisher": constrained_publisher.publisher_name,
"id": str(constrained_publisher.id),
"specifier": str(constrained_publisher),
"url": constrained_publisher.publisher_url(),
"submitted_by": self.request.user.username,
"reified_from_pending_publisher": False,
"constrained_from_existing_publisher": True,
},
)
# Then, we remove the old trusted publisher from the project
# and, if there are no projects left associated with the publisher,
# we delete it entirely.
self.project.oidc_publishers.remove(publisher)
if len(publisher.projects) == 0:
self.request.db.delete(publisher)
self.project.record_event(
tag=EventTag.Project.OIDCPublisherRemoved,
request=self.request,
additional={
"publisher": publisher.publisher_name,
"id": str(publisher.id),
"specifier": str(publisher),
"url": publisher.publisher_url(),
"submitted_by": self.request.user.username,
},
)
self.request.session.flash(
self.request._(
f"Trusted publisher for project {self.project.name!r} has been "
f"constrained to environment {constrained_publisher.environment!r}"
),
queue="success",
)
return HTTPSeeOther(self.request.path)
@view_config(
request_method="POST",
request_param=GitHubPublisherForm.__params__,
)
def add_github_oidc_publisher(self):
if self.request.flags.disallow_oidc(AdminFlagValue.DISALLOW_GITHUB_OIDC):
self.request.session.flash(
self.request._(
"GitHub-based trusted publishing is temporarily disabled. "
"See https://pypi.org/help#admin-intervention for details."
),
queue="error",
)
return self.default_response
self.metrics.increment(
"warehouse.oidc.add_publisher.attempt", tags=["publisher:GitHub"]
)
try:
self._check_ratelimits()
except TooManyOIDCRegistrations as exc:
self.metrics.increment(
"warehouse.oidc.add_publisher.ratelimited", tags=["publisher:GitHub"]
)
return HTTPTooManyRequests(
self.request._(
"There have been too many attempted trusted publisher "
"registrations. Try again later."
),
retry_after=exc.resets_in.total_seconds(),
)
self._hit_ratelimits()
response = self.default_response
form = response["github_publisher_form"]
if not form.validate():
self.request.session.flash(
self.request._("The trusted publisher could not be registered"),
queue="error",
)
return response
# GitHub OIDC publishers are unique on the tuple of
# (repository_name, repository_owner, workflow_filename, environment),
# so we check for an already registered one before creating.
publisher = (
self.request.db.query(GitHubPublisher)
.filter(
GitHubPublisher.repository_name == form.repository.data,
GitHubPublisher.repository_owner == form.normalized_owner,
GitHubPublisher.workflow_filename == form.workflow_filename.data,
GitHubPublisher.environment == form.normalized_environment,
)
.one_or_none()
)
if publisher is None:
publisher = GitHubPublisher(
repository_name=form.repository.data,
repository_owner=form.normalized_owner,
repository_owner_id=form.owner_id,
workflow_filename=form.workflow_filename.data,
environment=form.normalized_environment,
)
self.request.db.add(publisher)
# Each project has a unique set of OIDC publishers; the same
# publisher can't be registered to the project more than once.
if publisher in self.project.oidc_publishers:
self.request.session.flash(
self.request._(
f"{publisher} is already registered with {self.project.name}"
),
queue="error",
)
return response
for user in self.project.users:
send_trusted_publisher_added_email(
self.request,
user,
project_name=self.project.name,
publisher=publisher,
)
self.project.oidc_publishers.append(publisher)
self.project.record_event(
tag=EventTag.Project.OIDCPublisherAdded,
request=self.request,
additional={
"publisher": publisher.publisher_name,
"id": str(publisher.id),
"specifier": str(publisher),
"url": publisher.publisher_url(),
"submitted_by": self.request.user.username,
"reified_from_pending_publisher": False,
"constrained_from_existing_publisher": False,
},
)
self.request.session.flash(
f"Added {publisher} in {publisher.publisher_url()} to {self.project.name}",
queue="success",
)
self.metrics.increment(
"warehouse.oidc.add_publisher.ok", tags=["publisher:GitHub"]
)
return HTTPSeeOther(self.request.path)
@view_config(
request_method="POST",
request_param=GitLabPublisherForm.__params__,
)
def add_gitlab_oidc_publisher(self):
if self.request.flags.disallow_oidc(AdminFlagValue.DISALLOW_GITLAB_OIDC):
self.request.session.flash(
self.request._(
"GitLab-based trusted publishing is temporarily disabled. "
"See https://pypi.org/help#admin-intervention for details."
),
queue="error",
)
return self.default_response
self.metrics.increment(
"warehouse.oidc.add_publisher.attempt", tags=["publisher:GitLab"]
)
try:
self._check_ratelimits()
except TooManyOIDCRegistrations as exc:
self.metrics.increment(
"warehouse.oidc.add_publisher.ratelimited", tags=["publisher:GitLab"]
)
return HTTPTooManyRequests(
self.request._(
"There have been too many attempted trusted publisher "
"registrations. Try again later."
),
retry_after=exc.resets_in.total_seconds(),
)
self._hit_ratelimits()
response = self.default_response
form = response["gitlab_publisher_form"]
if not form.validate():
self.request.session.flash(
self.request._("The trusted publisher could not be registered"),
queue="error",
)
return response
# GitLab OIDC publishers are unique on the tuple of
# (namespace, project, workflow_filepath, environment),
# so we check for an already registered one before creating.
publisher = (
self.request.db.query(GitLabPublisher)
.filter(
GitLabPublisher.namespace == form.namespace.data,
GitLabPublisher.project == form.project.data,
GitLabPublisher.workflow_filepath == form.workflow_filepath.data,
GitLabPublisher.environment == form.normalized_environment,
GitLabPublisher.issuer_url == form.issuer_url.data,
)
.one_or_none()
)
if publisher is None:
publisher = GitLabPublisher(
namespace=form.namespace.data,
project=form.project.data,
workflow_filepath=form.workflow_filepath.data,
environment=form.normalized_environment,
issuer_url=form.issuer_url.data,
)
self.request.db.add(publisher)
# Each project has a unique set of OIDC publishers; the same
# publisher can't be registered to the project more than once.
if publisher in self.project.oidc_publishers:
self.request.session.flash(
self.request._(
f"{publisher} is already registered with {self.project.name}"
),
queue="error",
)
return response
for user in self.project.users:
send_trusted_publisher_added_email(
self.request,
user,
project_name=self.project.name,
publisher=publisher,
)
self.project.oidc_publishers.append(publisher)
self.project.record_event(
tag=EventTag.Project.OIDCPublisherAdded,
request=self.request,
additional={
"publisher": publisher.publisher_name,
"id": str(publisher.id),
"specifier": str(publisher),
"url": publisher.publisher_url(),
"submitted_by": self.request.user.username,
"reified_from_pending_publisher": False,
"constrained_from_existing_publisher": False,
},
)
self.request.session.flash(
f"Added {publisher} in {publisher.publisher_url()} to {self.project.name}",
queue="success",
)
self.metrics.increment(
"warehouse.oidc.add_publisher.ok", tags=["publisher:GitLab"]
)
return HTTPSeeOther(self.request.path)
@view_config(
request_method="POST",
request_param=GooglePublisherForm.__params__,
)
def add_google_oidc_publisher(self):
if self.request.flags.disallow_oidc(AdminFlagValue.DISALLOW_GOOGLE_OIDC):
self.request.session.flash(
self.request._(
"Google-based trusted publishing is temporarily disabled. "
"See https://pypi.org/help#admin-intervention for details."
),
queue="error",
)
return self.default_response
self.metrics.increment(
"warehouse.oidc.add_publisher.attempt", tags=["publisher:Google"]
)
try:
self._check_ratelimits()
except TooManyOIDCRegistrations as exc:
self.metrics.increment(
"warehouse.oidc.add_publisher.ratelimited", tags=["publisher:Google"]
)
return HTTPTooManyRequests(
self.request._(
"There have been too many attempted trusted publisher "
"registrations. Try again later."
),
retry_after=exc.resets_in.total_seconds(),
)
self._hit_ratelimits()
response = self.default_response
form = response["google_publisher_form"]
if not form.validate():
self.request.session.flash(
self.request._("The trusted publisher could not be registered"),
queue="error",
)
return response
# Google OIDC publishers are unique on the tuple of (email, sub), so we
# check for an already registered one before creating.
publisher = (
self.request.db.query(GooglePublisher)
.filter(
GooglePublisher.email == form.email.data,
GooglePublisher.sub == form.sub.data,
)
.one_or_none()
)
if publisher is None:
publisher = GooglePublisher(
email=form.email.data,
sub=form.sub.data,
)
self.request.db.add(publisher)
# Each project has a unique set of OIDC publishers; the same
# publisher can't be registered to the project more than once.
if publisher in self.project.oidc_publishers:
self.request.session.flash(
self.request._(
f"{publisher} is already registered with {self.project.name}"
),
queue="error",
)
return response
for user in self.project.users:
send_trusted_publisher_added_email(
self.request,
user,
project_name=self.project.name,
publisher=publisher,
)
self.project.oidc_publishers.append(publisher)
self.project.record_event(
tag=EventTag.Project.OIDCPublisherAdded,
request=self.request,
additional={
"publisher": publisher.publisher_name,
"id": str(publisher.id),
"specifier": str(publisher),
"url": publisher.publisher_url(),
"submitted_by": self.request.user.username,
"reified_from_pending_publisher": False,
"constrained_from_existing_publisher": False,
},
)
self.request.session.flash(
f"Added {publisher} "
+ (f"in {publisher.publisher_url()}" if publisher.publisher_url() else "")
+ f" to {self.project.name}",
queue="success",
)
self.metrics.increment(
"warehouse.oidc.add_publisher.ok", tags=["publisher:Google"]
)
return HTTPSeeOther(self.request.path)
@view_config(
request_method="POST",
request_param=ActiveStatePublisherForm.__params__,
)
def add_activestate_oidc_publisher(self):
if self.request.flags.disallow_oidc(AdminFlagValue.DISALLOW_ACTIVESTATE_OIDC):
self.request.session.flash(
self.request._(
"ActiveState-based trusted publishing is temporarily disabled. "
"See https://pypi.org/help#admin-intervention for details."
),
queue="error",
)
return self.default_response
self.metrics.increment(
"warehouse.oidc.add_publisher.attempt", tags=["publisher:ActiveState"]
)
try:
self._check_ratelimits()
except TooManyOIDCRegistrations as exc:
self.metrics.increment(
"warehouse.oidc.add_publisher.ratelimited",
tags=["publisher:ActiveState"],
)
return HTTPTooManyRequests(
self.request._(
"There have been too many attempted trusted publisher "
"registrations. Try again later."
),
retry_after=exc.resets_in.total_seconds(),
)
self._hit_ratelimits()
response = self.default_response
form = response["activestate_publisher_form"]
if not form.validate():
self.request.session.flash(
self.request._("The trusted publisher could not be registered"),
queue="error",
)
return response
# Check for an already registered publisher before creating.
publisher = (
self.request.db.query(ActiveStatePublisher)
.filter(
ActiveStatePublisher.organization == form.organization.data,
ActiveStatePublisher.activestate_project_name == form.project.data,
ActiveStatePublisher.actor_id == form.actor_id,
)
.one_or_none()
)
if publisher is None:
publisher = ActiveStatePublisher(
organization=form.organization.data,
activestate_project_name=form.project.data,
actor=form.actor.data,
actor_id=form.actor_id,
)
self.request.db.add(publisher)
# Each project has a unique set of OIDC publishers; the same
# publisher can't be registered to the project more than once.
if publisher in self.project.oidc_publishers:
self.request.session.flash(
self.request._(
f"{publisher} is already registered with {self.project.name}"
),
queue="error",
)
return response
for user in self.project.users:
send_trusted_publisher_added_email(
self.request,
user,
project_name=self.project.name,
publisher=publisher,
)
self.project.oidc_publishers.append(publisher)
self.project.record_event(
tag=EventTag.Project.OIDCPublisherAdded,
request=self.request,
additional={
"publisher": publisher.publisher_name,
"id": str(publisher.id),
"specifier": str(publisher),
"url": publisher.publisher_url(),
"submitted_by": self.request.user.username,
"reified_from_pending_publisher": False,
"constrained_from_existing_publisher": False,
},
)
self.request.session.flash(
f"Added {publisher} in {publisher.publisher_url()} to {self.project.name}",
queue="success",
)
self.metrics.increment(
"warehouse.oidc.add_publisher.ok", tags=["publisher:ActiveState"]
)
return HTTPSeeOther(self.request.path)
@view_config(
request_method="POST",
request_param=DeletePublisherForm.__params__,
)
def delete_oidc_publisher(self):
if self.request.flags.disallow_oidc():
self.request.session.flash(
(
"Trusted publishing is temporarily disabled. "
"See https://pypi.org/help#admin-intervention for details."
),
queue="error",
)
return self.default_response
self.metrics.increment("warehouse.oidc.delete_publisher.attempt")
form = DeletePublisherForm(self.request.POST)
if form.validate():
publisher = self.request.db.get(OIDCPublisher, form.publisher_id.data)
# publisher will be `None` here if someone manually futzes with the form.
if publisher is None or publisher not in self.project.oidc_publishers:
self.request.session.flash(
"Invalid publisher for project",
queue="error",
)
return self.default_response
for user in self.project.users:
send_trusted_publisher_removed_email(
self.request,
user,
project_name=self.project.name,
publisher=publisher,
)
self.project.record_event(
tag=EventTag.Project.OIDCPublisherRemoved,
request=self.request,
additional={
"publisher": publisher.publisher_name,
"id": str(publisher.id),
"specifier": str(publisher),
"url": publisher.publisher_url(),
"submitted_by": self.request.user.username,
},
)
# We remove this publisher from the project's list of publishers
# and, if there are no projects left associated with the publisher,
# we delete it entirely.
self.project.oidc_publishers.remove(publisher)
if len(publisher.projects) == 0:
self.request.db.delete(publisher)
self.request.session.flash(
self.request._(
f"Removed trusted publisher for project {self.project.name!r}"
),
queue="success",
)
self.metrics.increment(
"warehouse.oidc.delete_publisher.ok",
tags=[f"publisher:{publisher.publisher_name}"],
)
return HTTPSeeOther(self.request.path)
return self.default_response
| ManageOIDCPublisherViews |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_slots/SLOT000.py | {
"start": 111,
"end": 166
} | class ____(str, Enum): # Ok
__slots__ = ["foo"]
| Fine |
python | PrefectHQ__prefect | src/integrations/prefect-kubernetes/prefect_kubernetes/_logging.py | {
"start": 180,
"end": 1801
} | class ____(JsonFormatter):
"""
Log formatter for kopf objects.
This formatter will filter unserializable fields from the log record,
which the `prefect` JSON formatter is unable to do.
"""
def __init__(
self,
*args: Any,
refkey: Optional[str] = None,
**kwargs: Any,
) -> None:
# Avoid type checking, as the args are not in the parent constructor.
reserved_attrs = kwargs.pop("reserved_attrs", RESERVED_ATTRS)
reserved_attrs = set(reserved_attrs)
reserved_attrs |= {"k8s_skip", "k8s_ref", "settings"}
kwargs |= dict(reserved_attrs=reserved_attrs)
kwargs.setdefault("timestamp", True)
super().__init__(*args, **kwargs)
self._refkey: str = refkey or DEFAULT_JSON_REFKEY
def add_fields(
self,
log_record: dict[str, object],
record: logging.LogRecord,
message_dict: dict[str, object],
) -> None:
super().add_fields(log_record, record, message_dict)
if self._refkey and hasattr(record, "k8s_ref"):
ref = getattr(record, "k8s_ref")
log_record[self._refkey] = ref
if "severity" not in log_record:
log_record["severity"] = (
"debug"
if record.levelno <= logging.DEBUG
else "info"
if record.levelno <= logging.INFO
else "warn"
if record.levelno <= logging.WARNING
else "error"
if record.levelno <= logging.ERROR
else "fatal"
)
| KopfObjectJsonFormatter |
python | getsentry__sentry | src/sentry/migrations/0941_create_temporary_verification_code_table.py | {
"start": 358,
"end": 2899
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0940_auditlog_json_field"),
]
operations = [
migrations.CreateModel(
name="UserMergeVerificationCode",
fields=[
(
"id",
sentry.db.models.fields.bounded.BoundedBigAutoField(
primary_key=True, serialize=False
),
),
("date_updated", models.DateTimeField(auto_now=True)),
("date_added", models.DateTimeField(auto_now_add=True)),
(
"token",
models.CharField(
default=sentry.users.models.user_merge_verification_code.generate_token,
max_length=64,
),
),
(
"expires_at",
models.DateTimeField(
default=sentry.users.models.user_merge_verification_code.generate_expires_at
),
),
(
"user",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
unique=True,
),
),
],
options={
"db_table": "sentry_user_verification_codes_temp",
},
),
]
| Migration |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 123944,
"end": 124455
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of AbortQueuedMigrations"""
__schema__ = github_schema
__field_names__ = ("owner_id", "client_mutation_id")
owner_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="ownerId")
"""The ID of the organization that is running the migrations."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| AbortQueuedMigrationsInput |
python | python-openxml__python-docx | src/docx/image/bmp.py | {
"start": 119,
"end": 1347
} | class ____(BaseImageHeader):
"""Image header parser for BMP images."""
@classmethod
def from_stream(cls, stream):
"""Return |Bmp| instance having header properties parsed from the BMP image in
`stream`."""
stream_rdr = StreamReader(stream, LITTLE_ENDIAN)
px_width = stream_rdr.read_long(0x12)
px_height = stream_rdr.read_long(0x16)
horz_px_per_meter = stream_rdr.read_long(0x26)
vert_px_per_meter = stream_rdr.read_long(0x2A)
horz_dpi = cls._dpi(horz_px_per_meter)
vert_dpi = cls._dpi(vert_px_per_meter)
return cls(px_width, px_height, horz_dpi, vert_dpi)
@property
def content_type(self):
"""MIME content type for this image, unconditionally `image/bmp` for BMP
images."""
return MIME_TYPE.BMP
@property
def default_ext(self):
"""Default filename extension, always 'bmp' for BMP images."""
return "bmp"
@staticmethod
def _dpi(px_per_meter):
"""Return the integer pixels per inch from `px_per_meter`, defaulting to 96 if
`px_per_meter` is zero."""
if px_per_meter == 0:
return 96
return int(round(px_per_meter * 0.0254))
| Bmp |
python | walkccc__LeetCode | solutions/3091. Apply Operations to Make Sum of Array Greater Than or Equal to k/3091-2.py | {
"start": 0,
"end": 564
} | class ____:
def minOperations(self, k: int) -> int:
# The required operations are
# 1. Increase `1` to `x`
# 2. Duplicate `x`, `y` times, to `sum` s.t. x * (1 + y) >= k.
# The number of operations used would be (x - 1) + y. Equivalently, the
# problem can be rephrased as finding min(x - 1 + y) s.t. x * (1 + y) >= k.
# Optimally, `x` should equal to `1 + y`, implying that x^2 >= k, and
# hence, x >= sqrt(k) and y = ceil(k / x) - 1.
x = math.isqrt(k)
y = (k - 1) // x + 1 - 1 # ceil(k / x) - 1
return x - 1 + y
| Solution |
python | django__django | tests/queries/tests.py | {
"start": 141877,
"end": 142807
} | class ____(SimpleTestCase):
def test_invalid_order_by(self):
msg = "Cannot resolve keyword '*' into field. Choices are: created, id, name"
with self.assertRaisesMessage(FieldError, msg):
Article.objects.order_by("*")
def test_invalid_order_by_raw_column_alias(self):
msg = (
"Cannot resolve keyword 'queries_author.name' into field. Choices "
"are: cover, created, creator, creator_id, id, modified, name, "
"note, note_id, tags"
)
with self.assertRaisesMessage(FieldError, msg):
Item.objects.values("creator__name").order_by("queries_author.name")
def test_invalid_queryset_model(self):
msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".'
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.filter(extra=Article.objects.all()))
| QuerySetExceptionTests |
python | keras-team__keras | keras/src/layers/convolutional/depthwise_conv_test.py | {
"start": 5525,
"end": 10920
} | class ____(testing.TestCase):
@parameterized.parameters(
{
"depth_multiplier": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"input_shape": (3, 5, 4),
"output_shape": (3, 4, 20),
},
{
"depth_multiplier": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2,),
"input_shape": (3, 4, 4),
"output_shape": (3, 4, 24),
},
{
"depth_multiplier": 6,
"kernel_size": 2,
"strides": (2,),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"input_shape": (3, 5, 4),
"output_shape": (3, 2, 24),
},
)
@pytest.mark.requires_trainable_backend
def test_depthwise_conv1d_basic(
self,
depth_multiplier,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
input_shape,
output_shape,
):
self.run_layer_test(
layers.DepthwiseConv1D,
init_kwargs={
"depth_multiplier": depth_multiplier,
"kernel_size": kernel_size,
"strides": strides,
"padding": padding,
"data_format": data_format,
"dilation_rate": dilation_rate,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
{
"depth_multiplier": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"input_shape": (3, 5, 5, 4),
"output_shape": (3, 4, 4, 20),
},
{
"depth_multiplier": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 2),
"input_shape": (3, 4, 4, 4),
"output_shape": (3, 4, 4, 24),
},
{
"depth_multiplier": 6,
"kernel_size": (2, 2),
"strides": (2, 2),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": (1, 1),
"input_shape": (3, 5, 5, 4),
"output_shape": (3, 2, 2, 24),
},
)
@pytest.mark.requires_trainable_backend
def test_depthwise_conv2d_basic(
self,
depth_multiplier,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
input_shape,
output_shape,
):
self.run_layer_test(
layers.DepthwiseConv2D,
init_kwargs={
"depth_multiplier": depth_multiplier,
"kernel_size": kernel_size,
"strides": strides,
"padding": padding,
"data_format": data_format,
"dilation_rate": dilation_rate,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
def test_bad_init_args(self):
# `depth_multiplier` is not positive.
with self.assertRaisesRegex(
ValueError,
"Invalid value for argument `depth_multiplier`. "
"Expected a strictly positive value. Received "
"depth_multiplier=0.",
):
layers.DepthwiseConv1D(depth_multiplier=0, kernel_size=1)
# `kernel_size` has 0.
with self.assertRaisesRegex(
ValueError,
r"The `kernel_size` argument must be a tuple of 2 "
r"integers. Received kernel_size=\(1, 0\), including values "
r"\{0\} that do not satisfy `value > 0`",
):
layers.DepthwiseConv2D(depth_multiplier=2, kernel_size=(1, 0))
# `strides` has 0.
with self.assertRaisesRegex(
ValueError,
r"The `strides` argument must be a tuple of \d+ "
r"integers. Received strides=\(1, 0\), including values \{0\} "
r"that do not satisfy `value > 0`",
):
layers.DepthwiseConv2D(
depth_multiplier=2, kernel_size=(2, 2), strides=(1, 0)
)
# `dilation_rate > 1` while `strides > 1`.
with self.assertRaisesRegex(
ValueError,
r"`strides > 1` not supported in conjunction with "
r"`dilation_rate > 1`. Received: strides=\(2, 2\) and "
r"dilation_rate=\(2, 1\)",
):
layers.DepthwiseConv2D(
depth_multiplier=2,
kernel_size=(2, 2),
strides=2,
dilation_rate=(2, 1),
)
| DepthwiseConvBasicTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 543956,
"end": 544435
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of CreateSponsorship"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "sponsorship")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
sponsorship = sgqlc.types.Field("Sponsorship", graphql_name="sponsorship")
"""The sponsorship that was started."""
| CreateSponsorshipPayload |
python | tensorflow__tensorflow | tensorflow/python/distribute/test_util_test.py | {
"start": 2606,
"end": 3124
} | class ____(test.TestCase):
def testLogicalCPUs(self):
# TODO(b/273484131): Causing segmentation fault.
if (test.is_gpu_available() and sys.version_info.major == 3 and
sys.version_info.minor == 8):
self.skipTest('Causing segmentation fault in Python 3.8 / GPU')
context._reset_context()
test_util.set_logical_devices_to_at_least('CPU', 3)
cpu_device = config.list_physical_devices('CPU')[0]
self.assertLen(config.get_logical_device_configuration(cpu_device), 3)
| LogicalDevicesTest |
python | pandas-dev__pandas | pandas/tests/frame/test_query_eval.py | {
"start": 47405,
"end": 62080
} | class ____:
@pytest.fixture
def df(self):
"""
Yields a dataframe with strings that may or may not need escaping
by backticks. The last two columns cannot be escaped by backticks
and should raise a ValueError.
"""
return DataFrame(
{
"A": [1, 2, 3],
"B B": [3, 2, 1],
"C C": [4, 5, 6],
"C C": [7, 4, 3],
"C_C": [8, 9, 10],
"D_D D": [11, 1, 101],
"E.E": [6, 3, 5],
"F-F": [8, 1, 10],
"1e1": [2, 4, 8],
"def": [10, 11, 2],
"A (x)": [4, 1, 3],
"B(x)": [1, 1, 5],
"B (x)": [2, 7, 4],
" &^ :!€$?(} > <++*'' ": [2, 5, 6],
"": [10, 11, 1],
" A": [4, 7, 9],
" ": [1, 2, 1],
"it's": [6, 3, 1],
"that's": [9, 1, 8],
"☺": [8, 7, 6],
"xy (z)": [1, 2, 3], # noqa: RUF001
"xy (z\\uff09": [4, 5, 6], # noqa: RUF001
"foo#bar": [2, 4, 5],
1: [5, 7, 9],
}
)
def test_single_backtick_variable_query(self, df):
res = df.query("1 < `B B`")
expect = df[1 < df["B B"]]
tm.assert_frame_equal(res, expect)
def test_two_backtick_variables_query(self, df):
res = df.query("1 < `B B` and 4 < `C C`")
expect = df[(1 < df["B B"]) & (4 < df["C C"])]
tm.assert_frame_equal(res, expect)
def test_single_backtick_variable_expr(self, df):
res = df.eval("A + `B B`")
expect = df["A"] + df["B B"]
tm.assert_series_equal(res, expect)
def test_two_backtick_variables_expr(self, df):
res = df.eval("`B B` + `C C`")
expect = df["B B"] + df["C C"]
tm.assert_series_equal(res, expect)
def test_already_underscore_variable(self, df):
res = df.eval("`C_C` + A")
expect = df["C_C"] + df["A"]
tm.assert_series_equal(res, expect)
def test_same_name_but_underscores(self, df):
res = df.eval("C_C + `C C`")
expect = df["C_C"] + df["C C"]
tm.assert_series_equal(res, expect)
def test_mixed_underscores_and_spaces(self, df):
res = df.eval("A + `D_D D`")
expect = df["A"] + df["D_D D"]
tm.assert_series_equal(res, expect)
def test_backtick_quote_name_with_no_spaces(self, df):
res = df.eval("A + `C_C`")
expect = df["A"] + df["C_C"]
tm.assert_series_equal(res, expect)
def test_special_characters(self, df):
res = df.eval("`E.E` + `F-F` - A")
expect = df["E.E"] + df["F-F"] - df["A"]
tm.assert_series_equal(res, expect)
def test_start_with_digit(self, df):
res = df.eval("A + `1e1`")
expect = df["A"] + df["1e1"]
tm.assert_series_equal(res, expect)
def test_keyword(self, df):
res = df.eval("A + `def`")
expect = df["A"] + df["def"]
tm.assert_series_equal(res, expect)
def test_unneeded_quoting(self, df):
res = df.query("`A` > 2")
expect = df[df["A"] > 2]
tm.assert_frame_equal(res, expect)
def test_parenthesis(self, df):
res = df.query("`A (x)` > 2")
expect = df[df["A (x)"] > 2]
tm.assert_frame_equal(res, expect)
def test_empty_string(self, df):
res = df.query("`` > 5")
expect = df[df[""] > 5]
tm.assert_frame_equal(res, expect)
def test_multiple_spaces(self, df):
res = df.query("`C C` > 5")
expect = df[df["C C"] > 5]
tm.assert_frame_equal(res, expect)
def test_start_with_spaces(self, df):
res = df.eval("` A` + ` `")
expect = df[" A"] + df[" "]
tm.assert_series_equal(res, expect)
def test_ints(self, df):
res = df.query("`1` == 7")
expect = df[df[1] == 7]
tm.assert_frame_equal(res, expect)
def test_lots_of_operators_string(self, df):
res = df.query("` &^ :!€$?(} > <++*'' ` > 4")
expect = df[df[" &^ :!€$?(} > <++*'' "] > 4]
tm.assert_frame_equal(res, expect)
def test_missing_attribute(self, df):
message = "module 'pandas' has no attribute 'thing'"
with pytest.raises(AttributeError, match=message):
df.eval("@pd.thing")
def test_quote(self, df):
res = df.query("`it's` > `that's`")
expect = df[df["it's"] > df["that's"]]
tm.assert_frame_equal(res, expect)
def test_character_outside_range_smiley(self, df):
res = df.query("`☺` > 4")
expect = df[df["☺"] > 4]
tm.assert_frame_equal(res, expect)
def test_character_outside_range_2_byte_parens(self, df):
# GH 49633
res = df.query("`xy (z)` == 2") # noqa: RUF001
expect = df[df["xy (z)"] == 2] # noqa: RUF001
tm.assert_frame_equal(res, expect)
def test_character_outside_range_and_actual_backslash(self, df):
# GH 49633
res = df.query("`xy (z\\uff09` == 2") # noqa: RUF001
expect = df[df["xy \uff08z\\uff09"] == 2]
tm.assert_frame_equal(res, expect)
def test_hashtag(self, df):
res = df.query("`foo#bar` > 4")
expect = df[df["foo#bar"] > 4]
tm.assert_frame_equal(res, expect)
def test_expr_with_column_name_with_hashtag_character(self):
# GH 59285
df = DataFrame((1, 2, 3), columns=["a#"])
result = df.query("`a#` < 2")
expected = df[df["a#"] < 2]
tm.assert_frame_equal(result, expected)
def test_expr_with_comment(self):
# GH 59285
df = DataFrame((1, 2, 3), columns=["a#"])
result = df.query("`a#` < 2 # This is a comment")
expected = df[df["a#"] < 2]
tm.assert_frame_equal(result, expected)
def test_expr_with_column_name_with_backtick_and_hash(self):
# GH 59285
df = DataFrame((1, 2, 3), columns=["a`#b"])
result = df.query("`a``#b` < 2")
expected = df[df["a`#b"] < 2]
tm.assert_frame_equal(result, expected)
def test_expr_with_column_name_with_backtick(self):
# GH 59285
df = DataFrame({"a`b": (1, 2, 3), "ab": (4, 5, 6)})
result = df.query("`a``b` < 2")
# Note: Formatting checks may wrongly consider the above ``inline code``.
expected = df[df["a`b"] < 2]
tm.assert_frame_equal(result, expected)
def test_expr_with_string_with_backticks(self):
# GH 59285
df = DataFrame(("`", "`````", "``````````"), columns=["#backticks"])
result = df.query("'```' < `#backticks`")
expected = df["```" < df["#backticks"]]
tm.assert_frame_equal(result, expected)
def test_expr_with_string_with_backticked_substring_same_as_column_name(self):
# GH 59285
df = DataFrame(("`", "`````", "``````````"), columns=["#backticks"])
result = df.query("'`#backticks`' < `#backticks`")
expected = df["`#backticks`" < df["#backticks"]]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"col1,col2,expr",
[
("it's", "that's", "`it's` < `that's`"),
('it"s', 'that"s', '`it"s` < `that"s`'),
("it's", 'that\'s "nice"', "`it's` < `that's \"nice\"`"),
("it's", "that's #cool", "`it's` < `that's #cool` # This is a comment"),
],
)
def test_expr_with_column_names_with_special_characters(self, col1, col2, expr):
# GH 59285
df = DataFrame(
[
{col1: 1, col2: 2},
{col1: 3, col2: 4},
{col1: -1, col2: -2},
{col1: -3, col2: -4},
]
)
result = df.query(expr)
expected = df[df[col1] < df[col2]]
tm.assert_frame_equal(result, expected)
def test_expr_with_no_backticks(self):
# GH 59285
df = DataFrame(("aaa", "vvv", "zzz"), columns=["column_name"])
result = df.query("'value' < column_name")
expected = df["value" < df["column_name"]]
tm.assert_frame_equal(result, expected)
def test_expr_with_no_quotes_and_backtick_is_unmatched(self):
# GH 59285
df = DataFrame((1, 5, 10), columns=["column-name"])
with pytest.raises((SyntaxError, TokenError), match="invalid syntax"):
df.query("5 < `column-name")
def test_expr_with_no_quotes_and_backtick_is_matched(self):
# GH 59285
df = DataFrame((1, 5, 10), columns=["column-name"])
result = df.query("5 < `column-name`")
expected = df[5 < df["column-name"]]
tm.assert_frame_equal(result, expected)
def test_expr_with_backtick_opened_before_quote_and_backtick_is_unmatched(self):
# GH 59285
df = DataFrame((1, 5, 10), columns=["It's"])
with pytest.raises(
(SyntaxError, TokenError), match="unterminated string literal"
):
df.query("5 < `It's")
def test_expr_with_backtick_opened_before_quote_and_backtick_is_matched(self):
# GH 59285
df = DataFrame((1, 5, 10), columns=["It's"])
result = df.query("5 < `It's`")
expected = df[5 < df["It's"]]
tm.assert_frame_equal(result, expected)
def test_expr_with_quote_opened_before_backtick_and_quote_is_unmatched(self):
# GH 59285
df = DataFrame(("aaa", "vvv", "zzz"), columns=["column-name"])
with pytest.raises(
(SyntaxError, TokenError), match="unterminated string literal"
):
df.query("`column-name` < 'It`s that\\'s \"quote\" #hash")
def test_expr_with_quote_opened_before_backtick_and_quote_is_matched_at_end(self):
# GH 59285
df = DataFrame(("aaa", "vvv", "zzz"), columns=["column-name"])
result = df.query("`column-name` < 'It`s that\\'s \"quote\" #hash'")
expected = df[df["column-name"] < 'It`s that\'s "quote" #hash']
tm.assert_frame_equal(result, expected)
def test_expr_with_quote_opened_before_backtick_and_quote_is_matched_in_mid(self):
# GH 59285
df = DataFrame(("aaa", "vvv", "zzz"), columns=["column-name"])
result = df.query("'It`s that\\'s \"quote\" #hash' < `column-name`")
expected = df['It`s that\'s "quote" #hash' < df["column-name"]]
tm.assert_frame_equal(result, expected)
def test_call_non_named_expression(self, df):
"""
Only attributes and variables ('named functions') can be called.
.__call__() is not an allowed attribute because that would allow
calling anything.
https://github.com/pandas-dev/pandas/pull/32460
"""
def func(*_):
return 1
funcs = [func] # noqa: F841
df.eval("@func()")
with pytest.raises(TypeError, match="Only named functions are supported"):
df.eval("@funcs[0]()")
with pytest.raises(TypeError, match="Only named functions are supported"):
df.eval("@funcs[0].__call__()")
def test_ea_dtypes(self, any_numeric_ea_and_arrow_dtype):
# GH#29618
df = DataFrame(
[[1, 2], [3, 4]], columns=["a", "b"], dtype=any_numeric_ea_and_arrow_dtype
)
warning = RuntimeWarning if NUMEXPR_INSTALLED else None
with tm.assert_produces_warning(warning):
result = df.eval("c = b - a")
expected = DataFrame(
[[1, 2, 1], [3, 4, 1]],
columns=["a", "b", "c"],
dtype=any_numeric_ea_and_arrow_dtype,
)
tm.assert_frame_equal(result, expected)
def test_ea_dtypes_and_scalar(self):
# GH#29618
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"], dtype="Float64")
warning = RuntimeWarning if NUMEXPR_INSTALLED else None
with tm.assert_produces_warning(warning):
result = df.eval("c = b - 1")
expected = DataFrame(
[[1, 2, 1], [3, 4, 3]], columns=["a", "b", "c"], dtype="Float64"
)
tm.assert_frame_equal(result, expected)
def test_ea_dtypes_and_scalar_operation(self, any_numeric_ea_and_arrow_dtype):
# GH#29618
df = DataFrame(
[[1, 2], [3, 4]], columns=["a", "b"], dtype=any_numeric_ea_and_arrow_dtype
)
result = df.eval("c = 2 - 1")
expected = DataFrame(
{
"a": Series([1, 3], dtype=any_numeric_ea_and_arrow_dtype),
"b": Series([2, 4], dtype=any_numeric_ea_and_arrow_dtype),
"c": Series([1, 1], dtype=result["c"].dtype),
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", ["int64", "Int64", "int64[pyarrow]"])
def test_query_ea_dtypes(self, dtype):
if dtype == "int64[pyarrow]":
pytest.importorskip("pyarrow")
# GH#50261
df = DataFrame({"a": [1, 2]}, dtype=dtype)
ref = {2} # noqa: F841
warning = RuntimeWarning if dtype == "Int64" and NUMEXPR_INSTALLED else None
with tm.assert_produces_warning(warning):
result = df.query("a in @ref")
expected = DataFrame({"a": [2]}, index=range(1, 2), dtype=dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("engine", ["python", "numexpr"])
@pytest.mark.parametrize("dtype", ["int64", "Int64", "int64[pyarrow]"])
def test_query_ea_equality_comparison(self, dtype, engine):
# GH#50261
warning = RuntimeWarning if engine == "numexpr" else None
if engine == "numexpr" and not NUMEXPR_INSTALLED:
pytest.skip("numexpr not installed")
if dtype == "int64[pyarrow]":
pytest.importorskip("pyarrow")
df = DataFrame(
{"A": Series([1, 1, 2], dtype="Int64"), "B": Series([1, 2, 2], dtype=dtype)}
)
with tm.assert_produces_warning(warning):
result = df.query("A == B", engine=engine)
expected = DataFrame(
{
"A": Series([1, 2], dtype="Int64", index=range(0, 4, 2)),
"B": Series([1, 2], dtype=dtype, index=range(0, 4, 2)),
}
)
tm.assert_frame_equal(result, expected)
def test_all_nat_in_object(self):
# GH#57068
now = pd.Timestamp.now("UTC") # noqa: F841
df = DataFrame({"a": pd.to_datetime([None, None], utc=True)}, dtype=object)
result = df.query("a > @now")
expected = DataFrame({"a": []}, dtype=object)
tm.assert_frame_equal(result, expected)
| TestDataFrameQueryBacktickQuoting |
python | PrefectHQ__prefect | tests/server/orchestration/test_core_policy.py | {
"start": 56805,
"end": 97827
} | class ____:
async def create_concurrency_limit(self, session, tag, limit):
cl_create = actions.ConcurrencyLimitCreate(
tag=tag,
concurrency_limit=limit,
).model_dump(mode="json")
cl_model = schemas.core.ConcurrencyLimit(**cl_create)
await concurrency_limits.create_concurrency_limit(
session=session, concurrency_limit=cl_model
)
async def delete_concurrency_limit(self, session, tag):
await concurrency_limits.delete_concurrency_limit_by_tag(session, tag)
async def count_concurrency_slots(self, session, tag):
return len(
(
await concurrency_limits.read_concurrency_limit_by_tag(session, tag)
).active_slots
)
async def read_concurrency_slots(self, session, tag):
return (
await concurrency_limits.read_concurrency_limit_by_tag(session, tag)
).active_slots
async def test_basic_concurrency_limiting(
self,
session,
run_type,
initialize_orchestration,
):
await self.create_concurrency_limit(session, "some tag", 1)
concurrency_policy = [SecureTaskConcurrencySlots, ReleaseTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
completed_transition = (states.StateType.RUNNING, states.StateType.COMPLETED)
# before any runs, no active concurrency slots are in use
assert (await self.count_concurrency_slots(session, "some tag")) == 0
task1_running_ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["some tag"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task1_running_ctx = await stack.enter_async_context(
rule(task1_running_ctx, *running_transition)
)
await task1_running_ctx.validate_proposed_state()
# a first task run against a concurrency limited tag will be accepted
assert task1_running_ctx.response_status == SetStateStatus.ACCEPT
task2_running_ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["some tag"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task2_running_ctx = await stack.enter_async_context(
rule(task2_running_ctx, *running_transition)
)
await task2_running_ctx.validate_proposed_state()
# the first task hasn't completed, so the concurrently running second task is
# told to wait
assert task2_running_ctx.response_status == SetStateStatus.WAIT
# the number of slots occupied by active runs is equal to the concurrency limit
assert (await self.count_concurrency_slots(session, "some tag")) == 1
task1_completed_ctx = await initialize_orchestration(
session,
"task",
*completed_transition,
run_override=task1_running_ctx.run,
run_tags=["some tag"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task1_completed_ctx = await stack.enter_async_context(
rule(task1_completed_ctx, *completed_transition)
)
await task1_completed_ctx.validate_proposed_state()
# the first task run will transition into a completed state, yielding a
# concurrency slot
assert task1_completed_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "some tag")) == 0
# the second task tries to run again, this time the transition will be accepted
# now that a concurrency slot has been freed
task2_run_retry_ctx = await initialize_orchestration(
session,
"task",
*running_transition,
run_override=task2_running_ctx.run,
run_tags=["some tag"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task2_run_retry_ctx = await stack.enter_async_context(
rule(task2_run_retry_ctx, *running_transition)
)
await task2_run_retry_ctx.validate_proposed_state()
assert task2_run_retry_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "some tag")) == 1
async def test_concurrency_limit_cancelling_transition(
self,
session,
run_type,
initialize_orchestration,
):
await self.create_concurrency_limit(session, "some tag", 1)
concurrency_policy = [SecureTaskConcurrencySlots, ReleaseTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
cancelling_transition = (states.StateType.RUNNING, states.StateType.CANCELLING)
cancelled_transition = (states.StateType.CANCELLING, states.StateType.CANCELLED)
# before any runs, no active concurrency slots are in use
assert (await self.count_concurrency_slots(session, "some tag")) == 0
task1_running_ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["some tag"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task1_running_ctx = await stack.enter_async_context(
rule(task1_running_ctx, *running_transition)
)
await task1_running_ctx.validate_proposed_state()
# a first task run against a concurrency limited tag will be accepted
assert task1_running_ctx.response_status == SetStateStatus.ACCEPT
task2_running_ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["some tag"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task2_running_ctx = await stack.enter_async_context(
rule(task2_running_ctx, *running_transition)
)
await task2_running_ctx.validate_proposed_state()
# the first task hasn't completed, so the concurrently running second task is
# told to wait
assert task2_running_ctx.response_status == SetStateStatus.WAIT
# the number of slots occupied by active runs is equal to the concurrency limit
assert (await self.count_concurrency_slots(session, "some tag")) == 1
task1_cancelling_ctx = await initialize_orchestration(
session,
"task",
*cancelling_transition,
run_override=task1_running_ctx.run,
run_tags=["some tag"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task1_cancelling_ctx = await stack.enter_async_context(
rule(task1_cancelling_ctx, *cancelling_transition)
)
await task1_cancelling_ctx.validate_proposed_state()
# the first task run will transition into a cancelling state, but
# maintain a hold on the concurrency slot
assert task1_cancelling_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "some tag")) == 1
task1_cancelled_ctx = await initialize_orchestration(
session,
"task",
*cancelled_transition,
run_override=task1_running_ctx.run,
run_tags=["some tag"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task1_cancelled_ctx = await stack.enter_async_context(
rule(task1_cancelled_ctx, *cancelled_transition)
)
await task1_cancelled_ctx.validate_proposed_state()
# the first task run will transition into a cancelled state, yielding a
# concurrency slot
assert task1_cancelled_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "some tag")) == 0
# the second task tries to run again, this time the transition will be accepted
# now that a concurrency slot has been freed
task2_run_retry_ctx = await initialize_orchestration(
session,
"task",
*running_transition,
run_override=task2_running_ctx.run,
run_tags=["some tag"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task2_run_retry_ctx = await stack.enter_async_context(
rule(task2_run_retry_ctx, *running_transition)
)
await task2_run_retry_ctx.validate_proposed_state()
assert task2_run_retry_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "some tag")) == 1
async def test_concurrency_limiting_aborts_transitions_with_zero_limit(
self,
session,
run_type,
initialize_orchestration,
):
# concurrency limits of 0 will deadlock without a short-circuit
await self.create_concurrency_limit(session, "the worst limit", 0)
concurrency_policy = [SecureTaskConcurrencySlots, ReleaseTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["the worst limit"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
ctx = await stack.enter_async_context(rule(ctx, *running_transition))
await ctx.validate_proposed_state()
# instead of a WAIT response, Prefect should direct the client to ABORT
assert ctx.response_status == SetStateStatus.ABORT
async def test_returning_concurrency_slots_on_fizzle(
self,
session,
run_type,
initialize_orchestration,
):
class StateMutatingRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
mutated_state = proposed_state.model_copy()
mutated_state.type = random.choice(
list(
set(states.StateType)
- {initial_state.type, proposed_state.type}
)
)
await self.reject_transition(
mutated_state, reason="gotta fizzle some rules, for fun"
)
async def after_transition(self, initial_state, validated_state, context):
pass
async def cleanup(self, initial_state, validated_state, context):
pass
await self.create_concurrency_limit(session, "a nice little limit", 1)
concurrency_policy = [
SecureTaskConcurrencySlots,
ReleaseTaskConcurrencySlots,
StateMutatingRule,
]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["a nice little limit"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
ctx = await stack.enter_async_context(rule(ctx, *running_transition))
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.REJECT
assert (await self.count_concurrency_slots(session, "a nice little limit")) == 0
async def test_one_run_wont_consume_multiple_slots(
self,
session,
run_type,
initialize_orchestration,
):
await self.create_concurrency_limit(session, "a generous limit", 10)
concurrency_policy = [
SecureTaskConcurrencySlots,
ReleaseTaskConcurrencySlots,
]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["a generous limit"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
ctx = await stack.enter_async_context(rule(ctx, *running_transition))
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "a generous limit")) == 1
duplicate_ctx = await initialize_orchestration(
session,
"task",
*running_transition,
run_override=ctx.run,
run_tags=["a generous limit"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
duplicate_ctx = await stack.enter_async_context(
rule(duplicate_ctx, *running_transition)
)
await duplicate_ctx.validate_proposed_state()
# we might want to protect against a identical transitions from the same run
# from being accepted, but this orchestration rule is the wrong place to do it
assert duplicate_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "a generous limit")) == 1
async def test_task_restart_does_not_consume_multiple_slots(
self,
session,
run_type,
initialize_orchestration,
):
await self.create_concurrency_limit(session, "a generous limit", 10)
concurrency_policy = [
SecureTaskConcurrencySlots,
]
# we should have no consumed slots yet
assert (await self.count_concurrency_slots(session, "a generous limit")) == 0
start_transition = (states.StateType.PENDING, states.StateType.RUNNING)
ctx = await initialize_orchestration(
session, "task", *start_transition, run_tags=["a generous limit"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
ctx = await stack.enter_async_context(rule(ctx, *start_transition))
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
# PENDING -> RUNNING should consume a slot
assert (await self.count_concurrency_slots(session, "a generous limit")) == 1
running_transition = (states.StateType.RUNNING, states.StateType.RUNNING)
restart_ctx = await initialize_orchestration(
session,
"task",
*running_transition,
run_override=ctx.run,
run_tags=["a generous limit"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
restart_ctx = await stack.enter_async_context(
rule(restart_ctx, *running_transition)
)
await restart_ctx.validate_proposed_state()
# RUNNING -> RUNNING should not consume another slot
assert restart_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "a generous limit")) == 1
async def test_concurrency_race_condition_new_tags_arent_double_counted(
self,
session,
run_type,
initialize_orchestration,
):
await self.create_concurrency_limit(session, "primary tag", 2)
concurrency_policy = [SecureTaskConcurrencySlots, ReleaseTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
completed_transition = (states.StateType.RUNNING, states.StateType.COMPLETED)
task1_running_ctx = await initialize_orchestration(
session,
"task",
*running_transition,
run_tags=["primary tag", "secondary tag"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task1_running_ctx = await stack.enter_async_context(
rule(task1_running_ctx, *running_transition)
)
await task1_running_ctx.validate_proposed_state()
assert task1_running_ctx.response_status == SetStateStatus.ACCEPT
await self.create_concurrency_limit(session, "secondary tag", 1)
# the concurrency limit on "secondary tag" was created after the first task
# started running, so no runs against the second limit were counted
assert (await self.count_concurrency_slots(session, "primary tag")) == 1
assert (await self.count_concurrency_slots(session, "secondary tag")) == 0
task2_running_ctx = await initialize_orchestration(
session,
"task",
*running_transition,
run_tags=["primary tag", "secondary tag"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task2_running_ctx = await stack.enter_async_context(
rule(task2_running_ctx, *running_transition)
)
await task2_running_ctx.validate_proposed_state()
assert task2_running_ctx.response_status == SetStateStatus.ACCEPT
# both concurrency limits have an active slot consumed by the second task
assert (await self.count_concurrency_slots(session, "primary tag")) == 2
assert (await self.count_concurrency_slots(session, "secondary tag")) == 1
task1_completed_ctx = await initialize_orchestration(
session,
"task",
*completed_transition,
run_override=task1_running_ctx.run,
run_tags=["primary tag", "secondary tag"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task1_completed_ctx = await stack.enter_async_context(
rule(task1_completed_ctx, *completed_transition)
)
await task1_completed_ctx.validate_proposed_state()
# the first task completes, but despite having tags associated with both
# concurrency limits, it only releases a concurrency slot from the first tag as
# the task entered a running state before the second limit was created
assert task1_completed_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "primary tag")) == 1
assert (await self.count_concurrency_slots(session, "secondary tag")) == 1
task2_completed_ctx = await initialize_orchestration(
session,
"task",
*completed_transition,
run_override=task2_running_ctx.run,
run_tags=["primary tag", "secondary tag"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task2_completed_ctx = await stack.enter_async_context(
rule(task2_completed_ctx, *completed_transition)
)
await task2_completed_ctx.validate_proposed_state()
# after the second task completes, all concurrency slots are released
assert task2_completed_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "primary tag")) == 0
assert (await self.count_concurrency_slots(session, "secondary tag")) == 0
async def test_concurrency_race_condition_deleted_tags_dont_impact_execution(
self, session, run_type, initialize_orchestration
):
await self.create_concurrency_limit(session, "big limit", 2)
await self.create_concurrency_limit(session, "small limit", 1)
concurrency_policy = [SecureTaskConcurrencySlots, ReleaseTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
task1_running_ctx = await initialize_orchestration(
session,
"task",
*running_transition,
run_tags=["big limit", "small limit"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task1_running_ctx = await stack.enter_async_context(
rule(task1_running_ctx, *running_transition)
)
await task1_running_ctx.validate_proposed_state()
assert task1_running_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "small limit")) == 1
task2_running_ctx = await initialize_orchestration(
session,
"task",
*running_transition,
run_tags=["big limit", "small limit"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task2_running_ctx = await stack.enter_async_context(
rule(task2_running_ctx, *running_transition)
)
await task2_running_ctx.validate_proposed_state()
# the small limit was hit, preventing the transition
assert task2_running_ctx.response_status == SetStateStatus.WAIT
# removing the small limit should allow runs again
await self.delete_concurrency_limit(session, "small limit")
task3_running_ctx = await initialize_orchestration(
session,
"task",
*running_transition,
run_tags=["big limit", "small limit"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task3_running_ctx = await stack.enter_async_context(
rule(task3_running_ctx, *running_transition)
)
await task3_running_ctx.validate_proposed_state()
assert task3_running_ctx.response_status == SetStateStatus.ACCEPT
async def test_concurrency_race_condition_limit_increases_dont_impact_execution(
self,
session,
run_type,
initialize_orchestration,
):
await self.create_concurrency_limit(session, "changing limit", 1)
concurrency_policy = [SecureTaskConcurrencySlots, ReleaseTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
task1_running_ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["changing limit"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task1_running_ctx = await stack.enter_async_context(
rule(task1_running_ctx, *running_transition)
)
await task1_running_ctx.validate_proposed_state()
assert task1_running_ctx.response_status == SetStateStatus.ACCEPT
await self.create_concurrency_limit(session, "changing limit", 2)
task2_running_ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["changing limit"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task2_running_ctx = await stack.enter_async_context(
rule(task2_running_ctx, *running_transition)
)
await task2_running_ctx.validate_proposed_state()
assert task2_running_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "changing limit")) == 2
async def test_concurrency_race_condition_limit_decreases_impact_new_runs(
self,
session,
run_type,
initialize_orchestration,
):
await self.create_concurrency_limit(session, "shrinking limit", 2)
concurrency_policy = [SecureTaskConcurrencySlots, ReleaseTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
task1_running_ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["shrinking limit"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task1_running_ctx = await stack.enter_async_context(
rule(task1_running_ctx, *running_transition)
)
await task1_running_ctx.validate_proposed_state()
assert task1_running_ctx.response_status == SetStateStatus.ACCEPT
# lowering the limit to 1 will prevent any more runs from being submitted
await self.create_concurrency_limit(session, "shrinking limit", 1)
task2_running_ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["shrinking limit"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task2_running_ctx = await stack.enter_async_context(
rule(task2_running_ctx, *running_transition)
)
await task2_running_ctx.validate_proposed_state()
assert task2_running_ctx.response_status == SetStateStatus.WAIT
assert (await self.count_concurrency_slots(session, "shrinking limit")) == 1
async def test_concurrency_race_condition_limit_decreases_dont_impact_existing_runs(
self,
session,
run_type,
initialize_orchestration,
):
await self.create_concurrency_limit(session, "shrinking limit", 2)
concurrency_policy = [SecureTaskConcurrencySlots, ReleaseTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
completed_transition = (states.StateType.RUNNING, states.StateType.COMPLETED)
task1_running_ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["shrinking limit"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task1_running_ctx = await stack.enter_async_context(
rule(task1_running_ctx, *running_transition)
)
await task1_running_ctx.validate_proposed_state()
assert task1_running_ctx.response_status == SetStateStatus.ACCEPT
# even if the limit is lowered to 0, the existing run can complete
await self.create_concurrency_limit(session, "shrinking limit", 0)
assert (await self.count_concurrency_slots(session, "shrinking limit")) == 1
task1_completed_ctx = await initialize_orchestration(
session,
"task",
*completed_transition,
run_override=task1_running_ctx.run,
run_tags=["shrinking limit"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
task1_completed_ctx = await stack.enter_async_context(
rule(task1_completed_ctx, *completed_transition)
)
await task1_completed_ctx.validate_proposed_state()
# the concurrency slot is released as expected
assert task1_completed_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "shrinking limit")) == 0
async def test_returning_concurrency_slots_when_transitioning_out_of_running_even_on_fizzle(
self,
session,
run_type,
initialize_orchestration,
):
"""Make sure that we return the concurrency slot when even on a fizzle as long as we transition
out of running, with ReleaseTaskConcurrencySlots listed first in priority.
"""
class StateMutatingRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
mutated_state = proposed_state.model_copy()
mutated_state.type = random.choice(
list(
set(states.StateType)
- {
initial_state.type,
proposed_state.type,
states.StateType.RUNNING,
states.StateType.CANCELLING,
}
)
)
await self.reject_transition(
mutated_state, reason="gotta fizzle some rules, for fun"
)
async def after_transition(self, initial_state, validated_state, context):
pass
async def cleanup(self, initial_state, validated_state, context):
pass
accept_concurrency_policy = [
SecureTaskConcurrencySlots,
ReleaseTaskConcurrencySlots,
]
reject_concurrency_policy = [
ReleaseTaskConcurrencySlots,
SecureTaskConcurrencySlots,
StateMutatingRule,
]
await self.create_concurrency_limit(session, "small", 1)
# Fill the concurrency slot by transitioning into a running state
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["small"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in accept_concurrency_policy:
task1_running_ctx = await stack.enter_async_context(
rule(ctx, *running_transition)
)
await task1_running_ctx.validate_proposed_state()
assert task1_running_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "small")) == 1
# Make sure that the concurrency slot is released even though the transition was
# rejected, because the task was still moved out of a RUNNING state
pending_transition = (states.StateType.RUNNING, states.StateType.PENDING)
task1_pending_ctx = await initialize_orchestration(
session,
"task",
*pending_transition,
run_override=task1_running_ctx.run,
run_tags=["small"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in reject_concurrency_policy:
task1_pending_ctx = await stack.enter_async_context(
rule(task1_pending_ctx, *pending_transition)
)
await task1_pending_ctx.validate_proposed_state()
assert task1_pending_ctx.response_status == SetStateStatus.REJECT
assert task1_pending_ctx.validated_state.type != states.StateType.RUNNING
assert (await self.count_concurrency_slots(session, "small")) == 0
async def test_returning_concurrency_slots_when_transitioning_out_of_running_even_on_invalidation(
self,
session,
run_type,
initialize_orchestration,
):
"""Make sure that we return the concurrency slot when even on a fizzle as long as we transition
out of running, with ReleaseTaskConcurrencySlots listed last in priority.
"""
class StateMutatingRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
mutated_state = proposed_state.model_copy()
mutated_state.type = random.choice(
list(
set(states.StateType)
- {
initial_state.type,
proposed_state.type,
states.StateType.CANCELLING,
}
)
)
await self.reject_transition(
mutated_state, reason="gotta fizzle some rules, for fun"
)
async def after_transition(self, initial_state, validated_state, context):
pass
async def cleanup(self, initial_state, validated_state, context):
pass
accept_concurrency_policy = [
SecureTaskConcurrencySlots,
ReleaseTaskConcurrencySlots,
]
reject_concurrency_policy = [
StateMutatingRule,
SecureTaskConcurrencySlots,
ReleaseTaskConcurrencySlots,
]
await self.create_concurrency_limit(session, "small", 1)
# Fill the concurrency slot by transitioning into a running state
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["small"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in accept_concurrency_policy:
task1_running_ctx = await stack.enter_async_context(
rule(ctx, *running_transition)
)
await task1_running_ctx.validate_proposed_state()
assert task1_running_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "small")) == 1
# Make sure that the concurrency slot is released even though the transition was
# rejected, because the task was still moved out of a RUNNING state
pending_transition = (states.StateType.RUNNING, states.StateType.PENDING)
task1_pending_ctx = await initialize_orchestration(
session,
"task",
*pending_transition,
run_override=task1_running_ctx.run,
run_tags=["small"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in reject_concurrency_policy:
task1_pending_ctx = await stack.enter_async_context(
rule(task1_pending_ctx, *pending_transition)
)
await task1_pending_ctx.validate_proposed_state()
assert task1_pending_ctx.response_status == SetStateStatus.REJECT
assert task1_pending_ctx.validated_state.type != states.StateType.RUNNING
assert (await self.count_concurrency_slots(session, "small")) == 0
async def test_releasing_concurrency_slots_does_not_happen_if_nullified_with_release_first(
self,
session,
run_type,
initialize_orchestration,
):
"""Make sure that concurrency slots are not released if the transition is nullified,
with ReleaseTaskConcurrencySlots listed first in priority
"""
class NullifiedTransition(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
await self.abort_transition(reason="For testing purposes")
async def after_transition(self, initial_state, validated_state, context):
pass
async def cleanup(self, initial_state, validated_state, context):
pass
accept_concurrency_policy = [
SecureTaskConcurrencySlots,
ReleaseTaskConcurrencySlots,
]
abort_concurrency_policy = [
ReleaseTaskConcurrencySlots,
SecureTaskConcurrencySlots,
NullifiedTransition,
]
await self.create_concurrency_limit(session, "small", 1)
# Fill the concurrency slot by transitioning into a running state
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["small"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in accept_concurrency_policy:
task1_running_ctx = await stack.enter_async_context(
rule(ctx, *running_transition)
)
await task1_running_ctx.validate_proposed_state()
assert task1_running_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "small")) == 1
# Make sure that the concurrency slot is not released because the transition
# was aborted
pending_transition = (states.StateType.RUNNING, states.StateType.PENDING)
task1_pending_ctx = await initialize_orchestration(
session,
"task",
*pending_transition,
run_override=task1_running_ctx.run,
run_tags=["small"],
)
async with contextlib.AsyncExitStack() as stack: # Here
for rule in abort_concurrency_policy:
task1_pending_ctx = await stack.enter_async_context(
rule(task1_pending_ctx, *pending_transition)
)
await task1_pending_ctx.validate_proposed_state()
assert task1_pending_ctx.response_status == SetStateStatus.ABORT
assert (await self.count_concurrency_slots(session, "small")) == 1
async def test_releasing_concurrency_slots_does_not_happen_if_nullified_with_release_last(
self,
session,
run_type,
initialize_orchestration,
):
"""Make sure that concurrency slots are not released if the transition is nullified,
with ReleaseTaskConcurrencySlots listed last in priority
"""
class NullifiedTransition(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
await self.abort_transition(reason="For testing purposes")
async def after_transition(self, initial_state, validated_state, context):
pass
async def cleanup(self, initial_state, validated_state, context):
pass
accept_concurrency_policy = [
SecureTaskConcurrencySlots,
ReleaseTaskConcurrencySlots,
]
abort_concurrency_policy = [
NullifiedTransition,
SecureTaskConcurrencySlots,
ReleaseTaskConcurrencySlots,
]
await self.create_concurrency_limit(session, "small", 1)
# Fill the concurrency slot by transitioning into a running state
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["small"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in accept_concurrency_policy:
task1_running_ctx = await stack.enter_async_context(
rule(ctx, *running_transition)
)
await task1_running_ctx.validate_proposed_state()
assert task1_running_ctx.response_status == SetStateStatus.ACCEPT
assert (await self.count_concurrency_slots(session, "small")) == 1
# Make sure that the concurrency slot is not released because the transition
# was aborted
pending_transition = (states.StateType.RUNNING, states.StateType.PENDING)
task1_pending_ctx = await initialize_orchestration(
session,
"task",
*pending_transition,
run_override=task1_running_ctx.run,
run_tags=["small"],
)
async with contextlib.AsyncExitStack() as stack: # Here
for rule in abort_concurrency_policy:
task1_pending_ctx = await stack.enter_async_context(
rule(task1_pending_ctx, *pending_transition)
)
await task1_pending_ctx.validate_proposed_state()
assert task1_pending_ctx.response_status == SetStateStatus.ABORT
assert (await self.count_concurrency_slots(session, "small")) == 1
| TestTaskConcurrencyLimits |
python | numba__numba | numba/tests/test_serialize.py | {
"start": 8053,
"end": 9652
} | class ____(TestCase):
"""This test case includes issues specific to the cloudpickle implementation.
"""
_numba_parallel_test_ = False
def test_dynamic_class_reset_on_unpickle(self):
# a dynamic class
class Klass:
classvar = None
def mutator():
Klass.classvar = 100
def check():
self.assertEqual(Klass.classvar, 100)
saved = dumps(Klass)
mutator()
check()
loads(saved)
# Without the patch, each `loads(saved)` will reset `Klass.classvar`
check()
loads(saved)
check()
@unittest.skipIf(__name__ == "__main__",
"Test cannot run as when module is __main__")
def test_main_class_reset_on_unpickle(self):
mp = get_context('spawn')
proc = mp.Process(target=check_main_class_reset_on_unpickle)
proc.start()
proc.join(timeout=60)
self.assertEqual(proc.exitcode, 0)
def test_dynamic_class_reset_on_unpickle_new_proc(self):
# a dynamic class
class Klass:
classvar = None
# serialize Klass in this process
saved = dumps(Klass)
# Check the reset problem in a new process
mp = get_context('spawn')
proc = mp.Process(target=check_unpickle_dyn_class_new_proc, args=(saved,))
proc.start()
proc.join(timeout=60)
self.assertEqual(proc.exitcode, 0)
def test_dynamic_class_issue_7356(self):
cfunc = numba.njit(issue_7356)
self.assertEqual(cfunc(), (100, 100))
| TestCloudPickleIssues |
python | pytest-dev__pytest | src/_pytest/doctest.py | {
"start": 4903,
"end": 7489
} | class ____(Exception):
def __init__(self, failures: Sequence[doctest.DocTestFailure]) -> None:
super().__init__()
self.failures = failures
def _init_runner_class() -> type[doctest.DocTestRunner]:
import doctest
class PytestDoctestRunner(doctest.DebugRunner):
"""Runner to collect failures.
Note that the out variable in this case is a list instead of a
stdout-like object.
"""
def __init__(
self,
checker: doctest.OutputChecker | None = None,
verbose: bool | None = None,
optionflags: int = 0,
continue_on_failure: bool = True,
) -> None:
super().__init__(checker=checker, verbose=verbose, optionflags=optionflags)
self.continue_on_failure = continue_on_failure
def report_failure(
self,
out,
test: doctest.DocTest,
example: doctest.Example,
got: str,
) -> None:
failure = doctest.DocTestFailure(test, example, got)
if self.continue_on_failure:
out.append(failure)
else:
raise failure
def report_unexpected_exception(
self,
out,
test: doctest.DocTest,
example: doctest.Example,
exc_info: tuple[type[BaseException], BaseException, types.TracebackType],
) -> None:
if isinstance(exc_info[1], OutcomeException):
raise exc_info[1]
if isinstance(exc_info[1], bdb.BdbQuit):
outcomes.exit("Quitting debugger")
failure = doctest.UnexpectedException(test, example, exc_info)
if self.continue_on_failure:
out.append(failure)
else:
raise failure
return PytestDoctestRunner
def _get_runner(
checker: doctest.OutputChecker | None = None,
verbose: bool | None = None,
optionflags: int = 0,
continue_on_failure: bool = True,
) -> doctest.DocTestRunner:
# We need this in order to do a lazy import on doctest
global RUNNER_CLASS
if RUNNER_CLASS is None:
RUNNER_CLASS = _init_runner_class()
# Type ignored because the continue_on_failure argument is only defined on
# PytestDoctestRunner, which is lazily defined so can't be used as a type.
return RUNNER_CLASS( # type: ignore
checker=checker,
verbose=verbose,
optionflags=optionflags,
continue_on_failure=continue_on_failure,
)
| MultipleDoctestFailures |
python | walkccc__LeetCode | solutions/1120. Maximum Average Subtree/1120.py | {
"start": 114,
"end": 622
} | class ____:
def maximumAverageSubtree(self, root: TreeNode | None) -> float:
def maximumAverage(root: TreeNode | None) -> T:
if not root:
return T(0, 0, 0)
left = maximumAverage(root.left)
right = maximumAverage(root.right)
summ = root.val + left.summ + right.summ
count = 1 + left.count + right.count
maxAverage = max(summ / count, left.maxAverage, right.maxAverage)
return T(summ, count, maxAverage)
return maximumAverage(root).maxAverage
| Solution |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-mlx/llama_index/llms/mlx/tokenizer_utils.py | {
"start": 7162,
"end": 10065
} | class ____:
"""
A wrapper that combines an HF tokenizer and a detokenizer.
Accessing any attribute other than the ``detokenizer`` is forwarded to the
huggingface tokenizer.
"""
def __init__(self, tokenizer, detokenizer_class=NaiveStreamingDetokenizer) -> None:
self._tokenizer = tokenizer
self._detokenizer = detokenizer_class(tokenizer)
def __getattr__(self, attr) -> object:
if attr == "detokenizer":
return self._detokenizer
else:
return getattr(self._tokenizer, attr)
def _match(a, b):
if type(a) is not type(b):
return False
if isinstance(a, dict):
return len(a) == len(b) and all(k in b and _match(a[k], b[k]) for k in a)
if isinstance(a, list):
return len(a) == len(b) and all(_match(ai, bi) for ai, bi in zip(a, b))
return a == b
def _is_spm_decoder(decoder):
_target_description = {
"type": "Sequence",
"decoders": [
{"type": "Replace", "pattern": {"String": "▁"}, "content": " "},
{"type": "ByteFallback"},
{"type": "Fuse"},
{"type": "Strip", "content": " ", "start": 1, "stop": 0},
],
}
return _match(_target_description, decoder)
def _is_spm_decoder_no_space(decoder):
_target_description = {
"type": "Sequence",
"decoders": [
{"type": "Replace", "pattern": {"String": "▁"}, "content": " "},
{"type": "ByteFallback"},
{"type": "Fuse"},
],
}
return _match(_target_description, decoder)
def _is_bpe_decoder(decoder):
_target_description = {
"type": "ByteLevel",
"add_prefix_space": False,
"trim_offsets": False,
"use_regex": False,
}
return _match(_target_description, decoder)
def load_tokenizer(model_path, tokenizer_config_extra={}):
"""
Load a huggingface tokenizer and try to infer the type of streaming
detokenizer to use.
Note, to use a fast streaming tokenizer, pass a local file path rather than
a Hugging Face repo ID.
"""
detokenizer_class = NaiveStreamingDetokenizer
tokenizer_file = model_path / "tokenizer.json"
if tokenizer_file.exists():
tokenizer_content = json.load(tokenizer_file.open())
if "decoder" in tokenizer_content:
if _is_spm_decoder(tokenizer_content["decoder"]):
detokenizer_class = SPMStreamingDetokenizer
elif _is_spm_decoder_no_space(tokenizer_content["decoder"]):
detokenizer_class = partial(SPMStreamingDetokenizer, trim_space=False)
elif _is_bpe_decoder(tokenizer_content["decoder"]):
detokenizer_class = BPEStreamingDetokenizer
return TokenizerWrapper(
AutoTokenizer.from_pretrained(model_path, **tokenizer_config_extra),
detokenizer_class,
)
| TokenizerWrapper |
python | patrick-kidger__equinox | equinox/nn/_pool.py | {
"start": 262,
"end": 5880
} | class ____(Module):
"""General N-dimensional downsampling over a sliding window."""
init: int | float | Array
operation: Callable[[Array, Array], Array]
num_spatial_dims: int = field(static=True)
kernel_size: tuple[int, ...] = field(static=True)
stride: tuple[int, ...] = field(static=True)
padding: tuple[tuple[int, int], ...] = field(static=True)
use_ceil: bool = field(static=True)
def __init__(
self,
init: int | float | Array,
operation: Callable[[Array, Array], Array],
num_spatial_dims: int,
kernel_size: int | Sequence[int],
stride: int | Sequence[int] = 1,
padding: int | Sequence[int] | Sequence[tuple[int, int]] = 0,
use_ceil: bool = False,
):
"""**Arguments:**
- `init`: The initial value for the reduction.
- `operation`: The operation applied to the inputs of each window.
- `num_spatial_dims`: The number of spatial dimensions.
- `kernel_size`: The size of the convolutional kernel.
- `stride`: The stride of the convolution.
- `padding`: The amount of padding to apply before and after each
spatial dimension.
- `use_ceil`: If `True`, then `ceil` is used to compute the final output
shape instead of `floor`. For `ceil`, if required, extra padding is added.
Defaults to `False`.
!!! info
In order for `Pool` to be differentiable, `operation(init, x) == x` needs to
be true for all finite `x`. For further details see
[https://www.tensorflow.org/xla/operation_semantics#reducewindow](https://www.tensorflow.org/xla/operation_semantics#reducewindow)
and [https://github.com/google/jax/issues/7718](https://github.com/google/jax/issues/7718).
""" # noqa: E501
self.operation = operation
self.init = init
self.num_spatial_dims = num_spatial_dims
self.use_ceil = use_ceil
if isinstance(kernel_size, int):
self.kernel_size = (kernel_size,) * num_spatial_dims
elif isinstance(kernel_size, Sequence):
self.kernel_size = tuple(kernel_size)
else:
raise ValueError(
"`kernel_size` must either be an int or tuple of length "
f"{num_spatial_dims} containing ints."
)
if isinstance(stride, int):
self.stride = (stride,) * num_spatial_dims
elif isinstance(stride, Sequence):
self.stride = tuple(stride)
else:
raise ValueError(
"`stride` must either be an int or tuple of length "
f"{num_spatial_dims} containing ints."
)
if isinstance(padding, int):
self.padding = tuple((padding, padding) for _ in range(num_spatial_dims))
elif isinstance(padding, Sequence) and len(padding) == num_spatial_dims:
if all_sequences(padding):
self.padding = tuple(padding) # pyright: ignore
else:
self.padding = tuple((p, p) for p in padding)
else:
raise ValueError(
"`padding` must either be an int or tuple of length "
f"{num_spatial_dims} containing ints or tuples of length 2."
)
def _update_padding_for_ceil(self, input_shape):
new_padding = []
for input_size, (left_padding, right_padding), kernel_size, stride in zip(
input_shape[1:], self.padding, self.kernel_size, self.stride
):
if (input_size + left_padding + right_padding - kernel_size) % stride == 0:
new_padding.append((left_padding, right_padding))
else:
new_padding.append((left_padding, right_padding + stride))
return tuple(new_padding)
def _check_is_padding_valid(self, padding):
for (left_padding, right_padding), kernel_size in zip(
padding, self.kernel_size
):
if max(left_padding, right_padding) > kernel_size:
raise RuntimeError(
"Paddings should be less than the size of the kernel. "
f"Padding {(left_padding, right_padding)} received for kernel size "
f"{kernel_size}."
)
@named_scope("eqx.nn.Pool")
def __call__(self, x: Array, *, key: PRNGKeyArray | None = None) -> Array:
"""**Arguments:**
- `x`: The input. Should be a JAX array of shape
`(channels, dim_1, ..., dim_N)`, where `N = num_spatial_dims`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
A JAX array of shape `(channels, new_dim_1, ..., new_dim_N)`.
"""
assert len(x.shape) == self.num_spatial_dims + 1, (
f"Input should have {self.num_spatial_dims} spatial dimensions, "
f"but input has shape {x.shape}"
)
if self.use_ceil:
padding = self._update_padding_for_ceil(x.shape)
else:
padding = self.padding
self._check_is_padding_valid(padding)
x = jnp.moveaxis(x, 0, -1)
x = jnp.expand_dims(x, axis=0)
x = lax.reduce_window(
x,
self.init,
self.operation,
(1,) + self.kernel_size + (1,),
(1,) + self.stride + (1,),
((0, 0),) + padding + ((0, 0),),
)
x = jnp.squeeze(x, axis=0)
x = jnp.moveaxis(x, -1, 0)
return x
| Pool |
python | encode__django-rest-framework | tests/test_utils.py | {
"start": 993,
"end": 2134
} | class ____(ModelViewSet):
serializer_class = ModelSerializer
queryset = BasicModel.objects.all()
@action(detail=False)
def list_action(self, request, *args, **kwargs):
raise NotImplementedError
@action(detail=True)
def detail_action(self, request, *args, **kwargs):
raise NotImplementedError
@action(detail=True, name='Custom Name')
def named_action(self, request, *args, **kwargs):
raise NotImplementedError
@action(detail=True, suffix='Custom Suffix')
def suffixed_action(self, request, *args, **kwargs):
raise NotImplementedError
router = SimpleRouter()
router.register(r'resources', ResourceViewSet)
urlpatterns = [
path('', Root.as_view()),
path('resource/', ResourceRoot.as_view()),
path('resource/customname', CustomNameResourceInstance.as_view()),
path('resource/<int:key>', ResourceInstance.as_view()),
path('resource/<int:key>/', NestedResourceRoot.as_view()),
path('resource/<int:key>/<str:other>', NestedResourceInstance.as_view()),
]
urlpatterns += router.urls
@override_settings(ROOT_URLCONF='tests.test_utils')
| ResourceViewSet |
python | doocs__leetcode | solution/1600-1699/1638.Count Substrings That Differ by One Character/Solution2.py | {
"start": 0,
"end": 642
} | class ____:
def countSubstrings(self, s: str, t: str) -> int:
ans = 0
m, n = len(s), len(t)
f = [[0] * (n + 1) for _ in range(m + 1)]
g = [[0] * (n + 1) for _ in range(m + 1)]
for i, a in enumerate(s, 1):
for j, b in enumerate(t, 1):
if a == b:
f[i][j] = f[i - 1][j - 1] + 1
for i in range(m - 1, -1, -1):
for j in range(n - 1, -1, -1):
if s[i] == t[j]:
g[i][j] = g[i + 1][j + 1] + 1
else:
ans += (f[i][j] + 1) * (g[i + 1][j + 1] + 1)
return ans
| Solution |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 3890,
"end": 4084
} | class ____(IntEnum):
LT = 0 # less than
LTE = 1 # less than or equal
EQ = 2 # equal
GT = 3 # greater than
GTE = 4 # greater than or equal
NE = 5 # not equal
| RangeType |
python | networkx__networkx | networkx/linalg/tests/test_modularity.py | {
"start": 103,
"end": 3056
} | class ____:
@classmethod
def setup_class(cls):
deg = [3, 2, 2, 1, 0]
cls.G = nx.havel_hakimi_graph(deg)
# Graph used as an example in Sec. 4.1 of Langville and Meyer,
# "Google's PageRank and Beyond". (Used for test_directed_laplacian)
cls.DG = nx.DiGraph()
cls.DG.add_edges_from(
(
(1, 2),
(1, 3),
(3, 1),
(3, 2),
(3, 5),
(4, 5),
(4, 6),
(5, 4),
(5, 6),
(6, 4),
)
)
def test_modularity(self):
"Modularity matrix"
# fmt: off
B = np.array([[-1.125, 0.25, 0.25, 0.625, 0.],
[0.25, -0.5, 0.5, -0.25, 0.],
[0.25, 0.5, -0.5, -0.25, 0.],
[0.625, -0.25, -0.25, -0.125, 0.],
[0., 0., 0., 0., 0.]])
# fmt: on
permutation = [4, 0, 1, 2, 3]
np.testing.assert_equal(nx.modularity_matrix(self.G), B)
np.testing.assert_equal(
nx.modularity_matrix(self.G, nodelist=permutation),
B[np.ix_(permutation, permutation)],
)
def test_modularity_weight(self):
"Modularity matrix with weights"
# fmt: off
B = np.array([[-1.125, 0.25, 0.25, 0.625, 0.],
[0.25, -0.5, 0.5, -0.25, 0.],
[0.25, 0.5, -0.5, -0.25, 0.],
[0.625, -0.25, -0.25, -0.125, 0.],
[0., 0., 0., 0., 0.]])
# fmt: on
G_weighted = self.G.copy()
for n1, n2 in G_weighted.edges():
G_weighted.edges[n1, n2]["weight"] = 0.5
# The following test would fail in networkx 1.1
np.testing.assert_equal(nx.modularity_matrix(G_weighted), B)
# The following test that the modularity matrix get rescaled accordingly
np.testing.assert_equal(
nx.modularity_matrix(G_weighted, weight="weight"), 0.5 * B
)
def test_directed_modularity(self):
"Directed Modularity matrix"
# fmt: off
B = np.array([[-0.2, 0.6, 0.8, -0.4, -0.4, -0.4],
[0., 0., 0., 0., 0., 0.],
[0.7, 0.4, -0.3, -0.6, 0.4, -0.6],
[-0.2, -0.4, -0.2, -0.4, 0.6, 0.6],
[-0.2, -0.4, -0.2, 0.6, -0.4, 0.6],
[-0.1, -0.2, -0.1, 0.8, -0.2, -0.2]])
# fmt: on
node_permutation = [5, 1, 2, 3, 4, 6]
idx_permutation = [4, 0, 1, 2, 3, 5]
mm = nx.directed_modularity_matrix(self.DG, nodelist=sorted(self.DG))
np.testing.assert_equal(mm, B)
np.testing.assert_equal(
nx.directed_modularity_matrix(self.DG, nodelist=node_permutation),
B[np.ix_(idx_permutation, idx_permutation)],
)
| TestModularity |
python | ray-project__ray | python/ray/tune/tests/test_trial_scheduler_resource_changing.py | {
"start": 535,
"end": 679
} | class ____(TuneController):
def get_live_trials(self):
return [t for t in self._trials if t.status != "TERMINATED"]
| MockTuneController |
python | altair-viz__altair | tools/schemapi/codegen.py | {
"start": 990,
"end": 1191
} | class ____:
"""Object whose repr() is a string of code."""
def __init__(self, code: str):
self.code = code
def __repr__(self) -> str:
return self.code
@dataclass
| CodeSnippet |
python | ethereum__web3.py | tests/integration/go_ethereum/test_goethereum_http.py | {
"start": 3412,
"end": 3483
} | class ____(GoEthereumNetModuleTest):
pass
| TestGoEthereumNetModuleTest |
python | python-pillow__Pillow | src/PIL/ImageOps.py | {
"start": 14231,
"end": 25567
} | class ____(Protocol):
"""
An object that supports the ``getmesh`` method, taking an image as an
argument, and returning a list of tuples. Each tuple contains two tuples,
the source box as a tuple of 4 integers, and a tuple of 8 integers for the
final quadrilateral, in order of top left, bottom left, bottom right, top
right.
"""
def getmesh(
self, image: Image.Image
) -> list[
tuple[tuple[int, int, int, int], tuple[int, int, int, int, int, int, int, int]]
]: ...
def deform(
image: Image.Image,
deformer: SupportsGetMesh,
resample: int = Image.Resampling.BILINEAR,
) -> Image.Image:
"""
Deform the image.
:param image: The image to deform.
:param deformer: A deformer object. Any object that implements a
``getmesh`` method can be used.
:param resample: An optional resampling filter. Same values possible as
in the PIL.Image.transform function.
:return: An image.
"""
return image.transform(
image.size, Image.Transform.MESH, deformer.getmesh(image), resample
)
def equalize(image: Image.Image, mask: Image.Image | None = None) -> Image.Image:
"""
Equalize the image histogram. This function applies a non-linear
mapping to the input image, in order to create a uniform
distribution of grayscale values in the output image.
:param image: The image to equalize.
:param mask: An optional mask. If given, only the pixels selected by
the mask are included in the analysis.
:return: An image.
"""
if image.mode == "P":
image = image.convert("RGB")
h = image.histogram(mask)
lut = []
for b in range(0, len(h), 256):
histo = [_f for _f in h[b : b + 256] if _f]
if len(histo) <= 1:
lut.extend(list(range(256)))
else:
step = (functools.reduce(operator.add, histo) - histo[-1]) // 255
if not step:
lut.extend(list(range(256)))
else:
n = step // 2
for i in range(256):
lut.append(n // step)
n = n + h[i + b]
return _lut(image, lut)
def expand(
image: Image.Image,
border: int | tuple[int, ...] = 0,
fill: str | int | tuple[int, ...] = 0,
) -> Image.Image:
"""
Add border to the image
:param image: The image to expand.
:param border: Border width, in pixels.
:param fill: Pixel fill value (a color value). Default is 0 (black).
:return: An image.
"""
left, top, right, bottom = _border(border)
width = left + image.size[0] + right
height = top + image.size[1] + bottom
color = _color(fill, image.mode)
if image.palette:
mode = image.palette.mode
palette = ImagePalette.ImagePalette(mode, image.getpalette(mode))
if isinstance(color, tuple) and (len(color) == 3 or len(color) == 4):
color = palette.getcolor(color)
else:
palette = None
out = Image.new(image.mode, (width, height), color)
if palette:
out.putpalette(palette.palette, mode)
out.paste(image, (left, top))
return out
def fit(
image: Image.Image,
size: tuple[int, int],
method: int = Image.Resampling.BICUBIC,
bleed: float = 0.0,
centering: tuple[float, float] = (0.5, 0.5),
) -> Image.Image:
"""
Returns a resized and cropped version of the image, cropped to the
requested aspect ratio and size.
This function was contributed by Kevin Cazabon.
:param image: The image to resize and crop.
:param size: The requested output size in pixels, given as a
(width, height) tuple.
:param method: Resampling method to use. Default is
:py:attr:`~PIL.Image.Resampling.BICUBIC`.
See :ref:`concept-filters`.
:param bleed: Remove a border around the outside of the image from all
four edges. The value is a decimal percentage (use 0.01 for
one percent). The default value is 0 (no border).
Cannot be greater than or equal to 0.5.
:param centering: Control the cropping position. Use (0.5, 0.5) for
center cropping (e.g. if cropping the width, take 50% off
of the left side, and therefore 50% off the right side).
(0.0, 0.0) will crop from the top left corner (i.e. if
cropping the width, take all of the crop off of the right
side, and if cropping the height, take all of it off the
bottom). (1.0, 0.0) will crop from the bottom left
corner, etc. (i.e. if cropping the width, take all of the
crop off the left side, and if cropping the height take
none from the top, and therefore all off the bottom).
:return: An image.
"""
# by Kevin Cazabon, Feb 17/2000
# kevin@cazabon.com
# https://www.cazabon.com
centering_x, centering_y = centering
if not 0.0 <= centering_x <= 1.0:
centering_x = 0.5
if not 0.0 <= centering_y <= 1.0:
centering_y = 0.5
if not 0.0 <= bleed < 0.5:
bleed = 0.0
# calculate the area to use for resizing and cropping, subtracting
# the 'bleed' around the edges
# number of pixels to trim off on Top and Bottom, Left and Right
bleed_pixels = (bleed * image.size[0], bleed * image.size[1])
live_size = (
image.size[0] - bleed_pixels[0] * 2,
image.size[1] - bleed_pixels[1] * 2,
)
# calculate the aspect ratio of the live_size
live_size_ratio = live_size[0] / live_size[1]
# calculate the aspect ratio of the output image
output_ratio = size[0] / size[1]
# figure out if the sides or top/bottom will be cropped off
if live_size_ratio == output_ratio:
# live_size is already the needed ratio
crop_width = live_size[0]
crop_height = live_size[1]
elif live_size_ratio >= output_ratio:
# live_size is wider than what's needed, crop the sides
crop_width = output_ratio * live_size[1]
crop_height = live_size[1]
else:
# live_size is taller than what's needed, crop the top and bottom
crop_width = live_size[0]
crop_height = live_size[0] / output_ratio
# make the crop
crop_left = bleed_pixels[0] + (live_size[0] - crop_width) * centering_x
crop_top = bleed_pixels[1] + (live_size[1] - crop_height) * centering_y
crop = (crop_left, crop_top, crop_left + crop_width, crop_top + crop_height)
# resize the image and return it
return image.resize(size, method, box=crop)
def flip(image: Image.Image) -> Image.Image:
"""
Flip the image vertically (top to bottom).
:param image: The image to flip.
:return: An image.
"""
return image.transpose(Image.Transpose.FLIP_TOP_BOTTOM)
def grayscale(image: Image.Image) -> Image.Image:
"""
Convert the image to grayscale.
:param image: The image to convert.
:return: An image.
"""
return image.convert("L")
def invert(image: Image.Image) -> Image.Image:
"""
Invert (negate) the image.
:param image: The image to invert.
:return: An image.
"""
lut = list(range(255, -1, -1))
return image.point(lut) if image.mode == "1" else _lut(image, lut)
def mirror(image: Image.Image) -> Image.Image:
"""
Flip image horizontally (left to right).
:param image: The image to mirror.
:return: An image.
"""
return image.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
def posterize(image: Image.Image, bits: int) -> Image.Image:
"""
Reduce the number of bits for each color channel.
:param image: The image to posterize.
:param bits: The number of bits to keep for each channel (1-8).
:return: An image.
"""
mask = ~(2 ** (8 - bits) - 1)
lut = [i & mask for i in range(256)]
return _lut(image, lut)
def solarize(image: Image.Image, threshold: int = 128) -> Image.Image:
"""
Invert all pixel values above a threshold.
:param image: The image to solarize.
:param threshold: All pixels above this grayscale level are inverted.
:return: An image.
"""
lut = []
for i in range(256):
if i < threshold:
lut.append(i)
else:
lut.append(255 - i)
return _lut(image, lut)
@overload
def exif_transpose(image: Image.Image, *, in_place: Literal[True]) -> None: ...
@overload
def exif_transpose(
image: Image.Image, *, in_place: Literal[False] = False
) -> Image.Image: ...
def exif_transpose(image: Image.Image, *, in_place: bool = False) -> Image.Image | None:
"""
If an image has an EXIF Orientation tag, other than 1, transpose the image
accordingly, and remove the orientation data.
:param image: The image to transpose.
:param in_place: Boolean. Keyword-only argument.
If ``True``, the original image is modified in-place, and ``None`` is returned.
If ``False`` (default), a new :py:class:`~PIL.Image.Image` object is returned
with the transposition applied. If there is no transposition, a copy of the
image will be returned.
"""
image.load()
image_exif = image.getexif()
orientation = image_exif.get(ExifTags.Base.Orientation, 1)
method = {
2: Image.Transpose.FLIP_LEFT_RIGHT,
3: Image.Transpose.ROTATE_180,
4: Image.Transpose.FLIP_TOP_BOTTOM,
5: Image.Transpose.TRANSPOSE,
6: Image.Transpose.ROTATE_270,
7: Image.Transpose.TRANSVERSE,
8: Image.Transpose.ROTATE_90,
}.get(orientation)
if method is not None:
if in_place:
image.im = image.im.transpose(method)
image._size = image.im.size
else:
transposed_image = image.transpose(method)
exif_image = image if in_place else transposed_image
exif = exif_image.getexif()
if ExifTags.Base.Orientation in exif:
del exif[ExifTags.Base.Orientation]
if "exif" in exif_image.info:
exif_image.info["exif"] = exif.tobytes()
elif "Raw profile type exif" in exif_image.info:
exif_image.info["Raw profile type exif"] = exif.tobytes().hex()
for key in ("XML:com.adobe.xmp", "xmp"):
if key in exif_image.info:
for pattern in (
r'tiff:Orientation="([0-9])"',
r"<tiff:Orientation>([0-9])</tiff:Orientation>",
):
value = exif_image.info[key]
if isinstance(value, str):
value = re.sub(pattern, "", value)
elif isinstance(value, tuple):
value = tuple(
re.sub(pattern.encode(), b"", v) for v in value
)
else:
value = re.sub(pattern.encode(), b"", value)
exif_image.info[key] = value
if not in_place:
return transposed_image
elif not in_place:
return image.copy()
return None
| SupportsGetMesh |
python | google__jax | jax/_src/test_util.py | {
"start": 42030,
"end": 42985
} | class ____:
def __repr__(self):
return "<not present>"
@contextmanager
def assert_global_configs_unchanged():
starting_cache = compilation_cache._cache
starting_config = config.config.values.copy()
yield
ending_config = config.config.values
ending_cache = compilation_cache._cache
if starting_config != ending_config:
differing = {k: (starting_config.get(k, NotPresent()), ending_config.get(k, NotPresent()))
for k in (starting_config.keys() | ending_config.keys())
if (k not in starting_config or k not in ending_config
or starting_config[k] != ending_config[k])}
raise AssertionError(f"Test changed global config values. Differing values are: {differing}")
if starting_cache is not ending_cache:
raise AssertionError(
f"Test changed the compilation cache object: before test it was "
f"{starting_cache}, now it is {ending_cache}"
)
| NotPresent |
python | pytorch__pytorch | test/test_autograd.py | {
"start": 505424,
"end": 521541
} | class ____(TestCase):
def _run_py_multithread_fn(
self, fn, args=(), num_threads=10, kwargs=None, pass_idx=False
):
class PropagatingThread(threading.Thread):
"""Helper class to propagate exception from child
thread to main thread on join.
Reference: https://stackoverflow.com/a/31614591/5602957
"""
def run(self):
self.exception = None
try:
self.ret = super().run()
except Exception as e:
self.exception = e
def join(self, timeout=None):
super().join(timeout)
if self.exception:
raise self.exception from self.exception
return self.ret
threads = []
for idx in range(num_threads):
p = PropagatingThread(target=fn, args=((idx, *args) if pass_idx else args))
p.start()
threads.append(p)
for p in threads:
p.join()
def test_multithreaded_exception_propagation(self):
# Test whether exception in child thread
# are propagated to main thread.
def fn():
self.assertTrue(False)
with self.assertRaises(AssertionError):
self._run_py_multithread_fn(fn)
def test_simple_backward(self):
# simple multithreaded backward that create threads in the beginning of training
# and everything else is training separately, i.e. inputs, operations, etc.
def train_fn():
x = torch.ones(5, 5, requires_grad=True)
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
self.assertEqual(x.grad, x + 3.5)
self._run_py_multithread_fn(train_fn)
def test_simple_backward_same_input(self):
# simple multithreaded backward with only shared inputs (i.e. This is common
# for things like Hogwild multithreaded training with multiple CPU threads)
def train_fn_backward(x):
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
x = torch.ones(5, 5, requires_grad=True)
self._run_py_multithread_fn(train_fn_backward, (x,))
# Since we are calling backward from multiple threads
# and all threads share the same input, when we do backward
# concurrently, different backwards will all accumulate to
# the same .grad for each input, and the gradients should
# be equal to num_threads * gradient
self.assertEqual(x.grad, 10 * (x + 3.5))
def train_fn_grad(x):
y = (x + 3) * (x + 4) * 0.5
grads = torch.autograd.grad(y.sum(), x)
self.assertEqual(len(grads), 1)
self.assertEqual(grads[0], x + 3.5)
# since we use functional grad() api, gradients will not
# be accumulate to the same place and should be the same
self._run_py_multithread_fn(train_fn_grad, (x,))
def test_multi_grad_all_hooks(self):
# Multihooks should behave independently per execution of backward
# Test that the hook fired the number of times we ran backward
# even if those executions occur concurrently on different threads
t1 = torch.rand(2, requires_grad=True)
t2 = torch.rand(2, requires_grad=True)
t3 = torch.rand(2, requires_grad=True)
t4 = torch.rand(2, requires_grad=True)
res = None
count = [0]
hook_lock = threading.Lock()
def hook(grads):
nonlocal res
with hook_lock:
count[0] += 1
grad_is_none = [g is not None for g in grads]
if res is None:
res = grad_is_none
else:
self.assertEqual(res, grad_is_none)
handle = torch.autograd.graph.register_multi_grad_hook((t1, t2, t3, t4), hook)
out = (t2 * t3).sum()
def backward_retain_graph(out, t2, t3):
out.backward(inputs=(t2, t3), retain_graph=True)
self._run_py_multithread_fn(backward_retain_graph, (out, t2, t3), num_threads=5)
self.assertEqual(count[0], 5)
self.assertEqual(res, [False, True, True, False])
# Leave one hook partially applied
res = None
count = [0]
err_count = [0]
bw_count = [0]
bw_count_lock = threading.Lock()
err_count_lock = threading.Lock()
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
with bw_count_lock:
bw_count[0] += 1
if bw_count[0] == 1:
raise RuntimeError("error message")
else:
return gO
out = (Func.apply(t2) * t3).sum()
def backward_retain_graph(out, t2, t3):
try:
out.backward(inputs=(t2, t3), retain_graph=True)
except RuntimeError:
with err_count_lock:
err_count[0] += 1
self._run_py_multithread_fn(backward_retain_graph, (out, t2, t3), num_threads=5)
self.assertEqual(count[0], 4)
self.assertEqual(err_count[0], 1)
self.assertEqual(res, [False, True, True, False])
handle.remove()
def test_multi_grad_any_hooks(self):
# Multihooks should behave independently per execution of backward
# Test that the hook fired the number of times we ran backward
# even if those executions occur concurrently on different threads
t1 = torch.rand(2, requires_grad=True)
t2 = torch.rand(2, requires_grad=True)
t3 = torch.rand(2, requires_grad=True)
t4 = torch.rand(2, requires_grad=True)
res = None
count = [0]
hook_lock = threading.Lock()
def hook(grad):
nonlocal res
with hook_lock:
count[0] += 1
if res is None:
res = "foo"
else:
self.assertEqual(res, "foo")
torch.autograd.graph.register_multi_grad_hook(
(t1, t2, t3, t4), hook, mode="any"
)
out = (t2 * t3).sum()
def backward_retain_graph(out, t2, t3):
out.backward(inputs=(t2, t3), retain_graph=True)
self._run_py_multithread_fn(backward_retain_graph, (out, t2, t3), num_threads=5)
self.assertEqual(count[0], 5)
self.assertEqual(res, "foo")
# Raise an error in one thread's backward
res = None
count = [0]
err_count = [0]
bw_count = [0]
bw_count_lock = threading.Lock()
err_count_lock = threading.Lock()
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
with bw_count_lock:
bw_count[0] += 1
if bw_count[0] == 1:
raise RuntimeError("error message")
else:
return gO
out = (Func.apply(t2) * t3).sum()
def backward_retain_graph(out, t2, t3):
try:
out.backward(inputs=(t2, t3), retain_graph=True)
except RuntimeError:
with err_count_lock:
err_count[0] += 1
self._run_py_multithread_fn(backward_retain_graph, (out, t2, t3), num_threads=5)
# Expect all 5 threads to increment count since the hook runs before
# the custom backward
self.assertEqual(count[0], 5)
self.assertEqual(err_count[0], 1)
self.assertEqual(res, "foo")
def test_dataparallel_saved_tensors_hooks(self):
def pack(x):
warnings.warn("pack")
return x
_self = self
class Model(torch.nn.Module):
def forward(self, x):
with warnings.catch_warnings(record=True) as w:
y = x * x
if torch.cuda.device_count() >= 2:
# DataParallel is calling the forward in different threads
# without propagating TLS, so hooks should not be called here
_self.assertEqual(len(w), 0)
else:
# DataParallel only uses one thread
# so hooks should be called here
_self.assertGreater(len(w), 0)
x = torch.ones(5, 5, requires_grad=True)
model = torch.nn.DataParallel(Model())
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
model(x)
with warnings.catch_warnings(record=True) as w:
y = x * x
# hooks should be called here
_self.assertGreater(len(w), 0)
def test_python_thread_in_middle(self):
# User might write a network that starts on one CPU thread, then runs its second half
# concurrently with other threads (either via python threading or fork/join calls),
# then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
# bottom to output at the top. This way part of the GraphTask is being shared across
# different threads and we need to ensure user specify retain_graph=True, otherwise
# error out with the correct error message
# Case 1: multiple backward with python threads, retain_graph=False
# should throw error in some threads with no retain_graph.
success_vs_raises = [0, 0]
def train_fn_no_retain_graph(x):
y = x + x**2
try:
y.sum().backward()
success_vs_raises[0] += 1
except RuntimeError as error:
success_vs_raises[1] += 1
self.assertRegex(str(error), "Specify retain_graph=True")
x_no_retain = torch.ones(5, 5, requires_grad=True)
y_no_retain = x_no_retain + x_no_retain**2
self._run_py_multithread_fn(
train_fn_no_retain_graph, (y_no_retain,), num_threads=5
)
# at least one thread will be success in this case, all other threads should raise
# with the error that throw to user to recommend them specify retain_graph=True
self.assertTrue(success_vs_raises[0] >= 1)
# multiple backward with python threads, no error with retain_graph=True
def train_fn_retain_graph(x):
y = x + x**2
y.sum().backward(retain_graph=True)
x_retain = torch.ones(5, 5, requires_grad=True)
y_retain = x_retain + x_retain**2
self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
# result should equal to num_thread * gradients
self.assertEqual(
x_retain.grad,
5 * (4 * x_retain**3 + 6 * (x_retain**2) + 4 * x_retain + 1),
)
def test_fork_join_in_middle(self):
# multiple backward with jit threads (fork/join primitive)
# similar to test_python_thread_in_middle, we test with retain_graph=False/True
# Case 1: multiple grad() calls with jit threads, retain_graph=False
# should throw error in some threads with no retain_graph.
@torch.jit.script
def train_fn_jit_no_retain(middle, orig_x):
y = middle + middle**2
return torch.autograd.grad([y.sum()], [orig_x])
@torch.jit.script
def train_fn_fork_join_calls_no_retain(x):
y_no_retain = (x + 3) * (x + 4) * 0.5
fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
grad_hat = train_fn_jit_no_retain(y_no_retain, x)
grad = torch.jit._wait(fut)
return grad, grad_hat
try:
train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
except RuntimeError as error:
self.assertRegex(str(error), "Specify retain_graph=True")
# Case 2: no error with retain_graph=True
@torch.jit.script
def train_fn_jit_retain(middle, orig_x):
y = middle + middle**2
return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)
@torch.jit.script
def train_fn_fork_join_calls_retain(x):
y_retain = (x + 3) * (x + 4) * 0.5
fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
grad = train_fn_jit_retain(y_retain, x)
grad1 = torch.jit._wait(fut1)
grad2 = torch.jit._wait(fut2)
return grad, grad1, grad2
grad, grad1, grad2 = train_fn_fork_join_calls_retain(
torch.randn(5, 5, requires_grad=True)
)
self.assertEqual(grad, grad1)
self.assertEqual(grad, grad2)
def test_preserve_backtrace(self):
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, *grad):
raise ValueError("something")
t = torch.rand(10, requires_grad=True)
try:
Foo.apply(t).sum().backward()
except Exception:
import traceback
tb = sys.exc_info()[2]
tb_str = "\n".join(traceback.format_tb(tb))
self.assertTrue('raise ValueError("something")' in tb_str)
# TODO(@anjali411): add an OpInfo based test for torch.cat
# Issue: https://github.com/pytorch/pytorch/issues/51627
# https://github.com/pytorch/pytorch/issues/75852
def test_cat_stack_r_to_c(self):
inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True)
inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True)
def fn(x1, x2):
return torch.cat((x1, x2), dim=-1)
def fn2(x1, x2):
return torch.stack((x1, x2), dim=-1)
torch.autograd.gradcheck(fn, [inp_r, inp_c], check_forward_ad=True)
torch.autograd.gradcheck(fn, [inp_c, inp_r], check_forward_ad=True)
torch.autograd.gradcheck(fn2, [inp_r, inp_c], check_forward_ad=True)
torch.autograd.gradcheck(fn2, [inp_c, inp_r], check_forward_ad=True)
def test_set_multithreading_enabled_as_context_manager_and_function(self):
# Test as a context manager
with torch.autograd.set_multithreading_enabled(False):
self.assertFalse(torch.autograd.is_multithreading_enabled())
self.assertTrue(torch.autograd.is_multithreading_enabled())
with torch.autograd.set_multithreading_enabled(True):
self.assertTrue(torch.autograd.is_multithreading_enabled())
self.assertTrue(torch.autograd.is_multithreading_enabled())
with torch.autograd.set_multithreading_enabled(False):
torch.autograd.set_multithreading_enabled(True)
self.assertTrue(torch.autograd.is_multithreading_enabled())
self.assertTrue(torch.autograd.is_multithreading_enabled())
torch.autograd.set_multithreading_enabled(False)
self.assertFalse(torch.autograd.is_multithreading_enabled())
torch.autograd.set_multithreading_enabled(True)
self.assertTrue(torch.autograd.is_multithreading_enabled())
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_custom_function_propagates_errors_from_device_thread(self):
class MyFunc(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
raise RuntimeError("blah")
return gO
t = torch.tensor([1.0, 2.0], requires_grad=True, device=torch.device("cuda"))
out = MyFunc.apply(t).sum()
with self.assertRaisesRegex(RuntimeError, "blah"):
out.backward()
| TestMultithreadAutograd |
python | google__jax | jax/_src/debugger/colab_lib.py | {
"start": 2504,
"end": 4298
} | class ____(DOMElement):
"""An immutable DOM element."""
_uuid: str = dataclasses.field(init=False)
name: str
children: list[str | DOMElement]
attrs: dict[str, str]
def html(self):
attr_str = ""
if self.attrs:
attr_str = " " + (" ".join(
[f"{key}=\"{value}\"" for key, value in self.attrs.items()]))
children = []
children = "\n".join([str(c) for c in self.children])
return f"<{self.name}{attr_str}>{children}</{self.name}>"
def render(self):
display.display(display.HTML(self.html()))
def attr(self, key: str) -> str:
return self.attrs[key]
def __str__(self):
return self.html()
def __repr__(self):
return self.html()
def append(self, child: DOMElement) -> DOMElement:
return dataclasses.replace(self, children=[*self.children, child])
def replace(self, **kwargs) -> DOMElement:
return dataclasses.replace(self, **kwargs)
def _style_dict_to_str(style_dict: dict[str, Any]) -> str:
return " ".join([f"{k}: {v};" for k, v in style_dict.items()])
def dynamic(elem: StaticDOMElement) -> DynamicDiv:
return DynamicDiv(elem)
def _make_elem(tag: str, *children: Element, **attrs) -> StaticDOMElement:
"""Helper function for making DOM elements."""
return StaticDOMElement(tag, list(children), attrs)
code = functools.partial(_make_elem, "code")
div = functools.partial(_make_elem, "div")
li = functools.partial(_make_elem, "li")
ol = functools.partial(_make_elem, "ol")
pre = functools.partial(_make_elem, "pre")
progress = functools.partial(_make_elem, "progress")
span = functools.partial(_make_elem, "span")
def css(text: str) -> StaticDOMElement:
return StaticDOMElement("style", [text], {})
def style(*args, **kwargs):
return _style_dict_to_str(dict(*args, **kwargs))
| StaticDOMElement |
python | psf__black | tests/data/cases/class_methods_new_line.py | {
"start": 288,
"end": 371
} | class ____:
cls_var = 100
def __init__(self):
pass
| ClassWithInitAndVars |
python | dagster-io__dagster | python_modules/dagster/dagster/_daemon/daemon.py | {
"start": 8282,
"end": 9776
} | class ____(DagsterDaemon[TContext], ABC):
def __init__(
self,
interval_seconds,
*,
interval_jitter_seconds: float = 0,
startup_jitter_seconds: float = 0,
):
self.interval_seconds = check.numeric_param(interval_seconds, "interval_seconds")
self.interval_jitter_seconds = interval_jitter_seconds
self.startup_jitter_seconds = startup_jitter_seconds
super().__init__()
def core_loop(
self,
workspace_process_context: TContext,
shutdown_event: Event,
) -> DaemonIterator:
if self.startup_jitter_seconds:
time.sleep(random.uniform(0, self.startup_jitter_seconds))
while True:
interval = self.interval_seconds + random.uniform(0, self.interval_jitter_seconds)
start_time = time.time()
yield SpanMarker.START_SPAN
try:
yield from self.run_iteration(workspace_process_context)
except Exception:
error_info = serializable_error_info_from_exc_info(sys.exc_info())
self._logger.error("Caught error:\n%s", error_info)
yield error_info
yield SpanMarker.END_SPAN
while time.time() - start_time < interval:
shutdown_event.wait(0.5)
yield None
yield None
@abstractmethod
def run_iteration(self, workspace_process_context: TContext) -> DaemonIterator: ...
| IntervalDaemon |
python | getsentry__sentry | tests/sentry/backup/test_imports.py | {
"start": 3565,
"end": 3862
} | class ____(BackupTransactionTestCase):
def export_to_tmp_file_and_clear_database(self, tmp_dir) -> Path:
tmp_path = Path(tmp_dir).joinpath(f"{self._testMethodName}.json")
export_to_file(tmp_path, ExportScope.Global)
clear_database()
return tmp_path
| ImportTestCase |
python | pytorch__pytorch | torch/onnx/_internal/exporter/_verification.py | {
"start": 8395,
"end": 12502
} | class ____(torch.fx.Interpreter):
"""Interpreter for verifying converted ONNX model accuracy by comparing intermediate values.
To compare models, first initialize the interpreter with an ONNX program.
Then, call the :meth:`run` method with the input arguments to execute the model.
The :meth:`run` method will execute the model and populate the
:attr:`verification_infos` attribute with the verification information for each value.
::
onnx_program = torch.onnx.export(model, args, dynamo=True)
interpreter = _VerificationInterpreter(onnx_program)
interpreter.run(*args)
verification_infos = interpreter.verification_infos
for info in verification_infos:
print("value name:", info.name, info)
The verification information includes the maximum absolute difference, maximum relative
difference, and histograms of absolute and relative differences between the expected
and actual values. See :class:`VerificationInfo` for more details.
Attributes:
verification_infos: A list of verification information for each value.
It is populated when the `run` method is called.
"""
def __init__(self, onnx_program: torch.onnx.ONNXProgram) -> None:
"""Initialize the _VerificationInterpreter with an ONNX program.
Args:
onnx_program: The ONNX program to verify.
"""
if onnx_program.exported_program is None:
raise ValueError(
"The ONNX program does not contain an exported_program. "
"Please provide an exported_program to verify the ONNX program."
)
super().__init__(onnx_program.exported_program.module())
self._onnx_program = onnx_program
self._onnx_values = _create_value_mapping(onnx_program.model.graph)
self._args: tuple[Any, ...] = ()
self.verification_infos: list[VerificationInfo] = []
def run(
self,
*args: Any,
initial_env: dict[torch.fx.Node, Any] | None = None,
enable_io_processing: bool = True,
) -> Any:
"""Run the interpreter with the given input arguments.
This method executes the model and populates the :attr:`verification_infos` attribute
with the verification information for each value.
Args:
args: The input arguments for the model.
initial_env: The initial environment for the interpreter.
enable_io_processing: Whether to enable IO processing.
Returns:
Any: The result of executing the model.
"""
self.verification_infos = []
self._args = args
return super().run(
*args,
initial_env=initial_env,
enable_io_processing=enable_io_processing,
)
def run_node(self, n: torch.fx.Node) -> Any:
result = super().run_node(n)
if n.op != "call_function":
return result
node_name = n.name
if node_name not in self._onnx_values:
return result
try:
(onnx_result,) = self._onnx_program.compute_values([node_name], self._args)
except Exception:
logger.warning(
"Failed to compute value for node %s", node_name, exc_info=True
)
return result
info = VerificationInfo.from_tensors(
name=node_name,
expected=result,
actual=onnx_result,
)
self.verification_infos.append(info)
if info.max_abs_diff > 0.01 or info.max_rel_diff > 0.1:
logger.warning(
"Verification info for node %s: max_abs_diff: %s, max_rel_diff: %s",
node_name,
info.max_abs_diff,
info.max_rel_diff,
)
else:
logger.info(
"Verification info for node %s: max_abs_diff: %s, max_rel_diff: %s",
node_name,
info.max_abs_diff,
info.max_rel_diff,
)
return result
| _VerificationInterpreter |
python | ray-project__ray | python/ray/data/tests/unit/test_datatype.py | {
"start": 5603,
"end": 8755
} | class ____:
"""Test type conversion methods."""
def test_to_arrow_dtype_arrow_passthrough(self):
"""Test that Arrow types return themselves."""
dt = DataType.from_arrow(pa.int64())
result = dt.to_arrow_dtype()
assert result == pa.int64()
def test_to_arrow_dtype_numpy_conversion(self):
"""Test conversion from NumPy to Arrow types."""
dt = DataType.from_numpy(np.dtype("int32"))
result = dt.to_arrow_dtype()
assert result == pa.int32()
def test_to_arrow_dtype_python_conversion(self):
"""Test conversion from Python to Arrow types."""
dt = DataType(int)
result = dt.to_arrow_dtype([1])
# Python int should map to int64 in Arrow
assert result == pa.int64()
@pytest.mark.parametrize(
"source_dt,expected_result",
[
# NumPy types should return themselves
(DataType.from_numpy(np.dtype("int32")), np.dtype("int32")),
(DataType.from_numpy(np.dtype("float64")), np.dtype("float64")),
# Python types should fall back to object
(DataType(str), np.dtype("object")),
(DataType(list), np.dtype("object")),
],
)
def test_to_numpy_dtype(self, source_dt, expected_result):
"""Test to_numpy_dtype conversion."""
result = source_dt.to_numpy_dtype()
assert result == expected_result
def test_to_numpy_dtype_arrow_basic_types(self):
"""Test Arrow to NumPy conversion for types that should work."""
# Test basic types that should convert properly
test_cases = [
(pa.int32(), np.dtype("int32")),
(pa.float64(), np.dtype("float64")),
(pa.bool_(), np.dtype("bool")),
]
for pa_type, expected_np_dtype in test_cases:
dt = DataType.from_arrow(pa_type)
result = dt.to_numpy_dtype()
# Some Arrow types may not convert exactly as expected,
# so let's just verify the result is a valid numpy dtype
assert isinstance(result, np.dtype)
def test_to_numpy_dtype_complex_arrow_fallback(self):
"""Test that complex Arrow types fall back to object dtype."""
complex_dt = DataType.from_arrow(pa.list_(pa.int32()))
result = complex_dt.to_numpy_dtype()
assert result == np.dtype("object")
@pytest.mark.parametrize("python_type", [int, str, float, bool, list])
def test_to_python_type_success(self, python_type):
"""Test to_python_type returns the original Python type."""
dt = DataType(python_type)
result = dt.to_python_type()
assert result == python_type
@pytest.mark.parametrize(
"non_python_dt",
[
DataType.from_arrow(pa.int64()),
DataType.from_numpy(np.dtype("float32")),
],
)
def test_to_python_type_failure(self, non_python_dt):
"""Test to_python_type raises ValueError for non-Python types."""
with pytest.raises(ValueError, match="is not backed by a Python type"):
non_python_dt.to_python_type()
| TestDataTypeConversions |
python | pallets__werkzeug | src/werkzeug/datastructures/accept.py | {
"start": 11774,
"end": 12177
} | class ____(Accept):
"""Like :class:`Accept` but with normalization for charsets."""
def _value_matches(self, value: str, item: str) -> bool:
def _normalize(name: str) -> str:
try:
return codecs.lookup(name).name
except LookupError:
return name.lower()
return item == "*" or _normalize(value) == _normalize(item)
| CharsetAccept |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/conjecture/test_provider.py | {
"start": 21413,
"end": 22355
} | class ____(TrivialProvider):
def __init__(self, conjecturedata):
super().__init__(conjecturedata)
self.n = 0
def draw_integer(self, **constraints):
self.n += 1
if self.n == 1:
return 1
raise BackendCannotProceed("verified")
def test_raising_verified_after_failure_is_sound():
# see https://github.com/pschanely/hypothesis-crosshair/issues/31#issuecomment-2852940574
with temp_register_backend("soundness_test", SoundnessTestProvider):
@given(st.integers())
@settings(backend="soundness_test", database=None)
def f(n):
assert n != 1
with pytest.raises(AssertionError) as e:
f()
# full message as of writing: "backend='soundness_test' claimed to
# verify this test passes - please send them a bug report!"
assert all("backend" not in note for note in e.value.__notes__)
| SoundnessTestProvider |
python | getsentry__sentry | tests/sentry/models/test_apigrant.py | {
"start": 217,
"end": 833
} | class ____(TestCase):
def setUp(self) -> None:
self.user = self.create_user()
self.application = ApiApplication.objects.create(
owner=self.user, redirect_uris="https://example.com"
)
self.grant = ApiGrant.objects.create(
user=self.user, application=self.application, redirect_uri="https://example.com"
)
def test_default_string_serialization(self) -> None:
default_msg = f"api_grant_id={self.grant.id}, user_id={self.user.id}, application_id={self.application.id} is cool"
assert f"{self.grant} is cool" == default_msg
| ApiGrantTest |
python | pandas-dev__pandas | asv_bench/benchmarks/arithmetic.py | {
"start": 430,
"end": 1182
} | class ____:
params = [
[np.float64, np.int64],
[2, 3.0, np.int32(4), np.float64(5)],
[
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.floordiv,
operator.pow,
operator.mod,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
operator.lt,
operator.le,
],
]
param_names = ["dtype", "scalar", "op"]
def setup(self, dtype, scalar, op):
arr = np.random.randn(20000, 100)
self.df = DataFrame(arr.astype(dtype))
def time_frame_op_with_scalar(self, dtype, scalar, op):
op(self.df, scalar)
| IntFrameWithScalar |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/reddit/tests.py | {
"start": 240,
"end": 578
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = RedditProvider.id
def get_mocked_response(self):
return [
MockedResponse(
HTTPStatus.OK,
"""{
"name": "wayward710"}""",
)
]
def get_expected_to_str(self):
return "wayward710"
| RedditTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1587073,
"end": 1587208
} | class ____(sgqlc.types.Union):
"""Users and teams."""
__schema__ = github_schema
__types__ = (Team, User)
| DeploymentReviewer |
python | great-expectations__great_expectations | tests/scripts/test_public_api_report.py | {
"start": 10283,
"end": 11670
} | class ____:
def test_get_all_public_api_definitions(self, public_api_checker: PublicAPIChecker):
observed = public_api_checker.get_all_public_api_definitions()
assert len(observed) == 6
assert {d.name for d in observed} == {
"ExamplePublicAPIClass",
"example_multiple_decorator_public_method",
"example_public_api_method",
"example_public_api_module_level_function",
"example_public_classmethod",
"example_public_staticmethod",
}
assert {d.filepath for d in observed} == {
pathlib.Path("sample_with_definitions_python_file_string.py")
}
def _class_and_function_definitions(
self, tree: ast.AST
) -> List[Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]]:
"""Helper function to find class and function definitions from ast tree for tests."""
definitions = []
for node in ast.walk(tree):
if isinstance(node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)):
definitions.append(node)
return definitions
def test_is_definition_marked_public_api_yes(
self, public_api_checker: PublicAPIChecker, tmp_path: pathlib.Path
):
file_string = """
@public_api
def example_public_api_module_level_function():
pass
@public_api
| TestPublicAPIChecker |
python | urllib3__urllib3 | test/test_retry.py | {
"start": 372,
"end": 16767
} | class ____:
def test_string(self) -> None:
"""Retry string representation looks the way we expect"""
retry = Retry()
assert (
str(retry)
== "Retry(total=10, connect=None, read=None, redirect=None, status=None)"
)
for _ in range(3):
retry = retry.increment(method="GET")
assert (
str(retry)
== "Retry(total=7, connect=None, read=None, redirect=None, status=None)"
)
def test_retry_both_specified(self) -> None:
"""Total can win if it's lower than the connect value"""
error = ConnectTimeoutError()
retry = Retry(connect=3, total=2)
retry = retry.increment(error=error)
retry = retry.increment(error=error)
with pytest.raises(MaxRetryError) as e:
retry.increment(error=error)
assert e.value.reason == error
def test_retry_higher_total_loses(self) -> None:
"""A lower connect timeout than the total is honored"""
error = ConnectTimeoutError()
retry = Retry(connect=2, total=3)
retry = retry.increment(error=error)
retry = retry.increment(error=error)
with pytest.raises(MaxRetryError):
retry.increment(error=error)
def test_retry_higher_total_loses_vs_read(self) -> None:
"""A lower read timeout than the total is honored"""
error = ReadTimeoutError(DUMMY_POOL, "/", "read timed out")
retry = Retry(read=2, total=3)
retry = retry.increment(method="GET", error=error)
retry = retry.increment(method="GET", error=error)
with pytest.raises(MaxRetryError):
retry.increment(method="GET", error=error)
def test_retry_total_none(self) -> None:
"""if Total is none, connect error should take precedence"""
error = ConnectTimeoutError()
retry = Retry(connect=2, total=None)
retry = retry.increment(error=error)
retry = retry.increment(error=error)
with pytest.raises(MaxRetryError) as e:
retry.increment(error=error)
assert e.value.reason == error
timeout_error = ReadTimeoutError(DUMMY_POOL, "/", "read timed out")
retry = Retry(connect=2, total=None)
retry = retry.increment(method="GET", error=timeout_error)
retry = retry.increment(method="GET", error=timeout_error)
retry = retry.increment(method="GET", error=timeout_error)
assert not retry.is_exhausted()
def test_retry_default(self) -> None:
"""If no value is specified, should retry connects 3 times"""
retry = Retry()
assert retry.total == 10
assert retry.connect is None
assert retry.read is None
assert retry.redirect is None
assert retry.other is None
error = ConnectTimeoutError()
retry = Retry(connect=1)
retry = retry.increment(error=error)
with pytest.raises(MaxRetryError):
retry.increment(error=error)
retry = Retry(connect=1)
retry = retry.increment(error=error)
assert not retry.is_exhausted()
assert Retry(0).raise_on_redirect
assert not Retry(False).raise_on_redirect
def test_retry_other(self) -> None:
"""If an unexpected error is raised, should retry other times"""
other_error = SSLError()
retry = Retry(connect=1)
retry = retry.increment(error=other_error)
retry = retry.increment(error=other_error)
assert not retry.is_exhausted()
retry = Retry(other=1)
retry = retry.increment(error=other_error)
with pytest.raises(MaxRetryError) as e:
retry.increment(error=other_error)
assert e.value.reason == other_error
def test_retry_read_zero(self) -> None:
"""No second chances on read timeouts, by default"""
error = ReadTimeoutError(DUMMY_POOL, "/", "read timed out")
retry = Retry(read=0)
with pytest.raises(MaxRetryError) as e:
retry.increment(method="GET", error=error)
assert e.value.reason == error
def test_status_counter(self) -> None:
resp = HTTPResponse(status=400)
retry = Retry(status=2)
retry = retry.increment(response=resp)
retry = retry.increment(response=resp)
msg = ResponseError.SPECIFIC_ERROR.format(status_code=400)
with pytest.raises(MaxRetryError, match=msg):
retry.increment(response=resp)
def test_backoff(self) -> None:
"""Backoff is computed correctly"""
max_backoff = Retry.DEFAULT_BACKOFF_MAX
retry = Retry(total=100, backoff_factor=0.2)
assert retry.get_backoff_time() == 0 # First request
retry = retry.increment(method="GET")
assert retry.get_backoff_time() == 0 # First retry
retry = retry.increment(method="GET")
assert retry.backoff_factor == 0.2
assert retry.total == 98
assert retry.get_backoff_time() == 0.4 # Start backoff
retry = retry.increment(method="GET")
assert retry.get_backoff_time() == 0.8
retry = retry.increment(method="GET")
assert retry.get_backoff_time() == 1.6
for _ in range(10):
retry = retry.increment(method="GET")
assert retry.get_backoff_time() == max_backoff
def test_configurable_backoff_max(self) -> None:
"""Configurable backoff is computed correctly"""
max_backoff = 1
retry = Retry(total=100, backoff_factor=0.2, backoff_max=max_backoff)
assert retry.get_backoff_time() == 0 # First request
retry = retry.increment(method="GET")
assert retry.get_backoff_time() == 0 # First retry
retry = retry.increment(method="GET")
assert retry.backoff_factor == 0.2
assert retry.total == 98
assert retry.get_backoff_time() == 0.4 # Start backoff
retry = retry.increment(method="GET")
assert retry.get_backoff_time() == 0.8
retry = retry.increment(method="GET")
assert retry.get_backoff_time() == max_backoff
retry = retry.increment(method="GET")
assert retry.get_backoff_time() == max_backoff
def test_backoff_jitter(self) -> None:
"""Backoff with jitter is computed correctly"""
max_backoff = 1
jitter = 0.4
retry = Retry(
total=100,
backoff_factor=0.2,
backoff_max=max_backoff,
backoff_jitter=jitter,
)
assert retry.get_backoff_time() == 0 # First request
retry = retry.increment(method="GET")
assert retry.get_backoff_time() == 0 # First retry
retry = retry.increment(method="GET")
assert retry.backoff_factor == 0.2
assert retry.total == 98
assert 0.4 <= retry.get_backoff_time() <= 0.8 # Start backoff
retry = retry.increment(method="GET")
assert 0.8 <= retry.get_backoff_time() <= max_backoff
retry = retry.increment(method="GET")
assert retry.get_backoff_time() == max_backoff
retry = retry.increment(method="GET")
assert retry.get_backoff_time() == max_backoff
def test_zero_backoff(self) -> None:
retry = Retry()
assert retry.get_backoff_time() == 0
retry = retry.increment(method="GET")
retry = retry.increment(method="GET")
assert retry.get_backoff_time() == 0
def test_backoff_reset_after_redirect(self) -> None:
retry = Retry(total=100, redirect=5, backoff_factor=0.2)
retry = retry.increment(method="GET")
retry = retry.increment(method="GET")
assert retry.get_backoff_time() == 0.4
redirect_response = HTTPResponse(status=302, headers={"location": "test"})
retry = retry.increment(method="GET", response=redirect_response)
assert retry.get_backoff_time() == 0
retry = retry.increment(method="GET")
retry = retry.increment(method="GET")
assert retry.get_backoff_time() == 0.4
def test_sleep(self) -> None:
# sleep a very small amount of time so our code coverage is happy
retry = Retry(backoff_factor=0.0001)
retry = retry.increment(method="GET")
retry = retry.increment(method="GET")
retry.sleep()
def test_status_forcelist(self) -> None:
retry = Retry(status_forcelist=range(500, 600))
assert not retry.is_retry("GET", status_code=200)
assert not retry.is_retry("GET", status_code=400)
assert retry.is_retry("GET", status_code=500)
retry = Retry(total=1, status_forcelist=[418])
assert not retry.is_retry("GET", status_code=400)
assert retry.is_retry("GET", status_code=418)
# String status codes are not matched.
retry = Retry(total=1, status_forcelist=["418"]) # type: ignore[list-item]
assert not retry.is_retry("GET", status_code=418)
def test_allowed_methods_with_status_forcelist(self) -> None:
# Falsey allowed_methods means to retry on any method.
retry = Retry(status_forcelist=[500], allowed_methods=None)
assert retry.is_retry("GET", status_code=500)
assert retry.is_retry("POST", status_code=500)
# Criteria of allowed_methods and status_forcelist are ANDed.
retry = Retry(status_forcelist=[500], allowed_methods=["POST"])
assert not retry.is_retry("GET", status_code=500)
assert retry.is_retry("POST", status_code=500)
def test_exhausted(self) -> None:
assert not Retry(0).is_exhausted()
assert Retry(-1).is_exhausted()
assert Retry(1).increment(method="GET").total == 0
@pytest.mark.parametrize("total", [-1, 0])
def test_disabled(self, total: int) -> None:
with pytest.raises(MaxRetryError):
Retry(total).increment(method="GET")
def test_error_message(self) -> None:
retry = Retry(total=0)
with pytest.raises(MaxRetryError, match="read timed out") as e:
retry = retry.increment(
method="GET", error=ReadTimeoutError(DUMMY_POOL, "/", "read timed out")
)
assert "Caused by redirect" not in str(e.value)
retry = Retry(total=1)
retry = retry.increment("POST", "/")
with pytest.raises(MaxRetryError, match=ResponseError.GENERIC_ERROR) as e:
retry = retry.increment("POST", "/")
assert "Caused by redirect" not in str(e.value)
assert isinstance(e.value.reason, ResponseError)
retry = Retry(total=1)
response = HTTPResponse(status=500)
msg = ResponseError.SPECIFIC_ERROR.format(status_code=500)
retry = retry.increment("POST", "/", response=response)
with pytest.raises(MaxRetryError, match=msg) as e:
retry = retry.increment("POST", "/", response=response)
assert "Caused by redirect" not in str(e.value)
retry = Retry(connect=1)
retry = retry.increment(error=ConnectTimeoutError("conntimeout"))
with pytest.raises(MaxRetryError, match="conntimeout") as e:
retry = retry.increment(error=ConnectTimeoutError("conntimeout"))
assert "Caused by redirect" not in str(e.value)
def test_history(self) -> None:
retry = Retry(total=10, allowed_methods=frozenset(["GET", "POST"]))
assert retry.history == tuple()
connection_error = ConnectTimeoutError("conntimeout")
retry = retry.increment("GET", "/test1", None, connection_error)
test_history1 = (RequestHistory("GET", "/test1", connection_error, None, None),)
assert retry.history == test_history1
read_error = ReadTimeoutError(DUMMY_POOL, "/test2", "read timed out")
retry = retry.increment("POST", "/test2", None, read_error)
test_history2 = (
RequestHistory("GET", "/test1", connection_error, None, None),
RequestHistory("POST", "/test2", read_error, None, None),
)
assert retry.history == test_history2
response = HTTPResponse(status=500)
retry = retry.increment("GET", "/test3", response, None)
test_history3 = (
RequestHistory("GET", "/test1", connection_error, None, None),
RequestHistory("POST", "/test2", read_error, None, None),
RequestHistory("GET", "/test3", None, 500, None),
)
assert retry.history == test_history3
def test_retry_method_not_allowed(self) -> None:
error = ReadTimeoutError(DUMMY_POOL, "/", "read timed out")
retry = Retry()
with pytest.raises(ReadTimeoutError):
retry.increment(method="POST", error=error)
def test_retry_default_remove_headers_on_redirect(self) -> None:
retry = Retry()
assert retry.remove_headers_on_redirect == {
"authorization",
"proxy-authorization",
"cookie",
}
def test_retry_set_remove_headers_on_redirect(self) -> None:
retry = Retry(remove_headers_on_redirect=["X-API-Secret"])
assert retry.remove_headers_on_redirect == {"x-api-secret"}
@pytest.mark.parametrize("value", ["-1", "+1", "1.0", "\xb2"]) # \xb2 = ^2
def test_parse_retry_after_invalid(self, value: str) -> None:
retry = Retry()
with pytest.raises(InvalidHeader):
retry.parse_retry_after(value)
@pytest.mark.parametrize(
"value, expected", [("0", 0), ("1000", 1000), ("\t42 ", 42)]
)
def test_parse_retry_after(self, value: str, expected: int) -> None:
retry = Retry()
assert retry.parse_retry_after(value) == expected
@pytest.mark.parametrize("respect_retry_after_header", [True, False])
def test_respect_retry_after_header_propagated(
self, respect_retry_after_header: bool
) -> None:
retry = Retry(respect_retry_after_header=respect_retry_after_header)
new_retry = retry.new()
assert new_retry.respect_retry_after_header == respect_retry_after_header
@pytest.mark.parametrize(
"retry_after_header,respect_retry_after_header,sleep_duration",
[
("3600", True, 3600),
("3600", False, None),
# Will sleep due to header is 1 hour in future
("Mon, 3 Jun 2019 12:00:00 UTC", True, 3600),
# Won't sleep due to not respecting header
("Mon, 3 Jun 2019 12:00:00 UTC", False, None),
# Won't sleep due to current time reached
("Mon, 3 Jun 2019 11:00:00 UTC", True, None),
# Won't sleep due to current time reached + not respecting header
("Mon, 3 Jun 2019 11:00:00 UTC", False, None),
# Handle all the formats in RFC 7231 Section 7.1.1.1
("Mon, 03 Jun 2019 11:30:12 GMT", True, 1812),
("Monday, 03-Jun-19 11:30:12 GMT", True, 1812),
# Assume that datetimes without a timezone are in UTC per RFC 7231
("Mon Jun 3 11:30:12 2019", True, 1812),
],
)
@pytest.mark.parametrize(
"stub_timezone",
[
"UTC",
"Asia/Jerusalem",
None,
],
indirect=True,
)
@pytest.mark.usefixtures("stub_timezone")
def test_respect_retry_after_header_sleep(
self,
retry_after_header: str,
respect_retry_after_header: bool,
sleep_duration: int | None,
) -> None:
retry = Retry(respect_retry_after_header=respect_retry_after_header)
with (
mock.patch(
"time.time",
return_value=datetime.datetime(
2019, 6, 3, 11, tzinfo=datetime.timezone.utc
).timestamp(),
),
mock.patch("time.sleep") as sleep_mock,
):
# for the default behavior, it must be in RETRY_AFTER_STATUS_CODES
response = HTTPResponse(
status=503, headers={"Retry-After": retry_after_header}
)
retry.sleep(response)
# The expected behavior is that we'll only sleep if respecting
# this header (since we won't have any backoff sleep attempts)
if respect_retry_after_header and sleep_duration is not None:
sleep_mock.assert_called_with(sleep_duration)
else:
sleep_mock.assert_not_called()
| TestRetry |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/exc.py | {
"start": 12976,
"end": 13580
} | class ____:
"""A mixin class which, when applied to a user-defined Exception class,
will not be wrapped inside of :exc:`.StatementError` if the error is
emitted within the process of executing a statement.
E.g.::
from sqlalchemy.exc import DontWrapMixin
class MyCustomException(Exception, DontWrapMixin):
pass
class MySpecialType(TypeDecorator):
impl = String
def process_bind_param(self, value, dialect):
if value == "invalid":
raise MyCustomException("invalid!")
"""
| DontWrapMixin |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 83012,
"end": 83880
} | class ____(Request):
"""
Gets model information
:param model: Model id
:type model: str
"""
_service = "models"
_action = "get_by_id"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {"model": {"description": "Model id", "type": "string"}},
"required": ["model"],
"type": "object",
}
def __init__(self, model: str, **kwargs: Any) -> None:
super(GetByIdRequest, self).__init__(**kwargs)
self.model = model
@schema_property("model")
def model(self) -> str:
return self._property_model
@model.setter
def model(self, value: str) -> None:
if value is None:
self._property_model = None
return
self.assert_isinstance(value, "model", six.string_types)
self._property_model = value
| GetByIdRequest |
python | doocs__leetcode | solution/2700-2799/2780.Minimum Index of a Valid Split/Solution.py | {
"start": 0,
"end": 340
} | class ____:
def minimumIndex(self, nums: List[int]) -> int:
x, cnt = Counter(nums).most_common(1)[0]
cur = 0
for i, v in enumerate(nums, 1):
if v == x:
cur += 1
if cur * 2 > i and (cnt - cur) * 2 > len(nums) - i:
return i - 1
return -1
| Solution |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 36522,
"end": 37337
} | class ____:
"""Test fa_IR address provider methods"""
def test_city_prefix(self, faker, num_samples):
for _ in range(num_samples):
city_prefix = faker.city_prefix()
assert isinstance(city_prefix, str)
assert city_prefix in FaIrAddressProvider.city_prefixes
def test_secondary_address(self, faker, num_samples):
for _ in range(num_samples):
secondary_address = faker.secondary_address()
assert isinstance(secondary_address, str)
assert re.fullmatch(r"(?:سوئیت|واحد) \d{3}", secondary_address)
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in FaIrAddressProvider.states
| TestFaIr |
python | spyder-ide__spyder | spyder/api/utils.py | {
"start": 680,
"end": 1399
} | class ____:
"""Utility class used to represent a prefixed string tuple."""
def __init__(self, path=None):
self.children = {}
self.path = path
def __iter__(self):
prefix = [((self.path,), self)]
while prefix != []:
current_prefix, node = prefix.pop(0)
prefix += [(current_prefix + (c,), node.children[c])
for c in node.children]
yield current_prefix
def add_path(self, path):
prefix, *rest = path
if prefix not in self.children:
self.children[prefix] = PrefixNode(prefix)
if len(rest) > 0:
child = self.children[prefix]
child.add_path(rest)
| PrefixNode |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.