language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/network-delay-time.py | {
"start": 245,
"end": 1138
} | class ____(object):
def networkDelayTime(self, times, N, K):
"""
:type times: List[List[int]]
:type N: int
:type K: int
:rtype: int
"""
adj = [[] for _ in xrange(N)]
for u, v, w in times:
adj[u-1].append((v-1, w))
result = 0
lookup = set()
best = collections.defaultdict(lambda: float("inf"))
best[K-1] = 0
min_heap = [(0, K-1)]
while min_heap and len(lookup) != N:
result, u = heapq.heappop(min_heap)
lookup.add(u)
if best[u] < result:
continue
for v, w in adj[u]:
if v in lookup: continue
if result+w < best[v]:
best[v] = result+w
heapq.heappush(min_heap, (result+w, v))
return result if len(lookup) == N else -1
| Solution |
python | apache__avro | lang/py/avro/errors.py | {
"start": 2845,
"end": 3274
} | class ____(AvroException):
def __init__(self, fail_msg, writers_schema=None, readers_schema=None, *args):
writers_message = f"\nWriter's Schema: {_safe_pretty(writers_schema)}" if writers_schema else ""
readers_message = f"\nReader's Schema: {_safe_pretty(readers_schema)}" if readers_schema else ""
super().__init__((fail_msg or "") + writers_message + readers_message, *args)
| SchemaResolutionException |
python | huggingface__transformers | src/transformers/processing_utils.py | {
"start": 4359,
"end": 8044
} | class ____(TypedDict, total=False):
"""
Keyword arguments for text processing. For extended documentation, check out tokenization_utils_base methods and
docstrings associated.
Attributes:
add_special_tokens (`bool`, *optional*)
Whether or not to add special tokens when encoding the sequences.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*)
Activates and controls padding.
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*):
Activates and controls truncation.
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
stride (`int`, *optional*):
If set, the overflowing tokens will contain some tokens from the end of the truncated sequence.
is_split_into_words (`bool`, *optional*):
Whether or not the input is already pre-tokenized.
pad_to_multiple_of (`int`, *optional*):
If set, will pad the sequence to a multiple of the provided value.
return_token_type_ids (`bool`, *optional*):
Whether to return token type IDs.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask.
return_overflowing_tokens (`bool`, *optional*):
Whether or not to return overflowing token sequences.
return_special_tokens_mask (`bool`, *optional*):
Whether or not to return special tokens mask information.
return_offsets_mapping (`bool`, *optional*):
Whether or not to return `(char_start, char_end)` for each token.
return_length (`bool`, *optional*):
Whether or not to return the lengths of the encoded inputs.
verbose (`bool`, *optional*):
Whether or not to print more information and warnings.
padding_side (`str`, *optional*):
The side on which padding will be applied.
return_mm_token_type_ids (`bool`, *optional*):
Whether to return multimodal token type ids indicating mm placeholder token positions.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
"""
text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]
text_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]
text_pair_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]
add_special_tokens: Optional[bool]
padding: Annotated[Optional[Union[bool, str, PaddingStrategy]], padding_validator()]
truncation: Annotated[Optional[Union[bool, str, TruncationStrategy]], truncation_validator()]
max_length: Annotated[Optional[int], positive_int()]
stride: Annotated[Optional[int], positive_int()]
is_split_into_words: Optional[bool]
pad_to_multiple_of: Annotated[Optional[int], positive_int()]
return_token_type_ids: Optional[bool]
return_attention_mask: Optional[bool]
return_overflowing_tokens: Optional[bool]
return_special_tokens_mask: Optional[bool]
return_offsets_mapping: Optional[bool]
return_length: Optional[bool]
verbose: Optional[bool]
padding_side: Optional[Literal["left", "right"]]
return_mm_token_type_ids: Optional[bool]
return_tensors: Annotated[Optional[Union[str, TensorType]], tensor_type_validator()]
| TextKwargs |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 81467,
"end": 81810
} | class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('set', c_nvmlSystemEventSet_t)
]
def __init__(self):
super(c_nvmlSystemEventSetCreateRequest_v1_t, self).__init__(version=nvmlSystemEventSetCreateRequest_v1)
nvmlSystemEventSetFreeRequest_v1 = 0x1000010
| c_nvmlSystemEventSetCreateRequest_v1_t |
python | apache__airflow | providers/pagerduty/tests/unit/pagerduty/notifications/test_pagerduty.py | {
"start": 1168,
"end": 5059
} | class ____:
@mock.patch("airflow.providers.pagerduty.notifications.pagerduty.PagerdutyEventsHook")
def test_notifier(self, mock_pagerduty_event_hook):
dag = DAG("test_notifier")
notifier = send_pagerduty_notification(summary="DISK at 99%", severity="critical", action="trigger")
notifier({"dag": dag})
mock_pagerduty_event_hook.return_value.send_event.assert_called_once_with(
summary="DISK at 99%",
severity="critical",
action="trigger",
source="airflow",
class_type=None,
component=None,
custom_details=None,
group=None,
images=None,
links=None,
dedup_key=None,
)
@mock.patch("airflow.providers.pagerduty.notifications.pagerduty.PagerdutyEventsHook")
def test_notifier_with_notifier_class(self, mock_pagerduty_event_hook):
dag = DAG("test_notifier")
notifier = PagerdutyNotifier(summary="DISK at 99%", severity="critical", action="trigger")
notifier({"dag": dag})
mock_pagerduty_event_hook.return_value.send_event.assert_called_once_with(
summary="DISK at 99%",
severity="critical",
action="trigger",
source="airflow",
class_type=None,
component=None,
custom_details=None,
group=None,
images=None,
links=None,
dedup_key=None,
)
@mock.patch("airflow.providers.pagerduty.notifications.pagerduty.PagerdutyEventsHook")
def test_notifier_templated(self, mock_pagerduty_event_hook):
dag = DAG("test_notifier")
notifier = PagerdutyNotifier(
summary="DISK at 99% {{dag.dag_id}}",
severity="critical {{dag.dag_id}}",
source="database {{dag.dag_id}}",
dedup_key="srv0555-{{dag.dag_id}}",
custom_details={
"free space": "1%",
"ping time": "1500ms",
"load avg": 0.75,
"template": "{{dag.dag_id}}",
},
group="prod-datapipe {{dag.dag_id}}",
component="database {{dag.dag_id}}",
class_type="disk {{dag.dag_id}}",
)
context = {"dag": dag}
notifier(context)
mock_pagerduty_event_hook.return_value.send_event.assert_called_once_with(
action="trigger",
summary="DISK at 99% test_notifier",
severity="critical test_notifier",
source="database test_notifier",
dedup_key="srv0555-test_notifier",
custom_details={
"free space": "1%",
"ping time": "1500ms",
"load avg": 0.75,
"template": "test_notifier",
},
group="prod-datapipe test_notifier",
component="database test_notifier",
class_type="disk test_notifier",
images=None,
links=None,
)
@pytest.mark.asyncio
@mock.patch(
"airflow.providers.pagerduty.notifications.pagerduty.PagerdutyEventsAsyncHook.send_event",
new_callable=mock.AsyncMock,
)
async def test_async_notifier(self, mock_async_hook, create_dag_without_db):
notifier = send_pagerduty_notification(summary="DISK at 99%", severity="critical", action="trigger")
await notifier.async_notify({"dag": create_dag_without_db("test_pagerduty_notifier")})
mock_async_hook.assert_called_once_with(
summary="DISK at 99%",
severity="critical",
action="trigger",
source="airflow",
class_type=None,
component=None,
custom_details=None,
group=None,
images=None,
links=None,
dedup_key=None,
)
| TestPagerdutyNotifier |
python | fastai__fastai | fastai/vision/gan.py | {
"start": 727,
"end": 2784
} | class ____(Module):
"Wrapper around a `generator` and a `critic` to create a GAN."
def __init__(self,
generator:nn.Module=None, # The generator PyTorch module
critic:nn.Module=None, # The discriminator PyTorch module
gen_mode:None|bool=False # Whether the GAN should be set to generator mode
):
if generator is not None: self.generator=generator
if critic is not None: self.critic =critic
store_attr('gen_mode')
def forward(self, *args):
return self.generator(*args) if self.gen_mode else self.critic(*args)
def switch(self,
gen_mode:None|bool=None # Whether the GAN should be set to generator mode
):
"Put the module in generator mode if `gen_mode` is `True`, in critic mode otherwise."
self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
# %% ../../nbs/24_vision.gan.ipynb 13
@delegates(ConvLayer.__init__)
def basic_critic(
in_size:int, # Input size for the critic (same as the output size of the generator)
n_channels:int, # Number of channels of the input for the critic
n_features:int=64, # Number of features used in the critic
n_extra_layers:int=0, # Number of extra hidden layers in the critic
norm_type:NormType=NormType.Batch, # Type of normalization to use in the critic
**kwargs
) -> nn.Sequential:
"A basic critic for images `n_channels` x `in_size` x `in_size`."
layers = [ConvLayer(n_channels, n_features, 4, 2, 1, norm_type=None, **kwargs)]
cur_size, cur_ftrs = in_size//2, n_features
layers += [ConvLayer(cur_ftrs, cur_ftrs, 3, 1, norm_type=norm_type, **kwargs) for _ in range(n_extra_layers)]
while cur_size > 4:
layers.append(ConvLayer(cur_ftrs, cur_ftrs*2, 4, 2, 1, norm_type=norm_type, **kwargs))
cur_ftrs *= 2 ; cur_size //= 2
init = kwargs.get('init', nn.init.kaiming_normal_)
layers += [init_default(nn.Conv2d(cur_ftrs, 1, 4, padding=0), init), Flatten()]
return nn.Sequential(*layers)
# %% ../../nbs/24_vision.gan.ipynb 14
| GANModule |
python | astropy__astropy | astropy/cosmology/_src/tests/flrw/test_base.py | {
"start": 1444,
"end": 15675
} | class ____(
CosmologyTest,
ParameterH0TestMixin,
ParameterOm0TestMixin,
ParameterOde0TestMixin,
ParameterTcmb0TestMixin,
ParameterNeffTestMixin,
Parameterm_nuTestMixin,
ParameterOb0TestMixin,
):
abstract_w = False
@abc.abstractmethod
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
# Default cosmology args and kwargs
self._cls_args = dict(
H0=70 * u.km / u.s / u.Mpc, Om0=0.27 * u.one, Ode0=0.73 * u.one
)
self.cls_kwargs = dict(
Tcmb0=3.0 * u.K,
Ob0=0.03 * u.one,
name=self.__class__.__name__,
meta={"a": "b"},
)
@pytest.fixture(scope="class")
def nonflatcosmo(self):
"""A non-flat cosmology used in equivalence tests."""
return LambdaCDM(70, 0.4, 0.8)
# ===============================================================
# Method & Attribute Tests
def test_init(self, cosmo_cls):
"""Test initialization."""
super().test_init(cosmo_cls)
# TODO! tests for initializing calculated values, e.g. `h`
# TODO! transfer tests for initializing neutrinos
def test_init_Tcmb0_zeroing(self, cosmo_cls, ba):
"""Test if setting Tcmb0 parameter to 0 influences other parameters.
TODO: consider moving this test to ``FLRWTest``
"""
ba.arguments["Tcmb0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ogamma0 == 0.0
assert cosmo.Onu0 == 0.0
if not self.abstract_w:
assert u.allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0])
assert u.allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])
assert u.allclose(cosmo.Onu(1.5), [0, 0, 0, 0])
assert u.allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0])
# ---------------------------------------------------------------
# Properties
def test_Odm0(self, cosmo_cls, cosmo):
"""Test ``cached_property`` ``Odm0``."""
# on the class
assert isinstance(cosmo_cls.Odm0, cached_property)
# on the instance
assert np.allclose(cosmo.Odm0, cosmo.Om0 - cosmo.Ob0)
def test_Ok0(self, cosmo_cls, cosmo):
"""Test ``cached_property`` ``Ok0``."""
# on the class
assert isinstance(cosmo_cls.Ok0, cached_property)
# on the instance
assert np.allclose(
cosmo.Ok0, 1.0 - (cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0)
)
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
# on the class
assert isinstance(cosmo_cls.is_flat, property)
assert cosmo_cls.is_flat.fset is None # immutable
# on the instance
assert isinstance(cosmo.is_flat, bool)
assert cosmo.is_flat is bool((cosmo.Ok0 == 0.0) and (cosmo.Otot0 == 1.0))
def test_Tnu0(self, cosmo_cls, cosmo):
"""Test ``cached_property`` ``Tnu0``."""
# on the class
assert isinstance(cosmo_cls.Tnu0, cached_property)
# on the instance
assert cosmo.Tnu0.unit == u.K
assert u.allclose(cosmo.Tnu0, 0.7137658555036082 * cosmo.Tcmb0, rtol=1e-5)
def test_has_massive_nu(self, cosmo_cls, cosmo):
"""Test property ``has_massive_nu``."""
# on the class
assert isinstance(cosmo_cls.has_massive_nu, property)
assert cosmo_cls.has_massive_nu.fset is None # immutable
# on the instance
if cosmo.Tnu0 == 0:
assert cosmo.has_massive_nu is False
else:
assert cosmo.has_massive_nu is cosmo._nu_info.has_massive_nu
def test_h(self, cosmo_cls, cosmo):
"""Test ``cached_property`` ``h``."""
# on the class
assert isinstance(cosmo_cls.h, cached_property)
# on the instance
assert np.allclose(cosmo.h, cosmo.H0.value / 100.0)
def test_hubble_time(self, cosmo_cls, cosmo):
"""Test ``cached_property`` ``hubble_time``."""
# on the class
assert isinstance(cosmo_cls.hubble_time, cached_property)
# on the instance
assert u.allclose(cosmo.hubble_time, (1 / cosmo.H0) << u.Gyr)
def test_hubble_distance(self, cosmo_cls, cosmo):
"""Test ``cached_property`` ``hubble_distance``."""
# on the class
assert isinstance(cosmo_cls.hubble_distance, cached_property)
# on the instance
assert cosmo.hubble_distance == (const.c / cosmo.H0).to(u.Mpc)
def test_critical_density0(self, cosmo_cls, cosmo):
"""Test ``cached_property`` ``critical_density0``."""
# on the class
assert isinstance(cosmo_cls.critical_density0, cached_property)
# on the instance
assert cosmo.critical_density0.unit == u.g / u.cm**3
assert u.allclose( # sanity check
cosmo.critical_density0, 3 * cosmo.H0**2 / (8 * np.pi * const.G)
)
def test_Ogamma0(self, cosmo_cls, cosmo):
"""Test ``cached_property`` ``Ogamma0``."""
# on the class
assert isinstance(cosmo_cls.Ogamma0, cached_property)
# on the instance
# Ogamma cor \propto T^4/rhocrit
expect = a_B_c2 * cosmo.Tcmb0.value**4 / cosmo.critical_density0.value
assert np.allclose(cosmo.Ogamma0, expect)
# check absolute equality to 0 if Tcmb0 is 0
if cosmo.Tcmb0 == 0:
assert cosmo.Ogamma0 == 0
def test_Onu0(self, cosmo_cls, cosmo):
"""Test ``cached_property`` ``Onu0``."""
# on the class
assert isinstance(cosmo_cls.Onu0, cached_property)
# on the instance
# neutrino temperature <= photon temperature since the neutrinos
# decouple first.
if cosmo.has_massive_nu: # Tcmb0 > 0 & has massive
# check the expected formula
assert cosmo.Onu0 == cosmo.Ogamma0 * cosmo.nu_relative_density(0)
# a sanity check on on the ratio of neutrinos to photons
# technically it could be 1, but not for any of the tested cases.
assert cosmo.nu_relative_density(0) <= 1
elif cosmo.Tcmb0 == 0:
assert cosmo.Onu0 == 0
else:
# check the expected formula
assert cosmo.Onu0 == 0.22710731766 * cosmo.__dict__["Neff"] * cosmo.Ogamma0
# and check compatibility with nu_relative_density
assert np.allclose(
cosmo.nu_relative_density(0), 0.22710731766 * cosmo.__dict__["Neff"]
)
def test_Otot0(self, cosmo):
"""Test :attr:`astropy.cosmology.FLRW.Otot0`."""
assert (
cosmo.Otot0
== cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ode0 + cosmo.Ok0
)
# ---------------------------------------------------------------
# Methods
_FLRW_redshift_methods = get_redshift_methods(
FLRW, include_private=True, include_z2=False
)
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize("method", sorted(_FLRW_redshift_methods))
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
with pytest.raises(exc):
getattr(cosmo, method)(z)
@pytest.mark.parametrize("z", valid_zs)
@abc.abstractmethod
def test_w(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.w`.
Since ``w`` is abstract, each test class needs to define further tests.
"""
# super().test_w(cosmo, z) # NOT b/c abstract `w(z)`
w = cosmo.w(z)
assert np.shape(w) == np.shape(z) # test same shape
assert u.Quantity(w).unit == u.one # test no units or dimensionless
# -------------------------------------------
@pytest.mark.parametrize("z", valid_zs)
def test_Otot(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.Otot`."""
# super().test_Otot(cosmo) # NOT b/c abstract `w(z)`
assert np.allclose(
cosmo.Otot(z),
cosmo.Om(z) + cosmo.Ogamma(z) + cosmo.Onu(z) + cosmo.Ode(z) + cosmo.Ok(z),
)
def test_scale_factor0(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.scale_factor`."""
assert isinstance(cosmo.scale_factor0, u.Quantity)
assert cosmo.scale_factor0.unit == u.one
assert cosmo.scale_factor0 == 1
assert np.allclose(cosmo.scale_factor0, cosmo.scale_factor(0))
@pytest.mark.parametrize("z", valid_zs)
def test_scale_factor(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.scale_factor`."""
assert np.allclose(cosmo.scale_factor(z), 1 / (1 + np.array(z)))
# -------------------------------------------
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.")
def test_comoving_distance_1arg_equal_to_2arg(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.comoving_distance`."""
# Special case of z1 = 0
z = np.linspace(0, 1, 10)
assert u.allclose(cosmo.comoving_distance(z), cosmo.comoving_distance(0, z))
# General case of z1, z2
z1 = z
z2 = z + 1
assert u.allclose(
cosmo.comoving_distance(z2) - cosmo.comoving_distance(z1),
cosmo.comoving_distance(z1, z2),
)
@pytest.mark.skipif(
not (HAS_PANDAS and HAS_SCIPY), reason="requires pandas and scipy"
)
def test_luminosity_distance_pandas(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.luminosity_distance`.
Regression test for https://github.com/astropy/astropy/issues/15576.
"""
import pandas as pd
z = pd.Series([0.1, 0.2, 0.3])
d = cosmo.luminosity_distance(z)
assert isinstance(d, u.Quantity)
assert d.unit == u.Mpc
np.testing.assert_array_equal(d, cosmo.luminosity_distance(np.array(z)))
# ---------------------------------------------------------------
def test_efunc_vs_invefunc(self, cosmo):
"""Test that ``efunc`` and ``inv_efunc`` give inverse values.
Note that the test doesn't need scipy because it doesn't need to call
``de_density_scale``.
"""
# super().test_efunc_vs_invefunc(cosmo) # NOT b/c abstract `w(z)`
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
# ---------------------------------------------------------------
# from Cosmology
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# don't change any values
kwargs = dict(cosmo.parameters)
c = cosmo.clone(**kwargs)
assert c.__class__ == cosmo.__class__
assert c == cosmo
# change ``H0``
# Note that H0 affects Ode0 because it changes Ogamma0
c = cosmo.clone(H0=100)
assert c.__class__ == cosmo.__class__
assert c.name == cosmo.name + " (modified)"
assert c.H0.value == 100
for n, v in filter_keys_from_items(c.parameters, ("H0",)):
v_expect = getattr(cosmo, n)
assert_quantity_allclose(v, v_expect, atol=1e-4 * getattr(v, "unit", 1))
assert not u.allclose(c.Ogamma0, cosmo.Ogamma0)
assert not u.allclose(c.Onu0, cosmo.Onu0)
# change multiple things
c = cosmo.clone(name="new name", H0=100, Tcmb0=2.8, meta=dict(zz="tops"))
assert c.__class__ == cosmo.__class__
assert c.name == "new name"
assert c.H0.value == 100
assert c.Tcmb0.value == 2.8
assert c.meta == {**cosmo.meta, **dict(zz="tops")}
for n, v in filter_keys_from_items(c.parameters, ("H0", "Tcmb0")):
v_expect = getattr(cosmo, n)
assert_quantity_allclose(v, v_expect, atol=1e-4 * getattr(v, "unit", 1))
assert not u.allclose(c.Ogamma0, cosmo.Ogamma0)
assert not u.allclose(c.Onu0, cosmo.Onu0)
assert not u.allclose(c.Tcmb0.value, cosmo.Tcmb0.value)
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.is_equivalent`."""
super().test_is_equivalent(cosmo) # pass to CosmologyTest
# test against a FlatFLRWMixin
# case (3) in FLRW.is_equivalent
if isinstance(cosmo, FlatLambdaCDM):
assert cosmo.is_equivalent(Planck18)
assert Planck18.is_equivalent(cosmo)
else:
assert not cosmo.is_equivalent(Planck18)
assert not Planck18.is_equivalent(cosmo)
# ===============================================================
# Usage Tests
# TODO: this test should be subsumed by other tests
@pytest.mark.parametrize("method", ("Om", "Ode", "w", "de_density_scale"))
def test_distance_broadcast(self, cosmo, method):
"""Test distance methods broadcast z correctly."""
g = getattr(cosmo, method)
z = np.linspace(0.1, 1, 6)
z2d = z.reshape(2, 3)
z3d = z.reshape(3, 2, 1)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z2d)
assert value_2d.shape == z2d.shape
value_3d = g(z3d)
assert value_3d.shape == z3d.shape
assert u.allclose(value_flat, value_2d.flatten())
assert u.allclose(value_flat, value_3d.flatten())
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.")
def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected):
"""Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`.
These do not come from external codes -- they are just internal checks to make
sure nothing changes if we muck with the distance calculators.
"""
z = np.array([1.0, 2.0, 3.0, 4.0])
cosmo = cosmo_cls(*args, **kwargs)
assert u.allclose(cosmo.comoving_distance(z), expected, rtol=1e-4)
# ==============================================================================
| FLRWTest |
python | openai__openai-python | src/openai/types/responses/response_computer_tool_call.py | {
"start": 2486,
"end": 2671
} | class ____(BaseModel):
type: Literal["screenshot"]
"""Specifies the event type.
For a screenshot action, this property is always set to `screenshot`.
"""
| ActionScreenshot |
python | django-extensions__django-extensions | tests/templatetags/test_indent_text.py | {
"start": 102,
"end": 525
} | class ____(TestCase):
"""Test for indentby exceptions."""
def test_should_raise_TemplateSyntaxError_if_args_lenght_not_in_2_4(self):
content = """{% load indent_text %}
{% indentby %}
Hello World
{% endindentby %}"""
with self.assertRaisesRegex(
TemplateSyntaxError, "indentby tag requires 1 or 3 arguments"
):
Template(content).render(Context())
| IndentByTagExceptions |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes9.py | {
"start": 725,
"end": 793
} | class ____(TypedDict):
x: Required[int]
y: Required[int]
| TD_A1 |
python | ray-project__ray | python/ray/serve/_private/client.py | {
"start": 1799,
"end": 23733
} | class ____:
def __init__(
self,
controller: ActorHandle,
):
self._controller: ServeController = controller
self._shutdown = False
self._http_config: HTTPOptions = ray.get(controller.get_http_config.remote())
self._root_url = ray.get(controller.get_root_url.remote())
# Each handle has the overhead of long poll client, therefore cached.
self.handle_cache = dict()
self._evicted_handle_keys = set()
@property
def root_url(self):
return self._root_url
@property
def http_config(self):
return self._http_config
def __reduce__(self):
raise RayServeException(("Ray Serve client cannot be serialized."))
def shutdown_cached_handles(self):
"""Shuts down all cached handles.
Remove the reference to the cached handles so that they can be
garbage collected.
"""
for cache_key in list(self.handle_cache):
self.handle_cache[cache_key].shutdown()
del self.handle_cache[cache_key]
async def shutdown_cached_handles_async(self):
"""Shuts down all cached handles asynchronously.
Remove the reference to the cached handles so that they can be
garbage collected.
"""
async def shutdown_task(cache_key):
await self.handle_cache[cache_key].shutdown_async()
del self.handle_cache[cache_key]
await asyncio.gather(
*[shutdown_task(cache_key) for cache_key in list(self.handle_cache)]
)
def shutdown(self, timeout_s: float = 30.0) -> None:
"""Completely shut down the connected Serve instance.
Shuts down all processes and deletes all state associated with the
instance.
"""
self.shutdown_cached_handles()
if ray.is_initialized() and not self._shutdown:
try:
ray.get(self._controller.graceful_shutdown.remote(), timeout=timeout_s)
except ray.exceptions.RayActorError:
# Controller has been shut down.
pass
except TimeoutError:
logger.warning(
f"Controller failed to shut down within {timeout_s}s. "
"Check controller logs for more details."
)
self._shutdown = True
async def shutdown_async(self, timeout_s: float = 30.0) -> None:
"""Completely shut down the connected Serve instance.
Shuts down all processes and deletes all state associated with the
instance.
"""
await self.shutdown_cached_handles_async()
if ray.is_initialized() and not self._shutdown:
try:
await asyncio.wait_for(
self._controller.graceful_shutdown.remote(), timeout=timeout_s
)
except ray.exceptions.RayActorError:
# Controller has been shut down.
pass
except TimeoutError:
logger.warning(
f"Controller failed to shut down within {timeout_s}s. "
"Check controller logs for more details."
)
self._shutdown = True
def _wait_for_deployment_healthy(self, name: str, timeout_s: int = -1):
"""Waits for the named deployment to enter "HEALTHY" status.
Raises RuntimeError if the deployment enters the "UNHEALTHY" status
instead.
Raises TimeoutError if this doesn't happen before timeout_s.
"""
start = time.time()
while time.time() - start < timeout_s or timeout_s < 0:
status_bytes = ray.get(self._controller.get_deployment_status.remote(name))
if status_bytes is None:
raise RuntimeError(
f"Waiting for deployment {name} to be HEALTHY, "
"but deployment doesn't exist."
)
status = DeploymentStatusInfo.from_proto(
DeploymentStatusInfoProto.FromString(status_bytes)
)
if status.status == DeploymentStatus.HEALTHY:
break
elif status.status == DeploymentStatus.UNHEALTHY:
raise RuntimeError(
f"Deployment {name} is UNHEALTHY: " f"{status.message}"
)
else:
# Guard against new unhandled statuses being added.
assert status.status == DeploymentStatus.UPDATING
logger.debug(
f"Waiting for {name} to be healthy, current status: "
f"{status.status}."
)
time.sleep(CLIENT_POLLING_INTERVAL_S)
else:
raise TimeoutError(
f"Deployment {name} did not become HEALTHY after {timeout_s}s."
)
def _wait_for_deployment_deleted(
self, name: str, app_name: str, timeout_s: int = 60
):
"""Waits for the named deployment to be shut down and deleted.
Raises TimeoutError if this doesn't happen before timeout_s.
"""
start = time.time()
while time.time() - start < timeout_s:
curr_status_bytes = ray.get(
self._controller.get_deployment_status.remote(name)
)
if curr_status_bytes is None:
break
curr_status = DeploymentStatusInfo.from_proto(
DeploymentStatusInfoProto.FromString(curr_status_bytes)
)
logger.debug(
f"Waiting for {name} to be deleted, current status: {curr_status}."
)
time.sleep(CLIENT_POLLING_INTERVAL_S)
else:
raise TimeoutError(f"Deployment {name} wasn't deleted after {timeout_s}s.")
def _wait_for_deployment_created(
self, deployment_name: str, app_name: str, timeout_s: int = -1
):
"""Waits for the named deployment to be created.
A deployment being created simply means that its been registered
with the deployment state manager. The deployment state manager
will then continue to reconcile the deployment towards its
target state.
Raises TimeoutError if this doesn't happen before timeout_s.
"""
start = time.time()
while time.time() - start < timeout_s or timeout_s < 0:
status_bytes = ray.get(
self._controller.get_deployment_status.remote(deployment_name, app_name)
)
if status_bytes is not None:
break
logger.debug(
f"Waiting for deployment '{deployment_name}' in application "
f"'{app_name}' to be created."
)
time.sleep(CLIENT_CHECK_CREATION_POLLING_INTERVAL_S)
else:
raise TimeoutError(
f"Deployment '{deployment_name}' in application '{app_name}' "
f"did not become HEALTHY after {timeout_s}s."
)
def _wait_for_application_running(self, name: str, timeout_s: int = -1):
"""Waits for the named application to enter "RUNNING" status.
Raises:
RuntimeError: if the application enters the "DEPLOY_FAILED" status instead.
TimeoutError: if this doesn't happen before timeout_s.
"""
start = time.time()
while time.time() - start < timeout_s or timeout_s < 0:
status_bytes = ray.get(self._controller.get_serve_status.remote(name))
if status_bytes is None:
raise RuntimeError(
f"Waiting for application {name} to be RUNNING, "
"but application doesn't exist."
)
status = StatusOverview.from_proto(
StatusOverviewProto.FromString(status_bytes)
)
if status.app_status.status == ApplicationStatus.RUNNING:
break
elif status.app_status.status == ApplicationStatus.DEPLOY_FAILED:
raise RuntimeError(
f"Deploying application {name} failed: {status.app_status.message}"
)
logger.debug(
f"Waiting for {name} to be RUNNING, current status: "
f"{status.app_status.status}."
)
time.sleep(CLIENT_POLLING_INTERVAL_S)
else:
raise TimeoutError(
f"Application {name} did not become RUNNING after {timeout_s}s."
)
@_ensure_connected
def wait_for_proxies_serving(
self, wait_for_applications_running: bool = True
) -> None:
"""Wait for the proxies to be ready to serve requests."""
proxy_handles = ray.get(self._controller.get_proxies.remote())
serving_refs = [
handle.serving.remote(
wait_for_applications_running=wait_for_applications_running
)
for handle in proxy_handles.values()
]
done, pending = ray.wait(
serving_refs,
timeout=HTTP_PROXY_TIMEOUT,
num_returns=len(serving_refs),
)
if len(pending) > 0:
raise TimeoutError(f"Proxies not available after {HTTP_PROXY_TIMEOUT}s.")
# Ensure the proxies are either serving or dead.
for ref in done:
try:
ray.get(ref, timeout=1)
except ray.exceptions.RayActorError:
pass
except Exception:
raise TimeoutError(
f"Proxies not available after {HTTP_PROXY_TIMEOUT}s."
)
@_ensure_connected
def deploy_applications(
self,
built_apps: Sequence[BuiltApplication],
*,
wait_for_ingress_deployment_creation: bool = True,
wait_for_applications_running: bool = True,
) -> List[DeploymentHandle]:
name_to_deployment_args_list = {}
name_to_application_args = {}
for app in built_apps:
deployment_args_list = []
for deployment in app.deployments:
if deployment.logging_config is None and app.logging_config:
deployment = deployment.options(logging_config=app.logging_config)
is_ingress = deployment.name == app.ingress_deployment_name
deployment_args = get_deploy_args(
deployment.name,
ingress=is_ingress,
replica_config=deployment._replica_config,
deployment_config=deployment._deployment_config,
version=deployment._version or get_random_string(),
route_prefix=app.route_prefix if is_ingress else None,
)
deployment_args_proto = DeploymentArgs()
deployment_args_proto.deployment_name = deployment_args[
"deployment_name"
]
deployment_args_proto.deployment_config = deployment_args[
"deployment_config_proto_bytes"
]
deployment_args_proto.replica_config = deployment_args[
"replica_config_proto_bytes"
]
deployment_args_proto.deployer_job_id = deployment_args[
"deployer_job_id"
]
if deployment_args["route_prefix"]:
deployment_args_proto.route_prefix = deployment_args["route_prefix"]
deployment_args_proto.ingress = deployment_args["ingress"]
deployment_args_list.append(deployment_args_proto.SerializeToString())
application_args_proto = ApplicationArgs()
application_args_proto.external_scaler_enabled = app.external_scaler_enabled
name_to_deployment_args_list[app.name] = deployment_args_list
name_to_application_args[
app.name
] = application_args_proto.SerializeToString()
# Validate applications before sending to controller
self._check_ingress_deployments(built_apps)
ray.get(
self._controller.deploy_applications.remote(
name_to_deployment_args_list, name_to_application_args
)
)
handles = []
for app in built_apps:
# The deployment state is not guaranteed to be created after
# deploy_application returns; the application state manager will
# need another reconcile iteration to create it.
if wait_for_ingress_deployment_creation:
self._wait_for_deployment_created(app.ingress_deployment_name, app.name)
if wait_for_applications_running:
self._wait_for_application_running(app.name)
if app.route_prefix is not None:
url_part = " at " + self._root_url + app.route_prefix
else:
url_part = ""
logger.info(f"Application '{app.name}' is ready{url_part}.")
handles.append(
self.get_handle(
app.ingress_deployment_name, app.name, check_exists=False
)
)
return handles
@_ensure_connected
def deploy_apps(
self,
config: Union[ServeApplicationSchema, ServeDeploySchema],
_blocking: bool = False,
) -> None:
"""Starts a task on the controller that deploys application(s) from a config.
Args:
config: A single-application config (ServeApplicationSchema) or a
multi-application config (ServeDeploySchema)
_blocking: Whether to block until the application is running.
Raises:
RayTaskError: If the deploy task on the controller fails. This can be
because a single-app config was deployed after deploying a multi-app
config, or vice versa.
"""
ray.get(self._controller.apply_config.remote(config))
if _blocking:
timeout_s = 60
if isinstance(config, ServeDeploySchema):
app_names = {app.name for app in config.applications}
else:
app_names = {config.name}
start = time.time()
while time.time() - start < timeout_s:
statuses = self.list_serve_statuses()
app_to_status = {
status.name: status.app_status.status
for status in statuses
if status.name in app_names
}
if len(app_names) == len(app_to_status) and set(
app_to_status.values()
) == {ApplicationStatus.RUNNING}:
break
time.sleep(CLIENT_POLLING_INTERVAL_S)
else:
raise TimeoutError(
f"Serve application isn't running after {timeout_s}s."
)
self.wait_for_proxies_serving(wait_for_applications_running=True)
def _check_ingress_deployments(
self, built_apps: Sequence[BuiltApplication]
) -> None:
"""Check @serve.ingress of deployments across applications.
Raises: RayServeException if more than one @serve.ingress
is found among deployments in any single application.
"""
for app in built_apps:
num_ingress_deployments = 0
for deployment in app.deployments:
if inspect.isclass(deployment.func_or_class) and issubclass(
deployment.func_or_class, ASGIAppReplicaWrapper
):
num_ingress_deployments += 1
if num_ingress_deployments > 1:
raise RayServeException(
f'Found multiple FastAPI deployments in application "{app.name}".'
"Please only include one deployment with @serve.ingress "
"in your application to avoid this issue."
)
@_ensure_connected
def delete_apps(self, names: List[str], blocking: bool = True):
if not names:
return
logger.info(f"Deleting app {names}")
self._controller.delete_apps.remote(names)
if blocking:
start = time.time()
while time.time() - start < 60:
curr_statuses_bytes = ray.get(
self._controller.get_serve_statuses.remote(names)
)
all_deleted = True
for cur_status_bytes in curr_statuses_bytes:
cur_status = StatusOverview.from_proto(
StatusOverviewProto.FromString(cur_status_bytes)
)
if cur_status.app_status.status != ApplicationStatus.NOT_STARTED:
all_deleted = False
if all_deleted:
return
time.sleep(CLIENT_POLLING_INTERVAL_S)
else:
raise TimeoutError(
f"Some of these applications weren't deleted after 60s: {names}"
)
@_ensure_connected
def delete_all_apps(self, blocking: bool = True):
"""Delete all applications"""
all_apps = []
for status_bytes in ray.get(self._controller.list_serve_statuses.remote()):
proto = StatusOverviewProto.FromString(status_bytes)
status = StatusOverview.from_proto(proto)
all_apps.append(status.name)
self.delete_apps(all_apps, blocking)
@_ensure_connected
def get_deployment_info(
self, name: str, app_name: str
) -> Tuple[DeploymentInfo, str]:
deployment_route = DeploymentRoute.FromString(
ray.get(self._controller.get_deployment_info.remote(name, app_name))
)
return (
DeploymentInfo.from_proto(deployment_route.deployment_info),
deployment_route.route if deployment_route.route != "" else None,
)
@_ensure_connected
def get_serve_status(self, name: str = SERVE_DEFAULT_APP_NAME) -> StatusOverview:
proto = StatusOverviewProto.FromString(
ray.get(self._controller.get_serve_status.remote(name))
)
return StatusOverview.from_proto(proto)
@_ensure_connected
def list_serve_statuses(self) -> List[StatusOverview]:
statuses_bytes = ray.get(self._controller.list_serve_statuses.remote())
return [
StatusOverview.from_proto(StatusOverviewProto.FromString(status_bytes))
for status_bytes in statuses_bytes
]
@_ensure_connected
def get_all_deployment_statuses(self) -> List[DeploymentStatusInfo]:
statuses_bytes = ray.get(self._controller.get_all_deployment_statuses.remote())
return [
DeploymentStatusInfo.from_proto(
DeploymentStatusInfoProto.FromString(status_bytes)
)
for status_bytes in statuses_bytes
]
@_ensure_connected
def get_serve_details(self) -> Dict:
return ray.get(self._controller.get_serve_instance_details.remote())
@_ensure_connected
def get_handle(
self,
deployment_name: str,
app_name: Optional[str] = SERVE_DEFAULT_APP_NAME,
check_exists: bool = True,
) -> DeploymentHandle:
"""Construct a handle for the specified deployment.
Args:
deployment_name: Deployment name.
app_name: Application name.
check_exists: If False, then Serve won't check the deployment
is registered. True by default.
Returns:
DeploymentHandle
"""
deployment_id = DeploymentID(name=deployment_name, app_name=app_name)
cache_key = (deployment_name, app_name, check_exists)
if cache_key in self.handle_cache:
return self.handle_cache[cache_key]
if check_exists:
all_deployments = ray.get(self._controller.list_deployment_ids.remote())
if deployment_id not in all_deployments:
raise KeyError(f"{deployment_id} does not exist.")
handle = DeploymentHandle(deployment_name, app_name)
self.handle_cache[cache_key] = handle
if cache_key in self._evicted_handle_keys:
logger.warning(
"You just got a ServeHandle that was evicted from internal "
"cache. This means you are getting too many ServeHandles in "
"the same process, this will bring down Serve's performance. "
"Please post a github issue at "
"https://github.com/ray-project/ray/issues to let the Serve "
"team to find workaround for your use case."
)
if len(self.handle_cache) > MAX_CACHED_HANDLES:
# Perform random eviction to keep the handle cache from growing
# infinitely. We used use WeakValueDictionary but hit
# https://github.com/ray-project/ray/issues/18980.
evict_key = random.choice(list(self.handle_cache.keys()))
self._evicted_handle_keys.add(evict_key)
self.handle_cache.pop(evict_key)
return handle
@_ensure_connected
def record_request_routing_info(self, info: RequestRoutingInfo):
"""Record replica routing information for a replica.
Args:
info: RequestRoutingInfo including deployment name, replica tag,
multiplex model ids, and routing stats.
"""
self._controller.record_request_routing_info.remote(info)
@_ensure_connected
def update_global_logging_config(self, logging_config: LoggingConfig):
"""Reconfigure the logging config for the controller & proxies."""
self._controller.reconfigure_global_logging_config.remote(logging_config)
| ServeControllerClient |
python | pypa__warehouse | tests/unit/admin/test_services.py | {
"start": 1335,
"end": 5000
} | class ____:
def test_verify_service(self):
assert verifyClass(ISponsorLogoStorage, GCSSponsorLogoStorage)
def test_basic_init(self):
bucket = pretend.stub()
storage = GCSSponsorLogoStorage(bucket)
assert storage.bucket is bucket
def test_create_service(self):
service = pretend.stub(
get_bucket=pretend.call_recorder(lambda bucket_name: pretend.stub())
)
request = pretend.stub(
find_service=pretend.call_recorder(lambda name: service),
registry=pretend.stub(settings={"sponsorlogos.bucket": "froblob"}),
)
GCSSponsorLogoStorage.create_service(None, request)
assert request.find_service.calls == [pretend.call(name="gcloud.gcs")]
assert service.get_bucket.calls == [pretend.call("froblob")]
def test_stores_file(self, tmpdir):
filename = str(tmpdir.join("testfile.txt"))
with open(filename, "wb") as fp:
fp.write(b"Test File!")
blob = pretend.stub(
upload_from_filename=pretend.call_recorder(lambda file_path: None),
make_public=pretend.call_recorder(lambda: None),
public_url="http://files/sponsorlogos/thelogo.png",
content_type=None,
)
bucket = pretend.stub(blob=pretend.call_recorder(lambda path: blob))
storage = GCSSponsorLogoStorage(bucket)
result = storage.store("foo/bar.txt", filename)
assert result == "http://files/sponsorlogos/thelogo.png"
assert bucket.blob.calls == [pretend.call("foo/bar.txt")]
assert blob.make_public.calls == [pretend.call()]
assert blob.upload_from_filename.calls == [pretend.call(filename)]
def test_stores_file_with_prefix(self, tmpdir):
filename = str(tmpdir.join("testfile.txt"))
with open(filename, "wb") as fp:
fp.write(b"Test File!")
blob = pretend.stub(
upload_from_filename=pretend.call_recorder(lambda file_path: None),
make_public=pretend.call_recorder(lambda: None),
public_url="http://files/sponsorlogos/thelogo.png",
content_type=pretend.call_recorder(lambda x: None),
)
bucket = pretend.stub(blob=pretend.call_recorder(lambda path: blob))
storage = GCSSponsorLogoStorage(bucket, prefix="sponsorlogos")
result = storage.store("foo/bar.txt", filename)
assert result == "http://files/sponsorlogos/thelogo.png"
assert bucket.blob.calls == [pretend.call("sponsorlogos/foo/bar.txt")]
assert blob.make_public.calls == [pretend.call()]
assert blob.upload_from_filename.calls == [pretend.call(filename)]
def test_stores_metadata(self, tmpdir):
filename = str(tmpdir.join("testfile.txt"))
with open(filename, "wb") as fp:
fp.write(b"Test File!")
blob = pretend.stub(
upload_from_filename=pretend.call_recorder(lambda file_path: None),
make_public=pretend.call_recorder(lambda: None),
public_url="http://files/sponsorlogos/thelogo.png",
patch=pretend.call_recorder(lambda: None),
content_type=None,
)
bucket = pretend.stub(blob=pretend.call_recorder(lambda path: blob))
storage = GCSSponsorLogoStorage(bucket)
meta = {"foo": "bar"}
result = storage.store("foo/bar.txt", filename, "image/png", meta=meta)
assert result == "http://files/sponsorlogos/thelogo.png"
assert blob.make_public.calls == [pretend.call()]
assert blob.content_type == "image/png"
assert blob.metadata == meta
| TestGCSSponsorLogoStorage |
python | google__jax | tests/multiprocess/pmap_test.py | {
"start": 1099,
"end": 3502
} | class ____(jt_multiprocess.MultiProcessTest):
@jtu.ignore_warning(category=DeprecationWarning)
def testBasic(self):
elems_per_host = 4
devices = jax.local_devices()
x = [np.arange(i, i + elems_per_host) + jax.process_index() * elems_per_host
for i in range(len(devices))]
y = jax.device_put_sharded(x, devices)
f = jax.pmap(lambda x: lax.psum(x, "i"), axis_name="i")
out = f(y)
expected_out = np.array([
np.arange(i, i + elems_per_host) + p * elems_per_host # pylint: disable=g-complex-comprehension
for p in range(jax.process_count()) for i in range(len(devices))
])
self.assertIsInstance(out, array.ArrayImpl)
if jax.config.jax_pmap_shmap_merge:
self.assertIsInstance(out.sharding, jax.sharding.NamedSharding)
else:
self.assertIsInstance(out.sharding, jax.sharding.PmapSharding)
np.testing.assert_array_equal(
out, np.array([expected_out.sum(axis=0)] * len(devices)))
def testLocalPmap(self):
z = jax.pmap(
lambda x: lax.axis_index("i"),
axis_name="i",
devices=jax.local_devices(),
)(np.arange(jax.local_device_count()))
np.testing.assert_array_equal(z, np.arange(jax.local_device_count()))
@parameterized.named_parameters(
("sharded_dim_0", 0),
("sharded_dim_1", 1),
)
@jtu.ignore_warning(category=DeprecationWarning)
def test_default_pmap_sharding(self, sharded_dim):
if jax.config.jax_pmap_shmap_merge:
self.skipTest("Does not apply for pmap shard_map merge")
n = jax.local_device_count()
shape = (n, 1) if sharded_dim == 0 else (1, n)
ps = jax.sharding.PmapSharding.default(shape, sharded_dim)
inp = jnp.arange(np.prod(shape)).reshape(shape)
compiled = jax.pmap(lambda x: x, in_axes=sharded_dim).lower(inp).compile()
pmap_in_sharding, = compiled._executable.unsafe_call.in_handler.in_shardings
self.assertEqual(ps._device_assignment, pmap_in_sharding._device_assignment)
self.assertEqual(ps.sharding_spec, pmap_in_sharding.sharding_spec)
def test_global_axis_size_initial_style(self):
xs = jnp.ones(jax.local_device_count())
pmapped_f = jax.pmap(lambda x: jax.lax.all_gather(x, "i"), axis_name="i")
jaxpr = jax.make_jaxpr(pmapped_f)(xs)
jax.core.eval_jaxpr(jaxpr.jaxpr, jaxpr.consts, xs) # does not crash
if __name__ == "__main__":
jt_multiprocess.main()
| PmapTestMultiHost |
python | wandb__wandb | wandb/sdk/artifacts/_models/manifest.py | {
"start": 267,
"end": 965
} | class ____(ArtifactsBase, alias_generator=to_camel):
"""Data model for the v1 artifact manifest."""
version: Literal[1]
contents: Dict[str, ArtifactManifestEntry]
storage_policy: str
storage_policy_config: StoragePolicyConfig
@field_validator("contents", mode="before")
def _validate_entries(cls, v: Any) -> Any:
# The dict keys should be the `entry.path` values, but they've
# historically been dropped from the JSON objects. This restores
# them on instantiation.
# Pydantic will handle converting dicts -> ArtifactManifestEntries.
return {path: {**dict(entry), "path": path} for path, entry in v.items()}
| ArtifactManifestV1Data |
python | numba__numba | numba/tests/test_serialize.py | {
"start": 350,
"end": 7462
} | class ____(TestCase):
def run_with_protocols(self, meth, *args, **kwargs):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
meth(proto, *args, **kwargs)
@contextlib.contextmanager
def simulate_fresh_target(self):
hwstr = 'cpu'
dispatcher_cls = resolve_dispatcher_from_str(hwstr)
old_descr = dispatcher_cls.targetdescr
# Simulate fresh targetdescr
dispatcher_cls.targetdescr = type(dispatcher_cls.targetdescr)(hwstr)
try:
yield
finally:
# Be sure to reinstantiate old descriptor, otherwise other
# objects may be out of sync.
dispatcher_cls.targetdescr = old_descr
def check_call(self, proto, func, expected_result, args):
def check_result(func):
if (isinstance(expected_result, type)
and issubclass(expected_result, Exception)):
self.assertRaises(expected_result, func, *args)
else:
self.assertPreciseEqual(func(*args), expected_result)
# Control
check_result(func)
pickled = pickle.dumps(func, proto)
with self.simulate_fresh_target():
new_func = pickle.loads(pickled)
check_result(new_func)
def test_call_with_sig(self):
from .serialize_usecases import add_with_sig
self.run_with_protocols(self.check_call, add_with_sig, 5, (1, 4))
# Compilation has been disabled => float inputs will be coerced to int
self.run_with_protocols(self.check_call, add_with_sig, 5, (1.2, 4.2))
def test_call_without_sig(self):
from .serialize_usecases import add_without_sig
self.run_with_protocols(self.check_call, add_without_sig, 5, (1, 4))
self.run_with_protocols(self.check_call, add_without_sig, 5.5, (1.2, 4.3))
# Object mode is enabled
self.run_with_protocols(self.check_call, add_without_sig, "abc", ("a", "bc"))
def test_call_nopython(self):
from .serialize_usecases import add_nopython
self.run_with_protocols(self.check_call, add_nopython, 5.5, (1.2, 4.3))
# Object mode is disabled
self.run_with_protocols(self.check_call, add_nopython, TypingError, (object(), object()))
def test_call_nopython_fail(self):
from .serialize_usecases import add_nopython_fail
# Compilation fails
self.run_with_protocols(self.check_call, add_nopython_fail, TypingError, (1, 2))
def test_call_objmode_with_global(self):
from .serialize_usecases import get_global_objmode
self.run_with_protocols(self.check_call, get_global_objmode, 7.5, (2.5,))
def test_call_closure(self):
from .serialize_usecases import closure
inner = closure(1)
self.run_with_protocols(self.check_call, inner, 6, (2, 3))
def check_call_closure_with_globals(self, **jit_args):
from .serialize_usecases import closure_with_globals
inner = closure_with_globals(3.0, **jit_args)
self.run_with_protocols(self.check_call, inner, 7.0, (4.0,))
def test_call_closure_with_globals_nopython(self):
self.check_call_closure_with_globals(nopython=True)
def test_call_closure_with_globals_objmode(self):
self.check_call_closure_with_globals(forceobj=True)
def test_call_closure_calling_other_function(self):
from .serialize_usecases import closure_calling_other_function
inner = closure_calling_other_function(3.0)
self.run_with_protocols(self.check_call, inner, 11.0, (4.0, 6.0))
def test_call_closure_calling_other_closure(self):
from .serialize_usecases import closure_calling_other_closure
inner = closure_calling_other_closure(3.0)
self.run_with_protocols(self.check_call, inner, 8.0, (4.0,))
def test_call_dyn_func(self):
from .serialize_usecases import dyn_func
# Check serializing a dynamically-created function
self.run_with_protocols(self.check_call, dyn_func, 36, (6,))
def test_call_dyn_func_objmode(self):
from .serialize_usecases import dyn_func_objmode
# Same with an object mode function
self.run_with_protocols(self.check_call, dyn_func_objmode, 36, (6,))
def test_renamed_module(self):
from .serialize_usecases import get_renamed_module
# Issue #1559: using a renamed module (e.g. `import numpy as np`)
# should not fail serializing
expected = get_renamed_module(0.0)
self.run_with_protocols(self.check_call, get_renamed_module,
expected, (0.0,))
def test_other_process(self):
"""
Check that reconstructing doesn't depend on resources already
instantiated in the original process.
"""
from .serialize_usecases import closure_calling_other_closure
func = closure_calling_other_closure(3.0)
pickled = pickle.dumps(func)
code = """if 1:
import pickle
data = {pickled!r}
func = pickle.loads(data)
res = func(4.0)
assert res == 8.0, res
""".format(**locals())
subprocess.check_call([sys.executable, "-c", code])
def test_reuse(self):
"""
Check that deserializing the same function multiple times re-uses
the same dispatcher object.
Note that "same function" is intentionally under-specified.
"""
from .serialize_usecases import closure
func = closure(5)
pickled = pickle.dumps(func)
func2 = closure(6)
pickled2 = pickle.dumps(func2)
f = pickle.loads(pickled)
g = pickle.loads(pickled)
h = pickle.loads(pickled2)
self.assertIs(f, g)
self.assertEqual(f(2, 3), 10)
g.disable_compile()
self.assertEqual(g(2, 4), 11)
self.assertIsNot(f, h)
self.assertEqual(h(2, 3), 11)
# Now make sure the original object doesn't exist when deserializing
func = closure(7)
func(42, 43)
pickled = pickle.dumps(func)
del func
gc.collect()
f = pickle.loads(pickled)
g = pickle.loads(pickled)
self.assertIs(f, g)
self.assertEqual(f(2, 3), 12)
g.disable_compile()
self.assertEqual(g(2, 4), 13)
def test_imp_deprecation(self):
"""
The imp module was deprecated in v3.4 in favour of importlib
"""
code = """if 1:
import pickle
import warnings
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', DeprecationWarning)
from numba import njit
@njit
def foo(x):
return x + 1
foo(1)
serialized_foo = pickle.dumps(foo)
for x in w:
if 'serialize.py' in x.filename:
assert "the imp module is deprecated" not in x.msg
"""
subprocess.check_call([sys.executable, "-c", code])
| TestDispatcherPickling |
python | scrapy__scrapy | scrapy/extensions/postprocessing.py | {
"start": 1222,
"end": 1944
} | class ____:
"""
Compresses received data using `bz2 <https://en.wikipedia.org/wiki/Bzip2>`_.
Accepted ``feed_options`` parameters:
- `bz2_compresslevel`
See :py:class:`bz2.BZ2File` for more info about parameters.
"""
def __init__(self, file: BinaryIO, feed_options: dict[str, Any]) -> None:
self.file = file
self.feed_options = feed_options
compress_level = self.feed_options.get("bz2_compresslevel", 9)
self.bz2file = BZ2File(
filename=self.file, mode="wb", compresslevel=compress_level
)
def write(self, data: bytes) -> int:
return self.bz2file.write(data)
def close(self) -> None:
self.bz2file.close()
| Bz2Plugin |
python | cherrypy__cherrypy | cherrypy/lib/sessions.py | {
"start": 4718,
"end": 14449
} | class ____(object):
"""A CherryPy dict-like Session object (one per request)."""
_id = None
id_observers = None
"A list of callbacks to which to pass new id's."
@property
def id(self):
"""Return the current session id."""
return self._id
@id.setter
def id(self, value):
self._id = value
for o in self.id_observers:
o(value)
timeout = 60
'Number of minutes after which to delete session data.'
locked = False
"""
If True, this session instance has exclusive read/write access
to session data."""
loaded = False
"""If True, data has been retrieved from storage.
This should happen automatically on the first attempt to access
session data.
"""
clean_thread = None
'Class-level Monitor which calls self.clean_up.'
clean_freq = 5
'The poll rate for expired session cleanup in minutes.'
originalid = None
'The session id passed by the client. May be missing or unsafe.'
missing = False
'True if the session requested by the client did not exist.'
regenerated = False
"""True if the application called session.regenerate().
This is not set by internal calls to regenerate the session id.
"""
debug = False
'If True, log debug information.'
# --------------------- Session management methods --------------------- #
def __init__(self, id=None, **kwargs):
"""Initialize the session tool."""
self.id_observers = []
self._data = {}
for k, v in kwargs.items():
setattr(self, k, v)
self.originalid = id
self.missing = False
if id is None:
if self.debug:
cherrypy.log('No id given; making a new one', 'TOOLS.SESSIONS')
self._regenerate()
else:
self.id = id
if self._exists():
if self.debug:
cherrypy.log('Set id to %s.' % id, 'TOOLS.SESSIONS')
else:
if self.debug:
cherrypy.log(
'Expired or malicious session %r; '
'making a new one' % id,
'TOOLS.SESSIONS',
)
# Expired or malicious session. Make a new one.
# See https://github.com/cherrypy/cherrypy/issues/709.
self.id = None
self.missing = True
self._regenerate()
def now(self):
"""Generate the session specific concept of 'now'.
Other session providers can override this to use alternative,
possibly timezone aware, versions of 'now'.
"""
return datetime.datetime.now()
def regenerate(self):
"""Replace the current session (with a new id)."""
self.regenerated = True
self._regenerate()
def _regenerate(self):
if self.id is not None:
if self.debug:
cherrypy.log(
'Deleting the existing session %r before '
'regeneration.' % self.id,
'TOOLS.SESSIONS',
)
self.delete()
old_session_was_locked = self.locked
if old_session_was_locked:
self.release_lock()
if self.debug:
cherrypy.log('Old lock released.', 'TOOLS.SESSIONS')
self.id = None
while self.id is None:
self.id = self.generate_id()
# Assert that the generated id is not already stored.
if self._exists():
self.id = None
if self.debug:
cherrypy.log('Set id to generated %s.' % self.id, 'TOOLS.SESSIONS')
if old_session_was_locked:
self.acquire_lock()
if self.debug:
cherrypy.log('Regenerated lock acquired.', 'TOOLS.SESSIONS')
def clean_up(self):
"""Clean up expired sessions."""
pass
def generate_id(self):
"""Return a new session id."""
return binascii.hexlify(os.urandom(20)).decode('ascii')
def save(self):
"""Save session data."""
try:
# If session data has never been loaded then it's never been
# accessed: no need to save it
if self.loaded:
t = datetime.timedelta(seconds=self.timeout * 60)
expiration_time = self.now() + t
if self.debug:
cherrypy.log(
'Saving session %r with expiry %s'
% (self.id, expiration_time),
'TOOLS.SESSIONS',
)
self._save(expiration_time)
else:
if self.debug:
cherrypy.log(
'Skipping save of session %r (no session loaded).'
% self.id,
'TOOLS.SESSIONS',
)
finally:
if self.locked:
# Always release the lock if the user didn't release it
self.release_lock()
if self.debug:
cherrypy.log('Lock released after save.', 'TOOLS.SESSIONS')
def load(self):
"""Copy stored session data into this session instance."""
data = self._load()
# data is either None or a tuple (session_data, expiration_time)
if data is None or data[1] < self.now():
if self.debug:
cherrypy.log(
'Expired session %r, flushing data.' % self.id,
'TOOLS.SESSIONS',
)
self._data = {}
else:
if self.debug:
cherrypy.log(
'Data loaded for session %r.' % self.id,
'TOOLS.SESSIONS',
)
self._data = data[0]
self.loaded = True
# Stick the clean_thread in the class, not the instance.
# The instances are created and destroyed per-request.
cls = self.__class__
if self.clean_freq and not cls.clean_thread:
# clean_up is an instancemethod and not a classmethod,
# so that tool config can be accessed inside the method.
t = cherrypy.process.plugins.Monitor(
cherrypy.engine,
self.clean_up,
self.clean_freq * 60,
name='Session cleanup',
)
t.subscribe()
cls.clean_thread = t
t.start()
if self.debug:
cherrypy.log('Started cleanup thread.', 'TOOLS.SESSIONS')
def delete(self):
"""Delete stored session data."""
self._delete()
if self.debug:
cherrypy.log('Deleted session %s.' % self.id, 'TOOLS.SESSIONS')
# -------------------- Application accessor methods -------------------- #
def __getitem__(self, key):
"""Retrieve a session-stored object."""
if not self.loaded:
self.load()
return self._data[key]
def __setitem__(self, key, value):
"""Store an object in the session."""
if not self.loaded:
self.load()
self._data[key] = value
def __delitem__(self, key):
"""Delete object stored in the session."""
if not self.loaded:
self.load()
del self._data[key]
def pop(self, key, default=missing):
"""Remove the specified key and return the corresponding value.
If key is not found, default is returned if given, otherwise
KeyError is raised.
"""
if not self.loaded:
self.load()
if default is missing:
return self._data.pop(key)
else:
return self._data.pop(key, default)
def __contains__(self, key):
"""Check if the session has an object by key."""
if not self.loaded:
self.load()
return key in self._data
def get(self, key, default=None):
"""Retrieve a session-stored object.
D.get(k[,d]) -> D[k] if k in D, else d.
d defaults to None.
"""
if not self.loaded:
self.load()
return self._data.get(key, default)
def update(self, d):
"""Update multiple session-stored objects in one go.
D.update(E) -> None.
Update D from E: for k in E: D[k] = E[k].
"""
if not self.loaded:
self.load()
self._data.update(d)
def setdefault(self, key, default=None):
"""Set a default session key value.
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D.
"""
if not self.loaded:
self.load()
return self._data.setdefault(key, default)
def clear(self):
"""Clean up the session-stored data.
D.clear() -> None.
Remove all items from D.
"""
if not self.loaded:
self.load()
self._data.clear()
def keys(self):
"""Return an iterable of session keys.
D.keys() -> list of D's keys.
"""
if not self.loaded:
self.load()
return self._data.keys()
def items(self):
"""Return an iterable of items as tuples.
D.items() -> list of D's (key, value) pairs, as 2-tuples.
"""
if not self.loaded:
self.load()
return self._data.items()
def values(self):
"""Return an iterable of session objects.
D.values() -> list of D's values.
"""
if not self.loaded:
self.load()
return self._data.values()
| Session |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeParams7.py | {
"start": 487,
"end": 671
} | class ____[X: (int, str), Y](dict[Y, X]): ...
B1 = ClassB[int, int]
# This should generate an error because float doesn't conform
# to the constraint.
B2 = ClassB[float, float]
| ClassB |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 7955,
"end": 8463
} | class ____(BaseModel):
class Config:
extra = Extra.allow
type: Literal["CustomSchemaLoader"]
class_name: str = Field(
...,
description="Fully-qualified name of the class that will be implementing the custom schema loader. The format is `source_<name>.<package>.<class_name>`.",
examples=["source_railz.components.MyCustomSchemaLoader"],
title="Class Name",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
| CustomSchemaLoader |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 255590,
"end": 255994
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "ip_allow_list_entry")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
ip_allow_list_entry = sgqlc.types.Field(
"IpAllowListEntry", graphql_name="ipAllowListEntry"
)
| CreateIpAllowListEntryPayload |
python | allegroai__clearml | clearml/backend_api/services/v2_13/projects.py | {
"start": 73789,
"end": 78140
} | class ____(Request):
"""
Get a list of distinct values for the chosen hyperparameter
:param projects: Project IDs
:type projects: Sequence[str]
:param section: Hyperparameter section name
:type section: str
:param name: Hyperparameter name
:type name: str
:param allow_public: If set to 'true' then collect values from both company and
public tasks otherwise company tasks only. The default is 'true'
:type allow_public: bool
:param include_subprojects: If set to 'true' and the project field is set then
the result includes hyper parameters values from the subproject tasks
:type include_subprojects: bool
"""
_service = "projects"
_action = "get_hyperparam_values"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"allow_public": {
"description": "If set to 'true' then collect values from both company and public tasks otherwise company tasks only. The default is 'true'",
"type": "boolean",
},
"include_subprojects": {
"default": True,
"description": "If set to 'true' and the project field is set then the result includes hyper parameters values from the subproject tasks",
"type": "boolean",
},
"name": {"description": "Hyperparameter name", "type": "string"},
"projects": {
"description": "Project IDs",
"items": {"type": "string"},
"type": ["array", "null"],
},
"section": {"description": "Hyperparameter section name", "type": "string"},
},
"required": ["section", "name"],
"type": "object",
}
def __init__(
self,
section: str,
name: str,
projects: Optional[List[str]] = None,
allow_public: Optional[bool] = None,
include_subprojects: Optional[bool] = True,
**kwargs: Any
) -> None:
super(GetHyperparamValuesRequest, self).__init__(**kwargs)
self.projects = projects
self.section = section
self.name = name
self.allow_public = allow_public
self.include_subprojects = include_subprojects
@schema_property("projects")
def projects(self) -> Optional[List[str]]:
return self._property_projects
@projects.setter
def projects(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_projects = None
return
self.assert_isinstance(value, "projects", (list, tuple))
self.assert_isinstance(value, "projects", six.string_types, is_array=True)
self._property_projects = value
@schema_property("section")
def section(self) -> str:
return self._property_section
@section.setter
def section(self, value: str) -> None:
if value is None:
self._property_section = None
return
self.assert_isinstance(value, "section", six.string_types)
self._property_section = value
@schema_property("name")
def name(self) -> str:
return self._property_name
@name.setter
def name(self, value: str) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("allow_public")
def allow_public(self) -> Optional[bool]:
return self._property_allow_public
@allow_public.setter
def allow_public(self, value: Optional[bool]) -> None:
if value is None:
self._property_allow_public = None
return
self.assert_isinstance(value, "allow_public", (bool,))
self._property_allow_public = value
@schema_property("include_subprojects")
def include_subprojects(self) -> Optional[bool]:
return self._property_include_subprojects
@include_subprojects.setter
def include_subprojects(self, value: Optional[bool]) -> None:
if value is None:
self._property_include_subprojects = None
return
self.assert_isinstance(value, "include_subprojects", (bool,))
self._property_include_subprojects = value
| GetHyperparamValuesRequest |
python | huggingface__transformers | src/transformers/models/swin2sr/modeling_swin2sr.py | {
"start": 35858,
"end": 36479
} | class ____(nn.Module):
def __init__(self, config, num_features):
super().__init__()
self.conv_before_upsample = nn.Conv2d(config.embed_dim, num_features, 3, 1, 1)
self.activation = nn.LeakyReLU(inplace=True)
self.upsample = Upsample(config.upscale, num_features)
self.final_convolution = nn.Conv2d(num_features, config.num_channels_out, 3, 1, 1)
def forward(self, sequence_output):
    """Upsample ``sequence_output`` to the reconstruction resolution.

    Applies, in order: channel projection, LeakyReLU activation,
    pixel-shuffle upsampling, and the final output convolution.
    """
    hidden = self.activation(self.conv_before_upsample(sequence_output))
    return self.final_convolution(self.upsample(hidden))
| PixelShuffleUpsampler |
python | getsentry__sentry | src/sentry/issues/auto_source_code_config/code_mapping.py | {
"start": 1080,
"end": 1970
} | class ____(NamedTuple):
repo: RepoAndBranch
stacktrace_root: str
source_path: str
SLASH = "/"  # POSIX path separator
BACKSLASH = "\\"  # This is the Python representation of a single backslash


def derive_code_mappings(
    organization: Organization,
    frame: Mapping[str, Any],
    platform: str | None = None,
) -> list[dict[str, str]]:
    """Derive code mappings for ``frame`` from the org's repo trees.

    Returns a list of mapping dicts, or an empty list when the installed
    integration does not expose repo trees or the frame's filename has no
    extension.
    """
    installation = get_installation(organization)
    # Only integrations that can list repository trees support derivation.
    if not isinstance(installation, RepoTreesIntegration):
        return []
    trees = installation.get_trees_for_org()
    trees_helper = CodeMappingTreesHelper(trees)
    try:
        frame_filename = create_frame_info(frame, platform)
        return trees_helper.get_file_and_repo_matches(frame_filename)
    except NeedsExtension:
        # Filenames without an extension cannot be matched against the trees.
        logger.warning("Needs extension: %s", frame.get("filename"))
        return []


# call generate_code_mappings() after you initialize CodeMappingTreesHelper
| CodeMapping |
python | huggingface__transformers | src/transformers/models/nllb_moe/modeling_nllb_moe.py | {
"start": 45697,
"end": 50161
} | class ____(NllbMoePreTrainedModel, GenerationMixin):
base_model_prefix = "model"
# The LM head shares its weight matrix with the input embedding table.
_tied_weights_keys = {
    "lm_head.weight": "model.shared.weight",
}

def __init__(self, config: NllbMoeConfig):
    """Seq2seq MoE model with a tied language-modeling head on top."""
    super().__init__(config)
    self.model = NllbMoeModel(config)
    # bias=False because the weight is tied to the shared embeddings.
    self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
    self.num_experts = config.num_experts
    # Coefficients for the auxiliary router losses added to the LM loss.
    self.router_z_loss_coef = config.router_z_loss_coef
    self.router_aux_loss_coef = config.router_aux_loss_coef
    # Initialize weights and apply final processing
    self.post_init()
@can_return_tuple
@auto_docstring
def forward(
    self,
    input_ids: Optional[torch.LongTensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    decoder_input_ids: Optional[torch.LongTensor] = None,
    decoder_attention_mask: Optional[torch.LongTensor] = None,
    encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
    past_key_values: Optional[Cache] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_router_logits: Optional[bool] = None,
    cache_position: Optional[torch.Tensor] = None,
    **kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], Seq2SeqMoEOutput]:
    # Fall back to the config default when the caller does not specify
    # whether router logits (and thus aux losses) should be produced.
    output_router_logits = (
        output_router_logits if output_router_logits is not None else self.config.output_router_logits
    )
    if labels is not None:
        if decoder_input_ids is None:
            # Teacher forcing: derive decoder inputs by right-shifting labels.
            decoder_input_ids = shift_tokens_right(
                labels, self.config.pad_token_id, self.config.decoder_start_token_id
            )
    outputs = self.model(
        input_ids,
        attention_mask=attention_mask,
        decoder_input_ids=decoder_input_ids,
        encoder_outputs=encoder_outputs,
        decoder_attention_mask=decoder_attention_mask,
        past_key_values=past_key_values,
        inputs_embeds=inputs_embeds,
        decoder_inputs_embeds=decoder_inputs_embeds,
        use_cache=use_cache,
        output_router_logits=output_router_logits,
        cache_position=cache_position,
        **kwargs,
    )
    lm_logits = self.lm_head(outputs[0])
    loss = None
    encoder_aux_loss = None
    decoder_aux_loss = None
    if labels is not None:
        # -100 positions in labels are ignored by the cross-entropy loss.
        loss_fct = CrossEntropyLoss(ignore_index=-100)
        # todo check in the config if router loss enables
        if output_router_logits:
            encoder_router_logits = outputs.encoder_router_logits
            decoder_router_logits = outputs.decoder_router_logits
            # Load-balancing (auxiliary) losses for both routers.
            encoder_aux_loss = load_balancing_loss_func(
                encoder_router_logits, self.num_experts, top_k=2, attention_mask=attention_mask
            )
            decoder_aux_loss = load_balancing_loss_func(
                decoder_router_logits, self.num_experts, top_k=2, attention_mask=decoder_attention_mask
            )
        loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
    if output_router_logits and labels is not None:
        # Scale and add the auxiliary router loss to the LM loss.
        aux_loss = self.router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss)
        loss = loss + aux_loss
    return Seq2SeqMoEOutput(
        loss=loss,
        logits=lm_logits,
        past_key_values=outputs.past_key_values,
        cross_attentions=outputs.cross_attentions,
        encoder_aux_loss=encoder_aux_loss,
        decoder_aux_loss=decoder_aux_loss,
        encoder_last_hidden_state=outputs.encoder_last_hidden_state,
        encoder_hidden_states=outputs.encoder_hidden_states,
        decoder_hidden_states=outputs.decoder_hidden_states,
        encoder_attentions=outputs.encoder_attentions,
        decoder_attentions=outputs.decoder_attentions,
        encoder_router_logits=outputs.encoder_router_logits,
        decoder_router_logits=outputs.decoder_router_logits,
    )
# Public API of this module.
__all__ = [
    "NllbMoeForConditionalGeneration",
    "NllbMoeModel",
    "NllbMoePreTrainedModel",
    "NllbMoeTop2Router",
    "NllbMoeSparseMLP",
]
| NllbMoeForConditionalGeneration |
python | fluentpython__example-code | 19-dyn-attr-prop/oscon/schedule2.py | {
"start": 2111,
"end": 3591
} | class ____(DbRecord): # <1>
@property
def venue(self):
    """The venue record for this event, fetched lazily by key."""
    key = 'venue.{}'.format(self.venue_serial)
    return self.__class__.fetch(key)  # <2>

@property
def speakers(self):
    """Speaker records for this event, fetched once and then cached."""
    if not hasattr(self, '_speaker_objs'):  # <3>
        # Read the raw serial list via __dict__ so that attribute lookup
        # does not recurse into this property.
        spkr_serials = self.__dict__['speakers']  # <4>
        fetch = self.__class__.fetch  # <5>
        self._speaker_objs = [fetch('speaker.{}'.format(key))
                              for key in spkr_serials]  # <6>
    return self._speaker_objs  # <7>
def __repr__(self):
    """Show the record's ``name`` when present; otherwise fall back to
    the superclass representation."""
    try:
        name = self.name
    except AttributeError:
        return super().__repr__()
    return '<{} {!r}>'.format(type(self).__name__, name)
# END SCHEDULE2_EVENT
# BEGIN SCHEDULE2_LOAD
def load_db(db):
    """Populate ``db`` with records keyed as '<type>.<serial>'.

    Collections whose capitalized singular name matches a ``DbRecord``
    subclass in this module (e.g. ``Event``) are instantiated with that
    subclass; everything else falls back to ``DbRecord``.
    """
    raw_data = osconfeed.load()
    warnings.warn('loading ' + DB_NAME)
    for collection, rec_list in raw_data['Schedule'].items():
        record_type = collection[:-1]  # <1> strip trailing 's' ('events' -> 'event')
        cls_name = record_type.capitalize()  # <2>
        cls = globals().get(cls_name, DbRecord)  # <3>
        if inspect.isclass(cls) and issubclass(cls, DbRecord):  # <4>
            factory = cls  # <5>
        else:
            factory = DbRecord  # <6>
        for record in rec_list:  # <7>
            key = '{}.{}'.format(record_type, record['serial'])
            record['serial'] = key
            db[key] = factory(**record)  # <8>
| Event |
python | huggingface__transformers | src/transformers/models/longcat_flash/modeling_longcat_flash.py | {
"start": 25118,
"end": 28611
} | class ____(LongcatFlashPreTrainedModel):
# MTP (multi-token-prediction) weights in checkpoints are not loaded here.
_keys_to_ignore_on_load_unexpected = [r"model\.mtp.*"]

def __init__(self, config):
    """Decoder-only transformer backbone for LongcatFlash."""
    super().__init__(config)
    self.padding_idx = config.pad_token_id
    self.vocab_size = config.vocab_size
    self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
    self.layers = nn.ModuleList(
        [LongcatFlashDecoderLayer(config, layer_idx) for layer_idx in range(config.num_layers)]
    )
    self.norm = LongcatFlashRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
    self.rotary_emb = LongcatFlashRotaryEmbedding(config=config)
    self.gradient_checkpointing = False
    # Each layer above has 2 sublayers, config hack to have a correct cache (to avoid a checkpoint change)
    self.head_dim = config.head_dim  # For CI happiness (we didn't convert so head_dim is not directly used)
    self.config.num_hidden_layers = 2 * config.num_layers
    # Initialize weights and apply final processing
    self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
    self,
    input_ids: Optional[torch.LongTensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[Cache] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    cache_position: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    **kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
    # Exactly one of input_ids / inputs_embeds must be provided.
    if (input_ids is None) ^ (inputs_embeds is not None):
        raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
    if inputs_embeds is None:
        inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
    if use_cache and past_key_values is None:
        past_key_values = DynamicCache(config=self.config)
    if cache_position is None:
        # Positions of the new tokens relative to what is already cached.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        cache_position: torch.Tensor = torch.arange(
            past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
        )
    if position_ids is None:
        position_ids = cache_position.unsqueeze(0)
    causal_mask = create_causal_mask(
        config=self.config,
        input_embeds=inputs_embeds,
        attention_mask=attention_mask,
        cache_position=cache_position,
        past_key_values=past_key_values,
        position_ids=position_ids,
    )
    hidden_states = inputs_embeds
    position_embeddings = self.rotary_emb(hidden_states, position_ids)
    # Slice to config.num_layers: __init__ doubled config.num_hidden_layers
    # as a cache workaround, so iterate only over the real layers.
    for decoder_layer in self.layers[: self.config.num_layers]:
        hidden_states = decoder_layer(
            hidden_states,
            attention_mask=causal_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
    hidden_states = self.norm(hidden_states)
    return BaseModelOutputWithPast(
        last_hidden_state=hidden_states,
        past_key_values=past_key_values,
        hidden_states=None,
        attentions=None,
    )
@auto_docstring
| LongcatFlashModel |
python | openai__openai-python | tests/test_utils/test_typing.py | {
"start": 234,
"end": 280
} | class ____(BaseGeneric[_T]): ...
| SubclassGeneric |
python | tensorflow__tensorflow | tensorflow/python/saved_model/metrics_test.py | {
"start": 1607,
"end": 7222
} | class ____(test.TestCase):
def _create_save_v2_model(self):
    """Save an empty trackable with the TF2 API; return the export dir."""
    root = autotrackable.AutoTrackable()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir)
    return save_dir

def _create_save_v1_model(self):
    """Save a trivial graph with the TF1 SavedModelBuilder; return the dir."""
    save_dir = os.path.join(self.get_temp_dir(), "builder")
    builder_ = builder.SavedModelBuilder(save_dir)
    with ops.Graph().as_default():
        with self.session(graph=ops.Graph()) as sess:
            constant_op.constant(5.0)
            builder_.add_meta_graph_and_variables(sess, ["foo"])
            builder_.save()
    return save_dir
def test_python_save(self):
    """TF2 save bumps both the v2 write-API counter and the v2 write counter."""
    write_count = metrics.GetWrite(write_version="2")
    save_api_count = metrics.GetWriteApi(save._SAVE_V2_LABEL)
    _ = self._create_save_v2_model()
    self.assertEqual(
        metrics.GetWriteApi(save._SAVE_V2_LABEL), save_api_count + 1)
    self.assertEqual(metrics.GetWrite(write_version="2"), write_count + 1)

def test_builder_save(self):
    """TF1 builder save bumps the builder API counter and the v1 write counter."""
    write_count = metrics.GetWrite(write_version="1")
    save_builder_count = metrics.GetWriteApi(builder_impl._SAVE_BUILDER_LABEL)
    _ = self._create_save_v1_model()
    self.assertEqual(
        metrics.GetWriteApi(builder_impl._SAVE_BUILDER_LABEL),
        save_builder_count + 1)
    self.assertEqual(metrics.GetWrite(write_version="1"), write_count + 1)

def test_load_v2(self):
    """Loading a v2 SavedModel bumps the v2 load-API and read counters."""
    save_dir = self._create_save_v2_model()
    read_count = metrics.GetRead(write_version="2")
    load_v2_count = metrics.GetReadApi(load._LOAD_V2_LABEL)
    load.load(save_dir)
    self.assertEqual(metrics.GetReadApi(load._LOAD_V2_LABEL), load_v2_count + 1)
    self.assertEqual(metrics.GetRead(write_version="2"), read_count + 1)

def test_load_v1_in_v2(self):
    """Loading a v1 model through the v2 API counts as a v1 read only."""
    save_dir = self._create_save_v1_model()
    read_v1_count = metrics.GetRead(write_version="1")
    read_v2_count = metrics.GetRead(write_version="2")
    load_v2_count = metrics.GetReadApi(load._LOAD_V2_LABEL)
    load_v1_v2_count = metrics.GetReadApi(load_v1_in_v2._LOAD_V1_V2_LABEL)
    load.load(save_dir)
    # Check that `load_v2` was *not* incremented.
    self.assertEqual(metrics.GetReadApi(load._LOAD_V2_LABEL), load_v2_count)
    self.assertEqual(metrics.GetRead(write_version="2"), read_v2_count)
    self.assertEqual(
        metrics.GetReadApi(load_v1_in_v2._LOAD_V1_V2_LABEL),
        load_v1_v2_count + 1)
    self.assertEqual(metrics.GetRead(write_version="1"), read_v1_count + 1)

def test_loader_v1(self):
    """The TF1 loader bumps the loader API counter and the v1 read counter."""
    ops.disable_eager_execution()
    try:
        save_dir = self._create_save_v1_model()
        read_count = metrics.GetRead(write_version="1")
        loader = loader_impl.SavedModelLoader(save_dir)
        with self.session(graph=ops.Graph()) as sess:
            loader.load(sess, ["foo"])
    finally:
        # Always restore eager mode so a failure above cannot leave graph
        # mode enabled and poison every subsequent test in the process.
        ops.enable_eager_execution()
    self.assertEqual(metrics.GetReadApi(loader_impl._LOADER_LABEL), 1)
    self.assertEqual(metrics.GetRead(write_version="1"), read_count + 1)
def test_save_sets_write_fingerprint_metric(self):
    """The fingerprint recorded at save time matches the one on disk."""
    exported_dir = self._create_save_v2_model()
    fingerprint = fingerprinting.read_fingerprint(exported_dir)
    fingerprint_metric = fingerprinting.Fingerprint.from_proto(
        json_format.Parse(metrics.GetWriteFingerprint(),
                          fingerprinting.fingerprint_pb2.FingerprintDef()))
    self.assertEqual(fingerprint, fingerprint_metric)

def test_load_sets_read_fingerprint_metric(self):
    """The fingerprint recorded at load time matches the one on disk."""
    exported_dir = self._create_save_v2_model()
    load.load(exported_dir)
    fingerprint = fingerprinting.read_fingerprint(exported_dir)
    fingerprint_metric = fingerprinting.Fingerprint.from_proto(
        json_format.Parse(metrics.GetReadFingerprint(),
                          fingerprinting.fingerprint_pb2.FingerprintDef()))
    self.assertEqual(fingerprint, fingerprint_metric)

def test_save_sets_write_path_metric(self):
    """Saving records the export directory as the write path."""
    exported_dir = self._create_save_v2_model()
    self.assertEqual(metrics.GetWritePath(), exported_dir)

def test_load_sets_read_path_metric(self):
    """Loading records the export directory as the read path."""
    exported_dir = self._create_save_v2_model()
    load.load(exported_dir)
    self.assertEqual(metrics.GetReadPath(), exported_dir)

def test_save_sets_write_path_and_singleprint_metric(self):
    """Saving records the (path, singleprint) pair for the write."""
    exported_dir = self._create_save_v2_model()
    singleprint = fingerprinting.read_fingerprint(exported_dir).singleprint()
    path_and_singleprint_metric = metrics.GetWritePathAndSingleprint()
    self.assertEqual(path_and_singleprint_metric, (exported_dir, singleprint))

def test_save_sets_read_path_and_singleprint_metric(self):
    """Loading records the (path, singleprint) pair for the read."""
    exported_dir = self._create_save_v2_model()
    load.load(exported_dir)
    singleprint = fingerprinting.read_fingerprint(exported_dir).singleprint()
    path_and_singleprint_metric = metrics.GetReadPathAndSingleprint()
    self.assertEqual(path_and_singleprint_metric, (exported_dir, singleprint))

def test_save_sets_sharding_callback_duration_metric(self):
    """Saving records a positive sharding-callback duration."""
    self._create_save_v2_model()
    sharding_callback_duration_metric = metrics.GetShardingCallbackDuration()
    self.assertGreater(sharding_callback_duration_metric, 0)

def test_save_sets_num_checkpoint_shards_written_metric(self):
    """Saving records at least one written checkpoint shard."""
    self._create_save_v2_model()
    num_shards_written_metric = metrics.GetNumCheckpointShardsWritten()
    self.assertGreater(num_shards_written_metric, 0)

def test_save_sets_sharding_callback_description_metric(self):
    """Saving records the default (shard-by-task) policy description."""
    self._create_save_v2_model()
    callback_description_metric = metrics.GetShardingCallbackDescription()
    self.assertEqual(callback_description_metric,
                     sharding_policies.ShardByTaskPolicy().description)


if __name__ == "__main__":
    test.main()
| MetricsTests |
python | squidfunk__mkdocs-material | material/plugins/tags/renderer/__init__.py | {
"start": 1513,
"end": 3570
} | class ____:
"""
A renderer for tags and listings.
This class implements a simple tag and listing renderer, leveraging the
Jinja environment and the MkDocs configuration as provided to plugins.
Note that the templates must be stored in the `fragments` and not `partials`
directory, because in order to render tags and listings, we must wait for
all pages to be read and processed, as we first need to collect all tags
before we can render listings. Tags induce a graph, not a tree.
For this reason, we consider the templates to be fragments, as they are
not implicitly rendered by MkDocs, but explicitly by the plugin.
"""
def __init__(self, env: Environment, config: MkDocsConfig):
    """
    Initialize renderer.

    Arguments:
        env: The Jinja environment.
        config: The MkDocs configuration.
    """
    self.env = env
    self.config = config

# -------------------------------------------------------------------------

env: Environment
"""
The Jinja environment.
"""

config: MkDocsConfig
"""
The MkDocs configuration.
"""

# -------------------------------------------------------------------------
def render(self, page: Page, name: str, **kwargs) -> str:
    """
    Render a template.

    Templates are resolved from `fragments/tags`, so overrides and
    additional templates belong in that directory. Keyword arguments are
    passed through to the template as variables.

    Arguments:
        page: The page.
        name: The name of the template.
        kwargs: The template variables.

    Returns:
        The rendered template.
    """
    template = self.env.get_template(
        posixpath.normpath(posixpath.join("fragments", "tags", name))
    )
    context = {
        "config": self.config,
        "page": page,
        "base_url": get_relative_url(".", page.url),
    }
    return template.render(**context, **kwargs)
| Renderer |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 21355,
"end": 21622
} | class ____(SocketTestBase):
"""Base class for IPv4 socket tests."""
# IPv4 loopback test host (module-level HOST constant).
host = HOST

def setUp(self):
    super().setUp()
    # Remember which port the server socket was bound to.
    self.port = self.serv_addr[1]

def bindSock(self, sock):
    # Bind to a free ephemeral port on the IPv4 test host.
    socket_helper.bind_port(sock, host=self.host)
python | pytorch__pytorch | test/distributed/flight_recorder/test_fr_analysis.py | {
"start": 7060,
"end": 14484
} | class ____(TestCase):
def testBuildDB(self):
    """End-to-end checks of build_db() against synthetic FR dump templates.

    Each sub-case appends matching or mismatching collective entries to
    per-rank dump templates and verifies the resulting collective records
    (record_id, name, and whether the cross-rank match check passes).
    """
    config = JobConfig()
    args = config.parse_args([])
    version = "2.8"  # Same as the version in FlightRecorder.hpp
    LOADED_FR_DETAIL_TEMPLATE["dump_file_rank_0"]["version"] = version
    LOADED_FR_DETAIL_TEMPLATE["dump_file_rank_1"]["version"] = version
    # Test case 1: matched all_reduce case.
    details1 = copy.deepcopy(LOADED_FR_DETAIL_TEMPLATE)
    details1["dump_file_rank_0"]["entries"].append(
        create_one_entry(0, "all_reduce", [[4, 4]], [[4, 4]])
    )
    details1["dump_file_rank_1"]["entries"].append(
        create_one_entry(0, "all_reduce", [[4, 4]], [[4, 4]])
    )
    details1["dump_file_rank_0"]["entries"].append(
        create_one_entry(
            1, "all_reduce", [[5, 5]], [[5, 5]], pg_info=("1", "sub_pg")
        )
    )
    details1["dump_file_rank_1"]["entries"].append(
        create_one_entry(
            1, "all_reduce", [[5, 5]], [[5, 5]], pg_info=("1", "sub_pg")
        )
    )
    db = build_db(details1, args, version)
    self.assertEqual(len(db.collectives), 3)
    self.assertEqual(db.collectives[0].record_id, 0)
    self.assertEqual(db.collectives[0].collective_name, "nccl:all_reduce")
    self.assertEqual(db.collectives[0].pass_check, True)
    self.assertEqual(db.collectives[1].record_id, 1)
    self.assertEqual(db.collectives[1].collective_name, "nccl:all_reduce")
    self.assertEqual(db.collectives[1].pass_check, True)
    self.assertEqual(db.collectives[2].pass_check, True)
    # Test case 2: matched allreduce_coalesced case.
    details2 = copy.deepcopy(LOADED_FR_DETAIL_TEMPLATE)
    details2["dump_file_rank_0"]["entries"].append(
        create_one_entry(0, "allreduce_coalesced", [[4, 4]], [[4, 4]])
    )
    details2["dump_file_rank_1"]["entries"].append(
        create_one_entry(0, "allreduce_coalesced", [[4, 4]], [[4, 4]])
    )
    db = build_db(details2, args, version)
    self.assertEqual(len(db.collectives), 1)
    self.assertEqual(db.collectives[0].record_id, 0)
    self.assertEqual(db.collectives[0].collective_name, "nccl:allreduce_coalesced")
    self.assertEqual(db.collectives[0].pass_check, True)
    # Test case 3: matched slow path, two broadcast coalesce case.
    details3 = copy.deepcopy(LOADED_FR_DETAIL_TEMPLATE)
    # sequence ID should not increase for coalesced collectives
    details3["dump_file_rank_0"]["entries"].append(
        create_one_entry(0, "broadcast", [[4, 4]], [[4, 4]])
    )
    details3["dump_file_rank_0"]["entries"].append(
        create_one_entry(1, "broadcast", [[4, 4]], [[4, 4]])
    )
    details3["dump_file_rank_0"]["entries"].append(
        create_one_entry(2, "coalesced", [[]], [[]])
    )
    details3["dump_file_rank_1"]["entries"].append(
        create_one_entry(0, "broadcast", [[4, 4]], [[4, 4]])
    )
    details3["dump_file_rank_1"]["entries"].append(
        create_one_entry(1, "broadcast", [[4, 4]], [[4, 4]])
    )
    details3["dump_file_rank_1"]["entries"].append(
        create_one_entry(2, "coalesced", [[]], [[]])
    )
    db = build_db(details3, args, version)
    self.assertEqual(len(db.collectives), 1)
    self.assertEqual(db.collectives[0].record_id, 2)
    self.assertEqual(db.collectives[0].collective_name, "nccl:coalesced")
    self.assertEqual(db.collectives[0].pass_check, True)
    # Test case 4: mis-matched uneven all-gather case.
    details4 = copy.deepcopy(LOADED_FR_DETAIL_TEMPLATE)
    # sequence ID should not increase for coalesced collectives
    details4["dump_file_rank_0"]["entries"].append(
        create_one_entry(0, "_broadcast_oop", [[4, 4]], [[4, 4]])
    )
    # Rank 0 sends [[5, 5]] here while rank 1 sends [[4, 4]] below -> mismatch.
    details4["dump_file_rank_0"]["entries"].append(
        create_one_entry(1, "_broadcast_oop", [[5, 5]], [[5, 5]])
    )
    details4["dump_file_rank_0"]["entries"].append(
        create_one_entry(2, "ALLGATHER_coalesced", [[]], [[]])
    )
    details4["dump_file_rank_1"]["entries"].append(
        create_one_entry(0, "_broadcast_oop", [[4, 4]], [[4, 4]])
    )
    details4["dump_file_rank_1"]["entries"].append(
        create_one_entry(1, "_broadcast_oop", [[4, 4]], [[4, 4]])
    )
    details4["dump_file_rank_1"]["entries"].append(
        create_one_entry(2, "ALLGATHER_coalesced", [[]], [[]])
    )
    db = build_db(details4, args, version)
    self.assertEqual(len(db.collectives), 1)
    self.assertEqual(db.collectives[0].record_id, 1)
    self.assertEqual(db.collectives[0].collective_name, "nccl:_broadcast_oop")
    self.assertEqual(db.collectives[0].pass_check, False)
    # Test case 5: matched uneven reduce scatter case.
    details5 = copy.deepcopy(LOADED_FR_DETAIL_TEMPLATE)
    # sequence ID should not increase for coalesced collectives
    details5["dump_file_rank_0"]["entries"].append(
        create_one_entry(0, "_reduce_oop", [[4, 4]], [[4, 4]])
    )
    details5["dump_file_rank_0"]["entries"].append(
        create_one_entry(1, "_reduce_oop", [[4, 4]], [[4, 4]])
    )
    details5["dump_file_rank_0"]["entries"].append(
        create_one_entry(2, "REDUCE_SCATTER_coalesced", [[]], [[]])
    )
    details5["dump_file_rank_1"]["entries"].append(
        create_one_entry(0, "_reduce_oop", [[4, 4]], [[4, 4]])
    )
    details5["dump_file_rank_1"]["entries"].append(
        create_one_entry(1, "_reduce_oop", [[4, 4]], [[4, 4]])
    )
    details5["dump_file_rank_1"]["entries"].append(
        create_one_entry(2, "REDUCE_SCATTER_coalesced", [[]], [[]])
    )
    db = build_db(details5, args, version)
    self.assertEqual(len(db.collectives), 1)
    self.assertEqual(db.collectives[0].record_id, 2)
    self.assertEqual(
        db.collectives[0].collective_name, "nccl:REDUCE_SCATTER_coalesced"
    )
    self.assertEqual(db.collectives[0].pass_check, True)
    # Test case 6: empty coalesced call on rank 0 case.
    details6 = copy.deepcopy(LOADED_FR_DETAIL_TEMPLATE)
    # sequence ID should not increase for coalesced collectives
    details6["dump_file_rank_0"]["entries"].append(
        create_one_entry(0, "all_reduce", [[4, 4]], [[4, 4]])
    )
    details6["dump_file_rank_1"]["entries"].append(
        create_one_entry(0, "all_reduce", [[4, 4]], [[4, 4]])
    )
    details6["dump_file_rank_1"]["entries"].append(
        create_one_entry(1, "_reduce_oop", [[4, 4]], [[4, 4]])
    )
    details6["dump_file_rank_1"]["entries"].append(
        create_one_entry(2, "_reduce_oop", [[4, 4]], [[4, 4]])
    )
    details6["dump_file_rank_1"]["entries"].append(
        create_one_entry(3, "REDUCE_SCATTER_coalesced", [[]], [[]])
    )
    db = build_db(details6, args, version)
    self.assertEqual(len(db.collectives), 2)
    self.assertEqual(db.collectives[1].collective_name, "nccl:_reduce_oop")
    self.assertEqual(db.collectives[1].record_id, 1)
    self.assertEqual(db.collectives[1].pass_check, True)


if __name__ == "__main__":
    run_tests()
| FlightRecorderE2ETest |
python | huggingface__transformers | tests/quantization/bitnet_integration/test_bitnet.py | {
"start": 1646,
"end": 6885
} | class ____(unittest.TestCase):
# 1.58-bit quantized Llama3-8B checkpoint used by every test below.
model_name = "HF1BitLLM/Llama3-8B-1.58-100B-tokens"

# called only once for all test in this class
@classmethod
def setUpClass(cls):
    """
    Load the tokenizer and the quantized model once for the whole class.
    """
    cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
    cls.quantized_model = AutoModelForCausalLM.from_pretrained(cls.model_name, device_map=torch_device)

def tearDown(self):
    # Drop dangling references and free accelerator memory between tests.
    gc.collect()
    backend_empty_cache(torch_device)
    gc.collect()
def test_replace_with_bitlinear(self):
    """All nn.Linear layers except one are converted to BitLinear."""
    from transformers.integrations import BitLinear, replace_with_bitnet_linear
    model_id = "facebook/opt-350m"
    config = AutoConfig.from_pretrained(model_id)
    with init_empty_weights():
        model = OPTForCausalLM(config)
    nb_linears = 0
    for module in model.modules():
        if isinstance(module, torch.nn.Linear):
            nb_linears += 1
    model = replace_with_bitnet_linear(model)
    nb_bitnet_linear = 0
    for module in model.modules():
        if isinstance(module, BitLinear):
            nb_bitnet_linear += 1
    # Exactly one Linear is expected to remain unconverted (presumably the
    # lm_head, which replace_with_bitnet_linear skips -- TODO confirm).
    self.assertEqual(nb_linears - 1, nb_bitnet_linear)
def test_quantized_model(self):
    """
    Simple test that checks if the quantized model is working properly
    """
    input_text = "What are we having for dinner?"
    expected_output = "What are we having for dinner? What are we going to do for fun this weekend?"
    input_ids = self.tokenizer(input_text, return_tensors="pt").to(torch_device)
    # Greedy decoding so the generated continuation is deterministic.
    output = self.quantized_model.generate(**input_ids, max_new_tokens=11, do_sample=False)
    self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), expected_output)

def test_packing_unpacking(self):
    """
    Simple test the packing and unpacking logic
    """
    from transformers.integrations import pack_weights, unpack_weights
    u = torch.randint(0, 255, (256, 256), dtype=torch.uint8)
    unpacked_u = unpack_weights(u, dtype=torch.bfloat16)
    repacked_u = pack_weights(unpacked_u)
    # Round trip (unpack then pack) must reproduce every byte exactly.
    for i in range(u.shape[0]):
        for j in range(u.shape[1]):
            self.assertEqual(repacked_u[i][j], u[i][j])
def test_activation_quant(self):
    """
    test the activation function behaviour
    """
    from transformers.integrations import BitLinear
    layer = BitLinear(in_features=4, out_features=2, bias=False, dtype=torch.float32)
    layer.to(torch_device)
    input_tensor = torch.tensor([1.0, -1.0, -1.0, 1.0], dtype=torch.float32).to(torch_device)
    # Quantize the input tensor
    quantized_tensor, scale = layer.activation_quant(input_tensor)
    # Verify the output quantized tensor: dequantizing (dividing by the
    # scale) must reproduce the original values for this +/-1 input.
    for i in range(input_tensor.shape[0]):
        self.assertEqual(quantized_tensor[i] / scale, input_tensor[i])
    # Verify the scale tensor (int8 max magnitude for unit-range input).
    self.assertEqual(scale, 127)
def test_weights_dtype(self):
    """
    test the weights dtype after loading
    """
    # All attention and MLP projection weights are stored packed as uint8.
    self_attn_q = self.quantized_model.model.layers[0].self_attn.q_proj.weight
    self_attn_k = self.quantized_model.model.layers[0].self_attn.k_proj.weight
    self_attn_v = self.quantized_model.model.layers[0].self_attn.v_proj.weight
    self_attn_o = self.quantized_model.model.layers[0].self_attn.o_proj.weight
    mlp_gate = self.quantized_model.model.layers[0].mlp.gate_proj.weight
    mlp_up = self.quantized_model.model.layers[0].mlp.up_proj.weight
    mlp_down = self.quantized_model.model.layers[0].mlp.down_proj.weight
    self.assertEqual(self_attn_q.dtype, torch.uint8)
    self.assertEqual(self_attn_k.dtype, torch.uint8)
    self.assertEqual(self_attn_v.dtype, torch.uint8)
    self.assertEqual(self_attn_o.dtype, torch.uint8)
    self.assertEqual(mlp_up.dtype, torch.uint8)
    self.assertEqual(mlp_gate.dtype, torch.uint8)
    self.assertEqual(mlp_down.dtype, torch.uint8)
def test_replace_with_bitlinear_shape(self):
    """
    test that the BitNet layer weight shapes are correct, and the weight_scale is correctly initialized to 1
    """
    from transformers.integrations import replace_with_bitnet_linear
    out_features = 1024
    in_features = 512

    class SimpleLinearModule(torch.nn.Module):
        """
        Simple class to test BitLinear
        """

        def __init__(
            self,
            in_features: int = in_features,
            out_features: int = out_features,
            bias: bool = False,
        ):
            super().__init__()
            self.linear = torch.nn.Linear(in_features=in_features, out_features=out_features, bias=bias)

        def forward(self, x):
            return self.linear(x)

    model = SimpleLinearModule()
    replace_with_bitnet_linear(model)
    # out_features // 4: the packed uint8 weight holds 4 values per byte
    # along the output dimension (consistent with test_packing_unpacking).
    self.assertEqual(list(model.linear.weight.shape), [out_features // 4, in_features])
    self.assertEqual(model.linear.weight_scale, 1)
@slow
@require_torch_accelerator
@require_accelerate
| BitNetTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_merge_range03.py | {
"start": 315,
"end": 1018
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
    # Name of the Excel-generated reference file to compare against.
    self.set_filename("merge_range03.xlsx")

def test_create_file(self):
    """Test the creation of a simple XlsxWriter file."""
    workbook = Workbook(self.got_filename)
    worksheet = workbook.add_worksheet()
    cell_format = workbook.add_format({"align": "center"})
    # Three horizontal two-cell merges on row 1, all centered.
    worksheet.merge_range(1, 1, 1, 2, "Foo", cell_format)
    worksheet.merge_range(1, 3, 1, 4, "Foo", cell_format)
    worksheet.merge_range(1, 5, 1, 6, "Foo", cell_format)
    workbook.close()
    self.assertExcelEqual()
| TestCompareXLSXFiles |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_threading.py | {
"start": 474,
"end": 1764
} | class ____:
def run_threaded(
    self,
    func,
    *thread_args,
    nthreads=None,
    use_barrier=False,
    **thread_kwargs,
):
    """Run ``func`` concurrently on ``nthreads`` threads and collect results.

    ``func`` is invoked as ``func(local_result, thread_name, *thread_args,
    **thread_kwargs)``; whatever it appends to ``local_result`` is recorded
    as a tuple in the returned ``results`` list. Exceptions raised inside a
    thread are captured as ``(thread_name, repr(exc))`` in ``errors``
    instead of propagating, so callers can assert on them.

    :param func: callable executed by every thread.
    :param nthreads: number of worker threads; defaults to the module-level
        ``NUM_THREADS``, resolved at call time (not at import time) so test
        configuration can override it.
    :param use_barrier: when True, all threads wait on a shared barrier
        before calling ``func`` to maximize concurrent interleaving.
    :return: ``(results, errors)`` lists.
    """
    if nthreads is None:
        nthreads = NUM_THREADS
    barrier = threading.Barrier(nthreads)
    results = []
    errors = []

    def thread_func(*args, **kwargs):
        thread_name = threading.current_thread().name
        if use_barrier:
            barrier.wait()
        local_result = []
        try:
            func(local_result, thread_name, *args, **kwargs)
            # list.append is atomic in CPython, so no lock is needed here.
            results.append(tuple(local_result))
        except Exception as e:
            errors.append((thread_name, repr(e)))

    threads = [
        threading.Thread(
            name=f"thread-{i}",
            target=thread_func,
            args=thread_args,
            kwargs=thread_kwargs,
        )
        for i in range(nthreads)
    ]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    return results, errors
@testing.fixture
def num_threads_engine(self, testing_engine):
    # Engine whose pool is large enough for one connection per worker thread.
    return testing_engine(options=dict(pool_size=NUM_THREADS))
@testing.add_to_marker.timing_intensive
| _ThreadTest |
python | django__django | tests/model_fields/test_genericipaddressfield.py | {
"start": 153,
"end": 1474
} | class ____(TestCase):
def test_genericipaddressfield_formfield_protocol(self):
    """
    GenericIPAddressField with a specified protocol does not generate a
    formfield without a protocol.
    """
    # IPv4-only field must reject an IPv6 address...
    model_field = models.GenericIPAddressField(protocol="IPv4")
    form_field = model_field.formfield()
    with self.assertRaises(ValidationError):
        form_field.clean("::1")
    # ...and an IPv6-only field must reject an IPv4 address.
    model_field = models.GenericIPAddressField(protocol="IPv6")
    form_field = model_field.formfield()
    with self.assertRaises(ValidationError):
        form_field.clean("127.0.0.1")

def test_null_value(self):
    """
    Null values should be resolved to None.
    """
    GenericIPAddress.objects.create()
    o = GenericIPAddress.objects.get()
    self.assertIsNone(o.ip)

def test_blank_string_saved_as_null(self):
    """An empty string is stored as NULL on both create and update paths."""
    o = GenericIPAddress.objects.create(ip="")
    o.refresh_from_db()
    self.assertIsNone(o.ip)
    GenericIPAddress.objects.update(ip="")
    o.refresh_from_db()
    self.assertIsNone(o.ip)

def test_save_load(self):
    """A saved IPv6 address round-trips through the database unchanged."""
    instance = GenericIPAddress.objects.create(ip="::1")
    loaded = GenericIPAddress.objects.get()
    self.assertEqual(loaded.ip, instance.ip)
python | getsentry__sentry | src/sentry/core/endpoints/project_environment_details.py | {
"start": 986,
"end": 3664
} | class ____(ProjectEndpoint):
# Both verbs are part of the public, documented API surface.
publish_status = {
    "GET": ApiPublishStatus.PUBLIC,
    "PUT": ApiPublishStatus.PUBLIC,
}

@extend_schema(
    operation_id="Retrieve a Project Environment",
    parameters=[
        GlobalParams.ORG_ID_OR_SLUG,
        GlobalParams.PROJECT_ID_OR_SLUG,
        EnvironmentParams.ENVIRONMENT,
    ],
    responses={
        200: EnvironmentProjectSerializer,
        401: RESPONSE_UNAUTHORIZED,
        403: RESPONSE_FORBIDDEN,
        404: RESPONSE_NOT_FOUND,
    },
    examples=EnvironmentExamples.RETRIEVE_PROJECT_ENVIRONMENT,
)
def get(self, request: Request, project, environment) -> Response:
    """
    Return details on a project environment.
    """
    try:
        # The environment name arrives URL-encoded as a path segment.
        instance = EnvironmentProject.objects.select_related("environment").get(
            project=project,
            environment__name=Environment.get_name_from_path_segment(environment),
        )
    except EnvironmentProject.DoesNotExist:
        raise ResourceDoesNotExist
    return Response(serialize(instance, request.user))
@extend_schema(
    operation_id="Update a Project Environment",
    parameters=[
        GlobalParams.ORG_ID_OR_SLUG,
        GlobalParams.PROJECT_ID_OR_SLUG,
        EnvironmentParams.ENVIRONMENT,
    ],
    request=EnvironmentSerializer,
    responses={
        200: EnvironmentProjectSerializer,
        400: RESPONSE_BAD_REQUEST,
        401: RESPONSE_UNAUTHORIZED,
        403: RESPONSE_FORBIDDEN,
        404: RESPONSE_NOT_FOUND,
    },
    examples=EnvironmentExamples.RETRIEVE_PROJECT_ENVIRONMENT,
)
def put(self, request: Request, project, environment) -> Response:
    """
    Update the visibility for a project environment.
    """
    try:
        # Same lookup as GET: environment name comes from the URL path.
        instance = EnvironmentProject.objects.select_related("environment").get(
            project=project,
            environment__name=Environment.get_name_from_path_segment(environment),
        )
    except EnvironmentProject.DoesNotExist:
        raise ResourceDoesNotExist
    # partial=True: only the provided fields are validated/updated.
    serializer = EnvironmentSerializer(data=request.data, partial=True)
    if not serializer.is_valid():
        return Response(serializer.errors, status=400)
    data = serializer.validated_data
    fields = {}
    if "isHidden" in data:
        fields["is_hidden"] = data["isHidden"]
    if fields:
        instance.update(**fields)
    return Response(serialize(instance, request.user))
python | tornadoweb__tornado | tornado/iostream.py | {
"start": 51549,
"end": 61792
} | class ____(IOStream):
"""A utility class to write to and read from a non-blocking SSL socket.
If the socket passed to the constructor is already connected,
it should be wrapped with::
ssl.SSLContext(...).wrap_socket(sock, do_handshake_on_connect=False, **kwargs)
before constructing the `SSLIOStream`. Unconnected sockets will be
wrapped when `IOStream.connect` is finished.
"""
socket = None # type: ssl.SSLSocket
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""The ``ssl_options`` keyword argument may either be an
`ssl.SSLContext` object or a dictionary of keywords arguments
for `ssl.SSLContext.wrap_socket`
"""
self._ssl_options = kwargs.pop("ssl_options", _client_ssl_defaults)
super().__init__(*args, **kwargs)
self._ssl_accepting = True
self._handshake_reading = False
self._handshake_writing = False
self._server_hostname = None # type: Optional[str]
# If the socket is already connected, attempt to start the handshake.
try:
self.socket.getpeername()
except OSError:
pass
else:
# Indirectly start the handshake, which will run on the next
# IOLoop iteration and then the real IO state will be set in
# _handle_events.
self._add_io_state(self.io_loop.WRITE)
def reading(self) -> bool:
return self._handshake_reading or super().reading()
def writing(self) -> bool:
return self._handshake_writing or super().writing()
def _do_ssl_handshake(self) -> None:
# Based on code from test_ssl.py in the python stdlib
try:
self._handshake_reading = False
self._handshake_writing = False
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
self._handshake_reading = True
return
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
self._handshake_writing = True
return
elif err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
return self.close(exc_info=err)
elif err.args[0] in (ssl.SSL_ERROR_SSL, ssl.SSL_ERROR_SYSCALL):
try:
peer = self.socket.getpeername()
except Exception:
peer = "(not connected)"
gen_log.warning(
"SSL Error on %s %s: %s", self.socket.fileno(), peer, err
)
return self.close(exc_info=err)
raise
except OSError as err:
# Some port scans (e.g. nmap in -sT mode) have been known
# to cause do_handshake to raise EBADF and ENOTCONN, so make
# those errors quiet as well.
# https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0
# Errno 0 is also possible in some cases (nc -z).
# https://github.com/tornadoweb/tornado/issues/2504
if self._is_connreset(err) or err.args[0] in (
0,
errno.EBADF,
errno.ENOTCONN,
):
return self.close(exc_info=err)
raise
except AttributeError as err:
# On Linux, if the connection was reset before the call to
# wrap_socket, do_handshake will fail with an
# AttributeError.
return self.close(exc_info=err)
else:
self._ssl_accepting = False
# Prior to the introduction of SNI, this is where we would check
# the server's claimed hostname.
assert ssl.HAS_SNI
self._finish_ssl_connect()
def _finish_ssl_connect(self) -> None:
if self._ssl_connect_future is not None:
future = self._ssl_connect_future
self._ssl_connect_future = None
future_set_result_unless_cancelled(future, self)
def _handle_read(self) -> None:
if self._ssl_accepting:
self._do_ssl_handshake()
return
super()._handle_read()
def _handle_write(self) -> None:
if self._ssl_accepting:
self._do_ssl_handshake()
return
super()._handle_write()
def connect(
self, address: Tuple, server_hostname: Optional[str] = None
) -> "Future[SSLIOStream]":
self._server_hostname = server_hostname
# Ignore the result of connect(). If it fails,
# wait_for_handshake will raise an error too. This is
# necessary for the old semantics of the connect callback
# (which takes no arguments). In 6.0 this can be refactored to
# be a regular coroutine.
# TODO: This is trickier than it looks, since if write()
# is called with a connect() pending, we want the connect
# to resolve before the write. Or do we care about this?
# (There's a test for it, but I think in practice users
# either wait for the connect before performing a write or
# they don't care about the connect Future at all)
fut = super().connect(address)
fut.add_done_callback(lambda f: f.exception())
return self.wait_for_handshake()
def _handle_connect(self) -> None:
# Call the superclass method to check for errors.
super()._handle_connect()
if self.closed():
return
# When the connection is complete, wrap the socket for SSL
# traffic. Note that we do this by overriding _handle_connect
# instead of by passing a callback to super().connect because
# user callbacks are enqueued asynchronously on the IOLoop,
# but since _handle_events calls _handle_connect immediately
# followed by _handle_write we need this to be synchronous.
#
# The IOLoop will get confused if we swap out self.socket while the
# fd is registered, so remove it now and re-register after
# wrap_socket().
self.io_loop.remove_handler(self.socket)
old_state = self._state
assert old_state is not None
self._state = None
self.socket = ssl_wrap_socket(
self.socket,
self._ssl_options,
server_hostname=self._server_hostname,
do_handshake_on_connect=False,
server_side=False,
)
self._add_io_state(old_state)
def wait_for_handshake(self) -> "Future[SSLIOStream]":
"""Wait for the initial SSL handshake to complete.
If a ``callback`` is given, it will be called with no
arguments once the handshake is complete; otherwise this
method returns a `.Future` which will resolve to the
stream itself after the handshake is complete.
Once the handshake is complete, information such as
the peer's certificate and NPN/ALPN selections may be
accessed on ``self.socket``.
This method is intended for use on server-side streams
or after using `IOStream.start_tls`; it should not be used
with `IOStream.connect` (which already waits for the
handshake to complete). It may only be called once per stream.
.. versionadded:: 4.2
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned
`.Future` instead.
"""
if self._ssl_connect_future is not None:
raise RuntimeError("Already waiting")
future = self._ssl_connect_future = Future()
if not self._ssl_accepting:
self._finish_ssl_connect()
return future
def write_to_fd(self, data: memoryview) -> int:
# clip buffer size at 1GB since SSL sockets only support upto 2GB
# this change in behaviour is transparent, since the function is
# already expected to (possibly) write less than the provided buffer
if len(data) >> 30:
data = memoryview(data)[: 1 << 30]
try:
return self.socket.send(data) # type: ignore
except ssl.SSLError as e:
if e.args[0] == ssl.SSL_ERROR_WANT_WRITE:
# In Python 3.5+, SSLSocket.send raises a WANT_WRITE error if
# the socket is not writeable; we need to transform this into
# an EWOULDBLOCK socket.error or a zero return value,
# either of which will be recognized by the caller of this
# method. Prior to Python 3.5, an unwriteable socket would
# simply return 0 bytes written.
return 0
raise
finally:
# Avoid keeping to data, which can be a memoryview.
# See https://github.com/tornadoweb/tornado/pull/2008
del data
def read_from_fd(self, buf: Union[bytearray, memoryview]) -> Optional[int]:
try:
if self._ssl_accepting:
# If the handshake hasn't finished yet, there can't be anything
# to read (attempting to read may or may not raise an exception
# depending on the SSL version)
return None
# clip buffer size at 1GB since SSL sockets only support upto 2GB
# this change in behaviour is transparent, since the function is
# already expected to (possibly) read less than the provided buffer
if len(buf) >> 30:
buf = memoryview(buf)[: 1 << 30]
try:
return self.socket.recv_into(buf, len(buf))
except ssl.SSLError as e:
# SSLError is a subclass of socket.error, so this except
# block must come first.
if e.args[0] == ssl.SSL_ERROR_WANT_READ:
return None
else:
raise
except BlockingIOError:
return None
finally:
del buf
def _is_connreset(self, e: BaseException) -> bool:
if isinstance(e, ssl.SSLError) and e.args[0] == ssl.SSL_ERROR_EOF:
return True
return super()._is_connreset(e)
| SSLIOStream |
python | doocs__leetcode | solution/0900-0999/0956.Tallest Billboard/Solution.py | {
"start": 0,
"end": 389
} | class ____:
def tallestBillboard(self, rods: List[int]) -> int:
@cache
def dfs(i: int, j: int) -> int:
if i >= len(rods):
return 0 if j == 0 else -inf
ans = max(dfs(i + 1, j), dfs(i + 1, j + rods[i]))
ans = max(ans, dfs(i + 1, abs(rods[i] - j)) + min(j, rods[i]))
return ans
return dfs(0, 0)
| Solution |
python | mlflow__mlflow | mlflow/store/model_registry/dbmodels/models.py | {
"start": 7522,
"end": 9027
} | class ____(Base):
__tablename__ = "webhooks"
webhook_id = Column(String(256), nullable=False)
name = Column(String(256), nullable=False)
description = Column(String(1000), nullable=True)
url = Column(String(500), nullable=False)
status = Column(String(20), nullable=False, default="ACTIVE")
secret = Column(EncryptedString(), nullable=True) # Encrypted storage for HMAC secret
creation_timestamp = Column(BigInteger, default=get_current_time_millis)
last_updated_timestamp = Column(BigInteger, nullable=True, default=None)
deleted_timestamp = Column(BigInteger, nullable=True, default=None) # For soft deletes
__table_args__ = (
PrimaryKeyConstraint("webhook_id", name="webhook_pk"),
Index("idx_webhooks_status", "status"),
Index("idx_webhooks_name", "name"),
)
def __repr__(self):
return (
f"<SqlWebhook ({self.webhook_id}, {self.name}, {self.url}, "
f"{self.status}, {self.creation_timestamp})>"
)
def to_mlflow_entity(self):
return Webhook(
webhook_id=self.webhook_id,
name=self.name,
url=self.url,
events=[we.to_mlflow_entity() for we in self.webhook_events],
creation_timestamp=self.creation_timestamp,
last_updated_timestamp=self.last_updated_timestamp,
description=self.description,
status=WebhookStatus(self.status),
secret=self.secret,
)
| SqlWebhook |
python | bokeh__bokeh | tests/unit/bokeh/document/test_models.py | {
"start": 1465,
"end": 9902
} | class ____:
def test_basic(self) -> None:
d = Document()
dm = bdm.DocumentModelManager(d)
assert len(dm) == 0
# module manager should only hold a weak ref
assert len(gc.get_referrers(d)) == 0
def test_len(self) -> None:
d = Document()
dm = d.models
assert len(dm) == 0
r1 = Row(children=[Div()])
r2 = Row(children=[Div(), Div()])
d.add_root(r1)
assert len(dm) == 2
d.add_root(r2)
assert len(dm) == 5
d.remove_root(r1)
assert len(dm) == 3
d.remove_root(r2)
assert len(dm) == 0
def test_setitem_getitem(self) -> None:
d = Document()
dm = bdm.DocumentModelManager(d)
assert len(dm) == 0
m = Div()
dm[m.id] = m
assert len(dm) == 1
assert dm[m.id] is m
with pytest.raises(KeyError):
dm["junk"]
def test_contains(self) -> None:
d = Document()
dm = bdm.DocumentModelManager(d)
assert len(dm) == 0
m = Div()
dm[m.id] = m
assert m.id in dm
assert "junk" not in dm
def test_iter(self) -> None:
d = Document()
dm = bdm.DocumentModelManager(d)
assert len(dm) == 0
m1 = Div()
m2 = Div()
m3 = Div()
dm["m1"] = m1
dm["m2"] = m2
dm["m3"] = m3
result = set()
for m in dm:
result.add(m)
assert result == {m1, m2, m3}
def test_destroy(self) -> None:
d = Document()
dm = d.models
assert len(dm) == 0
m1 = Div()
m2 = Div()
m3 = Div()
d.add_root(m1)
d.add_root(m2)
d.add_root(m3)
for m in [m1, m2, m3]:
assert m._document is d
assert dm.destroy() is None
assert not hasattr(dm, "_models")
assert not hasattr(dm, "_models_by_name")
for m in [m1, m2, m3]:
assert m._document is None
@patch("bokeh.document.models.DocumentModelManager.recompute")
def test_freeze(self, mock_recompute: MagicMock) -> None:
d = Document()
dm = bdm.DocumentModelManager(d)
assert dm._freeze_count == 0
with dm.freeze():
assert dm._freeze_count == 1
assert not mock_recompute.called
with dm.freeze():
assert dm._freeze_count == 2
assert not mock_recompute.called
assert dm._freeze_count == 1
assert not mock_recompute.called
assert dm._freeze_count == 0
assert mock_recompute.called # called here
def test_get_all_by_name(self) -> None:
d = Document()
dm = d.models
assert len(dm) == 0
m1 = Div(name="foo")
m2 = Div(name="foo")
m3 = Div(name="bar")
d.add_root(m1)
d.add_root(m2)
d.add_root(m3)
assert set(dm.get_all_by_name("foo")) == {m1, m2}
assert set(dm.get_all_by_name("bar")) == {m3}
assert set(dm.get_all_by_name("baz")) == set()
def test_get_all_by_id(self) -> None:
d = Document()
dm = d.models
assert len(dm) == 0
m1 = Div()
m2 = Div()
d.add_root(m1)
d.add_root(m2)
assert dm.get_by_id(m1.id) is m1
assert dm.get_by_id(m2.id) is m2
assert dm.get_by_id(ID("junk")) is None
def test_get_one_by_name(self) -> None:
d = Document()
dm = d.models
assert len(dm) == 0
m1 = Div(name="foo")
m2 = Div(name="foo")
m3 = Div(name="bar")
d.add_root(m1)
d.add_root(m2)
d.add_root(m3)
with pytest.raises(ValueError):
dm.get_one_by_name("foo")
assert dm.get_one_by_name("bar") is m3
assert dm.get_one_by_name("baz") is None
@patch("bokeh.document.models.DocumentModelManager.recompute")
def test_invalidate(self, mock_recompute: MagicMock) -> None:
d = Document()
dm = bdm.DocumentModelManager(d)
with dm.freeze():
dm.invalidate()
assert not mock_recompute.called
assert mock_recompute.call_count == 1
dm.invalidate()
assert mock_recompute.call_count == 2
# This is an indeirect test that documents are attached/detached
def test_recompute(self) -> None:
d = Document()
dm = d.models
assert len(dm) == 0
r1 = Row(children=[Div(name="dr1")])
r2 = Row(children=[Div(name="dr2"), Div(name="dr2")])
d.add_root(r1)
d.add_root(r2)
assert set(dm._models_by_name._dict) == {"dr1", "dr2"}
for m in dm:
assert m._document is d
d.remove_root(r1)
for m in dm:
assert m._document is d
assert r1._document is None
assert r1.children[0]._document is None
assert set(dm._models_by_name._dict) == {"dr2"}
def test_seen(self) -> None:
d = Document()
dm = d.models
assert len(dm) == 0
m1 = Div()
m2 = Div()
d.add_root(m1)
d.add_root(m2)
assert not dm.seen(m1.id)
assert not dm.seen(m2.id)
d.remove_root(m2)
assert not dm.seen(m1.id)
assert dm.seen(m2.id)
d.remove_root(m1)
assert dm.seen(m1.id)
assert dm.seen(m2.id)
def test_update_name(self) -> None:
d = Document()
dm = d.models
assert len(dm) == 0
m1 = Div(name="foo")
m2 = Div()
m3 = Div(name="bar")
d.add_root(m1)
d.add_root(m2)
d.add_root(m3)
assert set(dm.get_all_by_name("foo")) == {m1}
assert set(dm.get_all_by_name("bar")) == {m3}
dm.update_name(m1, "foo", "baz")
assert set(dm.get_all_by_name("baz")) == {m1}
assert set(dm.get_all_by_name("foo")) == set()
assert set(dm.get_all_by_name("bar")) == {m3}
dm.update_name(m2, None, "baz")
dm.update_name(m3, "bar", "baz")
assert set(dm.get_all_by_name("baz")) == {m1, m2, m3}
assert set(dm.get_all_by_name("foo")) == set()
assert set(dm.get_all_by_name("bar")) == set()
def test_flush_synced(self) -> None:
class SomeModel(Model, Local):
child = Nullable(Instance(Model), default=None)
child0 = SomeModel()
child1 = SomeModel()
child2 = SomeModel()
d = Document()
d.add_root(child0)
d.add_root(child1)
assert d.models._new_models == {child0, child1}
assert d.models.synced_references == set()
d.models.flush_synced()
assert d.models._new_models == set()
assert d.models.synced_references == {child0, child1}
d.add_root(child2)
assert d.models._new_models == {child2}
assert d.models.synced_references == {child0, child1}
child2.child = child0
assert d.models._new_models == {child2}
assert d.models.synced_references == {child0, child1}
d.models.flush_synced()
assert d.models._new_models == set()
assert d.models.synced_references == {child0, child1, child2}
def test_flush_synced_with_fn(self) -> None:
class SomeModel(Model, Local):
child = Nullable(Instance(Model), default=None)
child0 = SomeModel()
child1 = SomeModel()
child2 = SomeModel(child=child0)
d = Document()
d.add_root(child0)
d.add_root(child1)
d.add_root(child2)
assert d.models._new_models == {child0, child1, child2}
assert d.models.synced_references == set()
d.models.flush_synced(lambda model: model.child is not None)
assert d.models._new_models == {child2}
assert d.models.synced_references == {child0, child1}
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| TestDocumentModelManager |
python | scrapy__scrapy | tests/test_webclient.py | {
"start": 6932,
"end": 12387
} | class ____:
def _listen(self, site):
from twisted.internet import reactor
return reactor.listenTCP(0, site, interface="127.0.0.1")
@pytest.fixture
def wrapper(self, tmp_path):
(tmp_path / "file").write_bytes(b"0123456789")
r = static.File(str(tmp_path))
r.putChild(b"redirect", util.Redirect(b"/file"))
r.putChild(b"wait", ForeverTakingResource())
r.putChild(b"error", ErrorResource())
r.putChild(b"nolength", NoLengthResource())
r.putChild(b"host", HostHeaderResource())
r.putChild(b"payload", PayloadResource())
r.putChild(b"broken", BrokenDownloadResource())
r.putChild(b"encoding", EncodingResource())
site = server.Site(r, timeout=None)
return WrappingFactory(site)
@async_yield_fixture
async def server_port(self, wrapper):
port = self._listen(wrapper)
yield port.getHost().port
await port.stopListening()
@pytest.fixture
def server_url(self, server_port):
return f"http://127.0.0.1:{server_port}/"
@inlineCallbacks
def testPayload(self, server_url):
s = "0123456789" * 10
body = yield getPage(server_url + "payload", body=s)
assert body == to_bytes(s)
@inlineCallbacks
def testHostHeader(self, server_port, server_url):
# if we pass Host header explicitly, it should be used, otherwise
# it should extract from url
body = yield getPage(server_url + "host")
assert body == to_bytes(f"127.0.0.1:{server_port}")
body = yield getPage(server_url + "host", headers={"Host": "www.example.com"})
assert body == to_bytes("www.example.com")
@inlineCallbacks
def test_getPage(self, server_url):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the body of the response if the default method B{GET} is used.
"""
body = yield getPage(server_url + "file")
assert body == b"0123456789"
@inlineCallbacks
def test_getPageHead(self, server_url):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the empty string if the method is C{HEAD} and there is a successful
response code.
"""
def _getPage(method):
return getPage(server_url + "file", method=method)
body = yield _getPage("head")
assert body == b""
body = yield _getPage("HEAD")
assert body == b""
@inlineCallbacks
def test_timeoutNotTriggering(self, server_port, server_url):
"""
When a non-zero timeout is passed to L{getPage} and the page is
retrieved before the timeout period elapses, the L{Deferred} is
called back with the contents of the page.
"""
body = yield getPage(server_url + "host", timeout=100)
assert body == to_bytes(f"127.0.0.1:{server_port}")
@inlineCallbacks
def test_timeoutTriggering(self, wrapper, server_url):
"""
When a non-zero timeout is passed to L{getPage} and that many
seconds elapse before the server responds to the request. the
L{Deferred} is errbacked with a L{error.TimeoutError}.
"""
with pytest.raises(defer.TimeoutError):
yield getPage(server_url + "wait", timeout=0.000001)
# Clean up the server which is hanging around not doing
# anything.
connected = list(wrapper.protocols.keys())
# There might be nothing here if the server managed to already see
# that the connection was lost.
if connected:
connected[0].transport.loseConnection()
@inlineCallbacks
def testNotFound(self, server_url):
body = yield getPage(server_url + "notsuchfile")
assert b"404 - No Such Resource" in body
@inlineCallbacks
def testFactoryInfo(self, server_url):
from twisted.internet import reactor
url = server_url + "file"
parsed = urlparse(url)
factory = client.ScrapyHTTPClientFactory(Request(url))
reactor.connectTCP(parsed.hostname, parsed.port, factory)
yield factory.deferred
assert factory.status == b"200"
assert factory.version.startswith(b"HTTP/")
assert factory.message == b"OK"
assert factory.response_headers[b"content-length"] == b"10"
@inlineCallbacks
def testRedirect(self, server_url):
body = yield getPage(server_url + "redirect")
assert (
body
== b'\n<html>\n <head>\n <meta http-equiv="refresh" content="0;URL=/file">\n'
b' </head>\n <body bgcolor="#FFFFFF" text="#000000">\n '
b'<a href="/file">click here</a>\n </body>\n</html>\n'
)
@inlineCallbacks
def test_encoding(self, server_url):
"""Test that non-standart body encoding matches
Content-Encoding header"""
original_body = b"\xd0\x81\xd1\x8e\xd0\xaf"
response = yield getPage(
server_url + "encoding", body=original_body, response_transform=lambda r: r
)
content_encoding = to_unicode(response.headers[b"Content-Encoding"])
assert content_encoding == EncodingResource.out_encoding
assert response.body.decode(content_encoding) == to_unicode(original_body)
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
| TestWebClient |
python | pola-rs__polars | py-polars/src/polars/series/categorical.py | {
"start": 306,
"end": 6757
} | class ____:
"""Namespace for categorical related series."""
_accessor = "cat"
def __init__(self, series: Series) -> None:
self._s: PySeries = series._s
def get_categories(self) -> Series:
"""
Get the categories stored in this data type.
Examples
--------
>>> s = pl.Series(["foo", "bar", "foo", "foo", "ham"], dtype=pl.Categorical)
>>> s.cat.get_categories() # doctest: +SKIP
shape: (3,)
Series: '' [str]
[
"foo"
"bar"
"ham"
]
"""
@deprecated(
"`cat.is_local()` is deprecated; Categoricals no longer have a local scope. "
"This method will be removed in Polars 2.0."
)
def is_local(self) -> bool:
"""
Return whether or not the column is a local categorical.
Always returns false.
"""
return self._s.cat_is_local()
def to_local(self) -> Series:
"""Simply returns the column as-is, local representations are deprecated."""
return wrap_s(self._s.cat_to_local())
@deprecated(
"`cat.uses_lexical_ordering()` is deprecated; Categoricals are now always ordered lexically. "
"This method will be removed in Polars 2.0."
)
def uses_lexical_ordering(self) -> bool:
"""
Indicate whether the Series uses lexical ordering.
Always returns true.
Examples
--------
>>> s = pl.Series(["b", "a", "b"]).cast(pl.Categorical)
>>> s.cat.uses_lexical_ordering()
True
"""
return self._s.cat_uses_lexical_ordering()
def len_bytes(self) -> Series:
"""
Return the byte-length of the string representation of each value.
Returns
-------
Series
Series of data type :class:`UInt32`.
See Also
--------
len_chars
Notes
-----
When working with non-ASCII text, the length in bytes is not the same as the
length in characters. You may want to use :func:`len_chars` instead.
Note that :func:`len_bytes` is much more performant (_O(1)_) than
:func:`len_chars` (_O(n)_).
Examples
--------
>>> s = pl.Series(["Café", "345", "東京", None], dtype=pl.Categorical)
>>> s.cat.len_bytes()
shape: (4,)
Series: '' [u32]
[
5
3
6
null
]
"""
def len_chars(self) -> Series:
"""
Return the number of characters of the string representation of each value.
Returns
-------
Series
Series of data type :class:`UInt32`.
See Also
--------
len_bytes
Notes
-----
When working with ASCII text, use :func:`len_bytes` instead to achieve
equivalent output with much better performance:
:func:`len_bytes` runs in _O(1)_, while :func:`len_chars` runs in (_O(n)_).
A character is defined as a `Unicode scalar value`_. A single character is
represented by a single byte when working with ASCII text, and a maximum of
4 bytes otherwise.
.. _Unicode scalar value: https://www.unicode.org/glossary/#unicode_scalar_value
Examples
--------
>>> s = pl.Series(["Café", "345", "東京", None], dtype=pl.Categorical)
>>> s.cat.len_chars()
shape: (4,)
Series: '' [u32]
[
4
3
2
null
]
"""
def starts_with(self, prefix: str) -> Series:
"""
Check if string representations of values start with a substring.
Parameters
----------
prefix
Prefix substring.
See Also
--------
contains : Check if the string repr contains a substring that matches a pattern.
ends_with : Check if string repr ends with a substring.
Examples
--------
>>> s = pl.Series("fruits", ["apple", "mango", None], dtype=pl.Categorical)
>>> s.cat.starts_with("app")
shape: (3,)
Series: 'fruits' [bool]
[
true
false
null
]
"""
def ends_with(self, suffix: str) -> Series:
"""
Check if string representations of values end with a substring.
Parameters
----------
suffix
Suffix substring.
See Also
--------
contains : Check if the string repr contains a substring that matches a pattern.
starts_with : Check if string repr starts with a substring.
Examples
--------
>>> s = pl.Series("fruits", ["apple", "mango", None], dtype=pl.Categorical)
>>> s.cat.ends_with("go")
shape: (3,)
Series: 'fruits' [bool]
[
false
true
null
]
"""
def slice(self, offset: int, length: int | None = None) -> Series:
"""
Extract a substring from the string representation of each string value.
Parameters
----------
offset
Start index. Negative indexing is supported.
length
Length of the slice. If set to `None` (default), the slice is taken to the
end of the string.
Returns
-------
Series
Series of data type :class:`String`.
Notes
-----
Both the `offset` and `length` inputs are defined in terms of the number
of characters in the (UTF8) string. A character is defined as a
`Unicode scalar value`_. A single character is represented by a single byte
when working with ASCII text, and a maximum of 4 bytes otherwise.
.. _Unicode scalar value: https://www.unicode.org/glossary/#unicode_scalar_value
Examples
--------
>>> s = pl.Series(["pear", None, "papaya", "dragonfruit"], dtype=pl.Categorical)
>>> s.cat.slice(-3)
shape: (4,)
Series: '' [str]
[
"ear"
null
"aya"
"uit"
]
Using the optional `length` parameter
>>> s.cat.slice(4, length=3)
shape: (4,)
Series: '' [str]
[
""
null
"ya"
"onf"
]
"""
| CatNameSpace |
python | numba__llvmlite | llvmlite/ir/instructions.py | {
"start": 22437,
"end": 23406
} | class ____(Instruction):
def __init__(self, parent, vector, value, index, name=''):
if not isinstance(vector.type, types.VectorType):
raise TypeError("vector needs to be of VectorType.")
if not value.type == vector.type.element:
raise TypeError(
"value needs to be of type {} not {}.".format(
vector.type.element, value.type))
if not isinstance(index.type, types.IntType):
raise TypeError("index needs to be of IntType.")
typ = vector.type
super(InsertElement, self).__init__(parent, typ, "insertelement",
[vector, value, index], name=name)
def descr(self, buf):
operands = ", ".join("{0} {1}".format(
op.type, op.get_reference()) for op in self.operands)
buf.append("{opname} {operands}\n".format(
opname=self.opname, operands=operands))
| InsertElement |
python | pydantic__pydantic | tests/test_json_schema.py | {
"start": 92740,
"end": 92845
} | class ____(BaseModel):
class NestedModel(BaseModel):
a: float
nested: NestedModel
| ModelOne |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-docling/llama_index/readers/docling/base.py | {
"start": 448,
"end": 3271
} | class ____(BasePydanticReader):
"""
Docling Reader.
Extracts PDF, DOCX, and other document formats into LlamaIndex Documents as either Markdown or JSON-serialized Docling native format.
Args:
export_type (Literal["markdown", "json"], optional): The type to export to. Defaults to "markdown".
doc_converter (DocumentConverter, optional): The Docling converter to use. Default factory: `DocumentConverter`.
md_export_kwargs (Dict[str, Any], optional): Kwargs to use in case of markdown export. Defaults to `{"image_placeholder": ""}`.
id_func: (DocIDGenCallable, optional): Doc ID generation function to use. Default: `_uuid4_doc_id_gen`
"""
class ExportType(str, Enum):
MARKDOWN = "markdown"
JSON = "json"
@runtime_checkable
class DocIDGenCallable(Protocol):
def __call__(self, doc: DLDocument, file_path: str | Path) -> str: ...
@staticmethod
def _uuid4_doc_id_gen(doc: DLDocument, file_path: str | Path) -> str:
return str(uuid.uuid4())
export_type: ExportType = ExportType.MARKDOWN
doc_converter: DocumentConverter = Field(default_factory=DocumentConverter)
md_export_kwargs: Dict[str, Any] = {"image_placeholder": ""}
id_func: DocIDGenCallable = _uuid4_doc_id_gen
def lazy_load_data(
self,
file_path: str | Path | Iterable[str] | Iterable[Path],
extra_info: dict | None = None,
fs: Optional[AbstractFileSystem] = None,
) -> Iterable[LIDocument]:
"""
Lazily load from given source.
Args:
file_path (str | Path | Iterable[str] | Iterable[Path]): Document file source as single str (URL or local file) or pathlib.Path — or iterable thereof
extra_info (dict | None, optional): Any pre-existing metadata to include. Defaults to None.
Returns:
Iterable[LIDocument]: Iterable over the created LlamaIndex documents.
"""
file_paths = (
file_path
if isinstance(file_path, Iterable) and not isinstance(file_path, str)
else [file_path]
)
for source in file_paths:
dl_doc = self.doc_converter.convert(source).document
text: str
if self.export_type == self.ExportType.MARKDOWN:
text = dl_doc.export_to_markdown(**self.md_export_kwargs)
elif self.export_type == self.ExportType.JSON:
text = json.dumps(dl_doc.export_to_dict())
else:
raise ValueError(f"Unexpected export type: {self.export_type}")
li_doc = LIDocument(
doc_id=self.id_func(doc=dl_doc, file_path=source),
text=text,
)
li_doc.metadata = extra_info or {}
yield li_doc
| DoclingReader |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/enum13.py | {
"start": 161,
"end": 340
} | class ____(IntEnum):
MEMBER_1 = 1
MEMBER_2 = 2
i1: Literal[1] = IntEnum1.MEMBER_1.value
# This should generate an error.
i2: Literal[1] = IntEnum1.MEMBER_2.value
| IntEnum1 |
python | run-llama__llama_index | llama-index-core/llama_index/core/objects/base.py | {
"start": 733,
"end": 2737
} | class ____(Generic[OT]):
"""Object retriever."""
def __init__(
self,
retriever: BaseRetriever,
object_node_mapping: BaseObjectNodeMapping[OT],
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
):
self._retriever = retriever
self._object_node_mapping = object_node_mapping
self._node_postprocessors = node_postprocessors or []
@property
def retriever(self) -> BaseRetriever:
"""Retriever."""
return self._retriever
@property
def object_node_mapping(self) -> BaseObjectNodeMapping[OT]:
"""Object node mapping."""
return self._object_node_mapping
@property
def node_postprocessors(self) -> List[BaseNodePostprocessor]:
"""Node postprocessors."""
return self._node_postprocessors
def retrieve(self, str_or_query_bundle: QueryType) -> List[OT]:
if isinstance(str_or_query_bundle, str):
query_bundle = QueryBundle(query_str=str_or_query_bundle)
else:
query_bundle = str_or_query_bundle
nodes = self._retriever.retrieve(query_bundle)
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return [self._object_node_mapping.from_node(node.node) for node in nodes]
async def aretrieve(self, str_or_query_bundle: QueryType) -> List[OT]:
if isinstance(str_or_query_bundle, str):
query_bundle = QueryBundle(query_str=str_or_query_bundle)
else:
query_bundle = str_or_query_bundle
nodes = await self._retriever.aretrieve(query_bundle)
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return [self._object_node_mapping.from_node(node.node) for node in nodes]
| ObjectRetriever |
python | realpython__materials | python-mixins/mixins.py | {
"start": 38,
"end": 255
} | class ____:
def serialize(self) -> dict:
if hasattr(self, "__slots__"):
return {name: getattr(self, name) for name in self.__slots__}
else:
return vars(self)
| SerializableMixin |
python | openai__gym | gym/envs/mujoco/inverted_pendulum.py | {
"start": 111,
"end": 1596
} | class ____(MuJocoPyEnv, utils.EzPickle):
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
],
"render_fps": 25,
}
def __init__(self, **kwargs):
utils.EzPickle.__init__(self, **kwargs)
observation_space = Box(low=-np.inf, high=np.inf, shape=(4,), dtype=np.float64)
MuJocoPyEnv.__init__(
self,
"inverted_pendulum.xml",
2,
observation_space=observation_space,
**kwargs
)
def step(self, a):
reward = 1.0
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
terminated = bool(not np.isfinite(ob).all() or (np.abs(ob[1]) > 0.2))
if self.render_mode == "human":
self.render()
return ob, reward, terminated, False, {}
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(
size=self.model.nq, low=-0.01, high=0.01
)
qvel = self.init_qvel + self.np_random.uniform(
size=self.model.nv, low=-0.01, high=0.01
)
self.set_state(qpos, qvel)
return self._get_obs()
def _get_obs(self):
return np.concatenate([self.sim.data.qpos, self.sim.data.qvel]).ravel()
def viewer_setup(self):
assert self.viewer is not None
self.viewer.cam.trackbodyid = 0
self.viewer.cam.distance = self.model.stat.extent
| InvertedPendulumEnv |
python | Lightning-AI__lightning | src/lightning/pytorch/loops/training_epoch_loop.py | {
"start": 2122,
"end": 2272
} | class ____:
NONE = "none"
RESTARTED_ON_TRAIN_BATCH_END = "restarted_on_train_batch_end"
RESTARTED_ON_LAST = "restarted_on_last"
| RestartStage |
python | dask__dask | dask/array/ufunc.py | {
"start": 2120,
"end": 10043
} | class ____:
_forward_attrs = {
"nin",
"nargs",
"nout",
"ntypes",
"identity",
"signature",
"types",
}
def __init__(self, ufunc):
if not isinstance(ufunc, (np.ufunc, da_frompyfunc)):
raise TypeError(
"must be an instance of `ufunc` or "
f"`da_frompyfunc`, got `{type(ufunc).__name__}"
)
self._ufunc = ufunc
self.__name__ = ufunc.__name__
if isinstance(ufunc, np.ufunc):
derived_from(np)(self)
def __dask_tokenize__(self):
return self.__name__, normalize_token(self._ufunc)
def __getattr__(self, key):
if key in self._forward_attrs:
return getattr(self._ufunc, key)
raise AttributeError(f"{type(self).__name__!r} object has no attribute {key!r}")
def __dir__(self):
return list(self._forward_attrs.union(dir(type(self)), self.__dict__))
def __repr__(self):
return repr(self._ufunc)
def __call__(self, *args, **kwargs):
dsks = [arg for arg in args if hasattr(arg, "_elemwise")]
if len(dsks) > 0:
for dsk in dsks:
result = dsk._elemwise(self._ufunc, *args, **kwargs)
if type(result) != type(NotImplemented):
return result
raise TypeError(
f"Parameters of such types are not supported by {self.__name__}"
)
else:
return self._ufunc(*args, **kwargs)
@derived_from(np.ufunc)
def outer(self, A, B, **kwargs):
if self.nin != 2:
raise ValueError("outer product only supported for binary functions")
if "out" in kwargs:
raise ValueError("`out` kwarg not supported")
A_is_dask = is_dask_collection(A)
B_is_dask = is_dask_collection(B)
if not A_is_dask and not B_is_dask:
return self._ufunc.outer(A, B, **kwargs)
elif (
A_is_dask
and not isinstance(A, Array)
or B_is_dask
and not isinstance(B, Array)
):
raise NotImplementedError(
"Dask objects besides `dask.array.Array` "
"are not supported at this time."
)
A = asarray(A)
B = asarray(B)
ndim = A.ndim + B.ndim
out_inds = tuple(range(ndim))
A_inds = out_inds[: A.ndim]
B_inds = out_inds[A.ndim :]
dtype = apply_infer_dtype(
self._ufunc.outer, [A, B], kwargs, "ufunc.outer", suggest_dtype=False
)
if "dtype" in kwargs:
func = partial(self._ufunc.outer, dtype=kwargs.pop("dtype"))
else:
func = self._ufunc.outer
return blockwise(
func,
out_inds,
A,
A_inds,
B,
B_inds,
dtype=dtype,
token=f"{self.__name__}.outer",
**kwargs,
)
# ufuncs, copied from this page:
# https://docs.scipy.org/doc/numpy/reference/ufuncs.html
# math operations
add = ufunc(np.add)
subtract = ufunc(np.subtract)
multiply = ufunc(np.multiply)
divide = ufunc(np.divide)
logaddexp = ufunc(np.logaddexp)
logaddexp2 = ufunc(np.logaddexp2)
true_divide = ufunc(np.true_divide)
floor_divide = ufunc(np.floor_divide)
negative = ufunc(np.negative)
positive = ufunc(np.positive)
power = ufunc(np.power)
float_power = ufunc(np.float_power)
remainder = ufunc(np.remainder)
mod = ufunc(np.mod)
# fmod: see below
conj = conjugate = ufunc(np.conjugate)
exp = ufunc(np.exp)
exp2 = ufunc(np.exp2)
log = ufunc(np.log)
log2 = ufunc(np.log2)
log10 = ufunc(np.log10)
log1p = ufunc(np.log1p)
expm1 = ufunc(np.expm1)
sqrt = ufunc(np.sqrt)
square = ufunc(np.square)
cbrt = ufunc(np.cbrt)
reciprocal = ufunc(np.reciprocal)
# trigonometric functions
sin = ufunc(np.sin)
cos = ufunc(np.cos)
tan = ufunc(np.tan)
arcsin = ufunc(np.arcsin)
arccos = ufunc(np.arccos)
arctan = ufunc(np.arctan)
arctan2 = ufunc(np.arctan2)
hypot = ufunc(np.hypot)
sinh = ufunc(np.sinh)
cosh = ufunc(np.cosh)
tanh = ufunc(np.tanh)
arcsinh = ufunc(np.arcsinh)
arccosh = ufunc(np.arccosh)
arctanh = ufunc(np.arctanh)
deg2rad = ufunc(np.deg2rad)
rad2deg = ufunc(np.rad2deg)
# comparison functions
greater = ufunc(np.greater)
greater_equal = ufunc(np.greater_equal)
less = ufunc(np.less)
less_equal = ufunc(np.less_equal)
not_equal = ufunc(np.not_equal)
equal = ufunc(np.equal)
isneginf = partial(equal, -np.inf)
isposinf = partial(equal, np.inf)
logical_and = ufunc(np.logical_and)
logical_or = ufunc(np.logical_or)
logical_xor = ufunc(np.logical_xor)
logical_not = ufunc(np.logical_not)
maximum = ufunc(np.maximum)
minimum = ufunc(np.minimum)
fmax = ufunc(np.fmax)
fmin = ufunc(np.fmin)
# bitwise functions
bitwise_and = ufunc(np.bitwise_and)
bitwise_or = ufunc(np.bitwise_or)
bitwise_xor = ufunc(np.bitwise_xor)
bitwise_not = ufunc(np.bitwise_not)
invert = bitwise_not
left_shift = ufunc(np.left_shift)
right_shift = ufunc(np.right_shift)
# floating functions
isfinite = ufunc(np.isfinite)
isinf = ufunc(np.isinf)
isnan = ufunc(np.isnan)
signbit = ufunc(np.signbit)
copysign = ufunc(np.copysign)
nextafter = ufunc(np.nextafter)
spacing = ufunc(np.spacing)
# modf: see below
ldexp = ufunc(np.ldexp)
# frexp: see below
fmod = ufunc(np.fmod)
floor = ufunc(np.floor)
ceil = ufunc(np.ceil)
trunc = ufunc(np.trunc)
# more math routines, from this page:
# https://docs.scipy.org/doc/numpy/reference/routines.math.html
degrees = ufunc(np.degrees)
radians = ufunc(np.radians)
rint = ufunc(np.rint)
fabs = ufunc(np.fabs)
sign = ufunc(np.sign)
absolute = ufunc(np.absolute)
abs = absolute
# non-ufunc elementwise functions
clip = wrap_elemwise(np.clip)
isreal = wrap_elemwise(np.isreal)
iscomplex = wrap_elemwise(np.iscomplex)
real = wrap_elemwise(np.real)
imag = wrap_elemwise(np.imag)
fix = wrap_elemwise(np.fix)
i0 = wrap_elemwise(np.i0)
sinc = wrap_elemwise(np.sinc)
nan_to_num = wrap_elemwise(np.nan_to_num)
@derived_from(np)
def angle(x, deg=0):
deg = bool(deg)
if hasattr(x, "_elemwise"):
return x._elemwise(np.angle, x, deg)
return np.angle(x, deg=deg)
@derived_from(np)
def frexp(x):
# Not actually object dtype, just need to specify something
tmp = elemwise(np.frexp, x, dtype=object)
left = f"mantissa-{tmp.name}"
right = f"exponent-{tmp.name}"
ldsk = {
(left,) + key[1:]: (getitem, key, 0)
for key in core.flatten(tmp.__dask_keys__())
}
rdsk = {
(right,) + key[1:]: (getitem, key, 1)
for key in core.flatten(tmp.__dask_keys__())
}
a = np.empty_like(getattr(x, "_meta", x), shape=(1,) * x.ndim, dtype=x.dtype)
l, r = np.frexp(a)
graph = HighLevelGraph.from_collections(left, ldsk, dependencies=[tmp])
L = Array(graph, left, chunks=tmp.chunks, meta=l)
graph = HighLevelGraph.from_collections(right, rdsk, dependencies=[tmp])
R = Array(graph, right, chunks=tmp.chunks, meta=r)
return L, R
@derived_from(np)
def modf(x):
# Not actually object dtype, just need to specify something
tmp = elemwise(np.modf, x, dtype=object)
left = f"modf1-{tmp.name}"
right = f"modf2-{tmp.name}"
ldsk = {
(left,) + key[1:]: (getitem, key, 0)
for key in core.flatten(tmp.__dask_keys__())
}
rdsk = {
(right,) + key[1:]: (getitem, key, 1)
for key in core.flatten(tmp.__dask_keys__())
}
a = np.ones_like(getattr(x, "_meta", x), shape=(1,) * x.ndim, dtype=x.dtype)
l, r = np.modf(a)
graph = HighLevelGraph.from_collections(left, ldsk, dependencies=[tmp])
L = Array(graph, left, chunks=tmp.chunks, meta=l)
graph = HighLevelGraph.from_collections(right, rdsk, dependencies=[tmp])
R = Array(graph, right, chunks=tmp.chunks, meta=r)
return L, R
@derived_from(np)
def divmod(x, y):
res1 = x // y
res2 = x % y
return res1, res2
| ufunc |
python | protocolbuffers__protobuf | python/google/protobuf/internal/numpy/numpy_test.py | {
"start": 6577,
"end": 7718
} | class ____(unittest.TestCase):
def testNumpyIntScalarIndexing_Passes(self):
data = unittest_pb2.TestAllTypes(repeated_int64=[0, 1, 2])
self.assertEqual(0, data.repeated_int64[np.int64(0)])
def testNumpyNegative1IntScalarIndexing_Passes(self):
data = unittest_pb2.TestAllTypes(repeated_int64=[0, 1, 2])
self.assertEqual(2, data.repeated_int64[np.int64(-1)])
def testNumpyFloatScalarIndexing_Fails(self):
data = unittest_pb2.TestAllTypes(repeated_int64=[0, 1, 2])
with self.assertRaises(TypeError):
_ = data.repeated_int64[np.float64(0.0)]
def testNumpyIntArrayIndexing_Fails(self):
data = unittest_pb2.TestAllTypes(repeated_int64=[0, 1, 2])
with self.assertRaises(TypeError):
_ = data.repeated_int64[np.array([0])]
with self.assertRaises(TypeError):
_ = data.repeated_int64[np.ndarray((1,), buffer=np.array([0]), dtype=int)]
with self.assertRaises(TypeError):
_ = data.repeated_int64[np.ndarray((1, 1),
buffer=np.array([0]),
dtype=int)]
@testing_refleaks.TestCase
| NumpyProtoIndexingTest |
python | networkx__networkx | networkx/algorithms/centrality/tests/test_voterank.py | {
"start": 58,
"end": 1687
} | class ____:
# Example Graph present in reference paper
def test_voterank_centrality_1(self):
G = nx.Graph()
G.add_edges_from(
[
(7, 8),
(7, 5),
(7, 9),
(5, 0),
(0, 1),
(0, 2),
(0, 3),
(0, 4),
(1, 6),
(2, 6),
(3, 6),
(4, 6),
]
)
assert [0, 7, 6] == nx.voterank(G)
def test_voterank_emptygraph(self):
G = nx.Graph()
assert [] == nx.voterank(G)
# Graph unit test
def test_voterank_centrality_2(self):
G = nx.florentine_families_graph()
d = nx.voterank(G, 4)
exact = ["Medici", "Strozzi", "Guadagni", "Castellani"]
assert exact == d
# DiGraph unit test
def test_voterank_centrality_3(self):
G = nx.gnc_graph(10, seed=7)
d = nx.voterank(G, 4)
exact = [3, 6, 8]
assert exact == d
# MultiGraph unit test
def test_voterank_centrality_4(self):
G = nx.MultiGraph()
G.add_edges_from(
[(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)]
)
exact = [2, 1, 5, 4]
assert exact == nx.voterank(G)
# MultiDiGraph unit test
def test_voterank_centrality_5(self):
G = nx.MultiDiGraph()
G.add_edges_from(
[(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)]
)
exact = [2, 0, 5, 4]
assert exact == nx.voterank(G)
| TestVoteRankCentrality |
python | pytorch__pytorch | torch/_inductor/codegen/cpp_template_kernel.py | {
"start": 23741,
"end": 26154
} | class ____(ir.ChoiceCaller):
"""
CppTemplateCaller
This class represents a caller for CPP template kernels. It is a subclass of ir.ChoiceCaller.
Attributes:
name (str): The name of the caller.
category (str): The category of the caller.
bmreq (CppBenchmarkRequest): The benchmark request for the caller.
template_buffer (ir.CppTemplateBuffer): The template buffer for the caller.
"""
def __init__(
self,
name: str,
category: str,
input_nodes: list[ir.Buffer],
layout: ir.Layout,
make_kernel_render: Callable[
[
ir.CppTemplateBuffer,
bool,
Optional[list[ir.IRNode]],
],
str,
],
bmreq: CppBenchmarkRequest,
template: "CppTemplate", # type: ignore[name-defined] # noqa: F821
info_kwargs: Optional[
dict[str, Union[ir.PrimitiveInfoType, list[ir.PrimitiveInfoType]]]
] = None,
):
super().__init__(name, input_nodes, layout, description="")
self.category = category
self.make_kernel_render = make_kernel_render
self.bmreq = bmreq
self.template = template
self.info_kwargs = info_kwargs
def precompile(self) -> None:
assert self.bmreq is not None
self.bmreq.precompile()
def benchmark(self, *args, out) -> float:
assert self.bmreq is not None
if config.profile_bandwidth_with_do_bench_using_profiling:
algo = self.bmreq.make_run_fn(*args, out=out)
return do_bench_using_profiling(algo)
return self.bmreq.benchmark(*args, out=out)
def hash_key(self) -> str:
return "-".join(
[
self.category,
self.bmreq.hash_key,
]
)
def info_dict(
self,
) -> dict[str, Union[ir.PrimitiveInfoType, list[ir.PrimitiveInfoType]]]:
return {"backend": "CPP", "op_type": "unknown"}
def output_node(self) -> Union[ir.TensorBox, ir.ShapeAsConstantBuffer]:
return ir.TensorBox.create(
ir.CppTemplateBuffer(
layout=self.layout,
inputs=self.input_nodes,
make_kernel_render=self.make_kernel_render,
template=self.template,
choice=self,
)
)
| CppTemplateCaller |
python | doocs__leetcode | solution/2100-2199/2178.Maximum Split of Positive Even Integers/Solution.py | {
"start": 0,
"end": 301
} | class ____:
def maximumEvenSplit(self, finalSum: int) -> List[int]:
if finalSum & 1:
return []
ans = []
i = 2
while i <= finalSum:
finalSum -= i
ans.append(i)
i += 2
ans[-1] += finalSum
return ans
| Solution |
python | ray-project__ray | python/ray/serve/_private/benchmarks/locust_utils.py | {
"start": 378,
"end": 499
} | class ____:
p50_latency: float
p90_latency: float
p99_latency: float
rps: float
@dataclass
| PerformanceStats |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 685286,
"end": 685660
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("Label", graphql_name="node")
"""The item at the end of the edge."""
| LabelEdge |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_core/test_worker.py | {
"start": 47089,
"end": 47750
} | class ____:
"""Tests worker network policy."""
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"networkPolicies": {"enabled": True},
"executor": "CeleryExecutor",
"workers": {
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/workers/worker-networkpolicy.yaml"],
)
assert "test_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"
| TestWorkerNetworkPolicy |
python | protocolbuffers__protobuf | python/google/protobuf/internal/import_test.py | {
"start": 580,
"end": 2723
} | class ____(unittest.TestCase):
def testPackageInitializationImport(self):
"""Test that we can import nested import public messages."""
msg = outer_pb2.Outer()
self.assertEqual(58, msg.import_public_nested.value)
def testImportOptionKnown(self):
file_descriptor = unittest_import_option_pb2.DESCRIPTOR
message_descriptor = unittest_import_option_pb2.TestMessage.DESCRIPTOR
field_descriptor = message_descriptor.fields_by_name['field1']
self.assertEqual(
file_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.file_opt1
],
1,
)
self.assertEqual(
message_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.message_opt1
],
2,
)
self.assertEqual(
field_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.field_opt1
],
3,
)
def testImportOptionUnknown(self):
file_descriptor = unittest_import_option_pb2.DESCRIPTOR
message_descriptor = unittest_import_option_pb2.TestMessage.DESCRIPTOR
field_descriptor = message_descriptor.fields_by_name['field1']
# Options from import option that are not linked in should be in unknown
# fields.
unknown_fields_file = unknown_fields.UnknownFieldSet(
file_descriptor.GetOptions()
)
unknown_fields_message = unknown_fields.UnknownFieldSet(
message_descriptor.GetOptions()
)
unknown_fields_field = unknown_fields.UnknownFieldSet(
field_descriptor.GetOptions()
)
self.assertEqual(len(unknown_fields_file), 1)
self.assertEqual(unknown_fields_file[0].field_number, 7736975)
self.assertEqual(unknown_fields_file[0].data, 1)
self.assertEqual(len(unknown_fields_message), 1)
self.assertEqual(unknown_fields_message[0].field_number, 7739037)
self.assertEqual(unknown_fields_message[0].data, 2)
self.assertEqual(len(unknown_fields_field), 1)
self.assertEqual(unknown_fields_field[0].field_number, 7740937)
self.assertEqual(unknown_fields_field[0].data, 3)
if __name__ == '__main__':
unittest.main()
| ImportTest |
python | pytorch__pytorch | test/distributed/tensor/test_attention.py | {
"start": 2405,
"end": 2989
} | class ____(torch.nn.Module):
def __init__(self, compiled: bool, backend: SDPBackend) -> None:
super().__init__()
if compiled:
self.sdpa = torch.compile(
F.scaled_dot_product_attention,
fullgraph=True,
backend="aot_eager",
)
else:
self.sdpa = F.scaled_dot_product_attention
self.backend = backend
def forward(self, *args: object, **kwargs: object) -> torch.Tensor:
with sdpa_kernel(self.backend):
return self.sdpa(*args, **kwargs)
| SDPAWrapper |
python | PyCQA__pylint | pylint/checkers/symilar.py | {
"start": 11691,
"end": 11910
} | class ____(NamedTuple):
cmn_lines_nb: int
fst_lset: LineSet
fst_file_start: LineNumber
fst_file_end: LineNumber
snd_lset: LineSet
snd_file_start: LineNumber
snd_file_end: LineNumber
| Commonality |
python | gabrielfalcao__HTTPretty | tests/functional/testserver.py | {
"start": 2132,
"end": 3278
} | class ____(object):
is_running = False
def __init__(self, port):
self.port = int(port)
self.process = None
@classmethod
def get_handlers(cls):
return Application([
(r"/go-for-bubbles/?", BubblesHandler),
(r"/come-again/?", ComeHandler),
])
def start(self):
app = self.get_handlers()
data = {}
args = (app, self.port, data)
HTTPretty.disable()
self.process = Process(target=subprocess_server_tornado, args=args)
self.process.start()
time.sleep(1)
def stop(self):
try:
os.kill(self.process.pid, 9)
except OSError:
self.process.terminate()
finally:
self.is_running = False
def subprocess_server_tcp(port):
from httpretty import HTTPretty
HTTPretty.disable()
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', port))
s.listen(True)
conn, addr = s.accept()
while True:
data = conn.recv(1024)
conn.send(b"RECEIVED: " + bytes(data))
conn.close()
| TornadoServer |
python | pytorch__pytorch | test/distributed/test_local_tensor.py | {
"start": 630,
"end": 2308
} | class ____(TestCase):
def assertEqual(self, lhs, rhs, **kwargs):
mode = local_tensor_mode()
with nullcontext() if mode is None else mode.disable():
if isinstance(lhs, LocalTensor) and isinstance(rhs, LocalTensor):
assert isinstance(lhs, LocalTensor) and isinstance(rhs, LocalTensor)
super().assertEqual(lhs._ranks, rhs._ranks)
for r in lhs._ranks:
super().assertEqual(
lhs._local_tensors[r],
rhs._local_tensors[r],
lambda m: f"rank {r}: {m}",
)
elif isinstance(lhs, LocalTensor) or isinstance(rhs, LocalTensor):
lhs, rhs = (lhs, rhs) if isinstance(lhs, LocalTensor) else (rhs, lhs)
for r in lhs._ranks:
super().assertEqual(
lhs._local_tensors[r], rhs, lambda m: f"rank {r}: {m}"
)
else:
return super().assertEqual(lhs, rhs, **kwargs)
@property
def world_size(self):
raise NotImplementedError("override world-size in your subclass")
def build_device_mesh(self) -> DeviceMesh:
return init_device_mesh("cpu", (self.world_size,))
def setUp(self):
super().setUp()
torch.distributed.init_process_group(
# TODO: test other ranks too
"fake",
rank=0,
world_size=self.world_size,
)
def tearDown(self):
super().tearDown()
try:
dist.destroy_process_group()
except AssertionError:
pass
| LocalTensorTestBase |
python | doocs__leetcode | solution/2900-2999/2992.Number of Self-Divisible Permutations/Solution2.py | {
"start": 0,
"end": 369
} | class ____:
def selfDivisiblePermutationCount(self, n: int) -> int:
f = [0] * (1 << n)
f[0] = 1
for mask in range(1 << n):
i = mask.bit_count()
for j in range(1, n + 1):
if (mask >> (j - 1) & 1) == 1 and gcd(i, j) == 1:
f[mask] += f[mask ^ (1 << (j - 1))]
return f[-1]
| Solution |
python | hynek__structlog | src/structlog/stdlib.py | {
"start": 23620,
"end": 27463
} | class ____:
"""
Apply stdlib-like string formatting to the ``event`` key.
If the ``positional_args`` key in the event dict is set, it must
contain a tuple that is used for formatting (using the ``%s`` string
formatting operator) of the value from the ``event`` key. This works
in the same way as the stdlib handles arguments to the various log
methods: if the tuple contains only a single `dict` argument it is
used for keyword placeholders in the ``event`` string, otherwise it
will be used for positional placeholders.
``positional_args`` is populated by `structlog.stdlib.BoundLogger` or
can be set manually.
The *remove_positional_args* flag can be set to `False` to keep the
``positional_args`` key in the event dict; by default it will be
removed from the event dict after formatting a message.
"""
def __init__(self, remove_positional_args: bool = True) -> None:
self.remove_positional_args = remove_positional_args
def __call__(
self, _: WrappedLogger, __: str, event_dict: EventDict
) -> EventDict:
args = event_dict.get("positional_args")
# Mimic the formatting behaviour of the stdlib's logging module, which
# accepts both positional arguments and a single dict argument. The
# "single dict" check is the same one as the stdlib's logging module
# performs in LogRecord.__init__().
if args:
if len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
event_dict["event"] %= args
if self.remove_positional_args and args is not None:
del event_dict["positional_args"]
return event_dict
def filter_by_level(
logger: logging.Logger, method_name: str, event_dict: EventDict
) -> EventDict:
"""
Check whether logging is configured to accept messages from this log level.
Should be the first processor if stdlib's filtering by level is used so
possibly expensive processors like exception formatters are avoided in the
first place.
>>> import logging
>>> from structlog.stdlib import filter_by_level
>>> logging.basicConfig(level=logging.WARN)
>>> logger = logging.getLogger()
>>> filter_by_level(logger, 'warn', {})
{}
>>> filter_by_level(logger, 'debug', {})
Traceback (most recent call last):
...
DropEvent
"""
if (
# We can't use logger.isEnabledFor() because it's always disabled when
# a log entry is in flight on Python 3.14 and later,
not logger.disabled
and NAME_TO_LEVEL[method_name] >= logger.getEffectiveLevel()
):
return event_dict
raise DropEvent
def add_log_level_number(
logger: logging.Logger, method_name: str, event_dict: EventDict
) -> EventDict:
"""
Add the log level number to the event dict.
Log level numbers map to the log level names. The Python stdlib uses them
for filtering logic. This adds the same numbers so users can leverage
similar filtering. Compare::
level in ("warning", "error", "critical")
level_number >= 30
The mapping of names to numbers is in
``structlog.stdlib._log_levels._NAME_TO_LEVEL``.
.. versionadded:: 18.2.0
"""
event_dict["level_number"] = NAME_TO_LEVEL[method_name]
return event_dict
def add_logger_name(
logger: logging.Logger, method_name: str, event_dict: EventDict
) -> EventDict:
"""
Add the logger name to the event dict.
"""
record = event_dict.get("_record")
if record is None:
event_dict["logger"] = logger.name
else:
event_dict["logger"] = record.name
return event_dict
_LOG_RECORD_KEYS = logging.LogRecord(
"name", 0, "pathname", 0, "msg", (), None
).__dict__.keys()
| PositionalArgumentsFormatter |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-rocksetdb/llama_index/vector_stores/rocksetdb/base.py | {
"start": 1627,
"end": 12768
} | class ____(BasePydanticVectorStore):
"""
Rockset Vector Store.
Examples:
`pip install llama-index-vector-stores-rocksetdb`
```python
from llama_index.vector_stores.rocksetdb import RocksetVectorStore
# Set up RocksetVectorStore with necessary configurations
vector_store = RocksetVectorStore(
collection="my_collection",
api_key="your_rockset_api_key",
api_server="https://api.use1a1.rockset.com",
embedding_col="my_embedding",
metadata_col="node",
distance_func=RocksetVectorStore.DistanceFunc.DOT_PRODUCT
)
```
"""
stores_text: bool = True
is_embedding_query: bool = True
flat_metadata: bool = False
class DistanceFunc(Enum):
COSINE_SIM = "COSINE_SIM"
EUCLIDEAN_DIST = "EUCLIDEAN_DIST"
DOT_PRODUCT = "DOT_PRODUCT"
rockset: ModuleType
rs: Any
workspace: str
collection: str
text_key: str
embedding_col: str
metadata_col: str
distance_func: DistanceFunc
distance_order: str
def __init__(
self,
collection: str,
client: Any | None = None,
text_key: str = DEFAULT_TEXT_KEY,
embedding_col: str = DEFAULT_EMBEDDING_KEY,
metadata_col: str = "metadata",
workspace: str = "commons",
api_server: str | None = None,
api_key: str | None = None,
distance_func: DistanceFunc = DistanceFunc.COSINE_SIM,
) -> None:
"""
Rockset Vector Store Data container.
Args:
collection (str): The name of the collection of vectors
client (Optional[Any]): Rockset client object
text_key (str): The key to the text of nodes
(default: llama_index.core.vector_stores.utils.DEFAULT_TEXT_KEY)
embedding_col (str): The DB column containing embeddings
(default: llama_index.core.vector_stores.utils.DEFAULT_EMBEDDING_KEY))
metadata_col (str): The DB column containing node metadata
(default: "metadata")
workspace (str): The workspace containing the collection of vectors
(default: "commons")
api_server (Optional[str]): The Rockset API server to use
api_key (Optional[str]): The Rockset API key to use
distance_func (RocksetVectorStore.DistanceFunc): The metric to measure
vector relationship
(default: RocksetVectorStore.DistanceFunc.COSINE_SIM)
"""
super().__init__(
rockset=_get_rockset(),
rs=_get_client(api_key, api_server, client),
collection=collection,
text_key=text_key,
embedding_col=embedding_col,
metadata_col=metadata_col,
workspace=workspace,
distance_func=distance_func,
distance_order=(
"ASC" if distance_func is distance_func.EUCLIDEAN_DIST else "DESC"
),
)
try:
self.rs.set_application("llama_index")
except AttributeError:
# set_application method does not exist.
# rockset version < 2.1.0
pass
@classmethod
def class_name(cls) -> str:
return "RocksetVectorStore"
@property
def client(self) -> Any:
return self.rs
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""
Stores vectors in the collection.
Args:
nodes (List[BaseNode]): List of nodes with embeddings
Returns:
Stored node IDs (List[str])
"""
return [
row["_id"]
for row in self.rs.Documents.add_documents(
collection=self.collection,
workspace=self.workspace,
data=[
{
self.embedding_col: node.get_embedding(),
"_id": node.node_id,
self.metadata_col: node_to_metadata_dict(
node, text_field=self.text_key
),
}
for node in nodes
],
).data
]
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Deletes nodes stored in the collection by their ref_doc_id.
Args:
ref_doc_id (str): The ref_doc_id of the document
whose nodes are to be deleted
"""
self.rs.Documents.delete_documents(
collection=self.collection,
workspace=self.workspace,
data=[
self.rockset.models.DeleteDocumentsRequestData(id=row["_id"])
for row in self.rs.sql(
f"""
SELECT
_id
FROM
"{self.workspace}"."{self.collection}" x
WHERE
x.{self.metadata_col}.ref_doc_id=:ref_doc_id
""",
params={"ref_doc_id": ref_doc_id},
).results
],
)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""
Gets nodes relevant to a query.
Args:
query (llama_index.core.vector_stores.types.VectorStoreQuery): The query
similarity_col (Optional[str]): The column to select the cosine
similarity as (default: "_similarity")
Returns:
query results (llama_index.core.vector_stores.types.VectorStoreQueryResult)
"""
similarity_col = kwargs.get("similarity_col", "_similarity")
res = self.rs.sql(
f"""
SELECT
_id,
{self.metadata_col}
{
f''', {self.distance_func.value}(
{query.query_embedding},
{self.embedding_col}
)
AS {similarity_col}'''
if query.query_embedding
else ""
}
FROM
"{self.workspace}"."{self.collection}" x
{
"WHERE"
if query.node_ids
or (query.filters and len(query.filters.legacy_filters()) > 0)
else ""
} {
f'''({
" OR ".join([f"_id='{node_id}'" for node_id in query.node_ids])
})'''
if query.node_ids
else ""
} {
f''' {"AND" if query.node_ids else ""} ({
" AND ".join(
[
f"x.{self.metadata_col}.{filter.key}=:{filter.key}"
for filter in query.filters.legacy_filters()
]
)
})'''
if query.filters
else ""
}
ORDER BY
{similarity_col} {self.distance_order}
LIMIT
{query.similarity_top_k}
""",
params=(
{filter.key: filter.value for filter in query.filters.legacy_filters()}
if query.filters
else {}
),
)
similarities: List[float] | None = [] if query.query_embedding else None
nodes, ids = [], []
for row in res.results:
if similarities is not None:
similarities.append(row[similarity_col])
nodes.append(metadata_dict_to_node(row[self.metadata_col]))
ids.append(row["_id"])
return VectorStoreQueryResult(similarities=similarities, nodes=nodes, ids=ids)
@classmethod
def with_new_collection(
cls: Type[T], dimensions: int | None = None, **rockset_vector_store_args: Any
) -> RocksetVectorStore:
"""
Creates a new collection and returns its RocksetVectorStore.
Args:
dimensions (Optional[int]): The length of the vectors to enforce
in the collection's ingest transformation. By default, the
collection will do no vector enforcement.
collection (str): The name of the collection to be created
client (Optional[Any]): Rockset client object
workspace (str): The workspace containing the collection to be
created (default: "commons")
text_key (str): The key to the text of nodes
(default: llama_index.core.vector_stores.utils.DEFAULT_TEXT_KEY)
embedding_col (str): The DB column containing embeddings
(default: llama_index.core.vector_stores.utils.DEFAULT_EMBEDDING_KEY))
metadata_col (str): The DB column containing node metadata
(default: "metadata")
api_server (Optional[str]): The Rockset API server to use
api_key (Optional[str]): The Rockset API key to use
distance_func (RocksetVectorStore.DistanceFunc): The metric to measure
vector relationship
(default: RocksetVectorStore.DistanceFunc.COSINE_SIM)
"""
client = rockset_vector_store_args["client"] = _get_client(
api_key=rockset_vector_store_args.get("api_key"),
api_server=rockset_vector_store_args.get("api_server"),
client=rockset_vector_store_args.get("client"),
)
collection_args = {
"workspace": rockset_vector_store_args.get("workspace", "commons"),
"name": rockset_vector_store_args.get("collection"),
}
embeddings_col = rockset_vector_store_args.get(
"embeddings_col", DEFAULT_EMBEDDING_KEY
)
if dimensions:
collection_args["field_mapping_query"] = (
_get_rockset().model.field_mapping_query.FieldMappingQuery(
sql=f"""
SELECT
*, VECTOR_ENFORCE(
{embeddings_col},
{dimensions},
'float'
) AS {embeddings_col}
FROM
_input
"""
)
)
client.Collections.create_s3_collection(**collection_args) # create collection
while (
client.Collections.get(
collection=rockset_vector_store_args.get("collection")
).data.status
!= "READY"
): # wait until collection is ready
sleep(0.1)
# TODO: add async, non-blocking method collection creation
return cls(
**dict(
filter( # filter out None args
lambda arg: arg[1] is not None, rockset_vector_store_args.items()
)
)
)
| RocksetVectorStore |
python | pytorch__pytorch | torch/distributed/checkpoint/_experimental/checkpoint_writer.py | {
"start": 570,
"end": 1325
} | class ____(abc.ABC):
"""
Abstract base class for checkpoint commit hooks.
A commit hook provides callbacks that are executed before and after a checkpoint
is committed to storage. This allows for custom actions to be performed at specific
points in the checkpoint writing process, such as metadata updates, cleanup operations,
or notifications.
"""
@abc.abstractmethod
def pre_commit(self, path: str, **kwargs: dict[str, Any]) -> None:
"""
Performs actions before committing the checkpoint.
"""
@abc.abstractmethod
def post_commit(self, path: str, **kwargs: dict[str, Any]) -> None:
"""
Performs actions after committing the checkpoint.
"""
@dataclass
| WriterHook |
python | mlflow__mlflow | tests/genai/optimize/test_optimize.py | {
"start": 472,
"end": 14564
} | class ____(BasePromptOptimizer):
def __init__(self, reflection_model="openai:/gpt-4o-mini"):
self.model_name = reflection_model
def optimize(
self,
eval_fn: Any,
train_data: list[dict[str, Any]],
target_prompts: dict[str, str],
enable_tracking: bool = True,
) -> PromptOptimizerOutput:
optimized_prompts = {}
for prompt_name, template in target_prompts.items():
# Simple optimization: add "Be precise and accurate. " prefix
optimized_prompts[prompt_name] = f"Be precise and accurate. {template}"
# Verify the optimization by calling eval_fn
eval_fn(optimized_prompts, train_data)
return PromptOptimizerOutput(
optimized_prompts=optimized_prompts,
initial_eval_score=0.5,
final_eval_score=0.9,
)
@pytest.fixture
def sample_translation_prompt() -> PromptVersion:
return register_prompt(
name="test_translation_prompt",
template="Translate the following text to {{language}}: {{input_text}}",
)
@pytest.fixture
def sample_summarization_prompt() -> PromptVersion:
return register_prompt(
name="test_summarization_prompt",
template="Summarize this text: {{text}}",
)
@pytest.fixture
def sample_dataset() -> pd.DataFrame:
return pd.DataFrame(
{
"inputs": [
{"input_text": "Hello", "language": "Spanish"},
{"input_text": "World", "language": "French"},
{"input_text": "Goodbye", "language": "Spanish"},
],
"outputs": [
"Hola",
"Monde",
"Adiós",
],
}
)
@pytest.fixture
def sample_summarization_dataset() -> list[dict[str, Any]]:
return [
{
"inputs": {
"text": "This is a long document that needs to be summarized into key points."
},
"outputs": "Key points summary",
},
{
"inputs": {"text": "Another document with important information for summarization."},
"outputs": "Important info summary",
},
]
def sample_predict_fn(input_text: str, language: str) -> str:
mlflow.genai.load_prompt("prompts:/test_translation_prompt/1")
translations = {
("Hello", "Spanish"): "Hola",
("World", "French"): "Monde",
("Goodbye", "Spanish"): "Adiós",
}
return translations.get((input_text, language), f"translated_{input_text}")
def sample_summarization_fn(text: str) -> str:
return f"Summary of: {text[:20]}..."
@mlflow.genai.scorers.scorer(name="equivalence")
def equivalence(outputs, expectations):
return 1.0 if outputs == expectations["expected_response"] else 0.0
def test_optimize_prompts_single_prompt(
sample_translation_prompt: PromptVersion, sample_dataset: pd.DataFrame
):
mock_optimizer = MockPromptOptimizer()
result = optimize_prompts(
predict_fn=sample_predict_fn,
train_data=sample_dataset,
prompt_uris=[
f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}"
],
optimizer=mock_optimizer,
scorers=[equivalence],
)
assert len(result.optimized_prompts) == 1
optimized_prompt = result.optimized_prompts[0]
assert optimized_prompt.name == sample_translation_prompt.name
assert optimized_prompt.version == sample_translation_prompt.version + 1
assert "Be precise and accurate." in optimized_prompt.template
expected_template = "Translate the following text to {{language}}: {{input_text}}"
assert expected_template in optimized_prompt.template
assert result.initial_eval_score == 0.5
assert result.final_eval_score == 0.9
def test_optimize_prompts_multiple_prompts(
sample_translation_prompt: PromptVersion,
sample_summarization_prompt: PromptVersion,
sample_dataset: pd.DataFrame,
):
mock_optimizer = MockPromptOptimizer()
result = optimize_prompts(
predict_fn=sample_predict_fn,
train_data=sample_dataset,
prompt_uris=[
f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}",
f"prompts:/{sample_summarization_prompt.name}/{sample_summarization_prompt.version}",
],
optimizer=mock_optimizer,
scorers=[equivalence],
)
assert len(result.optimized_prompts) == 2
prompt_names = {prompt.name for prompt in result.optimized_prompts}
assert sample_translation_prompt.name in prompt_names
assert sample_summarization_prompt.name in prompt_names
assert result.initial_eval_score == 0.5
assert result.final_eval_score == 0.9
for prompt in result.optimized_prompts:
assert "Be precise and accurate." in prompt.template
def test_optimize_prompts_eval_function_behavior(
sample_translation_prompt: PromptVersion, sample_dataset: pd.DataFrame
):
class TestingOptimizer(BasePromptOptimizer):
def __init__(self):
self.model_name = "openai:/gpt-4o-mini"
self.eval_fn_calls = []
def optimize(self, eval_fn, dataset, target_prompts, enable_tracking=True):
# Test that eval_fn works correctly
test_prompts = {
"test_translation_prompt": "Prompt Candidate: "
"Translate {{input_text}} to {{language}}"
}
results = eval_fn(test_prompts, dataset)
self.eval_fn_calls.append((test_prompts, results))
# Verify results structure
assert isinstance(results, list)
assert len(results) == len(dataset)
for i, result in enumerate(results):
assert isinstance(result, EvaluationResultRecord)
assert result.inputs == dataset[i]["inputs"]
assert result.outputs == dataset[i]["outputs"]
assert result.score == 1
assert result.trace is not None
return PromptOptimizerOutput(optimized_prompts=target_prompts)
predict_called_count = 0
def predict_fn(input_text, language):
prompt = mlflow.genai.load_prompt("prompts:/test_translation_prompt/1").format(
input_text=input_text, language=language
)
nonlocal predict_called_count
# the first call to the predict_fn is the model check
if predict_called_count > 0:
# validate the prompt is replaced with the candidate prompt
assert "Prompt Candidate" in prompt
predict_called_count += 1
return sample_predict_fn(input_text=input_text, language=language)
testing_optimizer = TestingOptimizer()
optimize_prompts(
predict_fn=predict_fn,
train_data=sample_dataset,
prompt_uris=[
f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}"
],
optimizer=testing_optimizer,
scorers=[equivalence],
)
assert len(testing_optimizer.eval_fn_calls) == 1
_, eval_results = testing_optimizer.eval_fn_calls[0]
assert len(eval_results) == 3 # Number of records in sample_dataset
assert predict_called_count == 4 # 3 records in sample_dataset + 1 for the prediction check
def test_optimize_prompts_with_list_dataset(
sample_translation_prompt: PromptVersion, sample_summarization_dataset: list[dict[str, Any]]
):
mock_optimizer = MockPromptOptimizer()
def summarization_predict_fn(text):
return f"Summary: {text[:10]}..."
result = optimize_prompts(
predict_fn=summarization_predict_fn,
train_data=sample_summarization_dataset,
prompt_uris=[
f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}"
],
optimizer=mock_optimizer,
scorers=[equivalence],
)
assert len(result.optimized_prompts) == 1
assert result.initial_eval_score == 0.5
assert result.final_eval_score == 0.9
def test_optimize_prompts_with_model_name(
sample_translation_prompt: PromptVersion, sample_dataset: pd.DataFrame
):
class TestOptimizer(BasePromptOptimizer):
def __init__(self):
self.model_name = "test/custom-model"
def optimize(self, eval_fn, dataset, target_prompts, enable_tracking=True):
return PromptOptimizerOutput(optimized_prompts=target_prompts)
testing_optimizer = TestOptimizer()
result = optimize_prompts(
predict_fn=sample_predict_fn,
train_data=sample_dataset,
prompt_uris=[
f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}"
],
optimizer=testing_optimizer,
scorers=[equivalence],
)
assert len(result.optimized_prompts) == 1
def test_optimize_prompts_warns_on_unused_prompt(
sample_translation_prompt: PromptVersion,
sample_summarization_prompt: PromptVersion,
sample_dataset: pd.DataFrame,
capsys,
):
mock_optimizer = MockPromptOptimizer()
# Create predict_fn that only uses translation prompt, not summarization prompt
def predict_fn_single_prompt(input_text, language):
prompt = mlflow.genai.load_prompt("prompts:/test_translation_prompt/1")
prompt.format(input_text=input_text, language=language)
return sample_predict_fn(input_text=input_text, language=language)
result = optimize_prompts(
predict_fn=predict_fn_single_prompt,
train_data=sample_dataset,
prompt_uris=[
f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}",
f"prompts:/{sample_summarization_prompt.name}/{sample_summarization_prompt.version}",
],
optimizer=mock_optimizer,
scorers=[equivalence],
)
assert len(result.optimized_prompts) == 2
captured = capsys.readouterr()
assert "prompts were not used during evaluation" in captured.err
assert "test_summarization_prompt" in captured.err
def test_optimize_prompts_with_custom_scorers(
sample_translation_prompt: PromptVersion, sample_dataset: pd.DataFrame
):
# Create a custom scorer for case-insensitive matching
@scorer(name="case_insensitive_match")
def case_insensitive_match(outputs, expectations):
# Extract expected_response if expectations is a dict
if isinstance(expectations, dict) and "expected_response" in expectations:
expected_value = expectations["expected_response"]
else:
expected_value = expectations
return 1.0 if str(outputs).lower() == str(expected_value).lower() else 0.5
class MetricTestOptimizer(BasePromptOptimizer):
def __init__(self):
self.model_name = "openai:/gpt-4o-mini"
self.captured_scores = []
def optimize(self, eval_fn, dataset, target_prompts, enable_tracking=True):
# Run eval_fn and capture the scores
results = eval_fn(target_prompts, dataset)
self.captured_scores = [r.score for r in results]
return PromptOptimizerOutput(optimized_prompts=target_prompts)
testing_optimizer = MetricTestOptimizer()
# Create dataset with outputs that will test custom scorer
test_dataset = pd.DataFrame(
{
"inputs": [
{"input_text": "Hello", "language": "Spanish"},
{"input_text": "World", "language": "French"},
],
"outputs": ["HOLA", "monde"], # Different cases to test custom scorer
}
)
def predict_fn(input_text, language):
mlflow.genai.load_prompt("prompts:/test_translation_prompt/1")
# Return lowercase outputs
return {"Hello": "hola", "World": "monde"}.get(input_text, "unknown")
result = optimize_prompts(
predict_fn=predict_fn,
train_data=test_dataset,
prompt_uris=[
f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}"
],
scorers=[case_insensitive_match],
optimizer=testing_optimizer,
)
# Verify custom scorer was used
# "hola" vs "HOLA" (case insensitive match) -> 1.0
# "monde" vs "monde" (exact match) -> 1.0
assert testing_optimizer.captured_scores == [1.0, 1.0]
assert len(result.optimized_prompts) == 1
@pytest.mark.parametrize(
("train_data", "error_match"),
[
# Empty dataset validation (handled by _convert_eval_set_to_df)
([], "The dataset is empty"),
# Missing inputs validation (handled by _convert_eval_set_to_df)
([{"outputs": "Hola"}], "Either `inputs` or `trace` column is required"),
# Empty inputs validation
(
[{"inputs": {}, "outputs": "Hola"}],
"Record 0 is missing required 'inputs' field or it is empty",
),
],
)
def test_optimize_prompts_validation_errors(
sample_translation_prompt: PromptVersion,
train_data: list[dict[str, Any]],
error_match: str,
):
with pytest.raises(MlflowException, match=error_match):
optimize_prompts(
predict_fn=sample_predict_fn,
train_data=train_data,
prompt_uris=[
f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}"
],
optimizer=MockPromptOptimizer(),
scorers=[equivalence],
)
def test_optimize_prompts_with_chat_prompt(
sample_translation_prompt: PromptVersion, sample_dataset: pd.DataFrame
):
chat_prompt = register_prompt(
name="test_chat_prompt",
template=[{"role": "user", "content": "{{input_text}}"}],
)
with pytest.raises(MlflowException, match="Only text prompts can be optimized"):
optimize_prompts(
predict_fn=sample_predict_fn,
train_data=sample_dataset,
prompt_uris=[f"prompts:/{chat_prompt.name}/{chat_prompt.version}"],
optimizer=MockPromptOptimizer(),
scorers=[equivalence],
)
| MockPromptOptimizer |
python | astropy__astropy | astropy/modeling/tests/test_fitting_parallel.py | {
"start": 33525,
"end": 36237
} | class ____:
def setup_method(self, method):
self.data = gaussian(np.arange(20), 2, 10, 1)
self.data = np.broadcast_to(self.data.reshape((20, 1)), (20, 3)).copy()
self.data_original = self.data.copy()
self.data[0, 0] = np.nan
self.model = Gaussian1D(amplitude=1.5, mean=12, stddev=1.5)
def test_default(self, tmp_path):
fitter = TRFLSQFitter()
parallel_fit_dask(
data=self.data,
model=self.model,
fitter=fitter,
fitting_axes=0,
scheduler="synchronous",
)
assert fitter.fit_info is None
def test_all(self, tmp_path):
fitter = TRFLSQFitter()
parallel_fit_dask(
data=self.data,
model=self.model,
fitter=fitter,
fitting_axes=0,
scheduler="synchronous",
fit_info=True,
)
assert "message" in fitter.fit_info.properties
assert_allclose(fitter.fit_info.get_property_as_array("nfev"), [0, 9, 9])
param_cov_array = fitter.fit_info.get_property_as_array("param_cov")
assert param_cov_array.shape == (3, 3, 3)
assert_allclose(param_cov_array[0], 0)
assert_allclose(param_cov_array[1], param_cov_array[2])
assert np.any(np.abs(param_cov_array[1]) > 0)
# Test slicing that returns an array
assert fitter.fit_info.shape == (3,)
fit_info_subset = fitter.fit_info[:2]
assert isinstance(fit_info_subset, FitInfoArrayContainer)
assert fit_info_subset.shape == (2,)
assert_allclose(fit_info_subset.get_property_as_array("nfev"), [0, 9])
# Test slicing that returns a one element array
fit_info_subset_single = fitter.fit_info[1:2]
assert isinstance(fit_info_subset_single, FitInfoArrayContainer)
assert fit_info_subset_single.shape == (1,)
assert_allclose(fit_info_subset_single.get_property_as_array("nfev"), [9])
# Test slicing that returns a scalar
fit_info_indiv = fitter.fit_info[1]
assert not isinstance(fit_info_indiv, FitInfoArrayContainer)
assert fit_info_indiv.nfev == 9
assert fit_info_indiv.message != ""
def test_subset(self, tmp_path):
fitter = TRFLSQFitter()
parallel_fit_dask(
data=self.data,
model=self.model,
fitter=fitter,
fitting_axes=0,
scheduler="synchronous",
fit_info=("message", "nfev", "success"),
)
assert fitter.fit_info.properties == ("message", "nfev", "success")
assert_allclose(fitter.fit_info.get_property_as_array("nfev"), [0, 9, 9])
| TestFitInfo |
python | getsentry__sentry | tests/sentry/uptime/endpoints/test_detector.py | {
"start": 1354,
"end": 2261
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.environment = self.create_environment(
organization_id=self.organization.id, name="production"
)
self.project = self.create_project()
self.uptime_subscription = self.create_uptime_subscription(
url="https://www.google.com",
interval_seconds=UptimeSubscription.IntervalSeconds.ONE_MINUTE,
timeout_ms=30000,
method=UptimeSubscription.SupportedHTTPMethods.GET,
headers=[],
body=None,
trace_sampling=False,
)
self.detector = self.create_uptime_detector(
project=self.project,
env=self.environment,
uptime_subscription=self.uptime_subscription,
name="Test Detector",
)
| UptimeDetectorBaseTest |
python | numba__llvmlite | llvmlite/binding/object_file.py | {
"start": 832,
"end": 2664
} | class ____(ffi.ObjectRef):
@classmethod
def from_data(cls, data):
return cls(ffi.lib.LLVMPY_CreateObjectFile(data, len(data)))
@classmethod
def from_path(cls, path):
with open(path, 'rb') as f:
data = f.read()
return cls(ffi.lib.LLVMPY_CreateObjectFile(data, len(data)))
def sections(self):
it = SectionIteratorRef(ffi.lib.LLVMPY_GetSections(self))
while not it.is_end(self):
yield it
it.next()
def _dispose(self):
ffi.lib.LLVMPY_DisposeObjectFile(self)
ffi.lib.LLVMPY_CreateObjectFile.argtypes = [c_char_p, c_size_t]
ffi.lib.LLVMPY_CreateObjectFile.restype = ffi.LLVMObjectFileRef
ffi.lib.LLVMPY_DisposeObjectFile.argtypes = [ffi.LLVMObjectFileRef]
ffi.lib.LLVMPY_GetSections.argtypes = [ffi.LLVMObjectFileRef]
ffi.lib.LLVMPY_GetSections.restype = ffi.LLVMSectionIteratorRef
ffi.lib.LLVMPY_DisposeSectionIterator.argtypes = [ffi.LLVMSectionIteratorRef]
ffi.lib.LLVMPY_MoveToNextSection.argtypes = [ffi.LLVMSectionIteratorRef]
ffi.lib.LLVMPY_IsSectionIteratorAtEnd.argtypes = [
ffi.LLVMObjectFileRef, ffi.LLVMSectionIteratorRef]
ffi.lib.LLVMPY_IsSectionIteratorAtEnd.restype = c_bool
ffi.lib.LLVMPY_GetSectionName.argtypes = [ffi.LLVMSectionIteratorRef]
ffi.lib.LLVMPY_GetSectionName.restype = c_char_p
ffi.lib.LLVMPY_GetSectionSize.argtypes = [ffi.LLVMSectionIteratorRef]
ffi.lib.LLVMPY_GetSectionSize.restype = c_uint64
ffi.lib.LLVMPY_GetSectionAddress.argtypes = [ffi.LLVMSectionIteratorRef]
ffi.lib.LLVMPY_GetSectionAddress.restype = c_uint64
ffi.lib.LLVMPY_GetSectionContents.argtypes = [ffi.LLVMSectionIteratorRef]
ffi.lib.LLVMPY_GetSectionContents.restype = POINTER(c_char)
ffi.lib.LLVMPY_IsSectionText.argtypes = [ffi.LLVMSectionIteratorRef]
ffi.lib.LLVMPY_IsSectionText.restype = c_bool
| ObjectFileRef |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/utils/mixins.py | {
"start": 3003,
"end": 5595
} | class ____(Generic[AwsHookType]):
"""
Mixin class for AWS Operators, Sensors, etc.
.. warning::
Only for internal usage, this class might be changed, renamed or removed in the future
without any further notice.
:meta private:
"""
# Should be assigned in child class
aws_hook_class: type[AwsHookType]
aws_conn_id: str | None
region_name: str | None
verify: bool | str | None
botocore_config: dict[str, Any] | None
def validate_attributes(self):
"""Validate class attributes."""
if hasattr(self, "aws_hook_class"): # Validate if ``aws_hook_class`` is properly set.
try:
if not issubclass(self.aws_hook_class, AwsGenericHook):
raise TypeError
except TypeError:
# Raise if ``aws_hook_class`` is not a class or not a subclass of Generic/Base AWS Hook
raise AttributeError(
f"Class attribute '{type(self).__name__}.aws_hook_class' "
f"is not a subclass of AwsGenericHook."
) from None
else:
raise AttributeError(f"Class attribute '{type(self).__name__}.aws_hook_class' should be set.")
@property
def _hook_parameters(self) -> dict[str, Any]:
"""
Mapping parameters to build boto3-related hooks.
Only required to be overwritten for thick-wrapped Hooks.
"""
return {
"aws_conn_id": self.aws_conn_id,
"region_name": self.region_name,
"verify": self.verify,
"config": self.botocore_config,
}
@cached_property
@final
def hook(self) -> AwsHookType:
"""
Return AWS Provider's hook based on ``aws_hook_class``.
This method implementation should be taken as a final for
thin-wrapped Hooks around boto3. For thick-wrapped Hooks developer
should consider to overwrite ``_hook_parameters`` method instead.
"""
return self.aws_hook_class(**self._hook_parameters)
@cache
def aws_template_fields(*template_fields: str) -> tuple[str, ...]:
"""Merge provided template_fields with generic one and return in alphabetical order."""
if not all(isinstance(tf, str) for tf in template_fields):
msg = (
"Expected that all provided arguments are strings, but got "
f"{', '.join(map(repr, template_fields))}."
)
raise TypeError(msg)
return tuple(sorted({"aws_conn_id", "region_name", "verify"} | set(template_fields)))
| AwsBaseHookMixin |
python | astropy__astropy | astropy/coordinates/builtin_frames/lsr.py | {
"start": 1338,
"end": 4815
} | class ____(BaseRADecFrame):
r"""A frame in the Local Standard of Rest (LSR).
For Earth-bound observers it is often convenient to use a reference
frame that is tied to the Solar System barycenter, but such frames
are not very useful for describing galactic dynamics. The dynamical
LSR is instead tied to the circular velocity at the Sun's location,
but defining a circular velocity in a non-axisymmetric galaxy
requires non-trivial averaging. The kinematic LSR is understood as a
frame in which the average motion of the stars in the solar
neighborhood is zero, but in practice that is not straightforward
either because the average motion is different for different
spectral types.
The default parameters of this frame are those of the dynamical LSR
of Schönrich et al. (2010), meaning the Galactic (right-handed)
Cartesian velocity components of the solar motion are
:math:`(U, V, W) = (11.1, 12.24, 7.25)~{{\rm km}}~{{\rm s}}^{{-1}}`,
but a different solar motion can be specified with the ``v_bary``
argument. The frame is axis-aligned and co-spatial with
`~astropy.coordinates.ICRS`.
The frame attributes are listed under **Other Parameters**.
"""
# frame attributes:
v_bary = CartesianRepresentationAttribute(
default=v_bary_Schoenrich2010,
unit=u.km / u.s,
doc="The relative velocity of the solar-system barycenter",
)
@frame_transform_graph.transform(AffineTransform, ICRS, LSR)
def icrs_to_lsr(icrs_coord, lsr_frame):
v_bary_gal = Galactic(lsr_frame.v_bary)
v_bary_icrs = v_bary_gal.transform_to(icrs_coord)
v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0] * u.au, differentials=v_offset)
return None, offset
@frame_transform_graph.transform(AffineTransform, LSR, ICRS)
def lsr_to_icrs(lsr_coord, icrs_frame):
v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian())
v_bary_icrs = v_bary_gal.transform_to(icrs_frame)
v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0] * u.au, differentials=-v_offset)
return None, offset
# ------------------------------------------------------------------------------
doc_components_gal = """
l : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic longitude for this object (``b`` must also be given and
``representation`` must be None).
b : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic latitude for this object (``l`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
(``representation`` must be None).
pm_l_cosb : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic longitude (including the ``cos(b)`` term)
for this object (``pm_b`` must also be given).
pm_b : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic latitude for this object (``pm_l_cosb``
must also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object.
"""
@format_doc(base_doc, components=doc_components_gal, footer=doc_footer_lsr)
| LSR |
python | readthedocs__readthedocs.org | readthedocs/api/v3/serializers.py | {
"start": 23713,
"end": 28382
} | class ____(FlexFieldsModelSerializer):
"""
Project serializer.
.. note::
When using organizations, projects don't have the concept of users.
But we have organization.owners.
"""
homepage = serializers.SerializerMethodField()
language = LanguageSerializer()
programming_language = ProgrammingLanguageSerializer()
repository = RepositorySerializer(source="*")
urls = ProjectURLsSerializer(source="*")
subproject_of = serializers.SerializerMethodField()
translation_of = serializers.SerializerMethodField()
default_branch = serializers.CharField(source="get_default_branch")
tags = serializers.StringRelatedField(many=True)
users = UserSerializer(many=True)
single_version = serializers.BooleanField(source="is_single_version")
_links = ProjectLinksSerializer(source="*")
# TODO: adapt these fields with the proper names in the db and then remove
# them from here
created = serializers.DateTimeField(source="pub_date")
modified = serializers.DateTimeField(source="modified_date")
related_project_serializer = RelatedProjectSerializer
class Meta:
model = Project
fields = [
"id",
"name",
"slug",
"created",
"modified",
"language",
"programming_language",
"homepage",
"repository",
"default_version",
"default_branch",
"subproject_of",
"translation_of",
"urls",
"tags",
"privacy_level",
"external_builds_privacy_level",
"versioning_scheme",
# Kept for backwards compatibility,
# versioning_scheme should be used instead.
"single_version",
# NOTE: ``expandable_fields`` must not be included here. Otherwise,
# they will be tried to be rendered and fail
# 'active_versions',
"_links",
"users",
]
expandable_fields = {
# NOTE: this has to be a Model method, can't be a
# ``SerializerMethodField`` as far as I know.
# NOTE: this lists public versions only.
"active_versions": (
VersionSerializer,
{
"many": True,
},
),
# NOTE: we use a different serializer with just a subset of fields
# to avoid leaking information about the organization through a public project.
# Users can use the /api/v3/organizations/ endpoint to get more information
# about the organization.
"organization": (
"readthedocs.api.v3.serializers.RelatedOrganizationSerializer",
# NOTE: we cannot have a Project with multiple organizations.
{"source": "organizations.first"},
),
"permissions": (
ProjectPermissionSerializer,
{
"source": "*",
},
),
}
def __init__(self, *args, resolver=None, **kwargs):
# Receive a `Version` here to build URLs properly
self.version = kwargs.pop("version", None)
# Use a shared resolver to reduce the amount of DB queries while
# resolving version URLs.
self.resolver = resolver or Resolver()
super().__init__(*args, **kwargs)
# When using organizations, projects don't have the concept of users.
# But we have organization.owners.
# Set here instead of at the class level,
# so is easier to test.
if settings.RTD_ALLOW_ORGANIZATIONS:
self.fields.pop("users", None)
def get_homepage(self, obj):
# Overridden only to return ``None`` when the project_url is ``''``
return obj.project_url or None
def get_translation_of(self, obj):
if obj.main_language_project:
# Since the related project can be private, we use a restricted serializer.
return self.related_project_serializer(obj.main_language_project).data
return None
def get_subproject_of(self, obj):
if obj.main_language_project:
# If the project is a translation, it can't be a subproject,
# so it doesn't have a superproject.
return None
superproject = obj.superproject
if superproject:
# Since the related project can be private, we use a restricted serializer.
return self.related_project_serializer(superproject).data
return None
| ProjectSerializer |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 138234,
"end": 138389
} | class ____(BaseModel):
max_vectors: Optional[int] = Field(default=None, description="Max number of vectors in a multivector")
| StrictModeMultivectorOutput |
python | imageio__imageio | imageio/plugins/pillow_legacy.py | {
"start": 14505,
"end": 17415
} | class ____(PillowFormat):
"""See :mod:`imageio.plugins.pillow_legacy`"""
class Reader(PillowFormat.Reader):
def _open(self, pilmode=None, as_gray=False, ignoregamma=True):
return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray)
def _get_data(self, index):
im, info = PillowFormat.Reader._get_data(self, index)
if not self.request.kwargs.get("ignoregamma", True):
# The gamma value in the file represents the gamma factor for the
# hardware on the system where the file was created, and is meant
# to be able to match the colors with the system on which the
# image is shown. See also issue #366
try:
gamma = float(info["gamma"])
except (KeyError, ValueError):
pass
else:
scale = float(65536 if im.dtype == np.uint16 else 255)
gain = 1.0
im[:] = ((im / scale) ** gamma) * scale * gain + 0.4999
return im, info
# --
class Writer(PillowFormat.Writer):
def _open(self, compression=None, quantize=None, interlaced=False, **kwargs):
# Better default for compression
kwargs["compress_level"] = kwargs.get("compress_level", 9)
if compression is not None:
if compression < 0 or compression > 9:
raise ValueError("Invalid PNG compression level: %r" % compression)
kwargs["compress_level"] = compression
if quantize is not None:
for bits in range(1, 9):
if 2**bits == quantize:
break
else:
raise ValueError(
"PNG quantize must be power of two, " "not %r" % quantize
)
kwargs["bits"] = bits
if interlaced:
logger.warning("PIL PNG writer cannot produce interlaced images.")
ok_keys = (
"optimize",
"transparency",
"dpi",
"pnginfo",
"bits",
"compress_level",
"icc_profile",
"dictionary",
"prefer_uint8",
)
for key in kwargs:
if key not in ok_keys:
raise TypeError("Invalid arg for PNG writer: %r" % key)
PillowFormat.Writer._open(self)
self._meta.update(kwargs)
def _append_data(self, im, meta):
if str(im.dtype) == "uint16" and (im.ndim == 2 or im.shape[-1] == 1):
im = image_as_uint(im, bitdepth=16)
else:
im = image_as_uint(im, bitdepth=8)
PillowFormat.Writer._append_data(self, im, meta)
| PNGFormat |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/argparsing/argcompletion.py | {
"start": 174,
"end": 1019
} | class ____:
"""Substitute for missing class which accepts all arguments."""
def __init__(self, *args, **kwargs) -> None:
pass
try:
import argcomplete
try:
# argcomplete 3+
# see: https://github.com/kislyuk/argcomplete/commit/bd781cb08512b94966312377186ebc5550f46ae0
from argcomplete.finders import (
CompletionFinder,
default_validator,
)
except ImportError:
# argcomplete <3
from argcomplete import (
CompletionFinder,
default_validator,
)
warn = argcomplete.warn # pylint: disable=invalid-name
except ImportError:
argcomplete = None
CompletionFinder = Substitute
default_validator = Substitute # pylint: disable=invalid-name
warn = Substitute # pylint: disable=invalid-name
| Substitute |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver38.py | {
"start": 464,
"end": 807
} | class ____(Protocol):
@property
def method(self) -> int: ...
def get_value(x: HasMethod) -> int: ...
def upcast(x: PolymorphicListItemGetter) -> Getter[list[HasMethod], HasMethod]:
return x
def test(poly_getter: PolymorphicListItemGetter):
compose(poly_getter, get_value)
compose(upcast(poly_getter), get_value)
| HasMethod |
python | getsentry__sentry-python | sentry_sdk/integrations/trytond.py | {
"start": 387,
"end": 1651
} | class ____(Integration):
identifier = "trytond_wsgi"
origin = f"auto.http.{identifier}"
def __init__(self): # type: () -> None
pass
@staticmethod
def setup_once(): # type: () -> None
app.wsgi_app = SentryWsgiMiddleware(
app.wsgi_app,
span_origin=TrytondWSGIIntegration.origin,
)
@ensure_integration_enabled(TrytondWSGIIntegration)
def error_handler(e): # type: (Exception) -> None
if isinstance(e, TrytonException):
return
else:
client = sentry_sdk.get_client()
event, hint = event_from_exception(
e,
client_options=client.options,
mechanism={"type": "trytond", "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
# Expected error handlers signature was changed
# when the error_handler decorator was introduced
# in Tryton-5.4
if hasattr(app, "error_handler"):
@app.error_handler
def _(app, request, e): # type: ignore
error_handler(e)
else:
app.error_handlers.append(error_handler)
| TrytondWSGIIntegration |
python | huggingface__transformers | src/transformers/models/grounding_dino/modeling_grounding_dino.py | {
"start": 12393,
"end": 18678
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~GroundingDinoProcessor.post_process_grounded_object_detection`] to retrieve the
unnormalized bounding boxes.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Initial reference points sent through the Transformer decoder.
intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
Stacked intermediate hidden states (output of each layer of the decoder).
intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked intermediate reference points (reference points of each layer of the decoder).
encoder_last_hidden_state_vision (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_last_hidden_state_text (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the vision embeddings + one for the output of each
layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision encoder at the
output of each layer plus the initial embedding outputs.
encoder_text_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the text embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the text encoder at the output of
each layer plus the initial embedding outputs.
enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Predicted bounding boxes scores where the top `config.num_queries` scoring bounding boxes are picked as
region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and
background).
enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Logits of predicted bounding boxes coordinates in the first stage.
encoder_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Logits of top `config.num_queries` scoring bounding boxes in the first stage.
encoder_pred_boxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Coordinates of top `config.num_queries` scoring bounding boxes in the first stage.
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Encoded candidate labels sequence. Used in processor to post process object detection result.
"""
loss: Optional[torch.FloatTensor] = None
loss_dict: Optional[dict] = None
logits: Optional[torch.FloatTensor] = None
pred_boxes: Optional[torch.FloatTensor] = None
auxiliary_outputs: Optional[list[dict]] = None
last_hidden_state: Optional[torch.FloatTensor] = None
init_reference_points: Optional[torch.FloatTensor] = None
intermediate_hidden_states: Optional[torch.FloatTensor] = None
intermediate_reference_points: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
encoder_last_hidden_state_vision: Optional[torch.FloatTensor] = None
encoder_last_hidden_state_text: Optional[torch.FloatTensor] = None
encoder_vision_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_text_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
enc_outputs_class: Optional[torch.FloatTensor] = None
enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
encoder_logits: Optional[torch.FloatTensor] = None
encoder_pred_boxes: Optional[torch.FloatTensor] = None
input_ids: Optional[torch.LongTensor] = None
# Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->GroundingDino
| GroundingDinoObjectDetectionOutput |
python | PyCQA__pylint | tests/functional/u/unsupported/unsupported_delete_operation.py | {
"start": 632,
"end": 1668
} | class ____:
def __delitem__(self, key):
pass
del NonSubscriptable()[0] # [unsupported-delete-operation]
del NonSubscriptable[0] # [unsupported-delete-operation]
del Subscriptable()[0]
del Subscriptable[0] # [unsupported-delete-operation]
# generators are not subscriptable
def powers_of_two():
k = 0
while k < 10:
yield 2 ** k
k += 1
del powers_of_two()[0] # [unsupported-delete-operation]
del powers_of_two[0] # [unsupported-delete-operation]
# check that primitive non subscriptable types are caught
del True[0] # [unsupported-delete-operation]
del None[0] # [unsupported-delete-operation]
del 8.5[0] # [unsupported-delete-operation]
del 10[0] # [unsupported-delete-operation]
# sets are not subscriptable
del {x ** 2 for x in range(10)}[0] # [unsupported-delete-operation]
del set(numbers)[0] # [unsupported-delete-operation]
del frozenset(numbers)[0] # [unsupported-delete-operation]
# skip instances with unknown base classes
from some_missing_module import LibSubscriptable
| Subscriptable |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 33936,
"end": 36386
} | class ____(ASTBase):
def __init__(
self,
outer: str,
leftSpecs: ASTDeclSpecsSimple,
rightSpecs: ASTDeclSpecsSimple,
trailing: ASTTrailingTypeSpec,
) -> None:
# leftSpecs and rightSpecs are used for output
# allSpecs are used for id generation TODO: remove?
self.outer = outer
self.leftSpecs = leftSpecs
self.rightSpecs = rightSpecs
self.allSpecs = self.leftSpecs.mergeWith(self.rightSpecs)
self.trailingTypeSpec = trailing
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTDeclSpecs):
return NotImplemented
return (
self.outer == other.outer
and self.leftSpecs == other.leftSpecs
and self.rightSpecs == other.rightSpecs
and self.trailingTypeSpec == other.trailingTypeSpec
)
def __hash__(self) -> int:
return hash((
self.outer,
self.leftSpecs,
self.rightSpecs,
self.trailingTypeSpec,
))
def _stringify(self, transform: StringifyTransform) -> str:
res: list[str] = []
l = transform(self.leftSpecs)
if len(l) > 0:
res.append(l)
if self.trailingTypeSpec:
if len(res) > 0:
res.append(' ')
res.append(transform(self.trailingTypeSpec))
r = str(self.rightSpecs)
if len(r) > 0:
if len(res) > 0:
res.append(' ')
res.append(r)
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
modifiers: list[Node] = []
self.leftSpecs.describe_signature(modifiers)
for m in modifiers:
signode += m
if self.trailingTypeSpec:
if len(modifiers) > 0:
signode += addnodes.desc_sig_space()
self.trailingTypeSpec.describe_signature(signode, mode, env, symbol=symbol)
modifiers = []
self.rightSpecs.describe_signature(modifiers)
if len(modifiers) > 0:
signode += addnodes.desc_sig_space()
for m in modifiers:
signode += m
# Declarator
################################################################################
| ASTDeclSpecs |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/scaffold/branch/planning.py | {
"start": 924,
"end": 1238
} | class ____:
"""Container for a prompt-generated implementation plan.
Attributes:
markdown_content: The complete plan as markdown text generated by Claude
metadata: Additional metadata about plan generation
"""
markdown_content: str
metadata: dict[str, Any]
@record
| GeneratedPlan |
python | sqlalchemy__sqlalchemy | test/orm/dml/test_bulk.py | {
"start": 2301,
"end": 12634
} | class ____(BulkTest, _fixtures.FixtureTest):
__sparse_driver_backend__ = True
@classmethod
def setup_mappers(cls):
User, Address, Order = cls.classes("User", "Address", "Order")
u, a, o = cls.tables("users", "addresses", "orders")
cls.mapper_registry.map_imperatively(User, u)
cls.mapper_registry.map_imperatively(Address, a)
cls.mapper_registry.map_imperatively(Order, o)
@testing.combinations(
"save_objects",
"insert_mappings",
"insert_stmt",
argnames="statement_type",
)
@testing.variation("return_defaults", [True, False])
def test_bulk_save_return_defaults(self, statement_type, return_defaults):
(User,) = self.classes("User")
s = fixture_session()
if statement_type == "save_objects":
objects = [User(name="u1"), User(name="u2"), User(name="u3")]
assert "id" not in objects[0].__dict__
returning_users_id = " RETURNING users.id"
with self.sql_execution_asserter() as asserter:
s.bulk_save_objects(objects, return_defaults=return_defaults)
elif statement_type == "insert_mappings":
data = [dict(name="u1"), dict(name="u2"), dict(name="u3")]
returning_users_id = " RETURNING users.id"
with self.sql_execution_asserter() as asserter:
s.bulk_insert_mappings(
User, data, return_defaults=return_defaults
)
elif statement_type == "insert_stmt":
data = [dict(name="u1"), dict(name="u2"), dict(name="u3")]
# for statement, "return_defaults" is heuristic on if we are
# a joined inh mapping if we don't otherwise include
# .returning() on the statement itself
returning_users_id = ""
with self.sql_execution_asserter() as asserter:
s.execute(insert(User), data)
asserter.assert_(
Conditional(
(
return_defaults
and testing.db.dialect.insert_executemany_returning
)
or statement_type == "insert_stmt",
[
CompiledSQL(
"INSERT INTO users (name) "
f"VALUES (:name){returning_users_id}",
[{"name": "u1"}, {"name": "u2"}, {"name": "u3"}],
),
],
[
Conditional(
return_defaults,
[
CompiledSQL(
"INSERT INTO users (name) VALUES (:name)",
[{"name": "u1"}],
),
CompiledSQL(
"INSERT INTO users (name) VALUES (:name)",
[{"name": "u2"}],
),
CompiledSQL(
"INSERT INTO users (name) VALUES (:name)",
[{"name": "u3"}],
),
],
[
CompiledSQL(
"INSERT INTO users (name) VALUES (:name)",
[
{"name": "u1"},
{"name": "u2"},
{"name": "u3"},
],
),
],
)
],
)
)
if statement_type == "save_objects":
if return_defaults:
eq_(objects[0].__dict__["id"], 1)
eq_(inspect(objects[0]).key, (User, (1,), None))
else:
assert "id" not in objects[0].__dict__
eq_(inspect(objects[0]).key, None)
elif statement_type == "insert_mappings":
# test for #11661
if return_defaults:
eq_(data[0]["id"], 1)
else:
assert "id" not in data[0]
def test_bulk_save_objects_defaults_key(self):
User = self.classes.User
pes = [User(name=f"foo{i}") for i in range(3)]
s = fixture_session()
s.bulk_save_objects(pes, return_defaults=True)
key = inspect(pes[0]).key
s.commit()
eq_(inspect(s.get(User, 1)).key, key)
def test_bulk_save_mappings_preserve_order(self):
(User,) = self.classes("User")
s = fixture_session()
# commit some object into db
user1 = User(name="i1")
user2 = User(name="i2")
s.add(user1)
s.add(user2)
s.commit()
# make some changes
user1.name = "u1"
user3 = User(name="i3")
s.add(user3)
user2.name = "u2"
objects = [user1, user3, user2]
from sqlalchemy import inspect
def _bulk_save_mappings(
mapper,
mappings,
isupdate,
isstates,
return_defaults,
update_changed_only,
render_nulls,
):
mock_method(list(mappings), isupdate)
mock_method = mock.Mock()
with mock.patch.object(s, "_bulk_save_mappings", _bulk_save_mappings):
s.bulk_save_objects(objects)
eq_(
mock_method.mock_calls,
[
mock.call([inspect(user1)], True),
mock.call([inspect(user3)], False),
mock.call([inspect(user2)], True),
],
)
mock_method = mock.Mock()
with mock.patch.object(s, "_bulk_save_mappings", _bulk_save_mappings):
s.bulk_save_objects(objects, preserve_order=False)
eq_(
mock_method.mock_calls,
[
mock.call([inspect(user3)], False),
mock.call([inspect(user1), inspect(user2)], True),
],
)
def test_bulk_save_no_defaults(self):
(User,) = self.classes("User")
s = fixture_session()
objects = [User(name="u1"), User(name="u2"), User(name="u3")]
assert "id" not in objects[0].__dict__
with self.sql_execution_asserter() as asserter:
s.bulk_save_objects(objects)
asserter.assert_(
CompiledSQL(
"INSERT INTO users (name) VALUES (:name)",
[{"name": "u1"}, {"name": "u2"}, {"name": "u3"}],
)
)
assert "id" not in objects[0].__dict__
def test_bulk_save_updated_include_unchanged(self):
(User,) = self.classes("User")
s = fixture_session(expire_on_commit=False)
objects = [User(name="u1"), User(name="u2"), User(name="u3")]
s.add_all(objects)
s.commit()
objects[0].name = "u1new"
objects[2].name = "u3new"
s = fixture_session()
with self.sql_execution_asserter() as asserter:
s.bulk_save_objects(objects, update_changed_only=False)
asserter.assert_(
CompiledSQL(
"UPDATE users SET name=:name WHERE users.id = :users_id",
[
{"users_id": 1, "name": "u1new"},
{"users_id": 2, "name": "u2"},
{"users_id": 3, "name": "u3new"},
],
)
)
@testing.combinations("update_mappings", "update_stmt")
def test_bulk_update(self, statement_type):
User = self.classes.User
s = fixture_session(expire_on_commit=False)
objects = [User(name="u1"), User(name="u2"), User(name="u3")]
s.add_all(objects)
s.commit()
s = fixture_session()
data = [
{"id": 1, "name": "u1new"},
{"id": 2, "name": "u2"},
{"id": 3, "name": "u3new"},
]
if statement_type == "update_mappings":
with self.sql_execution_asserter() as asserter:
s.bulk_update_mappings(User, data)
elif statement_type == "update_stmt":
with self.sql_execution_asserter() as asserter:
s.execute(update(User), data)
asserter.assert_(
CompiledSQL(
"UPDATE users SET name=:name WHERE users.id = :users_id",
[
{"users_id": 1, "name": "u1new"},
{"users_id": 2, "name": "u2"},
{"users_id": 3, "name": "u3new"},
],
)
)
def test_bulk_insert(self):
(User,) = self.classes("User")
s = fixture_session()
with self.sql_execution_asserter() as asserter:
s.bulk_insert_mappings(
User,
[
{"id": 1, "name": "u1new"},
{"id": 2, "name": "u2"},
{"id": 3, "name": "u3new"},
],
)
asserter.assert_(
CompiledSQL(
"INSERT INTO users (id, name) VALUES (:id, :name)",
[
{"id": 1, "name": "u1new"},
{"id": 2, "name": "u2"},
{"id": 3, "name": "u3new"},
],
)
)
def test_bulk_insert_render_nulls(self):
(Order,) = self.classes("Order")
s = fixture_session()
with self.sql_execution_asserter() as asserter:
s.bulk_insert_mappings(
Order,
[
{"id": 1, "description": "u1new"},
{"id": 2, "description": None},
{"id": 3, "description": "u3new"},
],
render_nulls=True,
)
asserter.assert_(
CompiledSQL(
"INSERT INTO orders (id, description) "
"VALUES (:id, :description)",
[
{"id": 1, "description": "u1new"},
{"id": 2, "description": None},
{"id": 3, "description": "u3new"},
],
)
)
| BulkInsertUpdateTest |
python | PyCQA__pylint | tests/functional/m/member/member_checks_typed_annotations.py | {
"start": 72,
"end": 99
} | class ____:
myfield: int
| A |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 70750,
"end": 77429
} | class ____(ASTBase):
def __init__(
self,
args: list[ASTFunctionParameter],
volatile: bool,
const: bool,
refQual: str | None,
exceptionSpec: ASTNoexceptSpec,
trailingReturn: ASTType,
override: bool,
final: bool,
attrs: ASTAttributeList,
initializer: str | None,
) -> None:
self.args = args
self.volatile = volatile
self.const = const
self.refQual = refQual
self.exceptionSpec = exceptionSpec
self.trailingReturn = trailingReturn
self.override = override
self.final = final
self.attrs = attrs
self.initializer = initializer
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTParametersQualifiers):
return NotImplemented
return (
self.args == other.args
and self.volatile == other.volatile
and self.const == other.const
and self.refQual == other.refQual
and self.exceptionSpec == other.exceptionSpec
and self.trailingReturn == other.trailingReturn
and self.override == other.override
and self.final == other.final
and self.attrs == other.attrs
and self.initializer == other.initializer
)
def __hash__(self) -> int:
return hash((
self.args,
self.volatile,
self.const,
self.refQual,
self.exceptionSpec,
self.trailingReturn,
self.override,
self.final,
self.attrs,
self.initializer,
))
@property
def function_params(self) -> list[ASTFunctionParameter]:
return self.args
def get_modifiers_id(self, version: int) -> str:
res = []
if self.volatile:
res.append('V')
if self.const:
if version == 1:
res.append('C')
else:
res.append('K')
if self.refQual == '&&':
res.append('O')
elif self.refQual == '&':
res.append('R')
return ''.join(res)
def get_param_id(self, version: int) -> str:
if version == 1:
if len(self.args) == 0:
return ''
else:
return '__' + '.'.join(a.get_id(version) for a in self.args)
if len(self.args) == 0:
return 'v'
else:
return ''.join(a.get_id(version) for a in self.args)
def _stringify(self, transform: StringifyTransform) -> str:
res = ['(']
first = True
for a in self.args:
if not first:
res.append(', ')
first = False
res.append(str(a))
res.append(')')
if self.volatile:
res.append(' volatile')
if self.const:
res.append(' const')
if self.refQual:
res.extend((' ', self.refQual))
if self.exceptionSpec:
res.extend((' ', transform(self.exceptionSpec)))
if self.trailingReturn:
res.extend((' -> ', transform(self.trailingReturn)))
if self.final:
res.append(' final')
if self.override:
res.append(' override')
if len(self.attrs) != 0:
res.extend((' ', transform(self.attrs)))
if self.initializer:
res.extend((' = ', self.initializer))
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
multi_line_parameter_list = False
test_node: Element = signode
while test_node.parent:
if not isinstance(test_node, addnodes.desc_signature):
test_node = test_node.parent
continue
multi_line_parameter_list = test_node.get(
'multi_line_parameter_list', False
)
break
# only use the desc_parameterlist for the outer list, not for inner lists
if mode == 'lastIsName':
paramlist = addnodes.desc_parameterlist()
paramlist['multi_line_parameter_list'] = multi_line_parameter_list
for arg in self.args:
param = addnodes.desc_parameter('', '', noemph=True)
arg.describe_signature(param, 'param', env, symbol=symbol)
paramlist += param
signode += paramlist
else:
signode += addnodes.desc_sig_punctuation('(', '(')
first = True
for arg in self.args:
if not first:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_space()
first = False
arg.describe_signature(signode, 'markType', env, symbol=symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
def _add_anno(signode: TextElement, text: str) -> None:
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_keyword(text, text)
if self.volatile:
_add_anno(signode, 'volatile')
if self.const:
_add_anno(signode, 'const')
if self.refQual:
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_punctuation(self.refQual, self.refQual)
if self.exceptionSpec:
signode += addnodes.desc_sig_space()
self.exceptionSpec.describe_signature(signode, mode, env, symbol)
if self.trailingReturn:
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_operator('->', '->')
signode += addnodes.desc_sig_space()
self.trailingReturn.describe_signature(signode, mode, env, symbol)
if self.final:
_add_anno(signode, 'final')
if self.override:
_add_anno(signode, 'override')
if len(self.attrs) != 0:
signode += addnodes.desc_sig_space()
self.attrs.describe_signature(signode)
if self.initializer:
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_punctuation('=', '=')
signode += addnodes.desc_sig_space()
assert self.initializer in {'0', 'delete', 'default'}
if self.initializer == '0':
signode += addnodes.desc_sig_literal_number('0', '0')
else:
signode += addnodes.desc_sig_keyword(self.initializer, self.initializer)
| ASTParametersQualifiers |
python | pytorch__pytorch | torch/_dynamo/variables/ctx_manager.py | {
"start": 48627,
"end": 50087
} | class ____(ContextWrappingVariable):
"""represents torch._dynamo.patch_dynamo_config"""
# NOTE: no need to guard on dynamo config because dynamo config should not affect soundness
# (though it may affect tracing behavior)
def __init__(self, target_values: dict[str, Any], **kwargs: Any) -> None:
target_values_tuple = tuple(target_values.items())
super().__init__(
target_values=(target_values_tuple,), initial_values=None, **kwargs
)
initial_values_dict = {}
for key, _ in target_values_tuple:
initial_values_dict[key] = torch._dynamo.config.__getattr__(key) # type: ignore[attr-defined]
self.initial_values = (tuple(initial_values_dict.items()),)
def _call_func(self, tx: "InstructionTranslator", values: Any) -> None:
assert len(values) == 1
value = values[0]
# manually patch dynamo config
for key, val in value:
torch._dynamo.config.__setattr__(key, val) # type: ignore[attr-defined]
# No need to keep track of global side effects because
# dynamo will properly restore this context manager for
# unsupported instructions and continuation functions.
# Dynamo config also should not affect the semantics of the compiled graph.
def module_name(self) -> str:
return "torch._dynamo"
def fn_name(self) -> str:
return "patch_dynamo_config"
| DynamoConfigPatchVariable |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-instagram/unit_tests/integration/test_story_insights.py | {
"start": 2702,
"end": 5522
} | class ____(TestCase):
@staticmethod
def _read(config_: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput:
return read_output(
config_builder=config_,
stream_name=_STREAM_NAME,
sync_mode=SyncMode.full_refresh,
expecting_exception=expecting_exception,
)
@HttpMocker()
def test_instagram_story_insights(self, http_mocker: HttpMocker) -> None:
test = HAPPY_PATH
# Mocking API stream
http_mocker.get(
get_account_request().build(),
get_account_response(),
)
# Mocking parent stream
http_mocker.get(
_get_parent_request().build(),
_get_response(stream_name=_PARENT_STREAM_NAME, test=test)
.with_record(_record(stream_name=_PARENT_STREAM_NAME, test=test))
.build(),
)
http_mocker.get(
_get_child_request(media_id=STORIES_ID, metric=_METRICS).build(),
HttpResponse(json.dumps(find_template(f"{_STREAM_NAME}_for_{test}", __file__)), 200),
)
output = self._read(config_=config())
assert len(output.records) == 1
assert output.records[0].record.data["page_id"]
assert output.records[0].record.data["business_account_id"]
assert output.records[0].record.data["id"]
for metric in _METRICS:
assert metric in output.records[0].record.data
@HttpMocker()
def test_instagram_story_insights_for_error_code_30(self, http_mocker: HttpMocker) -> None:
test = ERROR_10
http_mocker.get(
get_account_request().build(),
get_account_response(),
)
# Mocking parent stream
http_mocker.get(
_get_parent_request().build(), HttpResponse(json.dumps(find_template(f"{_PARENT_STREAM_NAME}_for_{test}", __file__)), 200)
)
# Good response
http_mocker.get(
_get_child_request(media_id=STORIES_ID, metric=_METRICS).build(),
HttpResponse(json.dumps(find_template(f"{_STREAM_NAME}_for_{HAPPY_PATH}", __file__)), 200),
)
# error 10
http_mocker.get(
_get_child_request(media_id=STORIES_ID_ERROR_CODE_10, metric=_METRICS).build(),
HttpResponse(json.dumps(find_template(f"{_STREAM_NAME}_for_{test}", __file__)), 400),
)
output = self._read(config_=config())
# error was ignored and correct record was processed
assert len(output.records) == 1
assert output.records[0].record.data["page_id"]
assert output.records[0].record.data["business_account_id"]
assert output.records[0].record.data["id"]
for metric in _METRICS:
assert metric in output.records[0].record.data
| TestFullRefresh |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py | {
"start": 21822,
"end": 22097
} | class ____(graphene.ObjectType):
"""Output indicating that a code location server was shut down."""
repositoryLocationName = graphene.NonNull(graphene.String)
class Meta:
name = "ShutdownRepositoryLocationSuccess"
| GrapheneShutdownRepositoryLocationSuccess |
python | mwaskom__seaborn | seaborn/_core/properties.py | {
"start": 1228,
"end": 5305
} | class ____:
"""Base class for visual properties that can be set directly or be data scaling."""
# When True, scales for this property will populate the legend by default
legend = False
# When True, scales for this property normalize data to [0, 1] before mapping
normed = False
def __init__(self, variable: str | None = None):
"""Initialize the property with the name of the corresponding plot variable."""
if not variable:
variable = self.__class__.__name__.lower()
self.variable = variable
def default_scale(self, data: Series) -> Scale:
"""Given data, initialize appropriate scale class."""
var_type = variable_type(data, boolean_type="boolean", strict_boolean=True)
if var_type == "numeric":
return Continuous()
elif var_type == "datetime":
return Temporal()
elif var_type == "boolean":
return Boolean()
else:
return Nominal()
def infer_scale(self, arg: Any, data: Series) -> Scale:
"""Given data and a scaling argument, initialize appropriate scale class."""
# TODO put these somewhere external for validation
# TODO putting this here won't pick it up if subclasses define infer_scale
# (e.g. color). How best to handle that? One option is to call super after
# handling property-specific possibilities (e.g. for color check that the
# arg is not a valid palette name) but that could get tricky.
trans_args = ["log", "symlog", "logit", "pow", "sqrt"]
if isinstance(arg, str):
if any(arg.startswith(k) for k in trans_args):
# TODO validate numeric type? That should happen centrally somewhere
return Continuous(trans=arg)
else:
msg = f"Unknown magic arg for {self.variable} scale: '{arg}'."
raise ValueError(msg)
else:
arg_type = type(arg).__name__
msg = f"Magic arg for {self.variable} scale must be str, not {arg_type}."
raise TypeError(msg)
def get_mapping(self, scale: Scale, data: Series) -> Mapping:
"""Return a function that maps from data domain to property range."""
def identity(x):
return x
return identity
def standardize(self, val: Any) -> Any:
"""Coerce flexible property value to standardized representation."""
return val
def _check_dict_entries(self, levels: list, values: dict) -> None:
"""Input check when values are provided as a dictionary."""
missing = set(levels) - set(values)
if missing:
formatted = ", ".join(map(repr, sorted(missing, key=str)))
err = f"No entry in {self.variable} dictionary for {formatted}"
raise ValueError(err)
def _check_list_length(self, levels: list, values: list) -> list:
"""Input check when values are provided as a list."""
message = ""
if len(levels) > len(values):
message = " ".join([
f"\nThe {self.variable} list has fewer values ({len(values)})",
f"than needed ({len(levels)}) and will cycle, which may",
"produce an uninterpretable plot."
])
values = [x for _, x in zip(levels, itertools.cycle(values))]
elif len(values) > len(levels):
message = " ".join([
f"The {self.variable} list has more values ({len(values)})",
f"than needed ({len(levels)}), which may not be intended.",
])
values = values[:len(levels)]
# TODO look into custom PlotSpecWarning with better formatting
if message:
warnings.warn(message, UserWarning)
return values
# =================================================================================== #
# Properties relating to spatial position of marks on the plotting axes
# =================================================================================== #
| Property |
python | pydata__xarray | xarray/plot/accessor.py | {
"start": 30978,
"end": 43213
} | class ____:
"""
Enables use of xarray.plot functions as attributes on a Dataset.
For example, Dataset.plot.scatter
"""
_ds: Dataset
__slots__ = ("_ds",)
def __init__(self, dataset: Dataset) -> None:
self._ds = dataset
def __call__(self, *args, **kwargs) -> NoReturn:
raise ValueError(
"Dataset.plot cannot be called directly. Use "
"an explicit plot method, e.g. ds.plot.scatter(...)"
)
@overload
def scatter( # type: ignore[misc,unused-ignore] # None is hashable :(
self,
*args: Any,
x: Hashable | None = None,
y: Hashable | None = None,
z: Hashable | None = None,
hue: Hashable | None = None,
hue_style: HueStyleOptions = None,
markersize: Hashable | None = None,
linewidth: Hashable | None = None,
figsize: Iterable[float] | None = None,
size: float | None = None,
aspect: float | None = None,
ax: Axes | None = None,
row: None = None, # no wrap -> primitive
col: None = None, # no wrap -> primitive
col_wrap: int | None = None,
xincrease: bool | None = True,
yincrease: bool | None = True,
add_legend: bool | None = None,
add_colorbar: bool | None = None,
add_labels: bool | Iterable[bool] = True,
add_title: bool = True,
subplot_kws: dict[str, Any] | None = None,
xscale: ScaleOptions = None,
yscale: ScaleOptions = None,
xticks: ArrayLike | None = None,
yticks: ArrayLike | None = None,
xlim: tuple[float, float] | None = None,
ylim: tuple[float, float] | None = None,
cmap=None,
vmin: float | None = None,
vmax: float | None = None,
norm: Normalize | None = None,
extend=None,
levels=None,
**kwargs: Any,
) -> PathCollection: ...
@overload
def scatter(
self,
*args: Any,
x: Hashable | None = None,
y: Hashable | None = None,
z: Hashable | None = None,
hue: Hashable | None = None,
hue_style: HueStyleOptions = None,
markersize: Hashable | None = None,
linewidth: Hashable | None = None,
figsize: Iterable[float] | None = None,
size: float | None = None,
aspect: float | None = None,
ax: Axes | None = None,
row: Hashable | None = None,
col: Hashable, # wrap -> FacetGrid
col_wrap: int | None = None,
xincrease: bool | None = True,
yincrease: bool | None = True,
add_legend: bool | None = None,
add_colorbar: bool | None = None,
add_labels: bool | Iterable[bool] = True,
add_title: bool = True,
subplot_kws: dict[str, Any] | None = None,
xscale: ScaleOptions = None,
yscale: ScaleOptions = None,
xticks: ArrayLike | None = None,
yticks: ArrayLike | None = None,
xlim: tuple[float, float] | None = None,
ylim: tuple[float, float] | None = None,
cmap=None,
vmin: float | None = None,
vmax: float | None = None,
norm: Normalize | None = None,
extend=None,
levels=None,
**kwargs: Any,
) -> FacetGrid[Dataset]: ...
@overload
def scatter(
self,
*args: Any,
x: Hashable | None = None,
y: Hashable | None = None,
z: Hashable | None = None,
hue: Hashable | None = None,
hue_style: HueStyleOptions = None,
markersize: Hashable | None = None,
linewidth: Hashable | None = None,
figsize: Iterable[float] | None = None,
size: float | None = None,
aspect: float | None = None,
ax: Axes | None = None,
row: Hashable, # wrap -> FacetGrid
col: Hashable | None = None,
col_wrap: int | None = None,
xincrease: bool | None = True,
yincrease: bool | None = True,
add_legend: bool | None = None,
add_colorbar: bool | None = None,
add_labels: bool | Iterable[bool] = True,
add_title: bool = True,
subplot_kws: dict[str, Any] | None = None,
xscale: ScaleOptions = None,
yscale: ScaleOptions = None,
xticks: ArrayLike | None = None,
yticks: ArrayLike | None = None,
xlim: tuple[float, float] | None = None,
ylim: tuple[float, float] | None = None,
cmap=None,
vmin: float | None = None,
vmax: float | None = None,
norm: Normalize | None = None,
extend=None,
levels=None,
**kwargs: Any,
) -> FacetGrid[Dataset]: ...
@functools.wraps(dataset_plot.scatter, assigned=("__doc__",))
def scatter(self, *args, **kwargs) -> PathCollection | FacetGrid[Dataset]:
return dataset_plot.scatter(self._ds, *args, **kwargs)
@overload
def quiver( # type: ignore[misc,unused-ignore] # None is hashable :(
self,
*args: Any,
x: Hashable | None = None,
y: Hashable | None = None,
u: Hashable | None = None,
v: Hashable | None = None,
hue: Hashable | None = None,
hue_style: HueStyleOptions = None,
col: None = None, # no wrap -> primitive
row: None = None, # no wrap -> primitive
ax: Axes | None = None,
figsize: Iterable[float] | None = None,
size: float | None = None,
col_wrap: int | None = None,
sharex: bool = True,
sharey: bool = True,
aspect: AspectOptions = None,
subplot_kws: dict[str, Any] | None = None,
add_guide: bool | None = None,
cbar_kwargs: dict[str, Any] | None = None,
cbar_ax: Axes | None = None,
vmin: float | None = None,
vmax: float | None = None,
norm: Normalize | None = None,
infer_intervals=None,
center=None,
levels=None,
robust: bool | None = None,
colors=None,
extend=None,
cmap=None,
**kwargs: Any,
) -> Quiver: ...
@overload
def quiver(
self,
*args: Any,
x: Hashable | None = None,
y: Hashable | None = None,
u: Hashable | None = None,
v: Hashable | None = None,
hue: Hashable | None = None,
hue_style: HueStyleOptions = None,
col: Hashable, # wrap -> FacetGrid
row: Hashable | None = None,
ax: Axes | None = None,
figsize: Iterable[float] | None = None,
size: float | None = None,
col_wrap: int | None = None,
sharex: bool = True,
sharey: bool = True,
aspect: AspectOptions = None,
subplot_kws: dict[str, Any] | None = None,
add_guide: bool | None = None,
cbar_kwargs: dict[str, Any] | None = None,
cbar_ax: Axes | None = None,
vmin: float | None = None,
vmax: float | None = None,
norm: Normalize | None = None,
infer_intervals=None,
center=None,
levels=None,
robust: bool | None = None,
colors=None,
extend=None,
cmap=None,
**kwargs: Any,
) -> FacetGrid[Dataset]: ...
@overload
def quiver(
self,
*args: Any,
x: Hashable | None = None,
y: Hashable | None = None,
u: Hashable | None = None,
v: Hashable | None = None,
hue: Hashable | None = None,
hue_style: HueStyleOptions = None,
col: Hashable | None = None,
row: Hashable, # wrap -> FacetGrid
ax: Axes | None = None,
figsize: Iterable[float] | None = None,
size: float | None = None,
col_wrap: int | None = None,
sharex: bool = True,
sharey: bool = True,
aspect: AspectOptions = None,
subplot_kws: dict[str, Any] | None = None,
add_guide: bool | None = None,
cbar_kwargs: dict[str, Any] | None = None,
cbar_ax: Axes | None = None,
vmin: float | None = None,
vmax: float | None = None,
norm: Normalize | None = None,
infer_intervals=None,
center=None,
levels=None,
robust: bool | None = None,
colors=None,
extend=None,
cmap=None,
**kwargs: Any,
) -> FacetGrid[Dataset]: ...
@functools.wraps(dataset_plot.quiver, assigned=("__doc__",))
def quiver(self, *args, **kwargs) -> Quiver | FacetGrid[Dataset]:
return dataset_plot.quiver(self._ds, *args, **kwargs)
@overload
def streamplot( # type: ignore[misc,unused-ignore] # None is hashable :(
self,
*args: Any,
x: Hashable | None = None,
y: Hashable | None = None,
u: Hashable | None = None,
v: Hashable | None = None,
hue: Hashable | None = None,
hue_style: HueStyleOptions = None,
col: None = None, # no wrap -> primitive
row: None = None, # no wrap -> primitive
ax: Axes | None = None,
figsize: Iterable[float] | None = None,
size: float | None = None,
col_wrap: int | None = None,
sharex: bool = True,
sharey: bool = True,
aspect: AspectOptions = None,
subplot_kws: dict[str, Any] | None = None,
add_guide: bool | None = None,
cbar_kwargs: dict[str, Any] | None = None,
cbar_ax: Axes | None = None,
vmin: float | None = None,
vmax: float | None = None,
norm: Normalize | None = None,
infer_intervals=None,
center=None,
levels=None,
robust: bool | None = None,
colors=None,
extend=None,
cmap=None,
**kwargs: Any,
) -> LineCollection: ...
@overload
def streamplot(
self,
*args: Any,
x: Hashable | None = None,
y: Hashable | None = None,
u: Hashable | None = None,
v: Hashable | None = None,
hue: Hashable | None = None,
hue_style: HueStyleOptions = None,
col: Hashable, # wrap -> FacetGrid
row: Hashable | None = None,
ax: Axes | None = None,
figsize: Iterable[float] | None = None,
size: float | None = None,
col_wrap: int | None = None,
sharex: bool = True,
sharey: bool = True,
aspect: AspectOptions = None,
subplot_kws: dict[str, Any] | None = None,
add_guide: bool | None = None,
cbar_kwargs: dict[str, Any] | None = None,
cbar_ax: Axes | None = None,
vmin: float | None = None,
vmax: float | None = None,
norm: Normalize | None = None,
infer_intervals=None,
center=None,
levels=None,
robust: bool | None = None,
colors=None,
extend=None,
cmap=None,
**kwargs: Any,
) -> FacetGrid[Dataset]: ...
@overload
def streamplot(
self,
*args: Any,
x: Hashable | None = None,
y: Hashable | None = None,
u: Hashable | None = None,
v: Hashable | None = None,
hue: Hashable | None = None,
hue_style: HueStyleOptions = None,
col: Hashable | None = None,
row: Hashable, # wrap -> FacetGrid
ax: Axes | None = None,
figsize: Iterable[float] | None = None,
size: float | None = None,
col_wrap: int | None = None,
sharex: bool = True,
sharey: bool = True,
aspect: AspectOptions = None,
subplot_kws: dict[str, Any] | None = None,
add_guide: bool | None = None,
cbar_kwargs: dict[str, Any] | None = None,
cbar_ax: Axes | None = None,
vmin: float | None = None,
vmax: float | None = None,
norm: Normalize | None = None,
infer_intervals=None,
center=None,
levels=None,
robust: bool | None = None,
colors=None,
extend=None,
cmap=None,
**kwargs: Any,
) -> FacetGrid[Dataset]: ...
@functools.wraps(dataset_plot.streamplot, assigned=("__doc__",))
def streamplot(self, *args, **kwargs) -> LineCollection | FacetGrid[Dataset]:
return dataset_plot.streamplot(self._ds, *args, **kwargs)
| DatasetPlotAccessor |
python | getsentry__sentry | src/sentry/search/events/builder/discover.py | {
"start": 6595,
"end": 9270
} | class ____(UnresolvedQuery):
def __init__(
self,
dataset: Dataset,
params: ParamsType,
interval: int,
snuba_params: SnubaParams | None = None,
query: str | None = None,
selected_columns: list[str] | None = None,
equations: list[str] | None = None,
limit: int | None = 10000,
config: QueryBuilderConfig | None = None,
):
config = config if config is not None else QueryBuilderConfig()
config.auto_fields = False
config.equation_config = {"auto_add": True, "aggregates_only": True}
self.interval = interval
super().__init__(
dataset,
params,
snuba_params=snuba_params,
query=query,
selected_columns=selected_columns,
equations=equations,
config=config,
)
self.granularity = Granularity(interval)
self.limit = None if limit is None else Limit(limit)
# This is a timeseries, the groupby will always be time
self.groupby = [self.time_column]
@property
def time_column(self) -> SelectType:
return Column("time")
def resolve_query(
self,
query: str | None = None,
selected_columns: list[str] | None = None,
groupby_columns: list[str] | None = None,
equations: list[str] | None = None,
orderby: list[str] | str | None = None,
) -> None:
self.resolve_time_conditions()
self.where, self.having = self.resolve_conditions(query)
# params depends on parse_query, and conditions being resolved first since there may be projects in conditions
self.where += self.resolve_params()
self.columns = self.resolve_select(selected_columns, equations)
@property
def select(self) -> list[SelectType]:
if not self.aggregates:
raise InvalidSearchQuery("Cannot query a timeseries without a Y-Axis")
# Casting for now since QueryFields/QueryFilter are only partially typed
return self.aggregates
def get_snql_query(self) -> Request:
return Request(
dataset=self.dataset.value,
app_id="default",
query=Query(
match=Entity(self._get_entity_name()),
select=self.select,
where=self.where,
having=self.having,
groupby=self.groupby,
orderby=[OrderBy(self.time_column, Direction.ASC)],
granularity=self.granularity,
limit=self.limit,
),
tenant_ids=self.tenant_ids,
)
| TimeseriesQueryBuilder |
python | doocs__leetcode | solution/2300-2399/2310.Sum of Numbers With Units Digit K/Solution2.py | {
"start": 0,
"end": 245
} | class ____:
def minimumNumbers(self, num: int, k: int) -> int:
if num == 0:
return 0
for i in range(1, 11):
if (k * i) % 10 == num % 10 and k * i <= num:
return i
return -1
| Solution |
python | PrefectHQ__prefect | tests/test_logging.py | {
"start": 13274,
"end": 32274
} | class ____:
@pytest.fixture
def handler(self) -> Generator[APILogHandler, None, None]:
yield APILogHandler()
@pytest.fixture
def logger(self, handler: APILogHandler):
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
yield logger
logger.removeHandler(handler)
def test_worker_is_not_flushed_on_handler_close(self, mock_log_worker: MagicMock):
handler = APILogHandler()
handler.close()
mock_log_worker.drain_all.assert_not_called()
async def test_logs_can_still_be_sent_after_close(
self,
logger: logging.Logger,
handler: APILogHandler,
flow_run: "FlowRun",
prefect_client: PrefectClient,
):
logger.info("Test", extra={"flow_run_id": flow_run.id})
handler.close() # Close it
logger.info("Test", extra={"flow_run_id": flow_run.id})
await handler.aflush()
logs = await prefect_client.read_logs()
assert len(logs) == 2
async def test_logs_can_still_be_sent_after_flush(
self,
logger: logging.Logger,
handler: APILogHandler,
flow_run: "FlowRun",
prefect_client: PrefectClient,
):
logger.info("Test", extra={"flow_run_id": flow_run.id})
await handler.aflush()
logger.info("Test", extra={"flow_run_id": flow_run.id})
await handler.aflush()
logs = await prefect_client.read_logs()
assert len(logs) == 2
async def test_sync_flush_from_async_context(
self,
logger: logging.Logger,
handler: APILogHandler,
flow_run: "FlowRun",
prefect_client: PrefectClient,
):
logger.info("Test", extra={"flow_run_id": flow_run.id})
handler.flush()
# Yield to the worker thread
time.sleep(2)
logs = await prefect_client.read_logs()
assert len(logs) == 1
def test_sync_flush_from_global_event_loop(
self, logger: logging.Logger, handler: APILogHandler, flow_run: "FlowRun"
):
logger.info("Test", extra={"flow_run_id": flow_run.id})
with pytest.raises(RuntimeError, match="would block"):
from_sync.call_soon_in_loop_thread(create_call(handler.flush)).result()
def test_sync_flush_from_sync_context(
self, logger: logging.Logger, handler: APILogHandler, flow_run: "FlowRun"
):
logger.info("Test", extra={"flow_run_id": flow_run.id})
handler.flush()
def test_sends_task_run_log_to_worker(
self, logger: logging.Logger, mock_log_worker: MagicMock, task_run: "TaskRun"
):
with TaskRunContext.model_construct(task_run=task_run):
logger.info("test-task")
expected = LogCreate.model_construct(
flow_run_id=task_run.flow_run_id,
task_run_id=task_run.id,
name=logger.name,
level=logging.INFO,
message="test-task",
).model_dump(mode="json")
expected["timestamp"] = ANY # Tested separately
expected["__payload_size__"] = ANY # Tested separately
mock_log_worker.instance().send.assert_called_once_with(expected)
def test_sends_flow_run_log_to_worker(
self, logger: logging.Logger, mock_log_worker: MagicMock, flow_run: "FlowRun"
):
with FlowRunContext.model_construct(flow_run=flow_run):
logger.info("test-flow")
expected = LogCreate.model_construct(
flow_run_id=flow_run.id,
task_run_id=None,
name=logger.name,
level=logging.INFO,
message="test-flow",
).model_dump(mode="json")
expected["timestamp"] = ANY # Tested separately
expected["__payload_size__"] = ANY # Tested separately
mock_log_worker.instance().send.assert_called_once_with(expected)
@pytest.mark.parametrize("with_context", [True, False])
def test_respects_explicit_flow_run_id(
self,
logger: logging.Logger,
mock_log_worker: MagicMock,
flow_run: "FlowRun",
with_context: bool,
):
flow_run_id = uuid.uuid4()
context = (
FlowRunContext.model_construct(flow_run=flow_run)
if with_context
else nullcontext()
)
with context:
logger.info("test-task", extra={"flow_run_id": flow_run_id})
expected = LogCreate.model_construct(
flow_run_id=flow_run_id,
task_run_id=None,
name=logger.name,
level=logging.INFO,
message="test-task",
).model_dump(mode="json")
expected["timestamp"] = ANY # Tested separately
expected["__payload_size__"] = ANY # Tested separately
mock_log_worker.instance().send.assert_called_once_with(expected)
@pytest.mark.parametrize("with_context", [True, False])
def test_respects_explicit_task_run_id(
self,
logger: logging.Logger,
mock_log_worker: MagicMock,
flow_run: "FlowRun",
with_context: bool,
task_run: "TaskRun",
):
task_run_id = uuid.uuid4()
context = (
TaskRunContext.model_construct(task_run=task_run)
if with_context
else nullcontext()
)
with FlowRunContext.model_construct(flow_run=flow_run):
with context:
logger.warning("test-task", extra={"task_run_id": task_run_id})
expected = LogCreate.model_construct(
flow_run_id=flow_run.id,
task_run_id=task_run_id,
name=logger.name,
level=logging.WARNING,
message="test-task",
).model_dump(mode="json")
expected["timestamp"] = ANY # Tested separately
expected["__payload_size__"] = ANY # Tested separately
mock_log_worker.instance().send.assert_called_once_with(expected)
def test_does_not_emit_logs_below_level(
self, logger: logging.Logger, mock_log_worker: MagicMock
):
logger.setLevel(logging.WARNING)
logger.info("test-task", extra={"flow_run_id": uuid.uuid4()})
mock_log_worker.instance().send.assert_not_called()
def test_explicit_task_run_id_still_requires_flow_run_id(
self, logger: logging.Logger, mock_log_worker: MagicMock
):
task_run_id = uuid.uuid4()
with pytest.warns(
UserWarning, match="attempted to send logs .* without a flow run id"
):
logger.info("test-task", extra={"task_run_id": task_run_id})
mock_log_worker.instance().send.assert_not_called()
def test_sets_timestamp_from_record_created_time(
self,
logger: logging.Logger,
mock_log_worker: MagicMock,
flow_run: "FlowRun",
handler: APILogHandler,
):
# Capture the record
handler.emit = MagicMock(side_effect=handler.emit)
with FlowRunContext.model_construct(flow_run=flow_run):
logger.info("test-flow")
record = handler.emit.call_args[0][0]
log_dict = mock_log_worker.instance().send.call_args[0][0]
timestamp = log_dict["timestamp"]
if sys.version_info < (3, 11):
timestamp = _normalize_timestamp(timestamp)
assert datetime.fromisoformat(timestamp) == from_timestamp(record.created)
def test_sets_timestamp_from_time_if_missing_from_recrod(
self,
logger: logging.Logger,
mock_log_worker: MagicMock,
flow_run: "FlowRun",
handler: APILogHandler,
monkeypatch: pytest.MonkeyPatch,
):
def drop_created_and_emit(
emit: Callable[[logging.LogRecord], None], record: logging.LogRecord
):
record.created = None # type: ignore
return emit(record)
handler.emit = MagicMock(
side_effect=partial(drop_created_and_emit, handler.emit)
)
now = time.time()
monkeypatch.setattr("time.time", lambda: now)
with FlowRunContext.model_construct(flow_run=flow_run):
logger.info("test-flow")
log_dict = mock_log_worker.instance().send.call_args[0][0]
timestamp = log_dict["timestamp"]
if sys.version_info < (3, 11):
timestamp = _normalize_timestamp(timestamp)
assert datetime.fromisoformat(timestamp) == from_timestamp(now)
def test_does_not_send_logs_that_opt_out(
self,
logger: logging.Logger,
mock_log_worker: MagicMock,
task_run: "TaskRun",
):
with TaskRunContext.model_construct(task_run=task_run):
logger.info("test", extra={"send_to_api": False})
mock_log_worker.instance().send.assert_not_called()
def test_does_not_send_logs_when_handler_is_disabled(
self,
logger: logging.Logger,
mock_log_worker: MagicMock,
task_run: "TaskRun",
):
with temporary_settings(
updates={PREFECT_LOGGING_TO_API_ENABLED: "False"},
):
with TaskRunContext.model_construct(task_run=task_run):
logger.info("test")
mock_log_worker.instance().send.assert_not_called()
def test_does_not_send_logs_outside_of_run_context_with_default_setting(
self,
logger: logging.Logger,
mock_log_worker: MagicMock,
capsys: pytest.CaptureFixture[str],
):
# Warns in the main process
with pytest.warns(
UserWarning, match="attempted to send logs .* without a flow run id"
):
logger.info("test")
mock_log_worker.instance().send.assert_not_called()
# No stderr output
output = capsys.readouterr()
assert output.err == ""
def test_does_not_raise_when_logger_outside_of_run_context_with_default_setting(
self,
logger: logging.Logger,
capsys: pytest.CaptureFixture[str],
):
with pytest.warns(
UserWarning,
match=(
"Logger 'tests.test_logging' attempted to send logs to the API without"
" a flow run id."
),
):
logger.info("test")
def test_does_not_send_logs_outside_of_run_context_with_error_setting(
self,
logger: logging.Logger,
mock_log_worker: MagicMock,
capsys: pytest.CaptureFixture[str],
):
with temporary_settings(
updates={PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW: "error"},
):
with pytest.raises(
MissingContextError,
match="attempted to send logs .* without a flow run id",
):
logger.info("test")
mock_log_worker.instance().send.assert_not_called()
# No stderr output
output = capsys.readouterr()
assert output.err == ""
def test_does_not_warn_when_logger_outside_of_run_context_with_error_setting(
self,
logger: logging.Logger,
):
with temporary_settings(
updates={PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW: "error"},
):
with pytest.raises(
MissingContextError,
match=(
"Logger 'tests.test_logging' attempted to send logs to the API"
" without a flow run id."
),
):
logger.info("test")
def test_does_not_send_logs_outside_of_run_context_with_ignore_setting(
self,
logger: logging.Logger,
mock_log_worker: MagicMock,
capsys: pytest.CaptureFixture[str],
):
with temporary_settings(
updates={PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW: "ignore"},
):
logger.info("test")
mock_log_worker.instance().send.assert_not_called()
# No stderr output
output = capsys.readouterr()
assert output.err == ""
def test_does_not_raise_or_warn_when_logger_outside_of_run_context_with_ignore_setting(
self,
logger: logging.Logger,
):
with temporary_settings(
updates={PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW: "ignore"},
):
logger.info("test")
def test_does_not_send_logs_outside_of_run_context_with_warn_setting(
self,
logger: logging.Logger,
mock_log_worker: MagicMock,
capsys: pytest.CaptureFixture[str],
):
with temporary_settings(
updates={PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW: "warn"},
):
# Warns in the main process
with pytest.warns(
UserWarning, match="attempted to send logs .* without a flow run id"
):
logger.info("test")
mock_log_worker.instance().send.assert_not_called()
# No stderr output
output = capsys.readouterr()
assert output.err == ""
def test_does_not_raise_when_logger_outside_of_run_context_with_warn_setting(
self,
logger: logging.Logger,
):
with temporary_settings(
updates={PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW: "warn"},
):
with pytest.warns(
UserWarning,
match=(
"Logger 'tests.test_logging' attempted to send logs to the API"
" without a flow run id."
),
):
logger.info("test")
def test_missing_context_warning_refers_to_caller_lineno(
self,
logger: logging.Logger,
mock_log_worker: MagicMock,
):
from inspect import currentframe, getframeinfo
# Warns in the main process
with pytest.warns(
UserWarning, match="attempted to send logs .* without a flow run id"
) as warnings:
logger.info("test")
lineno = getframeinfo(currentframe()).lineno - 1 # type: ignore
# The above dynamic collects the line number so that added tests do not
# break this test
mock_log_worker.instance().send.assert_not_called()
assert warnings.pop().lineno == lineno
def test_writes_logging_errors_to_stderr(
self,
logger: logging.Logger,
mock_log_worker: MagicMock,
capsys: pytest.CaptureFixture[str],
monkeypatch: pytest.MonkeyPatch,
):
monkeypatch.setattr(
"prefect.logging.handlers.APILogHandler.prepare",
MagicMock(side_effect=RuntimeError("Oh no!")),
)
# No error raised
logger.info("test")
mock_log_worker.instance().send.assert_not_called()
# Error is in stderr
output = capsys.readouterr()
assert "RuntimeError: Oh no!" in output.err
def test_does_not_write_error_for_logs_outside_run_context_that_opt_out(
self,
logger: logging.Logger,
mock_log_worker: MagicMock,
capsys: pytest.CaptureFixture[str],
):
logger.info("test", extra={"send_to_api": False})
mock_log_worker.instance().send.assert_not_called()
output = capsys.readouterr()
assert (
"RuntimeError: Attempted to send logs to the API without a flow run id."
not in output.err
)
async def test_does_not_enqueue_logs_that_are_too_big(
self,
task_run: "TaskRun",
logger: logging.Logger,
capsys: pytest.CaptureFixture[str],
mock_log_worker: MagicMock,
):
with TaskRunContext.model_construct(task_run=task_run):
with temporary_settings(updates={PREFECT_LOGGING_TO_API_MAX_LOG_SIZE: "1"}):
logger.info("test")
mock_log_worker.instance().send.assert_called_once()
sent_log = mock_log_worker.instance().send.call_args[0][0]
output = capsys.readouterr()
assert sent_log["message"].endswith("... [truncated]")
assert "ValueError" not in output.err
def test_handler_knows_how_large_logs_are(self):
dict_log = {
"name": "prefect.flow_runs",
"level": 20,
"message": "Finished in state Completed()",
"timestamp": "2023-02-08T17:55:52.993831+00:00",
"flow_run_id": "47014fb1-9202-4a78-8739-c993d8c24415",
"task_run_id": None,
}
log_size = len(json.dumps(dict_log))
assert log_size == 211
handler = APILogHandler()
assert handler._get_payload_size(dict_log) == log_size # type: ignore[reportPrivateUsage]
@pytest.mark.usefixtures("disable_hosted_api_server")
def test_max_log_size_defaults_to_cloud_value(self):
with temporary_settings(
updates={PREFECT_API_URL: "https://api.prefect.cloud/api"},
restore_defaults={PREFECT_LOGGING_TO_API_MAX_LOG_SIZE},
) as settings:
assert settings.logging.to_api.max_log_size == 25_000
@pytest.mark.usefixtures("disable_hosted_api_server")
def test_max_log_size_defaults_to_cloud_setting(self):
with temporary_settings(
updates={
PREFECT_API_URL: "https://api.prefect.cloud/api",
PREFECT_CLOUD_MAX_LOG_SIZE: 10_000,
},
restore_defaults={PREFECT_LOGGING_TO_API_MAX_LOG_SIZE},
) as settings:
assert settings.logging.to_api.max_log_size == 10_000
@pytest.mark.usefixtures("disable_hosted_api_server")
def test_max_log_size_respects_custom_value_lower_than_cloud(self):
with temporary_settings(
updates={
PREFECT_API_URL: "https://api.prefect.cloud/api",
PREFECT_LOGGING_TO_API_MAX_LOG_SIZE: 10_000,
},
) as settings:
assert settings.logging.to_api.max_log_size == 10_000
@pytest.mark.usefixtures("disable_hosted_api_server")
def test_max_log_size_capped_at_cloud_max(self):
with temporary_settings(
updates={
PREFECT_API_URL: "https://api.prefect.cloud/api",
PREFECT_LOGGING_TO_API_MAX_LOG_SIZE: 1_000_000,
},
) as settings:
assert settings.logging.to_api.max_log_size == 25_000
@pytest.mark.usefixtures("disable_hosted_api_server")
def test_max_log_size_does_not_change_for_self_hosted(self):
with temporary_settings(
updates={PREFECT_API_URL: "http://example.com/api"},
restore_defaults={PREFECT_LOGGING_TO_API_MAX_LOG_SIZE},
) as settings:
assert settings.logging.to_api.max_log_size == 1_000_000
@pytest.mark.usefixtures("disable_hosted_api_server")
def test_max_log_size_default_when_not_connected(self):
with temporary_settings(
restore_defaults={PREFECT_API_URL, PREFECT_LOGGING_TO_API_MAX_LOG_SIZE}
) as settings:
assert settings.logging.to_api.max_log_size == 1_000_000
WORKER_ID = uuid.uuid4()
| TestAPILogHandler |
python | dagster-io__dagster | python_modules/libraries/dagster-azure/dagster_azure/blob/resources.py | {
"start": 293,
"end": 482
} | class ____(Config):
"""Authentication using an azure SAS token."""
credential_type: Literal["sas"] = "sas"
token: str
"an azure SAS token"
| AzureBlobStorageSASTokenCredential |
python | getsentry__sentry | src/sentry/sentry_apps/api/serializers/app_platform_event.py | {
"start": 777,
"end": 964
} | class ____[T: Mapping[str, Any]](TypedDict):
action: SentryAppActionType
installation: AppPlatformEventInstallation
data: T
actor: AppPlatformEventActor
| AppPlatformEventBody |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 16499,
"end": 17860
} | class ____(TemporalType):
"""
Data type representing a time duration.
Parameters
----------
time_unit : {'us', 'ns', 'ms'}
Unit of time. Defaults to `'us'` (microseconds).
Notes
-----
The underlying representation of this type is a 64-bit signed integer.
The integer indicates an amount of time units and can be negative to indicate
negative time offsets.
"""
time_unit: TimeUnit
def __init__(self, time_unit: TimeUnit = "us") -> None:
if time_unit not in ("ms", "us", "ns"):
msg = (
"invalid `time_unit`"
f"\n\nExpected one of {{'ns','us','ms'}}, got {time_unit!r}."
)
raise ValueError(msg)
self.time_unit = time_unit
def __eq__(self, other: PolarsDataType) -> bool: # type: ignore[override]
# allow comparing object instances to class
if type(other) is DataTypeClass and issubclass(other, Duration):
return True
elif isinstance(other, Duration):
return self.time_unit == other.time_unit
else:
return False
def __hash__(self) -> int:
return hash((self.__class__, self.time_unit))
def __repr__(self) -> str:
class_name = self.__class__.__name__
return f"{class_name}(time_unit={self.time_unit!r})"
| Duration |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 1622,
"end": 2086
} | class ____(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def name(self) -> str:
"""
The name of the curve. e.g. secp256r1.
"""
@property
@abc.abstractmethod
def key_size(self) -> int:
"""
Bit size of a secret scalar for the curve.
"""
@property
@abc.abstractmethod
def group_order(self) -> int:
"""
The order of the curve's group.
"""
| EllipticCurve |
python | walkccc__LeetCode | solutions/795. Number of Subarrays with Bounded Maximum/795.py | {
"start": 0,
"end": 373
} | class ____:
def numSubarrayBoundedMax(
self,
nums: list[int],
left: int,
right: int,
) -> int:
ans = 0
l = -1
r = -1
for i, num in enumerate(nums):
if num > right: # Handle the reset value.
l = i
if num >= left: # Handle the reset and the needed value.
r = i
ans += r - l
return ans
| Solution |
python | Textualize__textual | src/textual/widgets/_option_list.py | {
"start": 1296,
"end": 2615
} | class ____:
"""This class holds details of options in the list."""
def __init__(
self, prompt: VisualType, id: str | None = None, disabled: bool = False
) -> None:
"""Initialise the option.
Args:
prompt: The prompt (text displayed) for the option.
id: An option ID for the option.
disabled: Disable the option (will be shown grayed out, and will not be selectable).
"""
self._prompt = prompt
self._visual: Visual | None = None
self._id = id
self.disabled = disabled
self._divider = False
@property
def prompt(self) -> VisualType:
"""The original prompt."""
return self._prompt
@property
def id(self) -> str | None:
"""Optional ID for the option."""
return self._id
def _set_prompt(self, prompt: VisualType) -> None:
"""Update the prompt.
Args:
prompt: New prompt.
"""
self._prompt = prompt
self._visual = None
def __hash__(self) -> int:
return id(self)
def __rich_repr__(self) -> rich.repr.Result:
yield self._prompt
yield "id", self._id, None
yield "disabled", self.disabled, False
yield "_divider", self._divider, False
@dataclass
| Option |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.