language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | PyCQA__pylint | tests/functional/r/regression/regression_3535_double_enum_inherit.py | {
"start": 114,
"end": 144
} | class ____(enum.Enum):
pass
| A |
python | mlflow__mlflow | mlflow/store/artifact/mlflow_artifacts_repo.py | {
"start": 1906,
"end": 3678
} | class ____(HttpArtifactRepository):
"""Scheme wrapper around HttpArtifactRepository for mlflow-artifacts server functionality"""
def __init__(
self, artifact_uri: str, tracking_uri: str | None = None, registry_uri: str | None = None
) -> None:
effective_tracking_uri = tracking_uri or get_tracking_uri()
super().__init__(
self.resolve_uri(artifact_uri, effective_tracking_uri), tracking_uri, registry_uri
)
@classmethod
def resolve_uri(cls, artifact_uri, tracking_uri):
base_url = "/api/2.0/mlflow-artifacts/artifacts"
track_parse = urlparse(tracking_uri)
uri_parse = urlparse(artifact_uri)
# Check to ensure that a port is present with no hostname
_validate_port_mapped_to_hostname(uri_parse)
# Check that tracking uri is http or https
_validate_uri_scheme(track_parse)
if uri_parse.path == "/": # root directory; build simple path
resolved = f"{base_url}{uri_parse.path}"
elif uri_parse.path == base_url: # for operations like list artifacts
resolved = base_url
else:
resolved = f"{track_parse.path}/{base_url}/{uri_parse.path}"
resolved = re.sub(r"//+", "/", resolved)
resolved_artifacts_uri = urlunparse(
(
# scheme
track_parse.scheme,
# netloc
uri_parse.netloc or track_parse.netloc,
# path
resolved,
# params
"",
# query
"",
# fragment
"",
)
)
return resolved_artifacts_uri.replace("///", "/").rstrip("/")
| MlflowArtifactsRepository |
python | huggingface__transformers | src/transformers/models/internvl/modeling_internvl.py | {
"start": 9113,
"end": 13064
} | class ____(nn.Module):
"""
Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
"""
def __init__(self, config: InternVLVisionConfig) -> None:
super().__init__()
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
if config.use_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
else:
self.mask_token = None
self.patch_embeddings = InternVLVisionPatchEmbeddings(config)
self.patch_size = config.patch_size
self.image_size = (
config.image_size
if isinstance(config.image_size, collections.abc.Iterable)
else (config.image_size, config.image_size)
)
num_patches = self.patch_embeddings.num_patches
if config.use_absolute_position_embeddings:
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
else:
self.position_embeddings = None
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, :1]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size[0]
new_width = width // self.patch_size[1]
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(
self,
pixel_values: torch.Tensor,
bool_masked_pos: Optional[torch.BoolTensor] = None,
) -> torch.Tensor:
_, _, height, width = pixel_values.shape
embeddings, (patch_height, patch_width) = self.patch_embeddings(pixel_values)
batch_size, seq_len, _ = embeddings.size()
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
# replace the masked visual tokens by mask_tokens
w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1 - w) + mask_tokens * w
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
if self.position_embeddings is not None:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
embeddings = self.dropout(embeddings)
return embeddings, (patch_height, patch_width)
| InternVLVisionEmbeddings |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 987629,
"end": 988494
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"action",
"previous_sponsors_tier",
"sponsor",
"sponsorable",
"sponsors_tier",
"timestamp",
)
action = sgqlc.types.Field(
sgqlc.types.non_null(SponsorsActivityAction), graphql_name="action"
)
previous_sponsors_tier = sgqlc.types.Field(
"SponsorsTier", graphql_name="previousSponsorsTier"
)
sponsor = sgqlc.types.Field("Sponsor", graphql_name="sponsor")
sponsorable = sgqlc.types.Field(
sgqlc.types.non_null(Sponsorable), graphql_name="sponsorable"
)
sponsors_tier = sgqlc.types.Field("SponsorsTier", graphql_name="sponsorsTier")
timestamp = sgqlc.types.Field(DateTime, graphql_name="timestamp")
| SponsorsActivity |
python | apache__airflow | providers/google/tests/unit/google/suite/transfers/test_local_to_drive.py | {
"start": 1121,
"end": 2361
} | class ____:
@mock.patch("airflow.providers.google.suite.transfers.local_to_drive.GoogleDriveHook")
def test_execute(self, mock_hook):
context = {}
mock_hook.return_value.upload_file.return_value = REMOTE_FILE_IDS
op = LocalFilesystemToGoogleDriveOperator(
task_id="test_task",
local_paths=LOCAL_PATHS,
drive_folder=DRIVE_FOLDER,
gcp_conn_id=GCP_CONN_ID,
folder_id="some_folder_id",
)
op.execute(context)
calls = [
mock.call(
local_location="test1",
remote_location="test_folder/test1",
chunk_size=100 * 1024 * 1024,
resumable=False,
folder_id="some_folder_id",
show_full_target_path=True,
),
mock.call(
local_location="test2",
remote_location="test_folder/test2",
chunk_size=100 * 1024 * 1024,
resumable=False,
folder_id="some_folder_id",
show_full_target_path=True,
),
]
mock_hook.return_value.upload_file.assert_has_calls(calls)
| TestLocalFilesystemToGoogleDriveOperator |
python | numba__numba | numba/cuda/cudadrv/driver.py | {
"start": 80556,
"end": 81144
} | class ____(Module):
def get_function(self, name):
handle = drvapi.cu_function()
driver.cuModuleGetFunction(byref(handle), self.handle,
name.encode('utf8'))
return CtypesFunction(weakref.proxy(self), handle, name)
def get_global_symbol(self, name):
ptr = drvapi.cu_device_ptr()
size = drvapi.c_size_t()
driver.cuModuleGetGlobal(byref(ptr), byref(size), self.handle,
name.encode('utf8'))
return MemoryPointer(self.context, ptr, size), size.value
| CtypesModule |
python | mlflow__mlflow | mlflow/types/chat.py | {
"start": 4196,
"end": 4364
} | class ____(BaseModel):
name: str
description: str | None = None
parameters: FunctionParams | None = None
strict: bool | None = None
| FunctionToolDefinition |
python | sphinx-doc__sphinx | sphinx/builders/linkcheck.py | {
"start": 11952,
"end": 12082
} | class ____(NamedTuple):
uri: str
docname: str
lineno: int
status: _Status
message: str
code: int
| CheckResult |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 7627,
"end": 9187
} | class ____:
def __init__(self, compaction_id: int, state: int) -> None:
self.compaction_id = compaction_id
self.state = State.new(state)
self.plans = []
def __repr__(self) -> str:
return f"""
Compaction Plans:
- compaction id: {self.compaction_id}
- state: {self.state}
- plans: {self.plans}
"""
def cmp_consistency_level(l1: Union[str, int], l2: Union[str, int]):
if isinstance(l1, str):
try:
l1 = ConsistencyLevel.Value(l1)
except ValueError:
return False
if isinstance(l2, str):
try:
l2 = ConsistencyLevel.Value(l2)
except ValueError:
return False
if isinstance(l1, int) and l1 not in ConsistencyLevel.values():
return False
if isinstance(l2, int) and l2 not in ConsistencyLevel.values():
return False
return l1 == l2
def get_consistency_level(consistency_level: Union[str, int]):
if isinstance(consistency_level, int):
if consistency_level in ConsistencyLevel.values():
return consistency_level
raise InvalidConsistencyLevel(message=f"invalid consistency level: {consistency_level}")
if isinstance(consistency_level, str):
try:
return ConsistencyLevel.Value(consistency_level)
except ValueError as e:
raise InvalidConsistencyLevel(
message=f"invalid consistency level: {consistency_level}"
) from e
raise InvalidConsistencyLevel(message="invalid consistency level")
| CompactionPlans |
python | keras-team__keras | keras/src/backend/tensorflow/core.py | {
"start": 21802,
"end": 22852
} | class ____(base_name_scope):
def __init__(self, name, **kwargs):
super().__init__(name, **kwargs)
self._tf_name_scope = tf.name_scope(name)
def __enter__(self):
name_scope_stack = global_state.get_global_attribute(
"name_scope_stack", default=[], set_to_default=True
)
if self.deduplicate and name_scope_stack:
parent_caller = name_scope_stack[-1].caller
parent_name = name_scope_stack[-1].name
if (
self.caller is not None
and self.caller is parent_caller
and self.name == parent_name
):
return self
name_scope_stack.append(self)
self._pop_on_exit = True
self._tf_name_scope.__enter__()
return self
def __exit__(self, *args, **kwargs):
super().__exit__(*args, **kwargs)
if self._pop_on_exit:
self._tf_name_scope.__exit__(*args, **kwargs)
def device_scope(device_name):
return tf.device(device_name)
| name_scope |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_dialect.py | {
"start": 29277,
"end": 30103
} | class ____(fixtures.TestBase):
__only_on__ = "postgresql"
def test_error_code(self, metadata, connection):
t = Table("t", metadata, Column("id", Integer, primary_key=True))
t.create(connection)
errmsg = assert_raises(
exc.IntegrityError,
connection.execute,
t.insert(),
[{"id": 1}, {"id": 1}],
)
if testing.against("postgresql+pg8000"):
# TODO: is there another way we're supposed to see this?
eq_(errmsg.orig.args[0]["C"], "23505")
elif not testing.against("postgresql+psycopg"):
eq_(errmsg.orig.pgcode, "23505")
if testing.against("postgresql+asyncpg") or testing.against(
"postgresql+psycopg"
):
eq_(errmsg.orig.sqlstate, "23505")
| PGCodeTest |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_any.py | {
"start": 16152,
"end": 28919
} | class ____(Enum):
a = 1
b = 'b'
def test_enum(any_serializer):
assert any_serializer.to_python(MyEnum.a) == MyEnum.a
assert any_serializer.to_python(MyEnum.b) == MyEnum.b
assert any_serializer.to_python({MyEnum.a: 42}) == {MyEnum.a: 42}
assert any_serializer.to_python({MyEnum.b: 42}) == {MyEnum.b: 42}
assert any_serializer.to_python(MyEnum.a, mode='json') == 1
assert any_serializer.to_python(MyEnum.b, mode='json') == 'b'
assert any_serializer.to_python({MyEnum.a: 42}, mode='json') == {'1': 42}
assert any_serializer.to_python({MyEnum.b: 42}, mode='json') == {'b': 42}
assert any_serializer.to_json(MyEnum.a) == b'1'
assert any_serializer.to_json(MyEnum.b) == b'"b"'
assert any_serializer.to_json({MyEnum.a: 42}) == b'{"1":42}'
assert any_serializer.to_json({MyEnum.b: 42}) == b'{"b":42}'
def test_base64():
s = SchemaSerializer(core_schema.any_schema(), core_schema.CoreConfig(ser_json_bytes='base64'))
assert s.to_python(b'foo') == b'foo'
assert s.to_python(b'foo', mode='json') == 'Zm9v'
assert s.to_json(b'foo') == b'"Zm9v"'
assert s.to_python(bytearray(b'foo')) == b'foo'
assert s.to_python(bytearray(b'foo'), mode='json') == 'Zm9v'
assert s.to_json(bytearray(b'foo')) == b'"Zm9v"'
@pytest.mark.parametrize(
'gen_input,kwargs,expected_json',
[
# (lambda: UUID('ebcdab58-6eb8-46fb-a190-d07a33e9eac8'), '"ebcdab58-6eb8-46fb-a190-d07a33e9eac8"'),
(lambda: datetime(2032, 1, 1, 1, 1), {}, b'"2032-01-01T01:01:00"'),
(lambda: datetime(2032, 1, 1, 1, 1, tzinfo=timezone.utc), {}, b'"2032-01-01T01:01:00Z"'),
(lambda: datetime(2032, 1, 1, 1, 1, tzinfo=timezone(timedelta(hours=2))), {}, b'"2032-01-01T01:01:00+02:00"'),
(lambda: datetime(2032, 1, 1), {}, b'"2032-01-01T00:00:00"'),
(lambda: time(12, 34, 56), {}, b'"12:34:56"'),
(lambda: timedelta(days=12, seconds=34, microseconds=56), {}, b'"P12DT34.000056S"'),
(lambda: timedelta(days=12, seconds=34, microseconds=56), dict(timedelta_mode='float'), b'1036834.000056'),
(lambda: timedelta(seconds=-1), {}, b'"-PT1S"'),
(lambda: timedelta(seconds=-1), dict(timedelta_mode='float'), b'-1.0'),
(lambda: {1, 2, 3}, {}, b'[1,2,3]'),
(lambda: frozenset([1, 2, 3]), {}, b'[1,2,3]'),
(lambda: (v for v in range(4)), {}, b'[0,1,2,3]'),
(lambda: iter([0, 1, 2, 3]), {}, b'[0,1,2,3]'),
(lambda: iter((0, 1, 2, 3)), {}, b'[0,1,2,3]'),
(lambda: iter(range(4)), {}, b'[0,1,2,3]'),
(lambda: b'this is bytes', {}, b'"this is bytes"'),
(lambda: b'this is bytes', dict(bytes_mode='base64'), b'"dGhpcyBpcyBieXRlcw=="'),
(lambda: bytearray(b'this is bytes'), {}, b'"this is bytes"'),
(lambda: bytearray(b'this is bytes'), dict(bytes_mode='base64'), b'"dGhpcyBpcyBieXRlcw=="'),
(lambda: Decimal('12.34'), {}, b'"12.34"'),
(lambda: MyEnum.a, {}, b'1'),
(lambda: MyEnum.b, {}, b'"b"'),
(lambda: [MyDataclass(1, 'a', 2), MyModel(a=2, b='b')], {}, b'[{"a":1,"b":"a"},{"a":2,"b":"b"}]'),
(lambda: re.compile('^regex$'), {}, b'"^regex$"'),
],
)
def test_encoding(any_serializer, gen_input, kwargs, expected_json):
assert to_json(gen_input(), **kwargs) == expected_json
if not kwargs:
assert any_serializer.to_python(gen_input(), mode='json') == json.loads(expected_json)
def test_any_dataclass():
@dataclasses.dataclass
class Foo:
a: str
b: bytes
# Build a schema that does not include the field 'b', to test that it is not serialized
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo', [core_schema.dataclass_field(name='a', schema=core_schema.str_schema())]
),
['a'],
)
Foo.__pydantic_serializer__ = SchemaSerializer(schema)
s = SchemaSerializer(core_schema.any_schema())
assert s.to_python(Foo(a='hello', b=b'more')) == IsStrictDict(a='hello')
assert s.to_python(Foo(a='hello', b=b'more'), mode='json') == IsStrictDict(a='hello')
j = s.to_json(Foo(a='hello', b=b'more'))
if on_pypy:
assert json.loads(j) == {'a': 'hello'}
else:
assert j == b'{"a":"hello"}'
assert s.to_python(Foo(a='hello', b=b'more'), exclude={'a'}) == IsStrictDict()
def test_any_model():
@dataclasses.dataclass
class Foo:
a: str
b: bytes
# Build a schema that does not include the field 'b', to test that it is not serialized
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo', [core_schema.dataclass_field(name='a', schema=core_schema.str_schema())]
),
['a'],
)
Foo.__pydantic_validator__ = SchemaValidator(schema)
Foo.__pydantic_serializer__ = SchemaSerializer(schema)
s = SchemaSerializer(core_schema.any_schema())
assert s.to_python(Foo(a='hello', b=b'more')) == IsStrictDict(a='hello')
assert s.to_python(Foo(a='hello', b=b'more'), mode='json') == IsStrictDict(a='hello')
j = s.to_json(Foo(a='hello', b=b'more'))
if on_pypy:
assert json.loads(j) == {'a': 'hello'}
else:
assert j == b'{"a":"hello"}'
assert s.to_python(Foo(a='hello', b=b'more'), exclude={'a'}) == IsStrictDict()
assert s.to_json(Foo(a='hello', b=b'more'), exclude={'a'}) == b'{}'
assert s.to_python(Foo) == Foo
with pytest.raises(PydanticSerializationError, match=r"Unable to serialize unknown type: <class 'type'>"):
s.to_python(Foo, mode='json')
with pytest.raises(PydanticSerializationError, match=r"Unable to serialize unknown type: <class 'type'>"):
s.to_json(Foo)
assert s.to_python(Foo, mode='json', fallback=lambda x: x.__name__) == 'Foo'
assert s.to_json(Foo, fallback=lambda x: x.__name__) == b'"Foo"'
def test_dataclass_classvar(any_serializer):
@dataclasses.dataclass
class Foo:
a: int
b: str
c: ClassVar[int] = 1
foo = Foo(1, 'a')
assert any_serializer.to_python(foo) == IsStrictDict(a=1, b='a')
assert any_serializer.to_json(foo) == b'{"a":1,"b":"a"}'
@dataclasses.dataclass
class Foo2(Foo):
pass
foo2 = Foo2(2, 'b')
assert any_serializer.to_python(foo2) == IsStrictDict(a=2, b='b')
assert any_serializer.to_json(foo2) == b'{"a":2,"b":"b"}'
@pytest.mark.skipif(sys.version_info < (3, 10), reason='slots are only supported for dataclasses in Python >= 3.10')
def test_dataclass_slots(any_serializer):
@dataclasses.dataclass(slots=True)
class Foo:
a: int
b: str
foo = Foo(1, 'a')
assert any_serializer.to_python(foo) == IsStrictDict(a=1, b='a')
assert any_serializer.to_json(foo) == b'{"a":1,"b":"a"}'
@dataclasses.dataclass(slots=True)
class Foo2(Foo):
pass
foo2 = Foo2(2, 'b')
assert any_serializer.to_python(foo2) == IsStrictDict(a=2, b='b')
assert any_serializer.to_json(foo2) == b'{"a":2,"b":"b"}'
@pytest.mark.skipif(sys.version_info < (3, 10), reason='slots are only supported for dataclasses in Python >= 3.10')
def test_dataclass_slots_init_vars(any_serializer):
@dataclasses.dataclass(slots=True)
class Foo:
a: int
b: str
c: dataclasses.InitVar[int]
d: ClassVar[int] = 42
foo = Foo(1, 'a', 42)
assert any_serializer.to_python(foo) == IsStrictDict(a=1, b='a')
assert any_serializer.to_json(foo) == b'{"a":1,"b":"a"}'
@pytest.mark.skipif(sys.version_info < (3, 10), reason='slots are only supported for dataclasses in Python > 3.10')
def test_slots_mixed(any_serializer):
@dataclasses.dataclass(slots=True)
class Model:
x: int
y: dataclasses.InitVar[str]
z: ClassVar[str] = 'z-classvar'
@dataclasses.dataclass
class SubModel(Model):
x2: int
y2: dataclasses.InitVar[str]
z2: ClassVar[str] = 'z2-classvar'
dc = SubModel(x=1, y='a', x2=2, y2='b')
assert dataclasses.asdict(dc) == {'x': 1, 'x2': 2}
assert any_serializer.to_python(dc) == {'x': 1, 'x2': 2}
assert any_serializer.to_json(dc) == b'{"x":1,"x2":2}'
@pytest.mark.skipif(numpy is None, reason='numpy is not installed')
def test_numpy_float(any_serializer):
assert any_serializer.to_python(numpy.float64(1.0)) == 1.0
assert any_serializer.to_python(numpy.float64(1.0), mode='json') == 1.0
assert any_serializer.to_json(numpy.float64(1.0)) == b'1.0'
# float16 is not a subclass of float
assert not isinstance(numpy.float16(1.0), float)
assert any_serializer.to_python(numpy.float16(1.0)) == 1.0
with pytest.raises(PydanticSerializationError, match=r"Unable to serialize unknown type: <class 'numpy\.float16'>"):
any_serializer.to_python(numpy.float16(1.0), mode='json')
with pytest.raises(PydanticSerializationError, match=r"Unable to serialize unknown type: <class 'numpy\.float16'>"):
any_serializer.to_json(numpy.float16(1.0))
def test_ser_json_inf_nan_with_any() -> None:
s = SchemaSerializer(core_schema.any_schema(), core_schema.CoreConfig(ser_json_inf_nan='constants'))
assert isinf(s.to_python(inf))
assert isinf(s.to_python(inf, mode='json'))
assert s.to_json(inf) == b'Infinity'
assert isnan(s.to_python(nan))
assert isnan(s.to_python(nan, mode='json'))
assert s.to_json(nan) == b'NaN'
s = SchemaSerializer(core_schema.any_schema(), core_schema.CoreConfig(ser_json_inf_nan='null'))
assert isinf(s.to_python(inf))
assert s.to_python(inf, mode='json') is None
assert s.to_json(inf) == b'null'
assert isnan(s.to_python(nan))
assert s.to_python(nan, mode='json') is None
assert s.to_json(nan) == b'null'
s = SchemaSerializer(core_schema.any_schema(), core_schema.CoreConfig(ser_json_inf_nan='strings'))
assert isinf(s.to_python(inf))
assert isinf(s.to_python(inf, mode='json'))
assert s.to_json(inf) == b'"Infinity"'
assert isnan(s.to_python(nan))
assert isnan(s.to_python(nan, mode='json'))
assert s.to_json(nan) == b'"NaN"'
def test_ser_json_inf_nan_with_list_of_any() -> None:
s = SchemaSerializer(
core_schema.list_schema(core_schema.any_schema()), core_schema.CoreConfig(ser_json_inf_nan='constants')
)
assert isinf(s.to_python([inf])[0])
assert isinf(s.to_python([inf], mode='json')[0])
assert s.to_json([inf]) == b'[Infinity]'
assert isnan(s.to_python([nan])[0])
assert isnan(s.to_python([nan], mode='json')[0])
assert s.to_json([nan]) == b'[NaN]'
s = SchemaSerializer(
core_schema.list_schema(core_schema.any_schema()), core_schema.CoreConfig(ser_json_inf_nan='null')
)
assert isinf(s.to_python([inf])[0])
assert s.to_python([inf], mode='json')[0] is None
assert s.to_json([inf]) == b'[null]'
assert isnan(s.to_python([nan])[0])
assert s.to_python([nan], mode='json')[0] is None
assert s.to_json([nan]) == b'[null]'
def test_ser_json_int_subclass_value_larger_than_i64():
class IntSubclass(int):
pass
schema = core_schema.model_schema(
MyModel,
core_schema.model_fields_schema(
dict(
stuff=core_schema.model_field(
core_schema.dict_schema(
keys_schema=core_schema.str_schema(),
values_schema=core_schema.any_schema(),
)
)
)
),
)
s = SchemaSerializer(schema)
assert (
s.to_json(
MyModel(stuff={'value': IntSubclass(9_223_372_036_854_775_809)}),
)
== b'{"stuff":{"value":9223372036854775809}}'
)
assert str(
s.to_python(
MyModel(stuff={'value': IntSubclass(9_223_372_036_854_775_809)}),
mode='json',
)
) == str({'stuff': {'value': 9223372036854775809}})
def test_simple_any_ser_schema_repr():
assert (
plain_repr(SchemaSerializer(core_schema.simple_ser_schema('any')))
== 'SchemaSerializer(serializer=Any(AnySerializer),definitions=[])'
)
def test_simple_any_ser_schema():
import operator
class MyEnum(Enum):
A = (1,)
B = (2,)
v = SchemaSerializer(
core_schema.no_info_after_validator_function(
operator.attrgetter('value'),
core_schema.enum_schema(MyEnum, list(MyEnum.__members__.values())),
serialization=core_schema.simple_ser_schema('any'),
),
)
assert v.to_python({MyEnum.A: 'x'}) == {MyEnum.A: 'x'}
assert v.to_python({MyEnum.A: 'x'}, mode='json') == {'1': 'x'}
assert v.to_json({MyEnum.A: 'x'}) == b'{"1":"x"}'
assert v.to_python(1) == 1
assert v.to_json(1) == b'1'
| MyEnum |
python | huggingface__transformers | src/transformers/models/clipseg/modeling_clipseg.py | {
"start": 15419,
"end": 16096
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
# Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoderLayer with AltCLIP->CLIPSeg
| CLIPSegMLP |
python | graphql-python__graphene | graphene/types/tests/test_definition.py | {
"start": 1147,
"end": 1214
} | class ____(Union):
class Meta:
types = (Article,)
| MyUnion |
python | ray-project__ray | python/ray/tests/ludwig/ludwig_test_utils.py | {
"start": 1878,
"end": 18651
} | class ____(LocalBackend):
@property
def supports_multiprocessing(self):
return False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truth value to run them.
"""
if not _run_slow_tests:
test_case = unittest.skip("Skipping: this test is too slow")(test_case)
return test_case
def generate_data(
input_features,
output_features,
filename="test_csv.csv",
num_examples=25,
):
"""
Helper method to generate synthetic data based on input, output feature
specs
:param num_examples: number of examples to generate
:param input_features: schema
:param output_features: schema
:param filename: path to the file where data is stored
:return:
"""
features = input_features + output_features
df = build_synthetic_dataset(num_examples, features)
data = [next(df) for _ in range(num_examples)]
dataframe = pd.DataFrame(data[1:], columns=data[0])
dataframe.to_csv(filename, index=False)
return filename
def random_string(length=5):
return uuid.uuid4().hex[:length].upper()
def numerical_feature(normalization=None, **kwargs):
feature = {
"name": "num_" + random_string(),
"type": "number",
"preprocessing": {"normalization": normalization},
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def category_feature(**kwargs):
feature = {
"type": "category",
"name": "category_" + random_string(),
"vocab_size": 10,
"embedding_size": 5,
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def text_feature(**kwargs):
feature = {
"name": "text_" + random_string(),
"type": "text",
"reduce_input": None,
"vocab_size": 5,
"min_len": 7,
"max_len": 7,
"embedding_size": 8,
"state_size": 8,
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def set_feature(**kwargs):
feature = {
"type": "set",
"name": "set_" + random_string(),
"vocab_size": 10,
"max_len": 5,
"embedding_size": 5,
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def sequence_feature(**kwargs):
feature = {
"type": "sequence",
"name": "sequence_" + random_string(),
"vocab_size": 10,
"max_len": 7,
"encoder": "embed",
"embedding_size": 8,
"fc_size": 8,
"state_size": 8,
"num_filters": 8,
"hidden_size": 8,
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def image_feature(folder, **kwargs):
feature = {
"type": "image",
"name": "image_" + random_string(),
"encoder": "resnet",
"preprocessing": {
"in_memory": True,
"height": 12,
"width": 12,
"num_channels": 3,
},
"resnet_size": 8,
"destination_folder": folder,
"fc_size": 8,
"num_filters": 8,
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def audio_feature(folder, **kwargs):
feature = {
"name": "audio_" + random_string(),
"type": "audio",
"preprocessing": {
"audio_feature": {
"type": "fbank",
"window_length_in_s": 0.04,
"window_shift_in_s": 0.02,
"num_filter_bands": 80,
},
"audio_file_length_limit_in_s": 3.0,
},
"encoder": "stacked_cnn",
"should_embed": False,
"conv_layers": [
{
"filter_size": 400,
"pool_size": 16,
"num_filters": 32,
"regularize": "false",
},
{
"filter_size": 40,
"pool_size": 10,
"num_filters": 64,
"regularize": "false",
},
],
"fc_size": 256,
"destination_folder": folder,
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def timeseries_feature(**kwargs):
feature = {
"name": "timeseries_" + random_string(),
"type": "timeseries",
"max_len": 7,
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def binary_feature(**kwargs):
feature = {"name": "binary_" + random_string(), "type": "binary"}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def bag_feature(**kwargs):
feature = {
"name": "bag_" + random_string(),
"type": "bag",
"max_len": 5,
"vocab_size": 10,
"embedding_size": 5,
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def date_feature(**kwargs):
feature = {
"name": "date_" + random_string(),
"type": "date",
"preprocessing": {
"datetime_format": random.choice(list(DATETIME_FORMATS.keys()))
},
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def h3_feature(**kwargs):
feature = {"name": "h3_" + random_string(), "type": "h3"}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def vector_feature(**kwargs):
feature = {"type": VECTOR, "vector_size": 5, "name": "vector_" + random_string()}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def run_experiment(
input_features,
output_features,
skip_save_processed_input=True,
config=None,
backend=None,
**kwargs,
):
"""
Helper method to avoid code repetition in running an experiment. Deletes
the data saved to disk after running the experiment
:param input_features: list of input feature dictionaries
:param output_features: list of output feature dictionaries
**kwargs you may also pass extra parameters to the experiment as keyword
arguments
:return: None
"""
if input_features is not None and output_features is not None:
# This if is necessary so that the caller can call with
# config_file (and not config)
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "fc_size": 14},
"training": {"epochs": 2},
}
args = {
"config": config,
"backend": backend or LocalTestBackend(),
"skip_save_training_description": True,
"skip_save_training_statistics": True,
"skip_save_processed_input": skip_save_processed_input,
"skip_save_progress": True,
"skip_save_unprocessed_output": True,
"skip_save_model": True,
"skip_save_predictions": True,
"skip_save_eval_stats": True,
"skip_collect_predictions": True,
"skip_collect_overall_stats": True,
"skip_save_log": True,
}
args.update(kwargs)
_, _, _, _, exp_dir_name = experiment_cli(**args)
shutil.rmtree(exp_dir_name, ignore_errors=True)
def generate_output_features_with_dependencies(main_feature, dependencies):
# helper function to generate multiple output features specifications
# with dependencies, support for 'test_experiment_multiple_seq_seq` unit
# test
# Parameters:
# main_feature: feature identifier, valid values 'feat1', 'feat2', 'feat3'
# dependencies: list of dependencies for 'main_feature', do not li
# Example:
# generate_output_features_with_dependencies('feat2', ['feat1', 'feat3'])
output_features = [
category_feature(vocab_size=2, reduce_input="sum"),
sequence_feature(vocab_size=10, max_len=5),
numerical_feature(),
]
# value portion of dictionary is a tuple: (position, feature_name)
# position: location of output feature in the above output_features list
# feature_name: Ludwig generated feature name
feature_names = {
"feat1": (0, output_features[0]["name"]),
"feat2": (1, output_features[1]["name"]),
"feat3": (2, output_features[2]["name"]),
}
# generate list of dependencies with real feature names
generated_dependencies = [feature_names[feat_name][1] for feat_name in dependencies]
# specify dependencies for the main_feature
output_features[feature_names[main_feature][0]][
"dependencies"
] = generated_dependencies
return output_features
def _subproc_wrapper(fn, queue, *args, **kwargs):
fn = cloudpickle.loads(fn)
try:
results = fn(*args, **kwargs)
except Exception as e:
traceback.print_exc(file=sys.stderr)
results = e
queue.put(results)
def spawn(fn):
def wrapped_fn(*args, **kwargs):
ctx = multiprocessing.get_context("spawn")
queue = ctx.Queue()
p = ctx.Process(
target=_subproc_wrapper,
args=(cloudpickle.dumps(fn), queue, *args),
kwargs=kwargs,
)
p.start()
p.join()
results = queue.get()
if isinstance(results, Exception):
raise RuntimeError(
f"Spawned subprocess raised {type(results).__name__}, "
f"check log output above for stack trace."
)
return results
return wrapped_fn
def run_api_experiment(input_features, output_features, data_csv):
"""
Helper method to avoid code repetition in running an experiment
:param input_features: input schema
:param output_features: output schema
:param data_csv: path to data
:return: None
"""
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "fc_size": 14},
"training": {"epochs": 2},
}
model = LudwigModel(config)
output_dir = None
try:
# Training with csv
_, _, output_dir = model.train(
dataset=data_csv,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
model.predict(dataset=data_csv)
model_dir = os.path.join(output_dir, "model")
loaded_model = LudwigModel.load(model_dir)
# Necessary before call to get_weights() to materialize the weights
loaded_model.predict(dataset=data_csv)
model_weights = model.model.get_weights()
loaded_weights = loaded_model.model.get_weights()
for model_weight, loaded_weight in zip(model_weights, loaded_weights):
assert np.allclose(model_weight, loaded_weight)
finally:
# Remove results/intermediate data saved to disk
shutil.rmtree(output_dir, ignore_errors=True)
try:
# Training with dataframe
data_df = read_csv(data_csv)
_, _, output_dir = model.train(
dataset=data_df,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
model.predict(dataset=data_df)
finally:
shutil.rmtree(output_dir, ignore_errors=True)
def create_data_set_to_use(data_format, raw_data):
# helper function for generating training and test data with specified
# format handles all data formats except for hdf5
# assumes raw_data is a csv dataset generated by
# tests.integration_tests.utils.generate_data() function
# support for writing to a fwf dataset based on this stackoverflow posting:
# https://stackoverflow.com/questions/16490261/python-pandas-write-dataframe-to-fixed-width-file-to-fwf
from ray._private.thirdparty.tabulate.tabulate import tabulate
def to_fwf(df, fname):
content = tabulate(df.values.tolist(), list(df.columns), tablefmt="plain")
open(fname, "w").write(content)
pd.DataFrame.to_fwf = to_fwf
dataset_to_use = None
if data_format == "csv":
dataset_to_use = raw_data
elif data_format in {"df", "dict"}:
dataset_to_use = pd.read_csv(raw_data)
if data_format == "dict":
dataset_to_use = dataset_to_use.to_dict(orient="list")
elif data_format == "excel":
dataset_to_use = replace_file_extension(raw_data, "xlsx")
pd.read_csv(raw_data).to_excel(dataset_to_use, index=False)
elif data_format == "excel_xls":
dataset_to_use = replace_file_extension(raw_data, "xls")
pd.read_csv(raw_data).to_excel(dataset_to_use, index=False)
elif data_format == "feather":
dataset_to_use = replace_file_extension(raw_data, "feather")
pd.read_csv(raw_data).to_feather(dataset_to_use)
elif data_format == "fwf":
dataset_to_use = replace_file_extension(raw_data, "fwf")
pd.read_csv(raw_data).to_fwf(dataset_to_use)
elif data_format == "html":
dataset_to_use = replace_file_extension(raw_data, "html")
pd.read_csv(raw_data).to_html(dataset_to_use, index=False)
elif data_format == "json":
dataset_to_use = replace_file_extension(raw_data, "json")
pd.read_csv(raw_data).to_json(dataset_to_use, orient="records")
elif data_format == "jsonl":
dataset_to_use = replace_file_extension(raw_data, "jsonl")
pd.read_csv(raw_data).to_json(dataset_to_use, orient="records", lines=True)
elif data_format == "parquet":
dataset_to_use = replace_file_extension(raw_data, "parquet")
pd.read_csv(raw_data).to_parquet(dataset_to_use, index=False)
elif data_format == "pickle":
dataset_to_use = replace_file_extension(raw_data, "pickle")
pd.read_csv(raw_data).to_pickle(dataset_to_use)
elif data_format == "stata":
dataset_to_use = replace_file_extension(raw_data, "stata")
pd.read_csv(raw_data).to_stata(dataset_to_use)
elif data_format == "tsv":
dataset_to_use = replace_file_extension(raw_data, "tsv")
pd.read_csv(raw_data).to_csv(dataset_to_use, sep="\t", index=False)
else:
ValueError("'{}' is an unrecognized data format".format(data_format))
return dataset_to_use
def train_with_backend(
backend,
config,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
predict=True,
evaluate=True,
):
model = LudwigModel(config, backend=backend)
output_dir = None
ret = False
try:
_, _, output_dir = model.train(
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
if dataset is None:
dataset = training_set
if predict:
preds, _ = model.predict(dataset=dataset)
assert backend.df_engine.compute(preds) is not None
if evaluate:
_, eval_preds, _ = model.evaluate(dataset=dataset)
assert backend.df_engine.compute(eval_preds) is not None
ret = True
finally:
# Remove results/intermediate data saved to disk
shutil.rmtree(output_dir, ignore_errors=True)
return ret
| LocalTestBackend |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/ecr.py | {
"start": 1450,
"end": 2115
} | class ____:
"""Helper (frozen dataclass) for storing temporary ECR credentials."""
username: str
password: str
proxy_endpoint: str
expires_at: datetime
def __post_init__(self):
"""Initialize the `Ecr` credentials object."""
mask_secret(self.password)
logger.debug("Credentials to Amazon ECR %r expires at %s.", self.proxy_endpoint, self.expires_at)
@property
def registry(self) -> str:
"""Return registry in appropriate `docker login` format."""
# https://github.com/docker/docker-py/issues/2256#issuecomment-824940506
return self.proxy_endpoint.replace("https://", "")
| EcrCredentials |
python | dagster-io__dagster | python_modules/libraries/dagster-powerbi/dagster_powerbi/components/power_bi_workspace/component.py | {
"start": 5567,
"end": 6089
} | class ____(Resolvable):
credentials: Annotated[
Union[PowerBIToken, PowerBIServicePrincipal],
Resolver(
resolve_powerbi_credentials,
model_field_type=PowerBICredentialsModel,
),
]
workspace_id: str
def _resolve_powerbi_workspace(context: ResolutionContext, model: BaseModel) -> PowerBIWorkspace:
return PowerBIWorkspace(
**resolve_fields(model=model, resolved_cls=PowerBIWorkspaceModel, context=context)
)
@public
@dataclass
| PowerBIWorkspaceModel |
python | astropy__astropy | astropy/table/tests/test_item_access.py | {
"start": 275,
"end": 346
} | class ____:
pass
@pytest.mark.usefixtures("table_data")
| BaseTestItems |
python | openai__openai-python | src/openai/types/beta/threads/image_file_content_block.py | {
"start": 233,
"end": 363
} | class ____(BaseModel):
image_file: ImageFile
type: Literal["image_file"]
"""Always `image_file`."""
| ImageFileContentBlock |
python | facebookresearch__faiss | tests/test_index_composite.py | {
"start": 10469,
"end": 11908
} | class ____(unittest.TestCase):
def test_chain(self):
# generate data
d = 4
nt = 1000
nb = 200
nq = 200
# normal distribition
x = faiss.randn((nt + nb + nq) * d, 1234).reshape(nt + nb + nq, d)
# make distribution very skewed
x *= [10, 4, 1, 0.5]
rr, _ = np.linalg.qr(faiss.randn(d * d).reshape(d, d))
x = np.dot(x, rr).astype('float32')
xt = x[:nt]
xb = x[nt:-nq]
xq = x[-nq:]
index = faiss.index_factory(d, "L2norm,PCA2,L2norm,Flat")
assert index.chain.size() == 3
l2_1 = faiss.downcast_VectorTransform(index.chain.at(0))
assert l2_1.norm == 2
pca = faiss.downcast_VectorTransform(index.chain.at(1))
assert not pca.is_trained
index.train(xt)
assert pca.is_trained
index.add(xb)
D, I = index.search(xq, 5)
# do the computation manually and check if we get the same result
def manual_trans(x):
x = x.copy()
faiss.normalize_L2(x)
x = pca.apply_py(x)
faiss.normalize_L2(x)
return x
index2 = faiss.IndexFlatL2(2)
index2.add(manual_trans(xb))
D2, I2 = index2.search(manual_trans(xq), 5)
assert np.all(I == I2)
@unittest.skipIf(platform.system() == 'Windows', \
'Mmap not supported on Windows.')
| TestTransformChain |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 266637,
"end": 267907
} | class ____(StatNode):
"""Definition of a C property, backed by a CFuncDefNode getter.
"""
# name string
# doc EncodedString or None Doc string of the property
# entry Symtab.Entry The Entry of the property attribute
# body StatListNode[CFuncDefNode] (for compatibility with PropertyNode)
child_attrs = ["body"]
is_cproperty = True
@property
def cfunc(self):
stats = self.body.stats
assert stats and isinstance(stats[0], CFuncDefNode), stats
return stats[0]
def analyse_declarations(self, env):
scope = PropertyScope(self.name, class_scope=env)
self.body.analyse_declarations(scope)
entry = self.entry = env.declare_property(
self.name, self.doc, self.pos, ctype=self.cfunc.return_type, property_scope=scope)
entry.getter_cname = self.cfunc.entry.cname
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
def generate_execution_code(self, code):
pass
def annotate(self, code):
self.body.annotate(code)
| CPropertyNode |
python | huggingface__transformers | tests/models/seamless_m4t/test_tokenization_seamless_m4t.py | {
"start": 1470,
"end": 12141
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "facebook/hf-seamless-m4t-medium"
tokenizer_class = SeamlessM4TTokenizer
test_rust_tokenizer = True
integration_expected_tokens = ['▁This', '▁is', '▁a', '▁test', '▁', '😊', '▁I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fals', 'é', '.', '▁生活', '的', '真', '<unk>', '是', '▁Hi', '▁Hello', '▁Hi', '▁Hello', '▁Hello', '<s>', '▁hi', '<s>', 'th', 'ere', '▁The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁enc', 'od', 'ed', ':', '▁Hello', '.', '▁But', '▁ir', 'd', '▁and', '▁ปี', '▁ir', 'd', '▁ด', '▁Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_token_ids = [9680, 248, 9, 7356, 248059, 253515, 117, 1398, 79519, 108, 855, 45299, 248079, 540, 3423, 248, 52428, 248132, 248075, 182892, 248506, 249573, 1, 249221, 2867, 94124, 2867, 94124, 94124, 2, 435, 2, 419, 275, 1617, 45893, 191422, 12516, 280, 242514, 12025, 129, 76, 248144, 94124, 248075, 9062, 528, 248072, 540, 99681, 528, 248072, 34744, 27426, 11657, 2442, 1259, 34512] # fmt: skip
expected_tokens_from_ids = ['▁This', '▁is', '▁a', '▁test', '▁', '😊', '▁I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fals', 'é', '.', '▁生活', '的', '真', '<unk>', '是', '▁Hi', '▁Hello', '▁Hi', '▁Hello', '▁Hello', '<s>', '▁hi', '<s>', 'th', 'ere', '▁The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁enc', 'od', 'ed', ':', '▁Hello', '.', '▁But', '▁ir', 'd', '▁and', '▁ปี', '▁ir', 'd', '▁ด', '▁Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_decoded_text = "This is a test 😊 I was born in 92000, and this is falsé. 生活的真<unk>是 Hi Hello Hi Hello Hello<s> hi<s>there The following string should be properly encoded: Hello. But ird and ปี ird ด Hey how are you doing"
def test_batch_encode_plus_batch_sequence_length(self):
# Override the parent test because SeamlessM4T uses padding=True by default
# Tests that all encoded values have the correct size
tokenizer = self.get_tokenizer(do_lower_case=False)
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
# For SeamlessM4T, encode with explicit padding=False for individual sequences too
encoded_sequences = [tokenizer(sequence, padding=False) for sequence in sequences]
encoded_sequences_batch = tokenizer(sequences, padding=False)
self.assertListEqual(encoded_sequences, self.convert_batch_to_list_format(encoded_sequences_batch))
def test_padding_to_multiple_of(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.pad_token is None:
self.skipTest(reason="No padding token.")
else:
empty_tokens = tokenizer("", padding=True, pad_to_multiple_of=8)
normal_tokens = tokenizer("This is a sample input", padding=True, pad_to_multiple_of=8)
for key, value in empty_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
# default to padding=True so need to precise which padding is called
normal_tokens = tokenizer("This", pad_to_multiple_of=8, padding=False)
for key, value in normal_tokens.items():
self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
# Should also work with truncation
normal_tokens = tokenizer("This", padding=True, truncation=True, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
# truncation to something which is not a multiple of pad_to_multiple_of raises an error
self.assertRaises(
ValueError,
tokenizer.__call__,
"This",
padding=True,
truncation=True,
max_length=12,
pad_to_multiple_of=8,
)
@require_torch
def test_prepare_seq2seq_batch(self):
if not self.test_seq2seq:
self.skipTest(reason="test_seq2seq is set to False")
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Longer text that will definitely require truncation.
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
batch = tokenizer.prepare_seq2seq_batch(
src_texts=src_text,
tgt_texts=tgt_text,
max_length=3,
max_target_length=10,
return_tensors="pt",
src_lang="eng",
tgt_lang="ron",
pad_to_multiple_of=None,
)
except NotImplementedError:
self.skipTest(reason="Encountered NotImplementedError when calling prepare_seq2seq_batch")
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.labels.shape[1], 10)
# TODO: not working for tgt_text
# max_target_length will default to max_length if not specified
batch = tokenizer.prepare_seq2seq_batch(
src_texts=src_text,
tgt_texts=tgt_text,
max_length=4,
return_tensors="pt",
pad_to_multiple_of=None,
)
self.assertEqual(batch.input_ids.shape[1], 4)
self.assertEqual(batch.labels.shape[1], 4)
batch_encoder_only = tokenizer.prepare_seq2seq_batch(
src_texts=src_text,
max_length=4,
max_target_length=10,
return_tensors="pt",
pad_to_multiple_of=None,
)
self.assertEqual(batch_encoder_only.input_ids.shape[1], 4)
self.assertEqual(batch_encoder_only.attention_mask.shape[1], 4)
self.assertNotIn("decoder_input_ids", batch_encoder_only)
# Copied from tests.models.nllb.test_tokenization_nllb.NllbTokenizationTest.test_special_tokens_initialization
def test_special_tokens_initialization(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
added_tokens = [AddedToken("<special>", lstrip=True)]
tokenizer_r = self.get_tokenizer(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
r_output = tokenizer_r.encode("Hey this is a <special> token")
special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
self.assertTrue(special_token_id in r_output)
def test_training_new_tokenizer(self):
# This feature only exists for fast tokenizers
if not self.test_rust_tokenizer:
self.skipTest(reason="test_rust_tokenizer is set to False")
tokenizer = self.get_tokenizer()
new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
# Test we can use the new tokenizer with something not seen during training
inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."])
self.assertEqual(len(inputs["input_ids"]), 2)
decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
expected_result = "This is the first sentence"
if tokenizer.backend_tokenizer.normalizer is not None:
expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
self.assertEqual(expected_result, decoded_input)
# We check that the parameters of the tokenizer remained the same
# Check we have the same number of added_tokens for both pair and non-pair inputs.
# make sure it has the same prefix tokens first
new_tokenizer.tgt_lang = tokenizer.tgt_lang
tokenizer.tgt_lang = tokenizer.tgt_lang
self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False))
self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True))
# Check we have the correct max_length for both pair and non-pair inputs.
self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence)
self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair)
# Assert the set of special tokens match as we didn't ask to change them
self.assertSequenceEqual(
tokenizer.all_special_tokens,
new_tokenizer.all_special_tokens,
)
self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map)
@require_torch
@require_sentencepiece
@require_tokenizers
| SeamlessM4TTokenizationTest |
python | py-pdf__pypdf | pypdf/constants.py | {
"start": 110,
"end": 244
} | class ____(str, Enum): # Once we are on Python 3.11+: enum.StrEnum
def __str__(self) -> str:
return str(self.value)
| StrEnum |
python | aio-libs__aiohttp | aiohttp/web_runner.py | {
"start": 4250,
"end": 5113
} | class ____(BaseSite):
__slots__ = ("_path",)
def __init__(self, runner: "BaseRunner[Any]", path: str) -> None:
loop = asyncio.get_event_loop()
if not isinstance(
loop, asyncio.ProactorEventLoop # type: ignore[attr-defined]
):
raise RuntimeError(
"Named Pipes only available in proactor loop under windows"
)
super().__init__(runner)
self._path = path
@property
def name(self) -> str:
return self._path
async def start(self) -> None:
await super().start()
loop = asyncio.get_event_loop()
server = self._runner.server
assert server is not None
_server = await loop.start_serving_pipe( # type: ignore[attr-defined]
server, self._path
)
self._server = _server[0]
| NamedPipeSite |
python | pytorch__pytorch | torch/_dynamo/convert_frame.py | {
"start": 19023,
"end": 29113
} | class ____:
def __init__(
self,
compiler_fn: CompilerFn,
one_graph: bool = True,
export: bool = False,
export_constraints: Optional[typing.Never] = None,
package: Optional[CompilePackage] = None,
) -> None:
# assert export_constraints is None
reset_graph_break_dup_checker()
self._torchdynamo_orig_backend = compiler_fn
self._one_graph = one_graph
self._export = export
self._export_constraints = export_constraints
self._package = package
self._box = ConvertFrameBox()
@property
def _clone_with_backend(self) -> Callable[[CompilerFn], ConvertFrameAssert]:
return lambda backend: convert_frame_assert(
backend,
self._one_graph,
self._export,
self._export_constraints,
)
def __call__(
self,
frame: DynamoFrameType,
cache_entry: Optional[CacheEntry],
hooks: Hooks,
frame_state: dict[str, Union[int, FrameStateSizeEntry]],
*,
skip: int = 0,
) -> ConvertFrameReturn:
increment_frame()
code = frame.f_code
cache_size = compute_cache_size(frame, cache_entry)
input_codes.add(code)
if code in output_codes:
return ConvertFrameReturn()
if (
os.environ.get("TORCHDYNAMO_DEBUG_FUNCTION")
and os.environ.get("TORCHDYNAMO_DEBUG_FUNCTION") != code.co_name
):
return ConvertFrameReturn()
if code.co_name == "<genexpr>" and code.co_filename.endswith(
(
"transformers/file_utils.py",
"transformers/utils/generic.py",
"diffusers/utils/outputs.py",
)
):
# not needed, but cleans up torchbench error stats
return ConvertFrameReturn()
if code.co_name == "__setattr__":
# setattr could be tricky to handle generally,
# but also not likely useful to compile- skip the whole frame
return ConvertFrameReturn()
if code.co_name == "__init__" and code.co_filename.startswith(
os.path.dirname(torch.optim.__file__)
):
# optimizer support is still incomplete see
# test_state_dict in test/dynamo/test_optimizers.py
return ConvertFrameReturn()
# Check if the frame is generated by an exec builtin call
# TODO - Running exec generated frame seems propagates f_globals to the
# next frames.
if code.co_name == "<module>" and code.co_filename == "<string>":
return ConvertFrameReturn()
if (
code.co_name == "<lambda>"
and code.co_filename == "<string>"
and not bool(frame.f_builtins)
):
# namedtuple subclass constructor. Empty builtins cause issue with
# len keyword in LIST_LEN guard.
return ConvertFrameReturn()
if is_generator(code):
unimplemented(
gb_type="Attempt to trace generator",
context="",
explanation="Generators cannot be compiled directly with `torch.compile`.",
hints=[
"Call a generator from inside of a non-generator Python function and "
"compile that function instead.",
*graph_break_hints.FUNDAMENTAL,
],
)
if not has_tensor_in_frame(frame):
return ConvertFrameReturn()
# skip tracing non-recursive disabled functions
# detect if the previous frame (non-convert_frame) is a non-recursive disable wrapper
prev_frame = sys._getframe()
while (
prev_frame
and "torch/_dynamo/convert_frame.py" in prev_frame.f_code.co_filename
):
prev_frame = prev_frame.f_back # type: ignore[assignment]
if (
prev_frame
and prev_frame.f_code is decorators._nonrecursive_disable_wrapper_code
):
return ConvertFrameReturn(apply_to_code=False)
global initial_global_state
initial_global_state = GlobalStateGuard()
compile_id = get_compile_id(frame_state)
frame_id = compile_id.frame_id
signpost_event(
"dynamo",
"_convert_frame_assert._compile",
{
"co_name": code.co_name,
"frame_id": frame_id,
"compile_id": str(compile_id),
"co_filename": code.co_filename,
"co_firstlineno": code.co_firstlineno,
"cache_size": cache_size.num_cache_entries_with_same_id_matched_objs,
"accumulated_cache_size": cache_size.num_cache_entries,
},
)
# Record traced frames, skipping Dynamo generated ones.
if not code.co_name.startswith(TORCH_DYNAMO_RESUME_IN_PREFIX):
info = f"{code.co_name} {code.co_filename}:{code.co_firstlineno}"
dynamo_tls.traced_frame_infos.append(info)
with compile_context(CompileContext(compile_id)):
result = _compile(
frame.f_code,
frame.f_globals,
frame.f_locals,
frame.f_builtins,
frame.closure,
self._torchdynamo_orig_backend,
self._one_graph,
self._export,
self._export_constraints,
hooks,
cache_entry,
cache_size,
frame,
frame_state=frame_state,
compile_id=compile_id,
skip=skip + 1,
package=self._package,
convert_frame_box=self._box,
)
if config.caching_precompile and self._package is not None:
from .package import DynamoCache
# Record that the dynamo package has changed
DynamoCache.record_package(self._package)
return result
def convert_frame_assert(
compiler_fn: CompilerFn,
one_graph: bool = True,
export: bool = False,
export_constraints: Optional[typing.Never] = None,
package: Optional[CompilePackage] = None,
) -> ConvertFrameAssert:
"""Fully convert a frame into an FX graph, raising an exception if we fail."""
return ConvertFrameAssert(
compiler_fn, one_graph, export, export_constraints, package
)
from collections import OrderedDict
from torch.utils.hooks import RemovableHandle
# we have to use `OrderedDict` to make `RemovableHandle` work.
_bytecode_hooks: dict[int, BytecodeHook] = OrderedDict()
def register_bytecode_hook(hook: BytecodeHook) -> RemovableHandle:
"""Register hooks for bytecode generated by Dynamo. The hook can do some
logging, as well as return a new code object to be used. Please refer
to `BytecodeHook` for the hook signature.
"""
handle = RemovableHandle(_bytecode_hooks)
_bytecode_hooks[handle.id] = hook
return handle
# TODO - We want to run preserve_node_meta context manager here, but the CI
# fails (its unclear if the failures were flaky)
# @torch.fx.traceback.preserve_node_meta()
@preserve_global_state
def trace_frame(
code: types.CodeType,
globals: dict[str, object],
locals: dict[str, object],
builtins: dict[str, object],
closure: tuple[CellType],
compiler_fn: CompilerFn,
tf_mode_stack: list[torch.overrides.TorchFunctionMode],
one_graph: bool,
speculation_log: SpeculationLog,
instructions: list[Instruction],
code_options: dict[str, object],
*,
export: bool = False,
export_constraints: Optional[typing.Never] = None,
frame_state: Optional[dict[str, Union[int, FrameStateSizeEntry]]] = None,
distributed_state: Optional[DistributedState] = None,
package: Optional[CompilePackage] = None,
) -> DynamoTracerOutput:
from torch.fx.experimental.validator import bisect, translation_validation_enabled
speculation_log.restart() # type: ignore[has-type]
exn_vt_stack = ExceptionStack()
tracer = InstructionTranslator(
instructions,
code,
locals,
globals,
builtins,
closure,
tf_mode_stack,
code_options,
compiler_fn,
one_graph,
export,
export_constraints,
frame_state=frame_state,
speculation_log=speculation_log, # type: ignore[has-type]
exn_vt_stack=exn_vt_stack,
distributed_state=distributed_state, # type: ignore[has-type]
package=package,
)
def run_tracer() -> None:
try:
tracer.output.mark_bytecode_tracing_start()
with tracing(tracer.output.tracing_context), tracer.set_current_tx():
tracer.run()
except exc.UnspecializeRestartAnalysis:
speculation_log.clear() # type: ignore[has-type]
raise
except (
exc.SpeculationRestartAnalysis,
exc.TensorifyScalarRestartAnalysis,
exc.SkipFrame,
):
raise
except Exception:
if translation_validation_enabled():
bisect(tracer.output.shape_env)
raise
finally:
tracer.output.call_cleanup_hooks()
tracer.f_locals = {}
try:
run_tracer()
tracer_output = DynamoTracerOutput(tracer)
output = tracer_output.output_graph
assert output is not None
assert output.output_instructions
instructions[:] = output.output_instructions
code_options.update(output.code_options)
propagate_inst_exn_table_entries(instructions)
check_inst_exn_tab_entries_valid(instructions)
instructions[:] = remove_pointless_jumps(remove_dead_code(instructions))
except Exception as e:
e._torch_dynamo_tracer_output = DynamoTracerOutput(tracer, error=True) # type: ignore[attr-defined]
raise
return tracer_output
@dataclass
| ConvertFrameAssert |
python | numba__numba | numba/core/typeinfer.py | {
"start": 1060,
"end": 4259
} | class ____(object):
def __init__(self, context, var):
self.context = context
self.var = var
self.type = None
self.locked = False
# Stores source location of first definition
self.define_loc = None
# Qualifiers
self.literal_value = NOTSET
def add_type(self, tp, loc):
assert isinstance(tp, types.Type), type(tp)
# Special case for _undef_var.
# If the typevar is the _undef_var, use the incoming type directly.
if self.type is types._undef_var:
self.type = tp
return self.type
if self.locked:
if tp != self.type:
if self.context.can_convert(tp, self.type) is None:
msg = ("No conversion from %s to %s for '%s', "
"defined at %s")
raise TypingError(msg % (tp, self.type, self.var,
self.define_loc),
loc=loc)
else:
if self.type is not None:
unified = self.context.unify_pairs(self.type, tp)
if unified is None:
msg = "Cannot unify %s and %s for '%s', defined at %s"
raise TypingError(msg % (self.type, tp, self.var,
self.define_loc),
loc=self.define_loc)
else:
# First time definition
unified = tp
self.define_loc = loc
self.type = unified
return self.type
def lock(self, tp, loc, literal_value=NOTSET):
assert isinstance(tp, types.Type), type(tp)
if self.locked:
msg = ("Invalid reassignment of a type-variable detected, type "
"variables are locked according to the user provided "
"function signature or from an ir.Const node. This is a "
"bug! Type={}. {}").format(tp, self.type)
raise CompilerError(msg, loc)
# If there is already a type, ensure we can convert it to the
# locked type.
if (self.type is not None and
self.context.can_convert(self.type, tp) is None):
raise TypingError("No conversion from %s to %s for "
"'%s'" % (tp, self.type, self.var), loc=loc)
self.type = tp
self.locked = True
if self.define_loc is None:
self.define_loc = loc
self.literal_value = literal_value
def union(self, other, loc):
if other.type is not None:
self.add_type(other.type, loc=loc)
return self.type
def __repr__(self):
return '%s := %s' % (self.var, self.type or "<undecided>")
@property
def defined(self):
return self.type is not None
def get(self):
return (self.type,) if self.type is not None else ()
def getone(self):
if self.type is None:
raise TypingError("Undecided type {}".format(self))
return self.type
def __len__(self):
return 1 if self.type is not None else 0
| TypeVar |
python | tensorflow__tensorflow | tensorflow/tools/compatibility/tf_upgrade_v2.py | {
"start": 1866,
"end": 2185
} | class ____(ast_edits.APIAnalysisSpec):
def __init__(self):
self.symbols_to_detect = {}
self.imports_to_detect = {
("tensorflow", None): UnaliasedTFImport(),
("tensorflow.compat.v1", "tf"): compat_v1_import,
("tensorflow.compat.v2", "tf"): compat_v2_import,
}
| TFAPIImportAnalysisSpec |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vision.py | {
"start": 30691,
"end": 34260
} | class ____(GoogleCloudBaseOperator):
"""
Permanently delete a product and its reference images.
Metadata of the product and all its images will be deleted right away, but
search queries against ProductSets containing the product may still work
until all related caches are refreshed.
Possible errors:
- Returns `NOT_FOUND` if the product does not exist.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDeleteProductOperator`
:param location: (Required) The region where the Product is located. Valid regions (as of 2019-02-05) are:
us-east1, us-west1, europe-west1, asia-east1
:param product_id: (Required) The resource id of this Product.
:param project_id: (Optional) The project in which the Product is located. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_product_delete_template_fields]
template_fields: Sequence[str] = (
"location",
"project_id",
"product_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_product_delete_template_fields]
def __init__(
self,
*,
location: str,
product_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.product_id = product_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_product(
location=self.location,
product_id=self.product_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
| CloudVisionDeleteProductOperator |
python | walkccc__LeetCode | solutions/3086. Minimum Moves to Pick K Ones/3086.py | {
"start": 0,
"end": 1876
} | class ____:
def minimumMoves(self, nums: list[int], k: int, maxChanges: int) -> int:
# Dylan has two actions for collecting '1's in a sequence:
# Action 1: Put a '1' next to him and pick it up.
# The cost is 2.
# Action 2: Swap a '1' towards him and collect it.
# The cost equals the distance to the '1'.
#
# To minimize the swapping cost, Dylan can use a sliding window strategy,
# selecting the optimal position (middle '1' in the window) for efficient
# collection. The window's size is crucial:
# The minimum window size: min(0, k - maxChanges), ensuring the window
# isn't too small.
# The maximum window size: min(k, minOnesByTwo + 3, the number of ones),
# preventing overly ambitious swaps.
#
# Note that if needing to move a '1' beyond `minOnesByTwo + 3`, it's
# cheaper to use Action 1.
# At most three indices, (dylanIndex - 1, dylanIndex, dylanIndex + 1), have
# a distance <= 1 from dylanIndex, implying that we'll be taking at most
# `maxOnesByTwo + 3` using Action 2. Any more Action 2 is not optimal and
# should be replaced with Action 1.
NUM_OF_INDICES_WITHIN_ONE_DISTANCE = 3
ans = math.inf
oneIndices = [i for i, num in enumerate(nums) if num == 1]
prefix = list(itertools.accumulate(oneIndices, initial=0))
minOnesByTwo = max(0, k - maxChanges)
maxOnesByTwo = min(
k, minOnesByTwo + NUM_OF_INDICES_WITHIN_ONE_DISTANCE, len(oneIndices))
for onesByTwo in range(minOnesByTwo, maxOnesByTwo + 1):
for l in range(len(prefix) - onesByTwo):
r = l + onesByTwo # Collect 1s in oneIndices[l - 1..r - 1].
cost1 = (k - onesByTwo) * 2
cost2 = ((prefix[r] - prefix[(l + r) // 2]) -
(prefix[(l + r + 1) // 2] - prefix[l]))
ans = min(ans, cost1 + cost2)
return ans
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-airlift/dagster_airlift/core/serialization/serialized_data.py | {
"start": 3229,
"end": 3963
} | class ____(NamedTuple):
id: int
uri: str
extra: Mapping[str, Any]
created_at: str
updated_at: str
consuming_dags: Sequence[DatasetConsumingDag]
producing_tasks: Sequence[DatasetProducingTask]
def is_produced_by_task(self, *, task_id: str, dag_id: str) -> bool:
return any(
task.task_id == task_id and task.dag_id == dag_id for task in self.producing_tasks
)
###################################################################################################
# Serialized data that scopes to airflow DAGs and tasks.
###################################################################################################
# History:
# - created
@whitelist_for_serdes
@record
| Dataset |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 2412,
"end": 2495
} | class ____(Evaluation):
assignee = models.CharField(max_length=50)
| QualityControl |
python | matplotlib__matplotlib | lib/matplotlib/tri/_trirefine.py | {
"start": 191,
"end": 1527
} | class ____:
"""
Abstract base class for classes implementing mesh refinement.
A TriRefiner encapsulates a Triangulation object and provides tools for
mesh refinement and interpolation.
Derived classes must implement:
- ``refine_triangulation(return_tri_index=False, **kwargs)`` , where
the optional keyword arguments *kwargs* are defined in each
TriRefiner concrete implementation, and which returns:
- a refined triangulation,
- optionally (depending on *return_tri_index*), for each
point of the refined triangulation: the index of
the initial triangulation triangle to which it belongs.
- ``refine_field(z, triinterpolator=None, **kwargs)``, where:
- *z* array of field values (to refine) defined at the base
triangulation nodes,
- *triinterpolator* is an optional `~matplotlib.tri.TriInterpolator`,
- the other optional keyword arguments *kwargs* are defined in
each TriRefiner concrete implementation;
and which returns (as a tuple) a refined triangular mesh and the
interpolated values of the field at the refined triangulation nodes.
"""
def __init__(self, triangulation):
_api.check_isinstance(Triangulation, triangulation=triangulation)
self._triangulation = triangulation
| TriRefiner |
python | pallets__jinja | tests/test_api.py | {
"start": 699,
"end": 5570
} | class ____:
def test_item_and_attribute(self, env):
from jinja2.sandbox import SandboxedEnvironment
for env in Environment(), SandboxedEnvironment():
tmpl = env.from_string("{{ foo.items()|list }}")
assert tmpl.render(foo={"items": 42}) == "[('items', 42)]"
tmpl = env.from_string('{{ foo|attr("items")()|list }}')
assert tmpl.render(foo={"items": 42}) == "[('items', 42)]"
tmpl = env.from_string('{{ foo["items"] }}')
assert tmpl.render(foo={"items": 42}) == "42"
def test_finalize(self):
e = Environment(finalize=lambda v: "" if v is None else v)
t = e.from_string("{% for item in seq %}|{{ item }}{% endfor %}")
assert t.render(seq=(None, 1, "foo")) == "||1|foo"
def test_finalize_constant_expression(self):
e = Environment(finalize=lambda v: "" if v is None else v)
t = e.from_string("<{{ none }}>")
assert t.render() == "<>"
def test_no_finalize_template_data(self):
e = Environment(finalize=lambda v: type(v).__name__)
t = e.from_string("<{{ value }}>")
# If template data was finalized, it would print "strintstr".
assert t.render(value=123) == "<int>"
def test_context_finalize(self):
@pass_context
def finalize(context, value):
return value * context["scale"]
e = Environment(finalize=finalize)
t = e.from_string("{{ value }}")
assert t.render(value=5, scale=3) == "15"
def test_eval_finalize(self):
@pass_eval_context
def finalize(eval_ctx, value):
return str(eval_ctx.autoescape) + value
e = Environment(finalize=finalize, autoescape=True)
t = e.from_string("{{ value }}")
assert t.render(value="<script>") == "True<script>"
def test_env_autoescape(self):
@pass_environment
def finalize(env, value):
return " ".join(
(env.variable_start_string, repr(value), env.variable_end_string)
)
e = Environment(finalize=finalize)
t = e.from_string("{{ value }}")
assert t.render(value="hello") == "{{ 'hello' }}"
def test_cycler(self, env):
items = 1, 2, 3
c = Cycler(*items)
for item in items + items:
assert c.current == item
assert next(c) == item
next(c)
assert c.current == 2
c.reset()
assert c.current == 1
def test_expressions(self, env):
expr = env.compile_expression("foo")
assert expr() is None
assert expr(foo=42) == 42
expr2 = env.compile_expression("foo", undefined_to_none=False)
assert is_undefined(expr2())
expr = env.compile_expression("42 + foo")
assert expr(foo=42) == 84
def test_template_passthrough(self, env):
t = Template("Content")
assert env.get_template(t) is t
assert env.select_template([t]) is t
assert env.get_or_select_template([t]) is t
assert env.get_or_select_template(t) is t
def test_get_template_undefined(self, env):
"""Passing Undefined to get/select_template raises an
UndefinedError or shows the undefined message in the list.
"""
env.loader = DictLoader({})
t = Undefined(name="no_name_1")
with pytest.raises(UndefinedError):
env.get_template(t)
with pytest.raises(UndefinedError):
env.get_or_select_template(t)
with pytest.raises(UndefinedError):
env.select_template(t)
with pytest.raises(TemplatesNotFound) as exc_info:
env.select_template([t, "no_name_2"])
exc_message = str(exc_info.value)
assert "'no_name_1' is undefined" in exc_message
assert "no_name_2" in exc_message
def test_autoescape_autoselect(self, env):
def select_autoescape(name):
if name is None or "." not in name:
return False
return name.endswith(".html")
env = Environment(
autoescape=select_autoescape,
loader=DictLoader({"test.txt": "{{ foo }}", "test.html": "{{ foo }}"}),
)
t = env.get_template("test.txt")
assert t.render(foo="<foo>") == "<foo>"
t = env.get_template("test.html")
assert t.render(foo="<foo>") == "<foo>"
t = env.from_string("{{ foo }}")
assert t.render(foo="<foo>") == "<foo>"
def test_sandbox_max_range(self, env):
from jinja2.sandbox import MAX_RANGE
from jinja2.sandbox import SandboxedEnvironment
env = SandboxedEnvironment()
t = env.from_string("{% for item in range(total) %}{{ item }}{% endfor %}")
with pytest.raises(OverflowError):
t.render(total=MAX_RANGE + 1)
| TestExtendedAPI |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 99832,
"end": 100989
} | class ____:
def test_format_timeframe(self):
assert self.locale._format_timeframe("now", 0) == "dál"
assert self.locale._format_timeframe("second", 1) == "sekunda"
assert self.locale._format_timeframe("seconds", 3) == "3 sekundda"
assert self.locale._format_timeframe("minute", 1) == "minuhta"
assert self.locale._format_timeframe("minutes", 4) == "4 minuhta"
assert self.locale._format_timeframe("hour", 1) == "diimmu"
assert self.locale._format_timeframe("hours", 23) == "23 diimmu"
assert self.locale._format_timeframe("day", 1) == "beaivvi"
assert self.locale._format_timeframe("days", 12) == "12 beaivvi"
assert self.locale._format_timeframe("month", 1) == "mánu"
assert self.locale._format_timeframe("months", 2) == "2 mánu"
assert self.locale._format_timeframe("year", 1) == "jagi"
assert self.locale._format_timeframe("years", 2) == "2 jagi"
def test_weekday(self):
dt = arrow.Arrow(2015, 4, 11, 17, 30, 00)
assert self.locale.day_name(dt.isoweekday()) == "Lávvordat"
@pytest.mark.usefixtures("lang_locale")
| TestSamiLocale |
python | sphinx-doc__sphinx | sphinx/util/_files.py | {
"start": 1912,
"end": 3152
} | class ____(dict[Path, tuple[set[str], _StrPath]]): # NoQA: FURB189
"""A special dictionary for download files.
.. important:: This class would be refactored in nearly future.
Hence don't hack this directly.
"""
def add_file(self, docname: str, filename: str | os.PathLike[str]) -> _StrPath:
filename = Path(filename)
if filename not in self:
digest = hashlib.md5(
filename.as_posix().encode(), usedforsecurity=False
).hexdigest()
dest_path = _StrPath(digest, filename.name)
self[filename] = ({docname}, dest_path)
return dest_path
docnames, dest_path = self[filename]
docnames.add(docname)
return dest_path
def purge_doc(self, docname: str) -> None:
for filename, (docs, _dest) in list(self.items()):
docs.discard(docname)
if not docs:
del self[filename]
def merge_other(
self, docnames: Set[str], other: dict[Path, tuple[set[str], _StrPath]]
) -> None:
for filename, (docs, _dest) in other.items():
for docname in docs & set(docnames):
self.add_file(docname, filename)
| DownloadFiles |
python | getsentry__sentry | tests/sentry/seer/fetch_issues/test_utils.py | {
"start": 401,
"end": 3007
} | class ____(TestCase):
def test_get_repo_and_projects_success(self):
repo = self.create_repo(
project=self.project,
name="getsentry/sentry",
provider="integrations:github",
external_id="123",
)
self.create_code_mapping(project=self.project, repo=repo)
result = get_repo_and_projects(
organization_id=self.organization.id,
provider="integrations:github",
external_id="123",
)
assert isinstance(result, RepoProjects)
assert result.organization_id == self.organization.id
assert result.provider == "integrations:github"
assert result.external_id == "123"
assert result.repo == repo
assert len(result.repo_configs) == 1
assert len(result.projects) == 1
assert result.projects[0] == self.project
def test_get_repo_and_projects_multiple_projects(self):
repo = self.create_repo(
project=self.project,
name="getsentry/sentry",
provider="integrations:github",
external_id="123",
)
project2 = self.create_project(organization=self.organization)
self.create_code_mapping(project=self.project, repo=repo)
self.create_code_mapping(project=project2, repo=repo)
result = get_repo_and_projects(
organization_id=self.organization.id,
provider="integrations:github",
external_id="123",
)
assert len(result.repo_configs) == 2
assert len(result.projects) == 2
project_ids = {proj.id for proj in result.projects}
assert project_ids == {self.project.id, project2.id}
def test_get_repo_and_projects_no_configs(self):
self.create_repo(
project=self.project,
name="getsentry/sentry",
provider="integrations:github",
external_id="123",
)
with pytest.raises(ValueError, match="No Sentry projects found for repo"):
get_repo_and_projects(
organization_id=self.organization.id,
provider="integrations:github",
external_id="123",
)
def test_get_repo_and_projects_repo_not_found(self):
from sentry.models.repository import Repository
with pytest.raises(Repository.DoesNotExist):
get_repo_and_projects(
organization_id=self.organization.id,
provider="integrations:github",
external_id="nonexistent",
)
| TestGetRepoAndProjects |
python | pydata__xarray | xarray/core/indexing.py | {
"start": 19604,
"end": 20176
} | class ____:
"""Mixin to mark support for Indexer subclasses in indexing."""
__slots__ = ()
def __array__(
self, dtype: DTypeLike | None = None, /, *, copy: bool | None = None
) -> np.ndarray:
# Leave casting to an array up to the underlying array type.
if Version(np.__version__) >= Version("2.0.0"):
return np.asarray(self.get_duck_array(), dtype=dtype, copy=copy)
else:
return np.asarray(self.get_duck_array(), dtype=dtype)
def get_duck_array(self):
return self.array
| ExplicitlyIndexed |
python | huggingface__transformers | src/transformers/models/big_bird/modeling_big_bird.py | {
"start": 70036,
"end": 70754
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
pooler_output (`torch.FloatTensor` of shape `(batch_size, 1)`):
pooler output from BigBigModel
"""
loss: Optional[torch.FloatTensor] = None
start_logits: Optional[torch.FloatTensor] = None
end_logits: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@auto_docstring
| BigBirdForQuestionAnsweringModelOutput |
python | readthedocs__readthedocs.org | readthedocs/proxito/views/serve.py | {
"start": 2270,
"end": 3709
} | class ____(CDNCacheControlMixin, ServeRedirectMixin, ServeDocsMixin, View):
"""
Page redirect view.
This allows users to redirec to the default version of a project.
For example:
- /page/api/index.html -> /en/latest/api/index.html
- /projects/subproject/page/index.html -> /projects/subproject/en/latest/api/index.html
"""
def get(self, request, subproject_slug=None, filename=""):
"""Handle all page redirects."""
unresolved_domain = request.unresolved_domain
project = unresolved_domain.project
# Use the project from the domain, or use the subproject slug.
if subproject_slug:
project = get_object_or_404(project.subprojects, alias=subproject_slug).child
# Get the default version from the current project,
# or the version from the external domain.
if unresolved_domain.is_from_external_domain:
version_slug = unresolved_domain.external_version_slug
else:
version_slug = project.get_default_version()
# TODO: find a better way to pass this to the middleware.
request.path_project_slug = project.slug
return self.system_redirect(
request=request,
final_project=project,
version_slug=version_slug,
filename=filename,
is_external_version=unresolved_domain.is_from_external_domain,
)
| ServePageRedirect |
python | facebook__pyre-check | tools/generate_taint_models/get_dynamic_graphql_sources.py | {
"start": 679,
"end": 1211
} | class ____():
def __init__(self, template_str: str) -> None:
if not template_str or '{gql_type_name}' not in template_str or '{gql_field}' not in template_str:
raise ModelGenerationException("Template string must be provided and contain '{gql_type_name}' and '{gql_field}'")
self.template_str = template_str
def format(self, gql_type_name: str, gql_field: str) -> str:
return self.template_str.format(gql_type_name=gql_type_name, gql_field=gql_field)
| DynamicGraphQLFormattableSpecification |
python | sanic-org__sanic | sanic/server/protocols/websocket_protocol.py | {
"start": 802,
"end": 8335
} | class ____(HttpProtocol):
__slots__ = (
"websocket",
"websocket_timeout",
"websocket_max_size",
"websocket_ping_interval",
"websocket_ping_timeout",
"websocket_url",
"websocket_peer",
)
def __init__(
self,
*args,
websocket_timeout: float = 10.0,
websocket_max_size: Optional[int] = None,
websocket_ping_interval: Optional[float] = 20.0,
websocket_ping_timeout: Optional[float] = 20.0,
**kwargs,
):
super().__init__(*args, **kwargs)
self.websocket: Optional[WebsocketImplProtocol] = None
self.websocket_timeout = websocket_timeout
self.websocket_max_size = websocket_max_size
self.websocket_ping_interval = websocket_ping_interval
self.websocket_ping_timeout = websocket_ping_timeout
self.websocket_url: Optional[str] = None
self.websocket_peer: Optional[str] = None
def connection_lost(self, exc):
if self.websocket is not None:
self.websocket.connection_lost(exc)
super().connection_lost(exc)
self.log_websocket("CLOSE")
self.websocket_url = None
self.websocket_peer = None
def data_received(self, data):
if self.websocket is not None:
self.websocket.data_received(data)
else:
# Pass it to HttpProtocol handler first
# That will (hopefully) upgrade it to a websocket.
super().data_received(data)
def eof_received(self) -> Optional[bool]:
if self.websocket is not None:
return self.websocket.eof_received()
else:
return False
def close(self, timeout: Optional[float] = None):
# Called by HttpProtocol at the end of connection_task
# If we've upgraded to websocket, we do our own closing
if self.websocket is not None:
# Note, we don't want to use websocket.close()
# That is used for user's application code to send a
# websocket close packet. This is different.
self.websocket.end_connection(1001)
else:
super().close()
def close_if_idle(self):
# Called by Sanic Server when shutting down
# If we've upgraded to websocket, shut it down
if self.websocket is not None:
if self.websocket.ws_proto.state in (CLOSING, CLOSED):
return True
elif self.websocket.loop is not None:
self.websocket.loop.create_task(self.websocket.close(1001))
else:
self.websocket.end_connection(1001)
else:
return super().close_if_idle()
@staticmethod
def sanic_request_to_ws_request(request: Request):
return http11.Request(
path=request.path,
headers=WSHeaders(request.headers),
)
async def websocket_handshake(
self, request, subprotocols: Optional[Sequence[str]] = None
):
# let the websockets package do the handshake with the client
try:
if subprotocols is not None:
# subprotocols can be a set or frozenset,
# but ServerProtocol needs a list
subprotocols = cast(
Optional[Sequence[Subprotocol]],
list(
[
Subprotocol(subprotocol)
for subprotocol in subprotocols
]
),
)
ws_proto = ServerProtocol(
max_size=self.websocket_max_size,
subprotocols=subprotocols,
state=OPEN,
logger=websockets_logger,
)
resp = ws_proto.accept(self.sanic_request_to_ws_request(request))
except Exception:
msg = (
"Failed to open a WebSocket connection.\n"
"See server log for more information.\n"
)
raise SanicException(msg, status_code=500)
if 100 <= resp.status_code <= 299:
first_line = (
f"HTTP/1.1 {resp.status_code} {resp.reason_phrase}\r\n"
).encode()
rbody = bytearray(first_line)
rbody += (
"".join([f"{k}: {v}\r\n" for k, v in resp.headers.items()])
).encode()
rbody += b"\r\n"
if resp.body:
rbody += resp.body
rbody += b"\r\n\r\n"
await super().send(rbody)
else:
raise SanicException(resp.body, resp.status_code)
self.websocket = WebsocketImplProtocol(
ws_proto,
ping_interval=self.websocket_ping_interval,
ping_timeout=self.websocket_ping_timeout,
close_timeout=self.websocket_timeout,
)
loop = (
request.transport.loop
if hasattr(request, "transport")
and hasattr(request.transport, "loop")
else None
)
await self.websocket.connection_made(self, loop=loop)
self.websocket_url = self._http.request.url
self.websocket_peer = f"{id(self):X}"[-5:-1] + "unx"
if ip := self._http.request.client_ip:
self.websocket_peer = f"{ip}:{self._http.request.port}"
self.log_websocket("OPEN")
return self.websocket
def log_websocket(self, message):
if not self.access_log or not self.websocket_url:
return
status = ""
close = ""
try:
# Can we get some useful statistics?
p = self.websocket.ws_proto
state = p.state
if state == CLOSED:
codes = {
1000: "NORMAL",
1001: "GOING AWAY",
1005: "NO STATUS",
1006: "ABNORMAL",
1011: "SERVER ERR",
}
if p.close_code == 1006:
message = "CLOSE_ABN"
scode = rcode = 1006 # Abnormal closure (disconnection)
sdesc = rdesc = ""
if p.close_sent:
scode = p.close_sent.code
sdesc = p.close_sent.reason
if p.close_rcvd:
rcode = p.close_rcvd.code
rdesc = p.close_rcvd.reason
# Use repr() to escape any control characters
sdesc = repr(sdesc[:256]) if sdesc else codes.get(scode, "")
rdesc = repr(rdesc[:256]) if rdesc else codes.get(rcode, "")
if p.close_rcvd_then_sent or scode == 1006:
status = rcode
close = (
f"{rdesc} from client"
if scode in (rcode, 1006)
else f"{rdesc} ▼▲ {scode} {sdesc}"
)
else:
status = scode
close = (
f"{sdesc} from server"
if rcode in (scode, 1006)
else f"{sdesc} ▲▼ {rcode} {rdesc}"
)
except AttributeError:
...
extra = {
"status": status,
"byte": close,
"host": self.websocket_peer,
"request": f" 🔌 {self.websocket_url}",
"duration": "",
}
access_logger.info(message, extra=extra)
| WebSocketProtocol |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF009_attrs.py | {
"start": 1135,
"end": 1542
} | class ____:
hidden_mutable_default: list[int] = default_function()
another_dataclass: A = A()
not_optimal: ImmutableType = ImmutableType(20)
good_variant: ImmutableType = DEFAULT_IMMUTABLETYPE_FOR_ALL_DATACLASSES
okay_variant: A = DEFAULT_A_FOR_ALL_DATACLASSES
fine_dataclass_function: list[int] = field(default_factory=list)
attrs_factory: dict[str, str] = Factory(OrderedDict)
| B |
python | pypa__warehouse | tests/unit/manage/test_views.py | {
"start": 228963,
"end": 235832
} | class ____:
def test_get(self, db_request, user_service):
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project)
file_ = FileFactory.create(release=release)
# NOTE: intentionally out of order, to test sorting.
events = [
FileEventFactory.create(
source=file_,
tag="fake:event",
time=datetime.datetime(2018, 2, 5, 17, 18, 18, 462_634),
additional={
"project_id": str(project.id),
},
),
ProjectEventFactory.create(
source=project,
tag="fake:event",
time=datetime.datetime(2017, 2, 5, 17, 18, 18, 462_634),
),
ProjectEventFactory.create(
source=project,
tag="fake:event",
time=datetime.datetime(2019, 2, 5, 17, 18, 18, 462_634),
),
FileEventFactory.create(
source=file_,
tag="fake:event",
time=datetime.datetime(2016, 2, 5, 17, 18, 18, 462_634),
additional={
"project_id": str(project.id),
},
),
]
project_events_query = (
db_request.db.query(Project.Event)
.join(Project.Event.source)
.filter(Project.Event.source_id == project.id)
)
file_events_query = (
db_request.db.query(File.Event)
.join(File.Event.source)
.filter(File.Event.additional["project_id"].astext == str(project.id))
)
events_query = project_events_query.union(file_events_query).order_by(
Project.Event.time.desc(), File.Event.time.desc()
)
events_page = SQLAlchemyORMPage(
events_query,
page=1,
items_per_page=25,
item_count=4,
url_maker=paginate_url_factory(db_request),
)
assert views.manage_project_history(project, db_request) == {
"events": events_page,
"get_user": user_service.get_user,
"project": project,
}
events_page = list(events_page)
# NOTE: The Event -> Project.Event | File.Event mapping is broken
# due to how Event subclasses are constructed, so we only test
# the ordering here.
assert [e.time for e in events_page] == [
e.time for e in sorted(events, key=lambda e: e.time, reverse=True)
]
# NOTE: This is a backstop for the bugged behavior above: when we
# fix it, this will begin to fail.
for event in events_page:
assert isinstance(event, Project.Event)
def test_raises_400_with_pagenum_type_str(self, monkeypatch, db_request):
params = MultiDict({"page": "abc"})
db_request.params = params
events_query = pretend.stub()
db_request.events_query = pretend.stub(
events_query=lambda *a, **kw: events_query
)
page_obj = pretend.stub(page_count=10, item_count=1000)
page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
monkeypatch.setattr(views, "SQLAlchemyORMPage", page_cls)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
project = ProjectFactory.create()
with pytest.raises(HTTPBadRequest):
views.manage_project_history(project, db_request)
assert page_cls.calls == []
def test_first_page(self, db_request, user_service):
page_number = 1
params = MultiDict({"page": page_number})
db_request.params = params
project = ProjectFactory.create()
items_per_page = 25
total_items = items_per_page + 2
ProjectEventFactory.create_batch(total_items, source=project, tag="fake:event")
project_events_query = (
db_request.db.query(Project.Event)
.join(Project.Event.source)
.filter(Project.Event.source_id == project.id)
)
file_events_query = (
db_request.db.query(File.Event)
.join(File.Event.source)
.filter(File.Event.additional["project_id"].astext == str(project.id))
)
events_query = project_events_query.union(file_events_query).order_by(
Project.Event.time.desc(), File.Event.time.desc()
)
events_page = SQLAlchemyORMPage(
events_query,
page=page_number,
items_per_page=items_per_page,
item_count=total_items,
url_maker=paginate_url_factory(db_request),
)
assert views.manage_project_history(project, db_request) == {
"events": events_page,
"get_user": user_service.get_user,
"project": project,
}
def test_last_page(self, db_request, user_service):
page_number = 2
params = MultiDict({"page": page_number})
db_request.params = params
project = ProjectFactory.create()
items_per_page = 25
total_items = items_per_page + 2
ProjectEventFactory.create_batch(total_items, source=project, tag="fake:event")
project_events_query = (
db_request.db.query(Project.Event)
.join(Project.Event.source)
.filter(Project.Event.source_id == project.id)
)
file_events_query = (
db_request.db.query(File.Event)
.join(File.Event.source)
.filter(File.Event.additional["project_id"].astext == str(project.id))
)
events_query = project_events_query.union(file_events_query).order_by(
Project.Event.time.desc(), File.Event.time.desc()
)
events_page = SQLAlchemyORMPage(
events_query,
page=page_number,
items_per_page=items_per_page,
item_count=total_items,
url_maker=paginate_url_factory(db_request),
)
assert views.manage_project_history(project, db_request) == {
"events": events_page,
"get_user": user_service.get_user,
"project": project,
}
def test_raises_404_with_out_of_range_page(self, db_request):
page_number = 3
params = MultiDict({"page": page_number})
db_request.params = params
project = ProjectFactory.create()
items_per_page = 25
total_items = items_per_page + 2
ProjectEventFactory.create_batch(total_items, source=project, tag="fake:event")
with pytest.raises(HTTPNotFound):
assert views.manage_project_history(project, db_request)
| TestManageProjectHistory |
python | apache__airflow | providers/http/tests/unit/http/sensors/test_http.py | {
"start": 10004,
"end": 12286
} | class ____:
@mock.patch("airflow.providers.http.hooks.http.Session", FakeSession)
def test_get(self):
op = HttpOperator(
task_id="get_op",
method="GET",
endpoint="/search",
data={"client": "ubuntu", "q": "airflow"},
headers={},
)
op.execute({})
@mock.patch("airflow.providers.http.hooks.http.Session", FakeSession)
def test_get_response_check(self):
op = HttpOperator(
task_id="get_op",
method="GET",
endpoint="/search",
data={"client": "ubuntu", "q": "airflow"},
response_check=lambda response: ("apache/airflow" in response.text),
headers={},
)
op.execute({})
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Test only for Airflow 3.0+")
@mock.patch("airflow.providers.http.hooks.http.Session", FakeSession)
def test_sensor(self, run_task):
sensor = HttpSensor(
task_id="http_sensor_check",
http_conn_id="http_default",
endpoint="/search",
request_params={"client": "ubuntu", "q": "airflow", "date": "{{ds}}"},
headers={},
response_check=lambda response: f"apache/airflow/{DEFAULT_DATE:%Y-%m-%d}" in response.text,
poke_interval=5,
timeout=15,
)
run_task(sensor, logical_date=DEFAULT_DATE)
assert run_task.state == "success"
assert run_task.error is None
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Test only for Airflow < 3.0")
@mock.patch("airflow.providers.http.hooks.http.Session", FakeSession)
def test_sensor_af2(self):
dag = DAG(TEST_DAG_ID, schedule=None)
sensor = HttpSensor(
task_id="http_sensor_check",
http_conn_id="http_default",
endpoint="/search",
request_params={"client": "ubuntu", "q": "airflow", "date": "{{ds}}"},
headers={},
response_check=lambda response: f"apache/airflow/{DEFAULT_DATE:%Y-%m-%d}" in response.text,
poke_interval=5,
timeout=15,
dag=dag,
)
sensor.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
| TestHttpOpSensor |
python | django__django | tests/migrations2/test_migrations_2_first/0001_initial.py | {
"start": 43,
"end": 572
} | class ____(migrations.Migration):
dependencies = [
("migrations", "__first__"),
]
operations = [
migrations.CreateModel(
"OtherAuthor",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
("silly_field", models.BooleanField(default=False)),
],
),
]
| Migration |
python | django-extensions__django-extensions | django_extensions/db/fields/json.py | {
"start": 985,
"end": 3144
} | class ____(models.TextField):
"""
JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly. Main thingy must be a dict object.
"""
def __init__(self, *args, **kwargs):
kwargs["default"] = kwargs.get("default", dict)
models.TextField.__init__(self, *args, **kwargs)
def get_default(self):
if self.has_default():
default = self.default
if callable(default):
default = default()
return self.to_python(default)
return super().get_default()
def to_python(self, value):
"""Convert our string value to JSON after we load it from the DB"""
if value is None or value == "":
return {}
if isinstance(value, str):
res = loads(value)
else:
res = value
if isinstance(res, dict):
return JSONDict(**res)
elif isinstance(res, list):
return JSONList(res)
return res
def get_prep_value(self, value):
if not isinstance(value, str):
return dumps(value)
return super(models.TextField, self).get_prep_value(value)
def from_db_value(self, value, expression, connection): # type: ignore
return self.to_python(value)
def get_db_prep_save(self, value, connection, **kwargs):
"""Convert our JSON object to a string before we save"""
if value is None and self.null:
return None
# default values come in as strings; only non-strings should be
# run through `dumps`
if (
not isinstance(value, str)
# https://github.com/django-extensions/django-extensions/issues/1924
# https://code.djangoproject.com/ticket/35167
and not isinstance(value, expressions.Expression)
):
value = dumps(value)
return super().get_db_prep_save(value, connection)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.default == "{}":
del kwargs["default"]
return name, path, args, kwargs
| JSONField |
python | falconry__falcon | examples/recipes/raw_url_path_wsgi.py | {
"start": 34,
"end": 317
} | class ____:
def process_request(self, req, resp):
raw_uri = req.env.get('RAW_URI') or req.env.get('REQUEST_URI')
# NOTE: Reconstruct the percent-encoded path from the raw URI.
if raw_uri:
req.path, _, _ = raw_uri.partition('?')
| RawPathComponent |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 20117,
"end": 20538
} | class ____(sgqlc.types.Enum):
"""The possible values for the members can make purchases setting.
Enumeration Choices:
* `DISABLED`: The setting is disabled for organizations in the
enterprise.
* `ENABLED`: The setting is enabled for organizations in the
enterprise.
"""
__schema__ = github_schema
__choices__ = ("DISABLED", "ENABLED")
| EnterpriseMembersCanMakePurchasesSettingValue |
python | PrefectHQ__prefect | tests/events/server/storage/test_database.py | {
"start": 2190,
"end": 5970
} | class ____:
async def test_write_event(self, session: AsyncSession, event: ReceivedEvent):
# Write the event
async with session as session:
await write_events(session=session, events=[event])
await session.commit()
# Read it back
async with session as session:
events = await read_events(
session=session,
events_filter=EventFilter(
id=EventIDFilter(id=[event.id]),
occurred=EventOccurredFilter(
since=now("UTC") - datetime.timedelta(days=1)
),
),
)
assert len(events) == 1
assert events[0].id == event.id
async def test_write_event_ignores_duplicates(
self,
session: AsyncSession,
db: PrefectDBInterface,
event: ReceivedEvent,
other_events: List[ReceivedEvent],
):
assert len(other_events) == 1000
chunks = (other_events[:500], other_events[500:])
for chunk in chunks:
# Include the event twice in the middle of a batch of other events
events = chunk[:250] + [event] + chunk[250:]
assert len(events) == 501
async with session as session:
await write_events(session=session, events=events)
await session.commit()
# Confirm only one event and one set of event_resources was written. This uses
# the ORM directly to avoid JOINs, DISTINCTs, or .unique() that might be used
# by the read_events function and distort the test.
async with session as session:
results = await session.execute(
sa.select(db.Event).where(db.Event.id == event.id)
)
assert len(list(results)) == 1
results = await session.execute(
sa.select(db.EventResource).where(db.EventResource.event_id == event.id)
)
assert len(list(results)) == len(event.related) + 1
async def test_write_events_writes_in_chunks(
self,
session: AsyncSession,
db: PrefectDBInterface,
event: ReceivedEvent,
other_events: List[ReceivedEvent],
):
total_events = len(other_events)
total_resources = sum(len(e.involved_resources) for e in other_events)
# Confirm that our test setup is actually testing what we think it it
assert total_events == 1000
assert total_resources == 4000
total_parameters = (total_events * get_number_of_event_fields()) + (
total_resources * get_number_of_resource_fields()
)
assert total_parameters > get_max_query_parameters()
async with session as session:
await write_events(session=session, events=other_events)
await session.commit()
events_to_check = other_events[0], other_events[250], other_events[-1]
# Confirm only one event and one set of event_resources was written. This uses
# the ORM directly to avoid JOINs, DISTINCTs, or .unique() that might be used
# by the read_postgres_events function and distort the test.
async with session as session:
for event in events_to_check:
results = await session.execute(
sa.select(db.Event).where(db.Event.id == event.id)
)
assert len(list(results)) == 1
results = await session.execute(
sa.select(db.EventResource).where(
db.EventResource.event_id == event.id
)
)
assert len(list(results)) == len(event.related) + 1
| TestWriteEvents |
python | psf__requests | src/requests/auth.py | {
"start": 2045,
"end": 2220
} | class ____:
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError("Auth hooks must be callable.")
| AuthBase |
python | nedbat__coveragepy | tests/test_bytecode.py | {
"start": 408,
"end": 1493
} | class ____(CoverageTest):
"""Tests for bytecode.py"""
def test_code_objects(self) -> None:
code = compile(
dedent("""\
def f(x):
def g(y):
return {z for z in range(10)}
def j():
return [z for z in range(10)]
return g(x)
def h(x):
return x+1
"""),
"<string>",
"exec",
)
objs = list(code_objects(code))
assert code in objs
expected = {"<module>", "f", "g", "j", "h"}
if env.PYVERSION < (3, 12):
# Comprehensions were compiled as implicit functions in earlier
# versions of Python.
expected.update({"<setcomp>", "<listcomp>"})
assert {c.co_name for c in objs} == expected
def test_op_set(self) -> None:
opcodes = op_set("LOAD_CONST", "NON_EXISTENT_OPCODE", "RETURN_VALUE")
assert opcodes == {dis.opmap["LOAD_CONST"], dis.opmap["RETURN_VALUE"]}
| BytecodeTest |
python | huggingface__transformers | src/transformers/models/led/configuration_led.py | {
"start": 853,
"end": 7437
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LEDModel`]. It is used to instantiate an LED
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LED
[allenai/led-base-16384](https://huggingface.co/allenai/led-base-16384) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the LED model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LEDModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
max_encoder_position_embeddings (`int`, *optional*, defaults to 16384):
The maximum sequence length that the encoder might ever be used with.
max_decoder_position_embeddings (`int`, *optional*, defaults to 16384):
The maximum sequence length that the decoder might ever be used with.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models)
Example:
```python
>>> from transformers import LEDModel, LEDConfig
>>> # Initializing a LED allenai/led-base-16384 style configuration
>>> configuration = LEDConfig()
>>> # Initializing a model from the allenai/led-base-16384 style configuration
>>> model = LEDModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "led"
attribute_map = {
"num_attention_heads": "encoder_attention_heads",
"hidden_size": "d_model",
"attention_probs_dropout_prob": "attention_dropout",
"initializer_range": "init_std",
}
def __init__(
self,
vocab_size=50265,
max_encoder_position_embeddings=16384,
max_decoder_position_embeddings=1024,
encoder_layers=12,
encoder_ffn_dim=4096,
encoder_attention_heads=16,
decoder_layers=12,
decoder_ffn_dim=4096,
decoder_attention_heads=16,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
use_cache=True,
is_encoder_decoder=True,
activation_function="gelu",
d_model=1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
decoder_start_token_id=2,
classifier_dropout=0.0,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
attention_window: Union[list[int], int] = 512,
**kwargs,
):
self.vocab_size = vocab_size
self.max_encoder_position_embeddings = max_encoder_position_embeddings
self.max_decoder_position_embeddings = max_decoder_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.attention_window = attention_window
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
**kwargs,
)
__all__ = ["LEDConfig"]
| LEDConfig |
python | kamyu104__LeetCode-Solutions | Python/convert-sorted-array-to-binary-search-tree.py | {
"start": 1467,
"end": 2001
} | class ____(object):
def sortedArrayToBST(self, nums):
"""
:type nums: List[int]
:rtype: TreeNode
"""
self.iterator = iter(nums)
return self.helper(0, len(nums))
def helper(self, start, end):
if start == end:
return None
mid = (start + end) // 2
left = self.helper(start, mid)
current = TreeNode(next(self.iterator))
current.left = left
current.right = self.helper(mid+1, end)
return current
| Solution2 |
python | dask__dask | dask/tests/test_tokenize.py | {
"start": 26377,
"end": 26431
} | class ____:
a: int
@dataclasses.dataclass
| ADataClass |
python | joke2k__faker | faker/providers/phone_number/tw_GH/__init__.py | {
"start": 49,
"end": 578
} | class ____(PhoneNumberProvider):
formats = (
"+23327#######",
"+23357#######",
"+23355#######",
"+23324#######",
"+23354#######",
"+23320#######",
"+23350#######",
"+23326#######",
"+23356#######",
"+23328#######",
"024#######",
"054#######",
"055#######",
"027#######",
"057#######",
"020#######",
"050#######",
"026#######",
"056#######",
"028#######",
)
| Provider |
python | scrapy__scrapy | tests/test_squeues_request.py | {
"start": 3848,
"end": 4115
} | class ____(TestRequestQueueBase):
is_fifo = True
@pytest.fixture
def q(self, crawler, tmp_path):
return MarshalFifoDiskQueue.from_crawler(
crawler=crawler, key=str(tmp_path / "marshal" / "fifo")
)
| TestMarshalFifoDiskQueueRequest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassConverter2.py | {
"start": 406,
"end": 1383
} | class ____(ModelBase):
asymmetric: int = model_field(converter=converter_simple)
symmetric: str | int = model_field(converter=converter_passThru)
dc1 = DC1("1", 1)
reveal_type(dc1.asymmetric, expected_text="int")
dc1.asymmetric = "2"
reveal_type(
dc1.asymmetric, expected_text="int"
) # Asymmetric -- type narrowing should not occur
# This should generate an error because only strs can be assigned to field0.
dc1.asymmetric = 2
reveal_type(dc1.symmetric, expected_text="str | int")
dc1.symmetric = "1"
reveal_type(
dc1.symmetric, expected_text="Literal['1']"
) # Symmetric -- type narrowing should occur
reveal_type(DC1.asymmetric, expected_text="int")
DC1.asymmetric = "2"
reveal_type(DC1.asymmetric, expected_text="int")
# This should generate an error because only strs can be assigned to field0.
DC1.asymmetric = 2
reveal_type(DC1.symmetric, expected_text="str | int")
DC1.symmetric = "1"
reveal_type(DC1.symmetric, expected_text="Literal['1']")
| DC1 |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_eks.py | {
"start": 8119,
"end": 52690
} | class ____:
def test_hook(self, cluster_builder) -> None:
eks_hook, _ = cluster_builder()
assert eks_hook.get_conn() is not None
assert eks_hook.aws_conn_id == DEFAULT_CONN_ID
assert eks_hook.region_name == REGION
###
# This specific test does not use the fixture since
# it is intended to verify that there are no clusters
# in the list at initialization, which means the mock
# decorator must be used manually in this one case.
###
@mock_aws
def test_list_clusters_returns_empty_by_default(self) -> None:
eks_hook: EksHook = EksHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
result: list = eks_hook.list_clusters()
assert isinstance(result, list)
assert len(result) == 0
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_list_clusters_returns_sorted_cluster_names(
self, cluster_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_result: list = sorted(generated_test_data.cluster_names)
result: list = eks_hook.list_clusters()
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.LARGE])
def test_list_clusters_returns_all_results(self, cluster_builder, initial_batch_size: int) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_result: list = sorted(generated_test_data.cluster_names)
result: list = eks_hook.list_clusters()
assert_result_matches_expected_list(result, expected_result)
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_create_cluster_throws_exception_when_cluster_exists(
self, cluster_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: type[AWSError] = ResourceInUseException
expected_msg: str = CLUSTER_EXISTS_MSG.format(
clusterName=generated_test_data.existing_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_cluster(
name=generated_test_data.existing_cluster_name,
**dict(ClusterInputs.REQUIRED),
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new cluster was created.
len_after_test: int = len(eks_hook.list_clusters())
assert len_after_test == initial_batch_size
def test_create_cluster_generates_valid_cluster_arn(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
expected_arn_values: list = [
PARTITION,
REGION,
DEFAULT_ACCOUNT_ID,
generated_test_data.cluster_names,
]
assert_all_arn_values_are_valid(
expected_arn_values=expected_arn_values,
pattern=RegExTemplates.CLUSTER_ARN,
arn_under_test=generated_test_data.cluster_describe_output[ClusterAttributes.ARN],
)
@time_machine.travel(FROZEN_TIME, tick=False)
def test_create_cluster_generates_valid_cluster_created_timestamp(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_time: datetime = generated_test_data.cluster_describe_output[ClusterAttributes.CREATED_AT]
assert iso_date(result_time) == FROZEN_TIME
def test_create_cluster_generates_valid_cluster_endpoint(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_endpoint: str = generated_test_data.cluster_describe_output[ClusterAttributes.ENDPOINT]
assert_is_valid_uri(result_endpoint)
def test_create_cluster_generates_valid_oidc_identity(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_issuer: str = generated_test_data.cluster_describe_output[ClusterAttributes.IDENTITY][
ClusterAttributes.OIDC
][ClusterAttributes.ISSUER]
assert_is_valid_uri(result_issuer)
def test_create_cluster_saves_provided_parameters(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.cluster_describe_output[key] == expected_value
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_describe_cluster_throws_exception_when_cluster_not_found(
self, cluster_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_cluster(name=generated_test_data.nonexistent_cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_delete_cluster_returns_deleted_cluster(self, cluster_builder, initial_batch_size: int) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size, minimal=False)
result: dict = eks_hook.delete_cluster(name=generated_test_data.existing_cluster_name)[
ResponseAttributes.CLUSTER
]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_delete_cluster_removes_deleted_cluster(self, cluster_builder, initial_batch_size: int) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size, minimal=False)
eks_hook.delete_cluster(name=generated_test_data.existing_cluster_name)
result_cluster_list: list = eks_hook.list_clusters()
assert len(result_cluster_list) == (initial_batch_size - 1)
assert generated_test_data.existing_cluster_name not in result_cluster_list
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_delete_cluster_throws_exception_when_cluster_not_found(
self, cluster_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_cluster(name=generated_test_data.nonexistent_cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify nothing was deleted.
cluster_count_after_test: int = len(eks_hook.list_clusters())
assert cluster_count_after_test == initial_batch_size
def test_list_nodegroups_returns_empty_by_default(self, cluster_builder) -> None:
eks_hook, generated_test_data = cluster_builder()
result: list = eks_hook.list_nodegroups(clusterName=generated_test_data.existing_cluster_name)
assert isinstance(result, list)
assert len(result) == 0
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_list_nodegroups_returns_sorted_nodegroup_names(
self, nodegroup_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_result: list = sorted(generated_test_data.nodegroup_names)
result: list = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.LARGE])
def test_list_nodegroups_returns_all_results(self, nodegroup_builder, initial_batch_size: int) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_result: list = sorted(generated_test_data.nodegroup_names)
result: list = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result)
@mock_aws
def test_create_nodegroup_throws_exception_when_cluster_not_found(self) -> None:
eks_hook: EksHook = EksHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
non_existent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
non_existent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=non_existent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=non_existent_cluster_name,
nodegroupName=non_existent_nodegroup_name,
**dict(NodegroupInputs.REQUIRED),
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_create_nodegroup_throws_exception_when_nodegroup_already_exists(
self, nodegroup_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_exception: type[AWSError] = ResourceInUseException
expected_msg: str = NODEGROUP_EXISTS_MSG.format(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
**dict(NodegroupInputs.REQUIRED),
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new nodegroup was created.
nodegroup_count_after_test = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_create_nodegroup_throws_exception_when_cluster_not_active(
self, nodegroup_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
non_existent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
expected_exception: type[AWSError] = InvalidRequestException
expected_msg: str = CLUSTER_NOT_READY_MSG.format(
clusterName=generated_test_data.cluster_name,
)
with mock.patch("moto.eks.models.Cluster.is_active", new_callable=PropertyMock(return_value=False)):
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=non_existent_nodegroup_name,
**dict(NodegroupInputs.REQUIRED),
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new nodegroup was created.
nodegroup_count_after_test = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
def test_create_nodegroup_generates_valid_nodegroup_arn(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
expected_arn_values: list = [
PARTITION,
REGION,
DEFAULT_ACCOUNT_ID,
generated_test_data.cluster_name,
generated_test_data.nodegroup_names,
None,
]
assert_all_arn_values_are_valid(
expected_arn_values=expected_arn_values,
pattern=RegExTemplates.NODEGROUP_ARN,
arn_under_test=generated_test_data.nodegroup_describe_output[NodegroupAttributes.ARN],
)
@time_machine.travel(FROZEN_TIME, tick=False)
def test_create_nodegroup_generates_valid_nodegroup_created_timestamp(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_time: datetime = generated_test_data.nodegroup_describe_output[NodegroupAttributes.CREATED_AT]
assert iso_date(result_time) == FROZEN_TIME
@time_machine.travel(FROZEN_TIME, tick=False)
def test_create_nodegroup_generates_valid_nodegroup_modified_timestamp(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_time: datetime = generated_test_data.nodegroup_describe_output[NodegroupAttributes.MODIFIED_AT]
assert iso_date(result_time) == FROZEN_TIME
def test_create_nodegroup_generates_valid_autoscaling_group_name(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_resources: dict = generated_test_data.nodegroup_describe_output[NodegroupAttributes.RESOURCES]
result_asg_name: str = result_resources[NodegroupAttributes.AUTOSCALING_GROUPS][0][
NodegroupAttributes.NAME
]
assert RegExTemplates.NODEGROUP_ASG_NAME_PATTERN.match(result_asg_name)
def test_create_nodegroup_generates_valid_security_group_name(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_resources: dict = generated_test_data.nodegroup_describe_output[NodegroupAttributes.RESOURCES]
result_security_group: str = result_resources[NodegroupAttributes.REMOTE_ACCESS_SG]
assert RegExTemplates.NODEGROUP_SECURITY_GROUP_NAME_PATTERN.match(result_security_group)
def test_create_nodegroup_saves_provided_parameters(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.nodegroup_describe_output[key] == expected_value
def test_create_nodegroup_without_tags_uses_default(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
tag_list: dict = generated_test_data.nodegroup_describe_output[NodegroupAttributes.TAGS]
ownership_tag_key: str = NODEGROUP_OWNERSHIP_TAG_KEY.format(
cluster_name=generated_test_data.cluster_name
)
assert tag_list.get(ownership_tag_key) == NODEGROUP_OWNERSHIP_TAG_DEFAULT_VALUE
def test_create_nodegroup_with_ownership_tag_uses_provided_value(self, cluster_builder) -> None:
eks_hook, generated_test_data = cluster_builder()
cluster_name: str = generated_test_data.existing_cluster_name
ownership_tag_key: str = NODEGROUP_OWNERSHIP_TAG_KEY.format(cluster_name=cluster_name)
provided_tag_value: str = "shared"
created_nodegroup: dict = eks_hook.create_nodegroup(
clusterName=cluster_name,
nodegroupName="nodegroup",
tags={ownership_tag_key: provided_tag_value},
**dict(deepcopy(NodegroupInputs.REQUIRED)),
)[ResponseAttributes.NODEGROUP]
tags = created_nodegroup.get(NodegroupAttributes.TAGS)
assert tags is not None
assert tags.get(ownership_tag_key) == provided_tag_value
def test_describe_nodegroup_throws_exception_when_cluster_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_nodegroup(
clusterName=generated_test_data.nonexistent_cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_describe_nodegroup_throws_exception_when_nodegroup_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = NODEGROUP_NOT_FOUND_MSG.format(
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_cluster_throws_exception_when_nodegroups_exist(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: type[AWSError] = ResourceInUseException
expected_msg: str = CLUSTER_IN_USE_MSG
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_cluster(name=generated_test_data.cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no clusters were deleted.
cluster_count_after_test: int = len(eks_hook.list_clusters())
assert cluster_count_after_test == BatchCountSize.SINGLE
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_delete_nodegroup_removes_deleted_nodegroup(
self, nodegroup_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
result_nodegroup_list: list = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert len(result_nodegroup_list) == (initial_batch_size - 1)
assert generated_test_data.existing_nodegroup_name not in result_nodegroup_list
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_delete_nodegroup_returns_deleted_nodegroup(
self, nodegroup_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size, minimal=False)
result: dict = eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)[ResponseAttributes.NODEGROUP]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
def test_delete_nodegroup_throws_exception_when_cluster_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_nodegroup(
clusterName=generated_test_data.nonexistent_cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_delete_nodegroup_throws_exception_when_nodegroup_not_found(
self, nodegroup_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = NODEGROUP_NOT_FOUND_MSG.format(
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new nodegroup was created.
nodegroup_count_after_test: int = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
# If launch_template is specified, you can not specify instanceTypes, diskSize, or remoteAccess.
test_cases = [
# Happy Paths
(LAUNCH_TEMPLATE, None, None, None, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, None, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, None, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, DISK_SIZE, None, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, None, None, PossibleTestResults.SUCCESS),
(None, None, DISK_SIZE, None, PossibleTestResults.SUCCESS),
(None, None, None, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, None, None, None, PossibleTestResults.SUCCESS),
# Unhappy Paths
(LAUNCH_TEMPLATE, INSTANCE_TYPES, None, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, DISK_SIZE, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, None, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, DISK_SIZE, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, None, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.FAILURE),
]
    @pytest.mark.parametrize(
        ("launch_template", "instance_types", "disk_size", "remote_access", "expected_result"),
        test_cases,
    )
    def test_create_nodegroup_handles_launch_template_combinations(
        self,
        cluster_builder,
        launch_template,
        instance_types,
        disk_size,
        remote_access,
        expected_result,
    ):
        """Exercise every launchTemplate/instanceTypes/diskSize/remoteAccess combination.

        Driven by the ``test_cases`` table: valid combinations must create a
        nodegroup that echoes the inputs back; invalid combinations must raise
        InvalidParameterException with the matching message.
        """
        eks_hook, generated_test_data = cluster_builder()
        nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
        expected_exception: type[AWSError] = InvalidParameterException
        expected_message: str = ""
        # Assemble create_nodegroup kwargs from required constants, the required
        # per-test names, and whichever optional case values are truthy (each is a
        # (key, value) pair or None).
        test_inputs = dict(
            deepcopy(
                # Required Constants
                NodegroupInputs.REQUIRED
                # Required Variables
                + [
                    (
                        ClusterAttributes.CLUSTER_NAME,
                        generated_test_data.existing_cluster_name,
                    ),
                    (NodegroupAttributes.NODEGROUP_NAME, nodegroup_name),
                ]
                # Test Case Values
                + [_ for _ in [launch_template, instance_types, disk_size, remote_access] if _]
            )
        )
        if expected_result == PossibleTestResults.SUCCESS:
            result: dict = eks_hook.create_nodegroup(**test_inputs)[ResponseAttributes.NODEGROUP]
            expected_output = deepcopy(test_inputs)
            # The Create Nodegroup hook magically adds the required
            # cluster/owned tag, so add that to the expected outputs.
            expected_output["tags"] = {
                f"kubernetes.io/cluster/{generated_test_data.existing_cluster_name}": "owned"
            }
            for key, expected_value in expected_output.items():
                assert result[key] == expected_value
        else:
            # Select the error message matching the invalid combination under test.
            if launch_template and disk_size:
                expected_message = LAUNCH_TEMPLATE_WITH_DISK_SIZE_MSG
            elif launch_template and remote_access:
                expected_message = LAUNCH_TEMPLATE_WITH_REMOTE_ACCESS_MSG
            # Docs say this combination throws an exception but testing shows that
            # instanceTypes overrides the launchTemplate instance values instead.
            # Leaving here for easier correction if/when that gets fixed.
            elif launch_template and instance_types:
                pass
            if expected_message:
                with pytest.raises(ClientError) as raised_exception:
                    eks_hook.create_nodegroup(**test_inputs)
                assert_client_error_exception_thrown(
                    expected_exception=expected_exception,
                    expected_msg=expected_message,
                    raised_exception=raised_exception,
                )
def test_list_fargate_profiles_returns_empty_by_default(self, cluster_builder) -> None:
eks_hook, generated_test_data = cluster_builder()
result: list = eks_hook.list_fargate_profiles(clusterName=generated_test_data.existing_cluster_name)
assert isinstance(result, list)
assert len(result) == 0
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_list_fargate_profiles_returns_sorted_profile_names(
self, fargate_profile_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_result: list = sorted(generated_test_data.fargate_profile_names)
result: list = eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.LARGE])
def test_list_fargate_profiles_returns_all_results(
self, fargate_profile_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_result: list = sorted(generated_test_data.fargate_profile_names)
result: list = eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result)
@mock_aws
def test_create_fargate_profile_throws_exception_when_cluster_not_found(self) -> None:
eks_hook: EksHook = EksHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
non_existent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
non_existent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(clusterName=non_existent_cluster_name)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=non_existent_cluster_name,
fargateProfileName=non_existent_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED),
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_create_fargate_profile_throws_exception_when_fargate_profile_already_exists(
self, fargate_profile_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_exception: type[AWSError] = ResourceInUseException
expected_msg: str = FARGATE_PROFILE_EXISTS_MSG
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED),
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new Fargate profile was created.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_create_fargate_profile_throws_exception_when_cluster_not_active(
self, fargate_profile_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
non_existent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
expected_exception: type[AWSError] = InvalidRequestException
expected_msg: str = CLUSTER_NOT_READY_MSG.format(
clusterName=generated_test_data.cluster_name,
)
with mock.patch("moto.eks.models.Cluster.is_active", new_callable=PropertyMock(return_value=False)):
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=non_existent_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED),
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new Fargate profile was created.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
    def test_create_fargate_profile_generates_valid_profile_arn(self, fargate_profile_builder) -> None:
        """A new Fargate profile's ARN carries the expected partition/region/account/name parts."""
        _, generated_test_data = fargate_profile_builder()
        # Ordered to match the capture groups of RegExTemplates.FARGATE_PROFILE_ARN.
        # NOTE(review): `fargate_profile_names` is plural (a collection of generated
        # names) while the other entries are scalars; this relies on
        # assert_all_arn_values_are_valid accepting a collection of candidates for a
        # position -- confirm against its definition.
        expected_arn_values: list = [
            PARTITION,
            REGION,
            DEFAULT_ACCOUNT_ID,
            generated_test_data.cluster_name,
            generated_test_data.fargate_profile_names,
            None,
        ]
        assert_all_arn_values_are_valid(
            expected_arn_values=expected_arn_values,
            pattern=RegExTemplates.FARGATE_PROFILE_ARN,
            arn_under_test=generated_test_data.fargate_describe_output[FargateProfileAttributes.ARN],
        )
@time_machine.travel(FROZEN_TIME, tick=False)
def test_create_fargate_profile_generates_valid_created_timestamp(self, fargate_profile_builder) -> None:
_, generated_test_data = fargate_profile_builder()
result_time: datetime = generated_test_data.fargate_describe_output[
FargateProfileAttributes.CREATED_AT
]
assert iso_date(result_time) == FROZEN_TIME
def test_create_fargate_profile_saves_provided_parameters(self, fargate_profile_builder) -> None:
_, generated_test_data = fargate_profile_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.fargate_describe_output[key] == expected_value
def test_describe_fargate_profile_throws_exception_when_cluster_not_found(
self, fargate_profile_builder
) -> None:
eks_hook, generated_test_data = fargate_profile_builder()
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_fargate_profile(
clusterName=generated_test_data.nonexistent_cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_describe_fargate_profile_throws_exception_when_profile_not_found(
self, fargate_profile_builder
) -> None:
client, generated_test_data = fargate_profile_builder()
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = FARGATE_PROFILE_NOT_FOUND_MSG.format(
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
with pytest.raises(ClientError) as raised_exception:
client.describe_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_delete_fargate_profile_removes_deleted_fargate_profile(
self, fargate_profile_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(initial_batch_size)
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
result_fargate_profile_list: list = eks_hook.list_fargate_profiles(
clusterName=generated_test_data.cluster_name
)
assert len(result_fargate_profile_list) == (initial_batch_size - 1)
assert generated_test_data.existing_fargate_profile_name not in result_fargate_profile_list
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_delete_fargate_profile_returns_deleted_fargate_profile(
self, fargate_profile_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size, minimal=False)
result: dict = eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)[ResponseAttributes.FARGATE_PROFILE]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
def test_delete_fargate_profile_throws_exception_when_cluster_not_found(
self, fargate_profile_builder
) -> None:
eks_hook, generated_test_data = fargate_profile_builder()
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.nonexistent_cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
@pytest.mark.parametrize("initial_batch_size", [BatchCountSize.SMALL])
def test_delete_fargate_profile_throws_exception_when_fargate_profile_not_found(
self, fargate_profile_builder, initial_batch_size: int
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = FARGATE_PROFILE_NOT_FOUND_MSG.format(
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new Fargate profile was created.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
# The following Selector test cases have all been verified against the AWS API using cURL.
selector_formatting_test_cases = [
# Format is ([Selector(s), expected_message, expected_result])
# Happy Paths
# Selector with a Namespace and no Labels
(
[{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and an empty collection of Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", 0),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and one valid Label
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", 1),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and the maximum number of Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Two valid Selectors
(
[
{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE},
{FargateProfileAttributes.NAMESPACE: f"{DEFAULT_NAMESPACE}_2"},
],
None,
PossibleTestResults.SUCCESS,
),
# Unhappy Cases
# No Selectors provided
([], FARGATE_PROFILE_NEEDS_SELECTOR_MSG, PossibleTestResults.FAILURE),
# Empty Selector / Selector without a Namespace or Labels
([{}], FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE, PossibleTestResults.FAILURE),
# Selector with labels but no Namespace
(
[{FargateProfileAttributes.LABELS: generate_dict("label", 1)}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Selector with Namespace but too many Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS + 1),
}
],
FARGATE_PROFILE_TOO_MANY_LABELS,
PossibleTestResults.FAILURE,
),
# Valid Selector followed by Empty Selector
(
[{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}, {}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Empty Selector followed by Valid Selector
(
[{}, {FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Empty Selector followed by Empty Selector
([{}, {}], FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE, PossibleTestResults.FAILURE),
# Valid Selector followed by Selector with Namespace but too many Labels
(
[
{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE},
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS + 1),
},
],
FARGATE_PROFILE_TOO_MANY_LABELS,
PossibleTestResults.FAILURE,
),
]
@pytest.mark.parametrize(
("selectors", "expected_message", "expected_result"),
selector_formatting_test_cases,
)
@mock_aws
def test_create_fargate_selectors(self, cluster_builder, selectors, expected_message, expected_result):
client, generated_test_data = cluster_builder()
cluster_name: str = generated_test_data.existing_cluster_name
fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
expected_exception: type[AWSError] = InvalidParameterException
test_inputs = dict(
deepcopy(
[
# Required Constants
POD_EXECUTION_ROLE_ARN,
# Required Variables
(ClusterAttributes.CLUSTER_NAME, cluster_name),
(FargateProfileAttributes.FARGATE_PROFILE_NAME, fargate_profile_name),
# Test Case Values
(FargateProfileAttributes.SELECTORS, selectors),
]
)
)
if expected_result == PossibleTestResults.SUCCESS:
result: list = client.create_fargate_profile(**test_inputs)[ResponseAttributes.FARGATE_PROFILE]
for key, expected_value in test_inputs.items():
assert result[key] == expected_value
else:
with pytest.raises(ClientError) as raised_exception:
client.create_fargate_profile(**test_inputs)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_message,
raised_exception=raised_exception,
)
| TestEksHooks |
python | huggingface__transformers | src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py | {
"start": 1524,
"end": 2026
} | class ____(ImagesKwargs, total=False):
"""
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
"""
do_reduce_labels: bool
@requires(backends=("vision",))
| MobileNetV2ImageProcessorKwargs |
python | wandb__wandb | wandb/vendor/pygments/lexers/parsers.py | {
"start": 10765,
"end": 11216
} | class ____(DelegatingLexer):
"""
A lexer for `Ragel`_ in a Java host file.
.. versionadded:: 1.1
"""
name = 'Ragel in Java Host'
aliases = ['ragel-java']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelJavaLexer, self).__init__(JavaLexer, RagelEmbeddedLexer,
**options)
def analyse_text(text):
return '@LANG: java' in text
| RagelJavaLexer |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/ops/snapshot.py | {
"start": 1192,
"end": 12308
} | class ____(dataset_ops.UnaryUnchangedStructureDataset):
"""A Dataset that captures a snapshot or reads from a snapshot."""
def __init__(self,
input_dataset,
path,
compression=None,
reader_path_prefix=None,
writer_path_prefix=None,
shard_size_bytes=None,
pending_snapshot_expiry_seconds=None,
num_reader_threads=None,
reader_buffer_size=None,
num_writer_threads=None,
writer_buffer_size=None,
shuffle_on_read=None,
shuffle_seed=None,
mode=None,
snapshot_name=None):
self._compression = compression if compression is not None else ""
self._reader_path_prefix = (
reader_path_prefix if reader_path_prefix is not None else "")
self._writer_path_prefix = (
writer_path_prefix if writer_path_prefix is not None else "")
self._shard_size_bytes = (
shard_size_bytes if shard_size_bytes is not None else -1)
self._pending_snapshot_expiry_seconds = (
pending_snapshot_expiry_seconds
if pending_snapshot_expiry_seconds is not None else -1)
self._num_reader_threads = (
num_reader_threads if num_reader_threads is not None else -1)
self._reader_buffer_size = (
reader_buffer_size if reader_buffer_size is not None else -1)
self._num_writer_threads = (
num_writer_threads if num_writer_threads is not None else -1)
self._writer_buffer_size = (
writer_buffer_size if writer_buffer_size is not None else -1)
self._shuffle_on_read = (
shuffle_on_read if shuffle_on_read is not None else False)
self._mode = (mode if mode is not None else "auto")
self._snapshot_name = (snapshot_name if snapshot_name is not None else "")
self._seed, self._seed2 = random_seed.get_seed(shuffle_seed)
self._input_dataset = input_dataset
self._path = ops.convert_to_tensor(path, dtype=dtypes.string, name="path")
variant_tensor = ged_ops.snapshot_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
path=self._path,
compression=self._compression,
reader_path_prefix=self._reader_path_prefix,
writer_path_prefix=self._writer_path_prefix,
shard_size_bytes=self._shard_size_bytes,
pending_snapshot_expiry_seconds=self._pending_snapshot_expiry_seconds,
num_reader_threads=self._num_reader_threads,
reader_buffer_size=self._reader_buffer_size,
num_writer_threads=self._num_writer_threads,
writer_buffer_size=self._writer_buffer_size,
shuffle_on_read=self._shuffle_on_read,
seed=self._seed,
seed2=self._seed2,
mode=self._mode,
snapshot_name=self._snapshot_name,
**self._flat_structure)
super(_LegacySnapshotDataset, self).__init__(input_dataset, variant_tensor)
@deprecation.deprecated(None, "Use `tf.data.Dataset.snapshot(...)` instead.")
def legacy_snapshot(path,
compression=None,
reader_path_prefix=None,
writer_path_prefix=None,
shard_size_bytes=None,
pending_snapshot_expiry_seconds=None,
num_reader_threads=None,
reader_buffer_size=None,
num_writer_threads=None,
writer_buffer_size=None,
shuffle_on_read=None,
shuffle_seed=None,
mode=None,
snapshot_name=None):
"""Writes to/reads from a snapshot of a dataset.
This function attempts to determine whether a valid snapshot exists at the
`path`, and reads from the snapshot if so. If not, it will run the
preprocessing pipeline as usual, and write out a snapshot of the data
processed for future use.
Args:
path: A directory where we want to save our snapshots and/or read from a
previously saved snapshot.
compression: The type of compression to apply to the Dataset. Currently
supports "GZIP" or None. Defaults to None (no compression).
reader_path_prefix: A prefix to add to the path when reading from snapshots.
Defaults to None.
writer_path_prefix: A prefix to add to the path when writing to snapshots.
Defaults to None.
shard_size_bytes: The size of each shard to be written by the snapshot
dataset op. Defaults to 10 GiB.
pending_snapshot_expiry_seconds: How long to wait (in seconds) before the
snapshot op considers a previously unfinished snapshot to be stale.
num_reader_threads: Number of threads to parallelize reading from snapshot.
Especially useful if compression is turned on since the decompression
operation tends to be intensive. Defaults to 1. If > 1, then this might
introduce non-determinism i.e. the order in which the elements are read
from the snapshot are different from the order they're written.
reader_buffer_size: Maximum number of elements we can prefetch reading from
the snapshot. Defaults to 1. Increasing this might improve performance but
will increase memory consumption.
num_writer_threads: Number of threads to parallelize writing from snapshot.
We'll open up `num_writer_threads` files and write to them in parallel.
Especially useful if compression is turned on since the compression
operation tends to be intensive. Defaults to 1. If > 1, then this might
introduce non-determinism i.e. the order in which the elements are read
from the upstream iterator are different from the order they're written.
writer_buffer_size: Maximum number of pipeline elements to fill up the
buffer before writing them out using `num_writer_threads`.
shuffle_on_read: If this is True, then the order in which examples are
produced when reading from a snapshot will be random. Defaults to False.
shuffle_seed: Optional. If shuffle_seed is set, the random number generator
used for shuffling (when shuffle_on_read is turned on) is seeded by the
given seed. Otherwise, it is seeded by a random seed that differs for
every run.
mode: The mode at which snapshot should operate. Valid options are "auto",
"read", "write", and "passthrough". The default mode is "auto", where the
snapshot op will automatically determine what mode to operate in.
snapshot_name: If set, use the supplied string as a named snapshot name
instead of introspecting the data pipeline and automatically generating a
unique identifier for the snapshot.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _LegacySnapshotDataset(
input_dataset=dataset,
path=path,
compression=compression,
reader_path_prefix=reader_path_prefix,
writer_path_prefix=writer_path_prefix,
shard_size_bytes=shard_size_bytes,
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,
num_reader_threads=num_reader_threads,
reader_buffer_size=reader_buffer_size,
num_writer_threads=num_writer_threads,
writer_buffer_size=writer_buffer_size,
shuffle_on_read=shuffle_on_read,
shuffle_seed=shuffle_seed,
mode=mode,
snapshot_name=snapshot_name)
return _apply_fn
@deprecation.deprecated(None, "Use `tf.data.Dataset.snapshot(...)`.")
@tf_export("data.experimental.snapshot")
def snapshot(path, compression="AUTO", reader_func=None, shard_func=None):
"""API to persist the output of the input dataset.
The snapshot API allows users to transparently persist the output of their
preprocessing pipeline to disk, and materialize the pre-processed data on a
different training run.
This API enables repeated preprocessing steps to be consolidated, and allows
re-use of already processed data, trading off disk storage and network
bandwidth for freeing up more valuable CPU resources and accelerator compute
time.
https://github.com/tensorflow/community/blob/master/rfcs/20200107-tf-data-snapshot.md
has detailed design documentation of this feature.
Users can specify various options to control the behavior of snapshot,
including how snapshots are read from and written to by passing in
user-defined functions to the `reader_func` and `shard_func` parameters.
`shard_func` is a user specified function that maps input elements to snapshot
shards.
Users may want to specify this function to control how snapshot files should
be written to disk. Below is an example of how a potential shard_func could
be written.
```python
dataset = ...
dataset = dataset.enumerate()
dataset = dataset.apply(tf.data.Dataset.snapshot("/path/to/snapshot/dir",
shard_func=lambda x, y: x % NUM_SHARDS, ...))
dataset = dataset.map(lambda x, y: y)
```
`reader_func` is a user specified function that accepts a single argument:
(1) a Dataset of Datasets, each representing a "split" of elements of the
original dataset. The cardinality of the input dataset matches the
number of the shards specified in the `shard_func` (see above). The function
should return a Dataset of elements of the original dataset.
Users may want specify this function to control how snapshot files should be
read from disk, including the amount of shuffling and parallelism.
Here is an example of a standard reader function a user can define. This
function enables both dataset shuffling and parallel reading of datasets:
```python
def user_reader_func(datasets):
# shuffle the datasets splits
datasets = datasets.shuffle(NUM_CORES)
# read datasets in parallel and interleave their elements
return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE)
dataset = dataset.apply(tf.data.Dataset.snapshot("/path/to/snapshot/dir",
reader_func=user_reader_func))
```
By default, snapshot parallelizes reads by the number of cores available on
the system, but will not attempt to shuffle the data.
Args:
path: Required. A directory to use for storing / loading the snapshot to /
from.
compression: Optional. The type of compression to apply to the snapshot
written to disk. Supported options are `GZIP`, `SNAPPY`, `AUTO` or None.
Defaults to AUTO, which attempts to pick an appropriate compression
algorithm for the dataset.
reader_func: Optional. A function to control how to read data from snapshot
shards.
shard_func: Optional. A function to control how to shard data when writing a
snapshot.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Actual dataset transformation."""
return dataset.snapshot(
path=path,
compression=compression,
reader_func=reader_func,
shard_func=shard_func)
return _apply_fn
| _LegacySnapshotDataset |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI019_0.py | {
"start": 3885,
"end": 4098
} | class ____:
def m[S](self: S, other: S) -> S:
x: S = other
return x
@classmethod
def n[S](cls: type[S], other: S) -> S:
x: type[S] = type(other)
return x()
| MethodsWithBody |
python | fastai__fastai | fastai/vision/core.py | {
"start": 5678,
"end": 6034
} | class ____(Transform):
"Add the code metadata to a `TensorMask`"
def __init__(self, codes=None):
self.codes = codes
if codes is not None: self.vocab,self.c = codes,len(codes)
def decodes(self, o:TensorMask):
if self.codes is not None: o.codes=self.codes
return o
# %% ../../nbs/07_vision.core.ipynb 60
| AddMaskCodes |
python | pallets__flask | src/flask/config.py | {
"start": 257,
"end": 1094
} | class ____(t.Generic[T]):
"""Makes an attribute forward to the config"""
def __init__(
self, name: str, get_converter: t.Callable[[t.Any], T] | None = None
) -> None:
self.__name__ = name
self.get_converter = get_converter
@t.overload
def __get__(self, obj: None, owner: None) -> te.Self: ...
@t.overload
def __get__(self, obj: App, owner: type[App]) -> T: ...
def __get__(self, obj: App | None, owner: type[App] | None = None) -> T | te.Self:
if obj is None:
return self
rv = obj.config[self.__name__]
if self.get_converter is not None:
rv = self.get_converter(rv)
return rv # type: ignore[no-any-return]
def __set__(self, obj: App, value: t.Any) -> None:
obj.config[self.__name__] = value
| ConfigAttribute |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 17491,
"end": 17968
} | class ____(LocalizableStreamlitException):
"""Exception raised when data provided to a bidirectional component cannot be serialized."""
def __init__(self) -> None:
super().__init__(
"The `data` provided to the bidirectional component could not be serialized. "
"Please ensure the data is JSON-serializable, or is a supported data structure "
"like a pandas DataFrame."
)
# policies
| BidiComponentUnserializableDataError |
python | plotly__plotly.py | plotly/graph_objs/pie/marker/_pattern.py | {
"start": 233,
"end": 15270
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "pie.marker"
_path_str = "pie.marker.pattern"
_valid_props = {
"bgcolor",
"bgcolorsrc",
"fgcolor",
"fgcolorsrc",
"fgopacity",
"fillmode",
"path",
"pathsrc",
"shape",
"shapesrc",
"size",
"sizesrc",
"solidity",
"soliditysrc",
}
@property
def bgcolor(self):
"""
When there is no colorscale sets the color of background
pattern fill. Defaults to a `marker.color` background when
`fillmode` is "overlay". Otherwise, defaults to a transparent
background.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def fgcolor(self):
"""
When there is no colorscale sets the color of foreground
pattern fill. Defaults to a `marker.color` background when
`fillmode` is "replace". Otherwise, defaults to dark grey or
white to increase contrast with the `bgcolor`.
The 'fgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["fgcolor"]
@fgcolor.setter
def fgcolor(self, val):
self["fgcolor"] = val
@property
def fgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `fgcolor`.
The 'fgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["fgcolorsrc"]
@fgcolorsrc.setter
def fgcolorsrc(self, val):
self["fgcolorsrc"] = val
@property
def fgopacity(self):
"""
Sets the opacity of the foreground pattern fill. Defaults to a
0.5 when `fillmode` is "overlay". Otherwise, defaults to 1.
The 'fgopacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["fgopacity"]
@fgopacity.setter
def fgopacity(self, val):
self["fgopacity"] = val
@property
def fillmode(self):
"""
Determines whether `marker.color` should be used as a default
to `bgcolor` or a `fgcolor`.
The 'fillmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['replace', 'overlay']
Returns
-------
Any
"""
return self["fillmode"]
@fillmode.setter
def fillmode(self, val):
self["fillmode"] = val
@property
def path(self):
"""
Sets a custom path for pattern fill. Use with no `shape` or
`solidity`, provide an SVG path string for the regions of the
square from (0,0) to (`size`,`size`) to color.
The 'path' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["path"]
@path.setter
def path(self, val):
self["path"] = val
@property
def pathsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `path`.
The 'pathsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["pathsrc"]
@pathsrc.setter
def pathsrc(self, val):
self["pathsrc"] = val
@property
def shape(self):
"""
Sets the shape of the pattern fill. By default, no pattern is
used for filling the area.
The 'shape' property is an enumeration that may be specified as:
- One of the following enumeration values:
['', '/', '\\', 'x', '-', '|', '+', '.']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["shape"]
@shape.setter
def shape(self, val):
self["shape"] = val
@property
def shapesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shape`.
The 'shapesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shapesrc"]
@shapesrc.setter
def shapesrc(self, val):
self["shapesrc"] = val
@property
def size(self):
"""
Sets the size of unit squares of the pattern fill in pixels,
which corresponds to the interval of repetition of the pattern.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def solidity(self):
"""
Sets the solidity of the pattern fill. Solidity is roughly the
fraction of the area filled by the pattern. Solidity of 0 shows
only the background color without pattern and solidty of 1
shows only the foreground color without pattern.
The 'solidity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["solidity"]
@solidity.setter
def solidity(self, val):
self["solidity"] = val
@property
def soliditysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `solidity`.
The 'soliditysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["soliditysrc"]
@soliditysrc.setter
def soliditysrc(self, val):
self["soliditysrc"] = val
@property
def _prop_descriptions(self):
return """\
bgcolor
When there is no colorscale sets the color of
background pattern fill. Defaults to a `marker.color`
background when `fillmode` is "overlay". Otherwise,
defaults to a transparent background.
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
fgcolor
When there is no colorscale sets the color of
foreground pattern fill. Defaults to a `marker.color`
background when `fillmode` is "replace". Otherwise,
defaults to dark grey or white to increase contrast
with the `bgcolor`.
fgcolorsrc
Sets the source reference on Chart Studio Cloud for
`fgcolor`.
fgopacity
Sets the opacity of the foreground pattern fill.
Defaults to a 0.5 when `fillmode` is "overlay".
Otherwise, defaults to 1.
fillmode
Determines whether `marker.color` should be used as a
default to `bgcolor` or a `fgcolor`.
path
Sets a custom path for pattern fill. Use with no
`shape` or `solidity`, provide an SVG path string for
the regions of the square from (0,0) to (`size`,`size`)
to color.
pathsrc
Sets the source reference on Chart Studio Cloud for
`path`.
shape
Sets the shape of the pattern fill. By default, no
pattern is used for filling the area.
shapesrc
Sets the source reference on Chart Studio Cloud for
`shape`.
size
Sets the size of unit squares of the pattern fill in
pixels, which corresponds to the interval of repetition
of the pattern.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
solidity
Sets the solidity of the pattern fill. Solidity is
roughly the fraction of the area filled by the pattern.
Solidity of 0 shows only the background color without
pattern and solidty of 1 shows only the foreground
color without pattern.
soliditysrc
Sets the source reference on Chart Studio Cloud for
`solidity`.
"""
def __init__(
self,
arg=None,
bgcolor=None,
bgcolorsrc=None,
fgcolor=None,
fgcolorsrc=None,
fgopacity=None,
fillmode=None,
path=None,
pathsrc=None,
shape=None,
shapesrc=None,
size=None,
sizesrc=None,
solidity=None,
soliditysrc=None,
**kwargs,
):
"""
Construct a new Pattern object
Sets the pattern within the marker.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.pie.marker.Pattern`
bgcolor
When there is no colorscale sets the color of
background pattern fill. Defaults to a `marker.color`
background when `fillmode` is "overlay". Otherwise,
defaults to a transparent background.
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
fgcolor
When there is no colorscale sets the color of
foreground pattern fill. Defaults to a `marker.color`
background when `fillmode` is "replace". Otherwise,
defaults to dark grey or white to increase contrast
with the `bgcolor`.
fgcolorsrc
Sets the source reference on Chart Studio Cloud for
`fgcolor`.
fgopacity
Sets the opacity of the foreground pattern fill.
Defaults to a 0.5 when `fillmode` is "overlay".
Otherwise, defaults to 1.
fillmode
Determines whether `marker.color` should be used as a
default to `bgcolor` or a `fgcolor`.
path
Sets a custom path for pattern fill. Use with no
`shape` or `solidity`, provide an SVG path string for
the regions of the square from (0,0) to (`size`,`size`)
to color.
pathsrc
Sets the source reference on Chart Studio Cloud for
`path`.
shape
Sets the shape of the pattern fill. By default, no
pattern is used for filling the area.
shapesrc
Sets the source reference on Chart Studio Cloud for
`shape`.
size
Sets the size of unit squares of the pattern fill in
pixels, which corresponds to the interval of repetition
of the pattern.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
solidity
Sets the solidity of the pattern fill. Solidity is
roughly the fraction of the area filled by the pattern.
Solidity of 0 shows only the background color without
pattern and solidty of 1 shows only the foreground
color without pattern.
soliditysrc
Sets the source reference on Chart Studio Cloud for
`solidity`.
Returns
-------
Pattern
"""
super().__init__("pattern")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.pie.marker.Pattern
constructor must be a dict or
an instance of :class:`plotly.graph_objs.pie.marker.Pattern`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("fgcolor", arg, fgcolor)
self._set_property("fgcolorsrc", arg, fgcolorsrc)
self._set_property("fgopacity", arg, fgopacity)
self._set_property("fillmode", arg, fillmode)
self._set_property("path", arg, path)
self._set_property("pathsrc", arg, pathsrc)
self._set_property("shape", arg, shape)
self._set_property("shapesrc", arg, shapesrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("solidity", arg, solidity)
self._set_property("soliditysrc", arg, soliditysrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Pattern |
python | vyperlang__vyper | vyper/venom/basicblock.py | {
"start": 2420,
"end": 2874
} | class ____:
"""
IRDebugInfo represents debug information in IR, used to annotate IR
instructions with source code information when printing IR.
"""
line_no: int
src: str
def __init__(self, line_no: int, src: str) -> None:
self.line_no = line_no
self.src = src
def __repr__(self) -> str:
src = self.src if self.src else ""
return f"\t; line {self.line_no}: {src}".expandtabs(20)
| IRDebugInfo |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1501788,
"end": 1503787
} | class ____(sgqlc.types.Type, Node):
"""Represents a commit status."""
__schema__ = github_schema
__field_names__ = ("combined_contexts", "commit", "context", "contexts", "state")
combined_contexts = sgqlc.types.Field(
sgqlc.types.non_null(StatusCheckRollupContextConnection),
graphql_name="combinedContexts",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of status contexts and check runs for this commit.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
commit = sgqlc.types.Field(Commit, graphql_name="commit")
"""The commit this status is attached to."""
context = sgqlc.types.Field(
"StatusContext",
graphql_name="context",
args=sgqlc.types.ArgDict((("name", sgqlc.types.Arg(sgqlc.types.non_null(String), graphql_name="name", default=None)),)),
)
"""Looks up an individual status context by context name.
Arguments:
* `name` (`String!`): The context name.
"""
contexts = sgqlc.types.Field(sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null("StatusContext"))), graphql_name="contexts")
"""The individual status contexts for this commit."""
state = sgqlc.types.Field(sgqlc.types.non_null(StatusState), graphql_name="state")
"""The combined commit status."""
| Status |
python | django__django | tests/decorators/test_csrf.py | {
"start": 2330,
"end": 4040
} | class ____(CsrfTestMixin, SimpleTestCase):
def test_wrapped_sync_function_is_not_coroutine_function(self):
def sync_view(request):
return HttpResponse()
wrapped_view = requires_csrf_token(sync_view)
self.assertIs(iscoroutinefunction(wrapped_view), False)
def test_wrapped_async_function_is_coroutine_function(self):
async def async_view(request):
return HttpResponse()
wrapped_view = requires_csrf_token(async_view)
self.assertIs(iscoroutinefunction(wrapped_view), True)
def test_requires_csrf_token_decorator(self):
@requires_csrf_token
def sync_view(request):
return HttpResponse()
request = self.get_request()
response = sync_view(request)
self.assertEqual(response.status_code, 200)
self.assertIs(request.csrf_processing_done, True)
with self.assertNoLogs("django.security.csrf", "WARNING"):
request = self.get_request(token=None)
response = sync_view(request)
self.assertEqual(response.status_code, 200)
async def test_requires_csrf_token_decorator_async_view(self):
@requires_csrf_token
async def async_view(request):
return HttpResponse()
request = self.get_request()
response = await async_view(request)
self.assertEqual(response.status_code, 200)
self.assertIs(request.csrf_processing_done, True)
with self.assertNoLogs("django.security.csrf", "WARNING"):
request = self.get_request(token=None)
response = await async_view(request)
self.assertEqual(response.status_code, 200)
| RequiresCsrfTokenTests |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/prefect_dbt/cli/configs/base.py | {
"start": 13502,
"end": 13796
} | class ____(ImportError):
def __init__(self, service, *args, **kwargs):
msg = (
f"To use {service.title()}TargetConfigs, "
f'execute `pip install "prefect-dbt[{service.lower()}]"`'
)
super().__init__(msg, *args, **kwargs)
| MissingExtrasRequireError |
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 42396,
"end": 42851
} | class ____:
def __init__(self, value=None):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if isinstance(other, EqOverride):
return self.value == other.value
else:
return False
def __ne__(self, other):
if isinstance(other, EqOverride):
return self.value != other.value
else:
return True
| HashEqOverride |
python | huggingface__transformers | examples/metrics-monitoring/metrics_example.py | {
"start": 139,
"end": 1551
} | class ____:
def __init__(self, name):
# The attach_tracer decorator has already created self.tracer for us
self.name = name
@traced # This method will use the tracer from the class instance
def process_data(self, data):
# This method is traced and can use self.tracer
return f"Processed {data} with {self.name}"
@traced(span_name="custom_operation") # With custom span name
def special_operation(self, value):
# Also traced, with a custom span name
return value * 2
@traced(
additional_attributes=[
("name", "object.name", lambda x: x.upper()), # Using a transform function
("name", "object.fixed_value", "static_value"), # Using a fixed value
]
)
def operation_with_attributes(self):
# This will add the specified attributes to the span
return "Operation completed"
# For functions without a class, the traced decorator still works
@traced
def standalone_function(arg1, arg2):
# For functions, a tracer is created based on the module name
return arg1 + arg2
# Usage:
if __name__ == "__main__":
# With OpenTelemetry configured, these will produce traces
example = ExampleClass("test_object")
example.process_data("sample")
example.special_operation(42)
example.operation_with_attributes()
result = standalone_function(1, 2)
| ExampleClass |
python | pytorch__pytorch | torch/distributed/_symmetric_memory/__init__.py | {
"start": 18782,
"end": 57158
} | class ____(Enum):
UNSCALED = "unscaled"
TENSOR_WISE = "tensor-wise"
ROW_WISE_SHARDED = "row-wise-sharded"
ROW_WISE_REPLICATED = "row-wise-replicated"
def _check_and_verify_fp8_all_gather_scale_mode(
shard: torch.Tensor, scale: torch.Tensor | None, gather_dim: int, group_size: int
) -> _ScaleMode:
full_shape = list(shard.shape)
full_shape[gather_dim] *= group_size
if scale is None:
return _ScaleMode.UNSCALED
elif scale.shape[:-1] == shard.shape[:-1] and scale.shape[-1] == 1:
# Row-wise scaling
#
# NOTE: when the last dim of both A_shard and A_scale is one, we can't
# tell if A_scale is replicated tensor-wise scale or sharded row-wise
# scale. Treating it as row-wise scaling for safety.
return _ScaleMode.ROW_WISE_SHARDED
elif scale.numel() == 1:
return _ScaleMode.TENSOR_WISE
elif list(scale.shape[:-1]) == full_shape[:-1]:
return _ScaleMode.ROW_WISE_REPLICATED
else:
raise ValueError(
"Invalid scale shape for fp8 all-gather "
f"(shard shape: {shard.shape}, scale shape: {scale.shape})"
)
def _fused_all_gather_matmul_impl(
mm_out_op: torch._ops.OpOverload,
A_shard: torch.Tensor,
Bs: list[torch.Tensor],
A_scale: torch.Tensor | None,
kwargs_list: list[dict[str, Any]],
out_dtypes: list[torch.dtype | None],
gather_dim: int,
group_name: str,
return_A: bool,
) -> tuple[torch.Tensor | None, list[torch.Tensor]]:
if A_shard.dim() < 2:
raise ValueError("A_shard must be a matrix")
for B in Bs:
if B.dim() != 2:
raise ValueError("B must be a matrix")
if len(out_dtypes) != len(Bs):
raise ValueError("len(out_types) must be the same as len(Bs)")
if len(kwargs_list) != len(Bs):
raise ValueError("len(kwargs_list) must be the same as len(Bs)")
if gather_dim < 0 or gather_dim >= A_shard.dim():
raise ValueError("Invalid gather_dim")
group = c10d._resolve_process_group(group_name)
if gather_dim == A_shard.ndim - 1 or gather_dim == -1:
return _fused_all_gather_matmul_last_gather_dim_impl(
mm_out_op,
A_shard,
Bs,
A_scale,
kwargs_list,
out_dtypes,
gather_dim,
group_name,
return_A,
)
# Move the gather_dim to the front and flatten the tensor into a 2D matrix.
# The flattened tensor doesn't need to be contiguous (for computation
# efficiency), as _pipelined_all_gather_and_consume guarantees that shards
# passed to shard_consumer are contiguous.
A_shard_flat = A_shard.movedim(gather_dim, 0)
leading_dims = [group.size()] + list(A_shard_flat.shape[:-1])
A_shard_flat = A_shard_flat.flatten(0, -2)
# Helper function for reverting the above transformation
def unflatten(t: torch.Tensor) -> torch.Tensor:
return t.view(*leading_dims, -1).flatten(0, 1).movedim(0, gather_dim)
A_flat = A_shard_flat.new_empty(
A_shard_flat.shape[0] * group.size(),
A_shard_flat.shape[1],
)
outputs = [
A_flat.new_empty(A_flat.shape[0], B.shape[1], dtype=out_dtype or B.dtype)
for B, out_dtype in zip(Bs, out_dtypes)
]
output_shards = [output.chunk(group.size()) for output in outputs]
scale_mode = _check_and_verify_fp8_all_gather_scale_mode(
shard=A_shard, scale=A_scale, gather_dim=gather_dim, group_size=group.size()
)
# Computing block-wise matmul along the first dim of A
if scale_mode == _ScaleMode.ROW_WISE_SHARDED:
assert A_scale is not None
A_scale_shard = A_scale.movedim(gather_dim, 0).flatten(0, -2)
A_scale_flat = A_scale_shard.new_empty(
A_scale_shard.shape[0] * group.size(),
A_scale_shard.shape[1],
)
def row_wise_sharded_consumer(shard: list[torch.Tensor], rank: int) -> None:
for idx, (B, kwargs) in enumerate(zip(Bs, kwargs_list)):
mm_out_op(
shard[0],
B,
scale_a=shard[1],
**kwargs,
out=output_shards[idx][rank],
)
_pipelined_multi_all_gather_and_consume(
[A_shard_flat, A_scale_shard],
row_wise_sharded_consumer,
[A_flat, A_scale_flat],
group_name,
return_A,
)
elif scale_mode == _ScaleMode.ROW_WISE_REPLICATED:
assert A_scale is not None
A_scale_shards = (
A_scale.movedim(gather_dim, 0).flatten(0, -2).chunk(group.size())
)
def row_wise_replicated_consumer(shard: torch.Tensor, rank: int) -> None:
for idx, (B, kwargs) in enumerate(zip(Bs, kwargs_list)):
mm_out_op(
shard,
B,
scale_a=A_scale_shards[rank],
**kwargs,
out=output_shards[idx][rank],
)
_pipelined_all_gather_and_consume(
A_shard_flat,
row_wise_replicated_consumer,
A_flat,
group_name,
return_A,
)
else:
if scale_mode == _ScaleMode.TENSOR_WISE:
assert A_scale is not None
for kwargs in kwargs_list:
kwargs["scale_a"] = A_scale
else:
assert scale_mode == _ScaleMode.UNSCALED
def default_consumer(shard: torch.Tensor, rank: int) -> None:
for idx, (B, kwargs) in enumerate(zip(Bs, kwargs_list)):
mm_out_op(shard, B, **kwargs, out=output_shards[idx][rank])
_pipelined_all_gather_and_consume(
A_shard_flat,
default_consumer,
A_flat,
group_name,
return_A,
)
A = unflatten(A_flat) if return_A else None
return A, [unflatten(output) for output in outputs]
def _pipelined_all_gather_and_consume_last_dim(
shard: torch.Tensor,
shard_consumer: Callable[[torch.Tensor, int], None],
ag_out: torch.Tensor,
group_name: str,
ag_out_needed: bool = True,
) -> None:
p2p_workspace_size_req = 0
p2p_workspace_size_req = shard.numel() * shard.element_size()
symm_mem = get_symm_mem_workspace(group_name, min_size=p2p_workspace_size_req)
group_size = symm_mem.world_size
rank = symm_mem.rank
symm_mem.barrier(channel=0)
backend_stream = _get_backend_stream()
backend_stream.wait_stream(torch.cuda.current_stream())
def copy_shard(dst: torch.Tensor, src: torch.Tensor) -> None:
dst.copy_(src)
def get_p2p_buf(remote_rank: int) -> torch.Tensor:
buf = symm_mem.get_buffer(
remote_rank,
shard.shape,
shard.dtype,
)
return buf
local_p2p_buf = get_p2p_buf(rank)
shards = ag_out.chunk(group_size)
copy_shard(dst=local_p2p_buf, src=shard)
symm_mem.barrier(channel=1)
backend_stream.wait_stream(torch.cuda.current_stream())
# At this point, all ranks have copied their local shard to
# their local p2p buffer. Each rank can now copy and consume
# remote shards.
shard_consumer(shard, rank)
for step in range(1, group_size):
if step % 2 == 0:
stream = torch.cuda.current_stream()
else:
stream = backend_stream
remote_rank = (step + rank) % group_size
remote_p2p_buf = get_p2p_buf(remote_rank)
with stream:
copy_shard(dst=shards[remote_rank], src=remote_p2p_buf)
shard_consumer(shards[remote_rank], remote_rank)
if ag_out_needed:
# Copy from input to the all-gather output. Opportunistically overlap
# it with the last shard_consumer.
if group_size % 2 == 0:
stream = torch.cuda.current_stream()
else:
stream = backend_stream
with stream:
copy_shard(dst=shards[rank], src=shard)
torch.cuda.current_stream().wait_stream(backend_stream)
symm_mem.barrier(channel=0)
def _fused_all_gather_matmul_last_gather_dim_impl(
mm_out_op: torch._ops.OpOverload,
A_shard: torch.Tensor,
Bs: list[torch.Tensor],
A_scale: torch.Tensor | None,
kwargs_list: list[dict[str, Any]],
out_dtypes: list[torch.dtype | None],
gather_dim: int,
group_name: str,
return_A: bool,
) -> tuple[torch.Tensor | None, list[torch.Tensor]]:
group = c10d._resolve_process_group(group_name)
group_size = group.size()
B_shards = [B.chunk(group.size()) for B in Bs]
leading_dims = list(A_shard.shape[:-1])
A_shard_flat = A_shard.flatten(0, -2)
def unflatten(t: torch.Tensor) -> torch.Tensor:
return t.view(*leading_dims, -1)
A_flat_out = A_shard_flat.new_empty(
A_shard_flat.shape[0] * group.size(),
A_shard_flat.shape[1],
)
outputs = [
torch.empty(
(A_shard_flat.shape[0], B.shape[1]),
dtype=out_dtype or B.dtype,
device=A_shard.device,
)
for B, out_dtype in zip(Bs, out_dtypes)
]
first = True
events = [torch.cuda.Event() for _ in outputs]
def default_consumer(shard: torch.Tensor, rank: int) -> None:
nonlocal first
for out, event, B_shard, kwargs in zip(outputs, events, B_shards, kwargs_list):
event.wait()
if first:
torch.ops.aten.mm.out(shard, B_shard[rank], **kwargs, out=out)
else:
out.addmm_(shard, B_shard[rank])
event.record()
first = False
_pipelined_all_gather_and_consume_last_dim(
A_shard_flat,
default_consumer,
A_flat_out,
group_name,
return_A,
)
ret_A = None
if return_A:
# This path is inefficient and will be filtered out at passes stage
# Added only for completeness.
A_split_cat_out_flat = torch.cat(A_flat_out.chunk(group_size), dim=-1)
ret_A = unflatten(A_split_cat_out_flat)
return ret_A, [unflatten(output) for output in outputs]
@torch.library.impl(lib, "fused_all_gather_matmul", "Meta")
def _fused_all_gather_matmul_fallback(
A_shard: torch.Tensor,
Bs: list[torch.Tensor],
gather_dim: int,
group_name: str,
*,
return_A: bool = True,
) -> tuple[torch.Tensor | None, list[torch.Tensor]]:
group_size = c10d._get_group_size_by_name(group_name)
A = torch.ops._c10d_functional.all_gather_into_tensor(
A_shard.contiguous(), group_size, group_name
)
A = torch.ops._c10d_functional.wait_tensor(A)
if gather_dim == A.ndim - 1 or gather_dim == -1:
A_splits = A.chunk(group_size)
A_mm = torch.cat(A_splits, dim=-1)
res = [torch.matmul(A_mm, B) for B in Bs]
if return_A:
return A_mm, res
else:
return None, res
A = A.view(group_size, *A_shard.shape).movedim(gather_dim + 1, 1).flatten(0, 1)
res = [torch.matmul(A, B).movedim(0, gather_dim) for B in Bs]
if return_A:
return A.movedim(0, gather_dim), res
else:
return None, res
@torch.library.impl(lib, "fused_all_gather_matmul", "CUDA")
def _fused_all_gather_matmul(
A_shard: torch.Tensor,
Bs: list[torch.Tensor],
gather_dim: int,
group_name: str,
*,
return_A: bool = True,
) -> tuple[torch.Tensor | None, list[torch.Tensor]]:
"""
Perform the following logic with micro-pipelined computation and
communication:
all_gather_tensor(A_shard, gather_dim, group_name) @ B
Optimal stride order for A_shard - if A_shard.movedim(gather_dim, 0) is
contiguous, no extra copy is required for input layout transformation.
Otherwise A_shard needs to be copied once.
"""
if _is_test_mode:
return _fused_all_gather_matmul_fallback(
A_shard, Bs, gather_dim, group_name, return_A=return_A
)
if _should_use_fused_all_gather_matmul_native(A_shard, Bs, gather_dim, group_name):
group = c10d._resolve_process_group(group_name)
leading_dims = list(A_shard.shape[:-1])
leading_dims[0] *= group.size()
A, out = _fused_all_gather_matmul_native(
A_shard.flatten(0, -2), Bs[0], group_name
)
return A.view(*leading_dims, -1), [out.view(*leading_dims, -1)]
if _should_use_multimem_all_gather_matmul(
A_shard, gather_dim, group_name, return_A
):
return None, _multimem_all_gather_matmul(A_shard, Bs, group_name)
with torch.profiler.record_function("fused_all_gather_matmul"):
return _fused_all_gather_matmul_impl(
torch.ops.aten.mm.out,
A_shard,
Bs,
None,
[{} for B in Bs],
[B.dtype for B in Bs],
gather_dim,
group_name,
return_A,
)
def _should_use_fused_all_gather_matmul_native(
A_shard: torch.Tensor,
Bs: list[torch.Tensor],
gather_dim: int,
group_name: str,
) -> bool:
group = c10d._resolve_process_group(group_name)
local_M = math.prod(A_shard.shape[:-1])
return (
"TORCH_SYMM_MEM_ENABLE_NATIVE_ASYNC_TP" in os.environ
and A_shard.is_contiguous()
and gather_dim == 0
# _async_input_mm requires local_M to be divisible by world_size.
and local_M % group.size() == 0
# _async_input_mm outperforms the decomposition-based approach when the
# global M is small.
and 2048 < local_M * group.size() <= 4096
# _async_input_mm only supports a single B.
and len(Bs) == 1
)
def _fused_all_gather_matmul_native(
A_shard: torch.Tensor,
B: torch.Tensor,
group_name: str,
) -> tuple[torch.Tensor, torch.Tensor]:
symm_mem = rendezvous(A_shard, group_name)
if symm_mem is None:
symm_mem = get_symm_mem_workspace(
group_name, A_shard.numel() * A_shard.element_size()
)
symm_mem.barrier()
buf = symm_mem.get_buffer(symm_mem.rank, A_shard.shape, A_shard.dtype)
buf.copy_(A_shard)
A_shard = buf
rank = symm_mem.rank
world_size = symm_mem.world_size
current_stream = torch.cuda.current_stream()
backend_stream = _get_backend_stream(priority=-1)
symm_mem.barrier()
backend_stream.wait_stream(current_stream)
current_stream.wait_stream(backend_stream)
A = A_shard.new_empty(A_shard.shape[0] * world_size, A_shard.shape[1])
A_signals = torch.zeros(world_size, dtype=torch.uint32, device=A_shard.device)
A_shards = A.chunk(world_size)
A_shards[rank].copy_(A_shard)
if not torch.cuda.is_current_stream_capturing():
_SymmetricMemory.stream_write_value32(A_signals, rank, 1)
else:
_SymmetricMemory.memset32(A_signals, offset=rank, val=1, count=1)
out = torch.ops.symm_mem._async_input_mm(A, B, A_signals, rank)
for step in range(1, world_size):
src_rank = (rank + step) % world_size
src_buf = symm_mem.get_buffer(src_rank, A_shard.shape, A_shard.dtype)
with backend_stream:
A_shards[src_rank].copy_(src_buf)
if not torch.cuda.is_current_stream_capturing():
# cuStreamWriteValue32 issues a system level fence before the write
_SymmetricMemory.stream_write_value32(A_signals, src_rank, 1)
else:
_SymmetricMemory.memset32(A_signals, offset=src_rank, val=1, count=1)
current_stream.wait_stream(backend_stream)
backend_stream.wait_stream(current_stream)
symm_mem.barrier()
return A, out
def _should_use_multimem_all_gather_matmul(
A_shard: torch.Tensor,
gather_dim: int,
group_name: str,
return_A: bool,
) -> bool:
group = c10d._resolve_process_group(group_name)
local_M = math.prod(A_shard.shape[:-1])
has_multicast_support = (
A_shard.device.type == "cuda"
and _SymmetricMemory.has_multicast_support(
DeviceType.CUDA, A_shard.device.index
)
)
return (
has_multicast_support
and not return_A
and A_shard.is_contiguous()
and gather_dim == 0
# The heuristic is empirical. We could refine it with a more
# sophisticated perf model.
and local_M * group.size() <= 2048
)
def _multimem_all_gather_matmul(
A_shard: torch.Tensor,
Bs: list[torch.Tensor],
group_name: str,
) -> list[torch.Tensor]:
group = c10d._resolve_process_group(group_name)
A_shape = torch.Size((A_shard.shape[0] * group.size(), *A_shard.shape[1:]))
symm_mem = get_symm_mem_workspace(
group_name, A_shape.numel() * A_shard.element_size()
)
A = symm_mem.get_buffer(symm_mem.rank, A_shape, A_shard.dtype)
torch.ops.symm_mem.multimem_all_gather_out(A_shard, group_name, A)
return [torch.matmul(A, B) for B in Bs]
@torch.library.impl(lib, "fused_all_gather_scaled_matmul", "Meta")
def _fused_all_gather_scaled_matmul_fallback(
    A_shard: torch.Tensor,
    Bs: list[torch.Tensor],
    A_scale: torch.Tensor,
    B_scales: list[torch.Tensor],
    gather_dim: int,
    group_name: str,
    biases: list[torch.Tensor | None],
    result_scales: list[torch.Tensor | None],
    out_dtypes: list[torch.dtype | None],
    use_fast_accum: list[bool],
) -> tuple[torch.Tensor, list[torch.Tensor]]:
    """
    Non-pipelined reference for `fused_all_gather_scaled_matmul` (also the
    Meta-dispatch implementation): all-gather `A_shard` along `gather_dim`,
    then run one `aten._scaled_mm` per `B` in `Bs`.

    Returns the gathered `A` (with `gather_dim` restored to its original
    position) and one matmul result per `B`.
    """
    # The dispatcher may hand us ScalarType enum ints instead of dtypes.
    out_dtypes = _maybe_convert_scalar_types_to_dtypes(out_dtypes)
    group_size = c10d._get_group_size_by_name(group_name)
    A = torch.ops._c10d_functional.all_gather_into_tensor(
        A_shard.contiguous(), group_size, group_name
    )
    A = torch.ops._c10d_functional.wait_tensor(A)
    # The gather concatenates shards rank-major along dim 0; interleave the
    # rank dim back next to gather_dim and merge them, leaving the gathered
    # gather_dim as the leading dim.
    A = A.view(group_size, *A_shard.shape).movedim(gather_dim + 1, 1).flatten(0, 1)
    scale_mode = _check_and_verify_fp8_all_gather_scale_mode(
        shard=A_shard, scale=A_scale, gather_dim=gather_dim, group_size=group_size
    )
    if scale_mode == _ScaleMode.ROW_WISE_SHARDED:
        # Row-wise scales are sharded like A: gather and reassemble them the
        # same way, then flatten to one scale row per gathered row of A.
        A_scale_shard = A_scale
        A_scale = torch.ops._c10d_functional.all_gather_into_tensor(
            A_scale.contiguous(), group_size, group_name
        )
        A_scale = torch.ops._c10d_functional.wait_tensor(A_scale)
        A_scale = (
            A_scale.view(group_size, *A_scale_shard.shape)
            .movedim(gather_dim + 1, 1)
            .flatten(0, -2)
        )
    elif scale_mode == _ScaleMode.ROW_WISE_REPLICATED:
        # Scales already cover the full gathered A; only match A's dim order.
        A_scale = A_scale.movedim(gather_dim, 0).flatten(0, -2)
    else:
        # A single tensor-wise scale needs no reshaping.
        assert scale_mode == _ScaleMode.TENSOR_WISE

    def scaled_matmul(
        A: torch.Tensor,
        B: torch.Tensor,
        A_scale: torch.Tensor,
        B_scale: torch.Tensor,
        bias: torch.Tensor | None,
        result_scale: torch.Tensor | None,
        out_dtype: torch.dtype | None,
        use_fast_accum: bool,
    ) -> torch.Tensor:
        # `_scaled_mm` requires 2-D operands: flatten A's leading dims and
        # restore them on the result.
        leading_dims = A.shape[:-1]
        res = torch.ops.aten._scaled_mm(
            A.flatten(0, -2),
            B,
            A_scale,
            B_scale,
            bias,
            result_scale,
            out_dtype=out_dtype,
            use_fast_accum=use_fast_accum,
        )
        return res.unflatten(0, leading_dims)

    # Move gather_dim back into place on both the gathered A and each result.
    return A.movedim(0, gather_dim), [
        scaled_matmul(
            A, B, A_scale, B_scale, bias, result_scale, out_dtype, fast_accum
        ).movedim(0, gather_dim)
        for B, B_scale, bias, result_scale, out_dtype, fast_accum in zip(
            Bs, B_scales, biases, result_scales, out_dtypes, use_fast_accum
        )
    ]
@torch.library.impl(lib, "fused_all_gather_scaled_matmul", "CUDA")
def _fused_all_gather_scaled_matmul(
    A_shard: torch.Tensor,
    Bs: list[torch.Tensor],
    A_scale: torch.Tensor,
    B_scales: list[torch.Tensor],
    gather_dim: int,
    group_name: str,
    biases: list[torch.Tensor | None],
    result_scales: list[torch.Tensor | None],
    out_dtypes: list[torch.dtype | None],
    use_fast_accum: list[bool],
) -> tuple[torch.Tensor, list[torch.Tensor]]:
    """
    Perform the following logic with micro-pipelined computation and
    communication:

        A = all_gather_tensor(A_shard, gather_dim, group_name)
        leading_dims = A.shape[:-1]
        res = torch.ops.aten._scaled_mm(A.flatten(0, -2), B, A_scale, B_scale)
        res = res.unflatten(0, leading_dims)

    The input `A_scale` can be tensor-wise, row-wise-sharded or
    row-wise-replicated.

    Optimal stride order for `A_shard` - if `A_shard.movedim(gather_dim, 0)` is
    contiguous, no extra copy is required for input layout transformation.
    Otherwise A_shard needs to be copied once.

    Raises:
        ValueError: if `biases`, `result_scales`, `out_dtypes` or
            `use_fast_accum` does not have the same length as `Bs`.
    """
    # The dispatcher may hand us ScalarType enum ints instead of dtypes.
    out_dtypes = _maybe_convert_scalar_types_to_dtypes(out_dtypes)

    # Every per-B argument list must be parallel to `Bs`.
    if len(biases) != len(Bs):
        raise ValueError("len(biases) must be the same as len(Bs)")
    if len(result_scales) != len(Bs):
        raise ValueError("len(result_scales) must be the same as len(Bs)")
    if len(out_dtypes) != len(Bs):
        raise ValueError("len(out_dtypes) must be the same as len(Bs)")
    if len(use_fast_accum) != len(Bs):
        # Fixed message: it previously referenced a non-existent
        # "use_gast_accum_list" argument.
        raise ValueError("len(use_fast_accum) must be the same as len(Bs)")

    # Test mode exercises the non-pipelined reference implementation instead.
    if _is_test_mode:
        return _fused_all_gather_scaled_matmul_fallback(
            A_shard,
            Bs,
            A_scale,
            B_scales,
            gather_dim,
            group_name,
            biases,
            result_scales,
            out_dtypes,
            use_fast_accum,
        )

    with torch.profiler.record_function("fused_all_gather_scaled_matmul"):
        # One kwargs dict per B, forwarded to `aten._scaled_mm.out` by the
        # shared pipelined impl.
        A, res = _fused_all_gather_matmul_impl(
            torch.ops.aten._scaled_mm.out,
            A_shard,
            Bs,
            A_scale,
            [
                {
                    "scale_b": B_scale,
                    "bias": bias,
                    "scale_result": result_scale,
                    "out_dtype": out_dtype,
                    "use_fast_accum": fast_accum,
                }
                for B_scale, bias, result_scale, out_dtype, fast_accum in zip(
                    B_scales, biases, result_scales, out_dtypes, use_fast_accum
                )
            ],
            out_dtypes,
            gather_dim,
            group_name,
            True,
        )
        assert A is not None
        return A, res
def make_contiguous_for_perm(
    t: torch.Tensor,
    perm: list[int],
) -> torch.Tensor:
    """
    Return a tensor equal to `t` whose memory layout makes `t.permute(perm)`
    contiguous.

    Content and dim order are unchanged; only the strides differ.
    """
    # Invert `perm` so the permuted-and-copied tensor can be viewed back in
    # the original dim order.
    undo = [0] * len(perm)
    for dst, src in enumerate(perm):
        undo[src] = dst
    return t.permute(perm).contiguous().permute(undo)
def restride_A_shard_for_fused_all_gather_matmul(
    t: torch.Tensor,
    gather_dim: int,
) -> torch.Tensor:
    """
    Restride the `A_shard` arg of `fused_all_gather_matmul` for optimal
    perf: the result equals `t` but is laid out so that
    `t.movedim(gather_dim, 0)` is contiguous.

    See the doc for `fused_all_gather_matmul` for detail.
    """
    # Permutation that rotates `gather_dim` to the front, keeping the
    # relative order of the remaining dims.
    order = list(range(t.dim()))
    order.insert(0, order.pop(gather_dim))
    # Inverse permutation to restore the original dim order after the copy.
    inverse = [0] * len(order)
    for pos, dim in enumerate(order):
        inverse[dim] = pos
    return t.permute(order).contiguous().permute(inverse)
@torch.library.impl(lib, "fused_matmul_reduce_scatter", "CUDA")
def _fused_matmul_reduce_scatter(
    A: torch.Tensor,
    B: torch.Tensor,
    reduce_op: str,
    scatter_dim: int,
    group_name: str,
) -> torch.Tensor:
    """
    Perform the following logic with micro-pipelined computation and
    communication:

        reduce_scatter_tensor(A @ B, reduce_op, scatter_dim, group_name)

    Optimal stride order for A - if A.movedim(scatter_dim, 0) is contiguous, no
    extra copy is required for input layout transformation. Otherwise A needs
    to be copied once.

    In test mode the non-pipelined fallback implementation is used instead.
    """
    if _is_test_mode:
        return _fused_matmul_reduce_scatter_fallback(
            A, B, reduce_op, scatter_dim, group_name
        )
    with torch.profiler.record_function("fused_matmul_reduce_scatter"):
        # Delegate to the shared impl with a plain `aten.mm.out` (no scaling),
        # keeping A's dtype for the partial-result buffer.
        return _fused_matmul_reduce_scatter_impl(
            mm_out_op=torch.ops.aten.mm.out,
            A=A,
            B=B,
            kwargs={},
            out_dtype=A.dtype,
            reduce_op=reduce_op,
            scatter_dim=scatter_dim,
            group_name=group_name,
        )
@torch.library.impl(lib, "fused_matmul_reduce_scatter", "Meta")
def _fused_matmul_reduce_scatter_fallback(
    A: torch.Tensor,
    B: torch.Tensor,
    reduce_op: str,
    scatter_dim: int,
    group_name: str,
) -> torch.Tensor:
    """
    Non-pipelined reference (and Meta-dispatch implementation): compute the
    full matmul, then reduce-scatter the result along `scatter_dim`.
    """
    partial_out = torch.matmul(A, B)
    scattered = funcol.reduce_scatter_tensor(
        partial_out, reduce_op, scatter_dim, group_name
    )
    return funcol.wait_tensor(scattered)
def _fused_matmul_reduce_scatter_impl(
mm_out_op: torch._ops.OpOverload,
A: torch.Tensor,
B: torch.Tensor,
kwargs: dict[str, Any],
out_dtype: torch.dtype | None,
reduce_op: str,
scatter_dim: int,
group_name: str,
) -> torch.Tensor:
if A.dim() < 2:
raise ValueError("A_shard must be a matrix")
if scatter_dim < 0 or scatter_dim >= A.dim():
raise ValueError("Invalid gather_dim")
if B.dim() != 2:
raise ValueError("B must be a matrix")
if reduce_op == "sum":
reduce_fn = partial(torch.sum, dim=0)
elif reduce_op == "avg":
reduce_fn = partial(torch.mean, dim=0)
else:
raise ValueError("reduce_op must be sum or avg")
group = c10d._resolve_process_group(group_name)
out_shape = [*A.shape[:-1], B.shape[1]]
out_shape[scatter_dim] //= group.size()
if scatter_dim == A.ndim - 1:
B_shards = B.chunk(group.size(), dim=B.ndim - 1)
A_flat = A.flatten(0, -2)
def _chunk_producer(rank: int, out: torch.Tensor) -> None:
mm_out_op(A_flat, B_shards[rank], **kwargs, out=out)
leading_dims = list(A.shape[:-1])
stacked_partials = torch.empty(
(A_flat.shape[0], B.shape[1]),
dtype=out_dtype or A.dtype,
device=A.device,
)
_pipelined_produce_and_all2all(
_chunk_producer,
stacked_partials,
group_name,
out_chunk_dim=1,
)
stacked_partials_view = stacked_partials.reshape(
*leading_dims, group.size(), -1
)
return reduce_fn(
stacked_partials_view,
dim=-2,
)
# Move the scatter_dim to the front and flatten the tensor into a 2D matrix
x = A.movedim(scatter_dim, 0)
leading_dims = [group.size()] + list(x.shape[:-1])
leading_dims[1] //= group.size()
x = x.flatten(0, -2)
A_shards = x.chunk(group.size())
# Computing block-wise matmul along the first dim of A
def chunk_producer(rank: int, out: torch.Tensor) -> None:
mm_out_op(A_shards[rank], B, **kwargs, out=out)
stacked_partials = x.new_empty(x.shape[0], B.shape[1], dtype=out_dtype or A.dtype)
_pipelined_produce_and_all2all(
chunk_producer,
stacked_partials,
group_name,
)
# Ensures that the transpose and reduction produce contiguous result
# in a single reduction kernel.
return reduce_fn(
stacked_partials.view(*leading_dims, -1)
.movedim(1, scatter_dim + 1)
.movedim(0, scatter_dim),
dim=scatter_dim,
)
@torch.library.impl(lib, "fused_scaled_matmul_reduce_scatter", "CUDA")
def _fused_scaled_matmul_reduce_scatter(
    A: torch.Tensor,
    B: torch.Tensor,
    A_scale: torch.Tensor,
    B_scale: torch.Tensor,
    reduce_op: str,
    orig_scatter_dim: int,
    scatter_dim_after_maybe_reshape: int,
    group_name: str,
    output_shape: list[int],
    bias: torch.Tensor | None = None,
    result_scale: torch.Tensor | None = None,
    out_dtype: torch.dtype | None = None,
    use_fast_accum: bool = False,
) -> torch.Tensor:
    """
    Micro-pipelined `_scaled_mm(A, B)` followed by a reduce-scatter of the
    `output_shape`-shaped result along `orig_scatter_dim`.

    `scatter_dim_after_maybe_reshape` indexes into `A` as passed here
    (validated against `A.dim()` downstream), while `orig_scatter_dim`
    indexes into `output_shape`. The `_scaled_mm` extras (`bias`,
    `result_scale`, `out_dtype`, `use_fast_accum`) are forwarded unchanged.

    In test mode the non-pipelined fallback implementation is used instead.
    """
    if _is_test_mode:
        return _fused_scaled_matmul_reduce_scatter_fallback(
            A,
            B,
            A_scale,
            B_scale,
            reduce_op,
            orig_scatter_dim,
            scatter_dim_after_maybe_reshape,
            group_name,
            output_shape,
            bias,
            result_scale,
            out_dtype,
            use_fast_accum,
        )
    with torch.profiler.record_function("fused_scaled_matmul_reduce_scatter"):
        # Delegate to the shared impl, bundling the per-call `_scaled_mm`
        # keyword arguments into one dict.
        return _fused_scaled_matmul_reduce_scatter_impl(
            mm_out_op=torch.ops.aten._scaled_mm.out,
            A=A,
            B=B,
            A_scale=A_scale,
            kwargs={
                "scale_b": B_scale,
                "bias": bias,
                "scale_result": result_scale,
                "out_dtype": out_dtype,
                "use_fast_accum": use_fast_accum,
            },
            out_dtype=out_dtype,
            reduce_op=reduce_op,
            orig_scatter_dim=orig_scatter_dim,
            scatter_dim_after_maybe_reshape=scatter_dim_after_maybe_reshape,
            group_name=group_name,
            output_shape=output_shape,
        )
@torch.library.impl(lib, "fused_scaled_matmul_reduce_scatter", "Meta")
def _fused_scaled_matmul_reduce_scatter_fallback(
    A: torch.Tensor,
    B: torch.Tensor,
    A_scale: torch.Tensor,
    B_scale: torch.Tensor,
    reduce_op: str,
    orig_scatter_dim: int,
    scatter_dim_after_maybe_reshape: int,
    group_name: str,
    output_shape: list[int],
    bias: torch.Tensor | None = None,
    result_scale: torch.Tensor | None = None,
    out_dtype: torch.dtype | None = None,
    use_fast_accum: bool = False,
) -> torch.Tensor:
    """
    Non-pipelined reference (and Meta-dispatch implementation): one full
    `_scaled_mm`, then a functional reduce-scatter of the
    `output_shape`-shaped result along `orig_scatter_dim`.
    """
    # More than one scale element means row-wise scaling: leading dims must
    # match A's, then collapse to one scale per flattened row.
    if A_scale.numel() > 1:
        if A_scale.shape[:-1] != A.shape[:-1]:
            raise ValueError(
                "For row-wise scaling, the leading dims of A_scale "
                "must match the leading dims of A "
                f"(A shape: {A.shape}, A_scale shape: {A_scale.shape})"
            )
        A_scale = A_scale.flatten(0, -2).contiguous()
    elif A_scale.numel() != 1:
        # numel == 0: neither row-wise nor tensor-wise scaling.
        raise ValueError(
            "Invalid A_scale shape "
            f"(A shape: {A.shape}, A_scale shape: {A_scale.shape})"
        )
    # `_scaled_mm` requires 2-D operands; the result is viewed back to the
    # intended (unscattered) output shape before the collective.
    C = torch._scaled_mm(
        A.flatten(0, -2).contiguous(),
        B,
        A_scale,
        B_scale,
        bias,
        result_scale,
        out_dtype,
        use_fast_accum,
    )
    C = C.view(*output_shape[:-1], B.shape[1])
    res = funcol.reduce_scatter_tensor(
        C,
        reduce_op,
        orig_scatter_dim,  # need original scatter dim for 3D+ output tensor here
        group_name,
    )
    res = funcol.wait_tensor(res)
    return res
def _fused_scaled_matmul_reduce_scatter_impl(
    mm_out_op: torch._ops.OpOverload,
    A: torch.Tensor,
    B: torch.Tensor,
    A_scale: torch.Tensor,
    kwargs: dict[str, Any],
    out_dtype: torch.dtype | None,
    reduce_op: str,
    orig_scatter_dim: int,
    scatter_dim_after_maybe_reshape: int,
    group_name: str,
    output_shape: list[int],
) -> torch.Tensor:
    """
    Shared pipelined implementation behind `fused_scaled_matmul_reduce_scatter`.

    Shards `A` (and its scales) along the scatter dim, produces per-rank
    partial `_scaled_mm` results via `mm_out_op`, exchanges them with an
    all-to-all, and reduces them locally with `reduce_op` ("sum" or "avg")
    into the reduce-scattered, `output_shape`-shaped result.

    `A_scale` may be tensor-wise (numel == 1, replicated to every shard) or
    row-wise (leading dims matching `A`, sharded alongside it).

    Raises:
        ValueError: on malformed inputs (see the individual checks below).
    """
    if A.dim() < 2:
        # NOTE(review): message says "A_shard" but the parameter here is `A`.
        raise ValueError("A_shard must be a matrix")
    if (
        scatter_dim_after_maybe_reshape < 0
        or scatter_dim_after_maybe_reshape >= A.dim()
    ):
        raise ValueError("Invalid scatter dim for 2D tensor input to scaled_mm")
    if orig_scatter_dim < 0 or orig_scatter_dim >= len(output_shape):
        raise ValueError("Invalid scatter dim for 3D+ output tensor")
    if B.dim() != 2:
        raise ValueError("B must be a matrix")
    if reduce_op == "sum":
        reduce_fn = partial(torch.sum, dim=0)
    elif reduce_op == "avg":
        reduce_fn = partial(torch.mean, dim=0)
    else:
        raise ValueError("reduce_op must be sum or avg")
    group = c10d._resolve_process_group(group_name)

    # Move scatter to first dim, then shard the tensor along the first dim, so the chunk producer
    # can perform matmuls along the first dim.
    A_with_scatter_dim_0 = A.movedim(scatter_dim_after_maybe_reshape, 0)

    # To handle case where A is 3D+, reshape to 2D to prepare for mm which requires 2D inputs.
    A_2D_with_scatter_dim_0 = A_with_scatter_dim_0.flatten(0, -2)

    # Partition A along the first dim to prepare for sharding across TP process group.
    A_shards = A_2D_with_scatter_dim_0.chunk(group.size())

    # Now that 'A' is sharded along the first dim, we need to update its scale(s) accordingly.
    # How we do this depends on if we are using tensorwise scaling, rowwise scaling, or no scaling.
    tensorwise_scaling = A_scale is not None and A_scale.numel() == 1
    rowwise_scaling = A_scale is not None and A_scale.numel() > 1

    # For tensorwise scaling, the scale should be replicated so each shard has a copy.
    if tensorwise_scaling:
        A_scale_shards = [A_scale] * group.size()
    # For rowwise scaling, we need to move the scatter dim to the first dim to match the
    # dim swap of the 'A' tensor. Then we can shard the scales along the first dim, just like
    # the 'A' tensor.
    elif rowwise_scaling:
        if A_scale.shape[:-1] != A.shape[:-1]:
            raise ValueError(
                "For row-wise scaling, the leading dims of A_scale "
                "must match the leading dims of A "
                f"(A shape: {A.shape}, A_scale shape: {A_scale.shape})"
            )
        A_scale = (
            A_scale.movedim(scatter_dim_after_maybe_reshape, 0)
            .contiguous()
            .flatten(0, -2)
        )
        A_scale_shards = list(A_scale.chunk(group.size()))
        # cuBLAS's row-wise kernel requires scales to be aligned to 16 bytes.
        # When we slice them we might break this and need to reallocate them.
        A_scale_shards = [
            t if t.data_ptr() % 16 == 0 else t.clone() for t in A_scale_shards
        ]
    else:
        raise ValueError("A_scale cannot be none for scaled_mm")

    # Computing block-wise matmul along the first dim of A
    def chunk_producer(rank: int, out: torch.Tensor) -> None:
        mm_out_op(A_shards[rank], B, scale_a=A_scale_shards[rank], **kwargs, out=out)

    # Stacked partials will be the 2D outputs of the pipelined scaled mm, and will
    # have the shape (A_with_scatter_dim_0_tensor.shape[0], B.shape[1]) to align with the formula:
    # (a*b,c) @ (c,d) = (a*b,d)
    stacked_partials = A_with_scatter_dim_0.new_empty(
        A_2D_with_scatter_dim_0.shape[0], B.shape[1], dtype=out_dtype or A.dtype
    )

    # Execute the pipelined mm/scaled_mm.
    _pipelined_produce_and_all2all(
        chunk_producer,
        stacked_partials,
        group_name,
    )

    # We now need to transform the *unreduced* stacked 2D partial mm outputs to an *unreduced* 3D+ output,
    # then reduce-scatter. To do this, we first need to determine the shape of the unreduced 3D+ output,
    # to reshape our stacked partials so we can apply the reduce-scatter.
    #
    # The *unreduced* 3D+ tensor will have dim 0 = `group_size`, as we have `group_size` instances of
    # stacked partial outputs. The next dims will be A's leading dims (sharded along the original scatter dim),
    # as it was the left operand of the mm op. We can use -1 as the final dim of the view to populate the rest.
    stacked_partials_3D_leading_dims = [group.size()] + list(
        # We use A from after the dim swap 0<=>scatter_dim, but before the flatten,
        # to get the leading dims of the 3D+ view of stacked partials.
        A_with_scatter_dim_0.shape[:-1]
    )

    # The `group_size` leading dim has been prepended to `stacked_partials_3D_leading_dims`,
    # to capture the partial output from each rank. We need to divide the sharding/scatter dim
    # by the group size. If the original scatter dim was 0, then it is now dim 1 in this
    # tensor, since this new `group_size` dim was prepended.
    stacked_partial_scatter_dim = orig_scatter_dim if orig_scatter_dim > 0 else 1
    stacked_partials_3D_leading_dims[stacked_partial_scatter_dim] //= group.size()

    # Ensures that the transpose and reduction produce contiguous result
    # in a single reduction kernel.
    reduced_out = reduce_fn(
        # View 2D stacked partials as 3D+ tensor of shape (`group_size`, ...)
        stacked_partials.view(*stacked_partials_3D_leading_dims, -1)
        # We originally swapped 0<=>scatter_dim_after_maybe_reshape. Now after
        # prepending the `group_size` dim, to undo this original swap, we
        # must swap 1<=>scatter_dim_after_maybe_reshape+1.
        .movedim(1, scatter_dim_after_maybe_reshape + 1),
        # Reduce along the `group_size` dim (0).
        dim=0,
    )

    # Output shape must be scattered along original scatter dim as well.
    output_shape[orig_scatter_dim] //= group.size()
    out = reduced_out.view(*output_shape)
    return out
def restride_A_for_fused_matmul_reduce_scatter(
    t: torch.Tensor,
    scatter_dim: int,
) -> torch.Tensor:
    """
    Restride the `A_shard` arg of `fused_matmul_reduce_scatter` for optimal
    perf: the result equals `t` but is laid out so that
    `t.movedim(scatter_dim, 0)` is contiguous.

    See the doc for `fused_matmul_reduce_scatter` for detail.
    """
    # Permutation that rotates `scatter_dim` to the front, keeping the
    # relative order of the remaining dims.
    order = list(range(t.dim()))
    order.insert(0, order.pop(scatter_dim))
    # Inverse permutation to restore the original dim order after the copy.
    inverse = [0] * len(order)
    for pos, dim in enumerate(order):
        inverse[dim] = pos
    return t.permute(order).contiguous().permute(inverse)
def _maybe_convert_scalar_types_to_dtypes(
scalar_types: list[Any],
) -> list[torch.dtype | None]:
"""
When a list of `torch.dtype`s is passed through the dispatcher as
`ScalarType[]`, it is converted to a list of scalar type enum values. This
function converts it back to a list of `torch.dtype`s.
"""
# Order defined in https://github.com/pytorch/pytorch/blob/344defc9733a45fee8d0c4d3f5530f631e823196/c10/core/ScalarType.h
_SCALAR_TYPE_TO_DTYPE = {
0: torch.uint8,
1: torch.int8,
2: torch.short,
3: torch.int,
4: torch.int64,
5: torch.half,
6: torch.float,
7: torch.double,
8: torch.complex32,
9: torch.complex64,
10: torch.complex128,
11: torch.bool,
12: torch.qint8,
13: torch.quint8,
14: torch.qint32,
15: torch.bfloat16,
16: torch.float8_e5m2,
17: torch.float8_e4m3fn,
18: torch.float8_e5m2fnuz,
19: torch.float8_e4m3fnuz,
}
if any(not isinstance(x, (type(None), int)) for x in scalar_types):
return scalar_types
dtypes: list[torch.dtype | None] = []
for scalar_type in scalar_types:
if scalar_type is None:
dtypes.append(scalar_type)
elif scalar_type not in _SCALAR_TYPE_TO_DTYPE:
raise ValueError(f"Unrecognized scalar type {scalar_type}")
else:
dtypes.append(_SCALAR_TYPE_TO_DTYPE[scalar_type])
return dtypes
| _ScaleMode |
python | dask__dask | dask/dataframe/tseries/resample.py | {
"start": 6040,
"end": 6096
} | class ____(ResampleReduction):
how = "min"
| ResampleMin |
python | google__jax | tests/pallas/tpu_pallas_random_test.py | {
"start": 8891,
"end": 10341
} | class ____(parameterized.TestCase):
def setUp(self):
if not jtu.test_device_matches(["tpu"]):
self.skipTest("Need TPU devices")
super().setUp()
def test_block_invariance(self):
def make_kernel_body(index_map):
def body(key_ref, o_ref):
key = key_ref[...]
samples = pltpu.sample_block(
jax.random.uniform,
key,
block_size=o_ref[...].shape,
tile_size=(16, 128),
total_size=(64, 512),
block_index=index_map(pl.program_id(0), pl.program_id(1)),
minval=0.0,
maxval=1.0)
o_ref[...] = samples
return body
global_key = jax_random.key(0, impl="pallas_tpu")
o_shape = jnp.ones((64, 512), dtype=jnp.float32)
key_spec = pl.BlockSpec(memory_space=pltpu.SMEM)
out_spec = pl.BlockSpec((16, 128), lambda i, j: (i, j))
result_16x128 = pl.pallas_call(
make_kernel_body(index_map=lambda i, j: (i, j)),
out_shape=o_shape,
in_specs=[key_spec],
out_specs=out_spec,
grid=(4, 4),
)(global_key)
out_spec = pl.BlockSpec((32, 256), lambda i, j: (j, i))
result_32x256 = pl.pallas_call(
make_kernel_body(index_map=lambda i, j: (j, i)),
in_specs=[key_spec],
out_shape=o_shape,
out_specs=out_spec,
grid=(2, 2),
)(global_key)
np.testing.assert_array_equal(result_16x128, result_32x256)
| BlockInvarianceTest |
python | pennersr__django-allauth | allauth/socialaccount/providers/weixin/provider.py | {
"start": 483,
"end": 930
} | class ____(OAuth2Provider):
id = "weixin"
name = "Weixin"
account_class = WeixinAccount
oauth2_adapter_class = WeixinOAuth2Adapter
def extract_uid(self, data):
return data["openid"]
def get_default_scope(self):
return ["snsapi_login"]
def extract_common_fields(self, data):
return dict(username=data.get("nickname"), name=data.get("nickname"))
provider_classes = [WeixinProvider]
| WeixinProvider |
python | doocs__leetcode | solution/1700-1799/1792.Maximum Average Pass Ratio/Solution.py | {
"start": 0,
"end": 410
} | class ____:
def maxAverageRatio(self, classes: List[List[int]], extraStudents: int) -> float:
h = [(a / b - (a + 1) / (b + 1), a, b) for a, b in classes]
heapify(h)
for _ in range(extraStudents):
_, a, b = heappop(h)
a, b = a + 1, b + 1
heappush(h, (a / b - (a + 1) / (b + 1), a, b))
return sum(v[1] / v[2] for v in h) / len(classes)
| Solution |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 4468,
"end": 4817
} | class ____(IntEnum):
INVALID = 0
L2 = 1
IP = 2
# Only supported for byte vectors
HAMMING = 3
JACCARD = 4
TANIMOTO = 5
SUBSTRUCTURE = 6
SUPERSTRUCTURE = 7
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: {self._name_}>"
def __str__(self) -> str:
return self._name_
| MetricType |
python | django__django | django/db/models/fields/json.py | {
"start": 23345,
"end": 23465
} | class ____(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IStartsWith
):
pass
| KeyTransformIStartsWith |
python | paramiko__paramiko | paramiko/pkey.py | {
"start": 2741,
"end": 3106
} | class ____(Exception):
"""
An unknown public/private key algorithm was attempted to be read.
"""
def __init__(self, key_type=None, key_bytes=None):
self.key_type = key_type
self.key_bytes = key_bytes
def __str__(self):
return f"UnknownKeyType(type={self.key_type!r}, bytes=<{len(self.key_bytes)}>)" # noqa
| UnknownKeyType |
python | ray-project__ray | doc/source/ray-core/doc_code/actors.py | {
"start": 529,
"end": 768
} | class ____:
# Disable task events reporting for this method.
@ray.method(enable_task_events=False)
def foo(self):
pass
foo_actor = FooActor.remote()
ray.get(foo_actor.foo.remote())
# __enable_task_events_end__
| FooActor |
python | kubernetes-client__python | kubernetes/client/models/v1_deployment_status.py | {
"start": 383,
"end": 13231
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'available_replicas': 'int',
'collision_count': 'int',
'conditions': 'list[V1DeploymentCondition]',
'observed_generation': 'int',
'ready_replicas': 'int',
'replicas': 'int',
'terminating_replicas': 'int',
'unavailable_replicas': 'int',
'updated_replicas': 'int'
}
attribute_map = {
'available_replicas': 'availableReplicas',
'collision_count': 'collisionCount',
'conditions': 'conditions',
'observed_generation': 'observedGeneration',
'ready_replicas': 'readyReplicas',
'replicas': 'replicas',
'terminating_replicas': 'terminatingReplicas',
'unavailable_replicas': 'unavailableReplicas',
'updated_replicas': 'updatedReplicas'
}
def __init__(self, available_replicas=None, collision_count=None, conditions=None, observed_generation=None, ready_replicas=None, replicas=None, terminating_replicas=None, unavailable_replicas=None, updated_replicas=None, local_vars_configuration=None): # noqa: E501
"""V1DeploymentStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._available_replicas = None
self._collision_count = None
self._conditions = None
self._observed_generation = None
self._ready_replicas = None
self._replicas = None
self._terminating_replicas = None
self._unavailable_replicas = None
self._updated_replicas = None
self.discriminator = None
if available_replicas is not None:
self.available_replicas = available_replicas
if collision_count is not None:
self.collision_count = collision_count
if conditions is not None:
self.conditions = conditions
if observed_generation is not None:
self.observed_generation = observed_generation
if ready_replicas is not None:
self.ready_replicas = ready_replicas
if replicas is not None:
self.replicas = replicas
if terminating_replicas is not None:
self.terminating_replicas = terminating_replicas
if unavailable_replicas is not None:
self.unavailable_replicas = unavailable_replicas
if updated_replicas is not None:
self.updated_replicas = updated_replicas
@property
def available_replicas(self):
"""Gets the available_replicas of this V1DeploymentStatus. # noqa: E501
Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment. # noqa: E501
:return: The available_replicas of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._available_replicas
@available_replicas.setter
def available_replicas(self, available_replicas):
"""Sets the available_replicas of this V1DeploymentStatus.
Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment. # noqa: E501
:param available_replicas: The available_replicas of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._available_replicas = available_replicas
@property
def collision_count(self):
"""Gets the collision_count of this V1DeploymentStatus. # noqa: E501
Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet. # noqa: E501
:return: The collision_count of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._collision_count
@collision_count.setter
def collision_count(self, collision_count):
"""Sets the collision_count of this V1DeploymentStatus.
Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet. # noqa: E501
:param collision_count: The collision_count of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._collision_count = collision_count
@property
def conditions(self):
"""Gets the conditions of this V1DeploymentStatus. # noqa: E501
Represents the latest available observations of a deployment's current state. # noqa: E501
:return: The conditions of this V1DeploymentStatus. # noqa: E501
:rtype: list[V1DeploymentCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1DeploymentStatus.
Represents the latest available observations of a deployment's current state. # noqa: E501
:param conditions: The conditions of this V1DeploymentStatus. # noqa: E501
:type: list[V1DeploymentCondition]
"""
self._conditions = conditions
@property
def observed_generation(self):
"""Gets the observed_generation of this V1DeploymentStatus. # noqa: E501
The generation observed by the deployment controller. # noqa: E501
:return: The observed_generation of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""Sets the observed_generation of this V1DeploymentStatus.
The generation observed by the deployment controller. # noqa: E501
:param observed_generation: The observed_generation of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._observed_generation = observed_generation
@property
def ready_replicas(self):
"""Gets the ready_replicas of this V1DeploymentStatus. # noqa: E501
Total number of non-terminating pods targeted by this Deployment with a Ready Condition. # noqa: E501
:return: The ready_replicas of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._ready_replicas
@ready_replicas.setter
def ready_replicas(self, ready_replicas):
"""Sets the ready_replicas of this V1DeploymentStatus.
Total number of non-terminating pods targeted by this Deployment with a Ready Condition. # noqa: E501
:param ready_replicas: The ready_replicas of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._ready_replicas = ready_replicas
@property
def replicas(self):
"""Gets the replicas of this V1DeploymentStatus. # noqa: E501
Total number of non-terminating pods targeted by this deployment (their labels match the selector). # noqa: E501
:return: The replicas of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1DeploymentStatus.
Total number of non-terminating pods targeted by this deployment (their labels match the selector). # noqa: E501
:param replicas: The replicas of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._replicas = replicas
@property
def terminating_replicas(self):
"""Gets the terminating_replicas of this V1DeploymentStatus. # noqa: E501
Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase. This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field. # noqa: E501
:return: The terminating_replicas of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._terminating_replicas
@terminating_replicas.setter
def terminating_replicas(self, terminating_replicas):
"""Sets the terminating_replicas of this V1DeploymentStatus.
Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase. This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field. # noqa: E501
:param terminating_replicas: The terminating_replicas of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._terminating_replicas = terminating_replicas
@property
def unavailable_replicas(self):
"""Gets the unavailable_replicas of this V1DeploymentStatus. # noqa: E501
Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created. # noqa: E501
:return: The unavailable_replicas of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._unavailable_replicas
@unavailable_replicas.setter
def unavailable_replicas(self, unavailable_replicas):
"""Sets the unavailable_replicas of this V1DeploymentStatus.
Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created. # noqa: E501
:param unavailable_replicas: The unavailable_replicas of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._unavailable_replicas = unavailable_replicas
@property
def updated_replicas(self):
"""Gets the updated_replicas of this V1DeploymentStatus. # noqa: E501
Total number of non-terminating pods targeted by this deployment that have the desired template spec. # noqa: E501
:return: The updated_replicas of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._updated_replicas
@updated_replicas.setter
def updated_replicas(self, updated_replicas):
"""Sets the updated_replicas of this V1DeploymentStatus.
Total number of non-terminating pods targeted by this deployment that have the desired template spec. # noqa: E501
:param updated_replicas: The updated_replicas of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._updated_replicas = updated_replicas
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DeploymentStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1DeploymentStatus):
return True
return self.to_dict() != other.to_dict()
| V1DeploymentStatus |
python | huggingface__transformers | src/transformers/models/longt5/modeling_longt5.py | {
"start": 81429,
"end": 89911
} | class ____(LongT5PreTrainedModel, GenerationMixin):
_keys_to_ignore_on_load_unexpected = [
r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
]
_tied_weights_keys = {
"encoder.embed_tokens.weight": "shared.weight",
"decoder.embed_tokens.weight": "shared.weight",
"lm_head.weight": "shared.weight",
}
def __init__(self, config: LongT5Config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.tie_encoder_decoder = False
self.encoder = LongT5Stack(encoder_config)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.tie_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = LongT5Stack(decoder_config)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.Tensor]]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
you should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for detail.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look a [LONGT5
Training](./longt5#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5
Training](./longt5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ...,
config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration
>>> tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps")
>>> model = LongT5ForConditionalGeneration.from_pretrained(
... "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps"
... )
>>> # Let's try a very long input.
>>> inputs = tokenizer(100 * "studies have shown that owning a dog is good for you ", return_tensors="pt")
>>> input_ids = inputs.input_ids
>>> outputs = model.generate(input_ids)
>>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
abstractthe aim of this article is to provide an overview of the literature on the role of dog
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
sequence_output = decoder_outputs[0]
if self.config.tie_word_embeddings:
sequence_output = sequence_output * (self.model_dim**-0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
labels = labels.to(lm_logits.device)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
if not return_dict:
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
@auto_docstring
| LongT5ForConditionalGeneration |
python | huggingface__transformers | src/transformers/models/esm/configuration_esm.py | {
"start": 2574,
"end": 5469
} | class ____:
num_blocks: int = 48
sequence_state_dim: int = 1024
pairwise_state_dim: int = 128
sequence_head_width: int = 32
pairwise_head_width: int = 32
position_bins: int = 32
dropout: float = 0
layer_drop: float = 0
cpu_grad_checkpoint: bool = False
max_recycles: int = 4
chunk_size: Optional[int] = 128
structure_module: "StructureModuleConfig" = None
def __post_init__(self):
if self.structure_module is None:
self.structure_module = StructureModuleConfig()
elif isinstance(self.structure_module, dict):
self.structure_module = StructureModuleConfig(**self.structure_module)
if self.max_recycles <= 0:
raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
f" {self.sequence_state_dim} and {self.sequence_state_dim}."
)
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
)
sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
)
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
)
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
if self.dropout >= 0.4:
raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
`dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
"""
output = asdict(self)
output["structure_module"] = self.structure_module.to_dict()
return output
@dataclass
| TrunkConfig |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-iterable/source_iterable/streams.py | {
"start": 9874,
"end": 12687
} | class ____(IterableExportStream, ABC):
"""
For streams that could produce large amount of data in single request so we
cant just use IterableExportStreamRanged to split it in even ranges. If
request processing takes a lot of time API server could just close
connection and connector code would fail with ChunkedEncodingError.
To solve this problem we use AdjustableSliceGenerator that able to adjust
next slice range based on two factor:
1. Previous slice range / time to process ratio.
2. Had previous request failed with ChunkedEncodingError
In case of slice processing request failed with ChunkedEncodingError (which
means that API server closed connection cause of request takes to much
time) make CHUNKED_ENCODING_ERROR_RETRIES (6) retries each time reducing
slice length.
See AdjustableSliceGenerator description for more details on next slice length adjustment alghorithm.
"""
_adjustable_generator: AdjustableSliceGenerator = None
CHUNKED_ENCODING_ERROR_RETRIES = 6
def stream_slices(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Optional[StreamSlice]]:
start_datetime = self.get_start_date(stream_state)
self._adjustable_generator = AdjustableSliceGenerator(start_datetime, self._end_date)
return self._adjustable_generator
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str],
stream_slice: StreamSlice,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
start_time = pendulum.now()
for _ in range(self.CHUNKED_ENCODING_ERROR_RETRIES):
try:
self.logger.info(
f"Processing slice of {(stream_slice.end_date - stream_slice.start_date).total_days()} days for stream {self.name}"
)
for record in super().read_records(
sync_mode=sync_mode,
cursor_field=cursor_field,
stream_slice=stream_slice,
stream_state=stream_state,
):
now = pendulum.now()
self._adjustable_generator.adjust_range(now - start_time)
yield record
start_time = now
break
except ChunkedEncodingError:
self.logger.warn("ChunkedEncodingError occurred, decrease days range and try again")
stream_slice = self._adjustable_generator.reduce_range()
else:
raise Exception(f"ChunkedEncodingError: Reached maximum number of retires: {self.CHUNKED_ENCODING_ERROR_RETRIES}")
| IterableExportStreamAdjustableRange |
python | getsentry__sentry | src/sentry/monitors/types.py | {
"start": 1086,
"end": 2793
} | class ____:
"""
Represents a check-in to be processed
"""
ts: datetime
"""
The timestamp the check-in was produced into the kafka topic. This differs
from the start_time that is part of the CheckIn
"""
partition: int
"""
The kafka partition id the check-in was produced into.
"""
message: CheckIn
"""
The original unpacked check-in message contents.
"""
payload: CheckinPayload
"""
The json-decoded check-in payload contained within the message. Includes
the full check-in details.
"""
@cached_property
def valid_monitor_slug(self):
return slugify_monitor_slug(self.payload["monitor_slug"])
@property
def processing_key(self):
"""
This key is used to uniquely identify the check-in group this check-in
belongs to. Check-ins grouped together will never be processed in
parallel with other check-ins belonging to the same group
"""
project_id = self.message["project_id"]
env = self.payload.get("environment")
return f"{project_id}:{self.valid_monitor_slug}:{env}"
def to_dict(self) -> CheckinItemData:
return {
"ts": self.ts.isoformat(),
"partition": self.partition,
"message": self.message,
"payload": self.payload,
}
@classmethod
def from_dict(cls, data: CheckinItemData) -> CheckinItem:
return cls(
datetime.fromisoformat(data["ts"]),
data["partition"],
data["message"],
data["payload"],
)
IntervalUnit = Literal["year", "month", "week", "day", "hour", "minute"]
@dataclass
| CheckinItem |
python | huggingface__transformers | src/transformers/models/dia/modeling_dia.py | {
"start": 19542,
"end": 21918
} | class ____(DiaPreTrainedModel):
def __init__(self, config: DiaEncoderConfig):
super().__init__(config)
self.config = config
self.embedding = nn.Embedding(config.vocab_size, config.hidden_size)
self.layers = nn.ModuleList(
[DiaEncoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
self.rotary_emb = DiaRotaryEmbedding(config=config)
@auto_docstring
@can_return_tuple
def forward(
self,
input_ids: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[BaseModelOutput, tuple]:
hidden_states = self.embedding(input_ids)
# RoPE
# Note: We expect right padding and hence always generate
# the position ids on the fly to reduce preparation overhead
position_ids = torch.arange(input_ids.shape[-1], device=input_ids.device)[None, :]
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=hidden_states,
attention_mask=attention_mask,
)
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
if output_hidden_states:
encoder_states += (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
| DiaEncoder |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_pubsub.py | {
"start": 24127,
"end": 25714
} | class ____:
@pytest.fixture
def hook(self):
return PubSubAsyncHook()
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.pubsub.PubSubAsyncHook._get_subscriber_client")
async def test_pull(self, mock_subscriber_client, hook):
client = mock_subscriber_client.return_value
await hook.pull(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, max_messages=10, return_immediately=False
)
mock_subscriber_client.assert_called_once()
client.pull.assert_called_once_with(
request=dict(
subscription=EXPANDED_SUBSCRIPTION,
max_messages=10,
return_immediately=False,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.pubsub.PubSubAsyncHook._get_subscriber_client")
async def test_acknowledge(self, mock_subscriber_client, hook):
client = mock_subscriber_client.return_value
await hook.acknowledge(
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
messages=_generate_messages(3),
)
mock_subscriber_client.assert_called_once()
client.acknowledge.assert_called_once_with(
request=dict(
subscription=EXPANDED_SUBSCRIPTION,
ack_ids=["1", "2", "3"],
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestPubSubAsyncHook |
python | django__django | tests/gis_tests/layermap/tests.py | {
"start": 17460,
"end": 18055
} | class ____:
def db_for_read(self, model, **hints):
return "other"
def db_for_write(self, model, **hints):
return self.db_for_read(model, **hints)
def allow_relation(self, obj1, obj2, **hints):
# ContentType objects are created during a post-migrate signal while
# performing fixture teardown using the default database alias and
# don't abide by the database specified by this router.
return True
def allow_migrate(self, db, app_label, **hints):
return True
@override_settings(DATABASE_ROUTERS=[OtherRouter()])
| OtherRouter |
python | readthedocs__readthedocs.org | readthedocs/api/v3/views.py | {
"start": 12910,
"end": 14356
} | class ____(
APIv3Settings,
NestedViewSetMixin,
ProjectQuerySetMixin,
FlexFieldsMixin,
UpdateMixin,
UpdateModelMixin,
ReadOnlyModelViewSet,
):
model = Version
lookup_field = "slug"
lookup_url_kwarg = "version_slug"
# Allow ``.`` (dots) on version slug
lookup_value_regex = r"[^/]+"
filterset_class = VersionFilter
permission_classes = [ReadOnlyPermission | (IsAuthenticated & IsProjectAdmin)]
def get_serializer_class(self):
"""
Return correct serializer depending on the action.
For GET it returns a serializer with many fields and on PUT/PATCH/POST,
it return a serializer to validate just a few fields.
"""
if self.action in ("list", "retrieve"):
return VersionSerializer
return VersionUpdateSerializer
def update(self, request, *args, **kwargs):
"""Overridden to call ``post_save`` method on the updated version."""
# Get the current value before updating.
version = self.get_object()
was_active = version.active
result = super().update(request, *args, **kwargs)
# Get the updated version.
version = self.get_object()
version.post_save(was_active=was_active)
return result
def get_queryset(self):
"""Overridden to allow internal versions only."""
return super().get_queryset().exclude(type=EXTERNAL)
| VersionsViewSet |
python | spack__spack | lib/spack/spack/test/installer_tui.py | {
"start": 31498,
"end": 35582
} | class ____:
"""Test search mode with display filtering"""
def test_search_mode_filters_displayed_builds(self):
"""Test that search mode actually filters what's displayed"""
status, _, fake_stdout = create_build_status(total=4)
specs = [
MockSpec("package-foo", "1.0"),
MockSpec("package-bar", "2.0"),
MockSpec("other-thing", "3.0"),
MockSpec("package-baz", "4.0"),
]
for spec in specs:
status.add_build(spec, explicit=True, control_w_conn=MockConnection())
# Enter search mode and search for "package"
status.enter_search()
assert status.search_mode is True
for character in "package":
status.search_input(character)
assert status.search_term == "package"
# Update to render
status.update()
output = fake_stdout.getvalue()
# Should contain filtered builds
assert "package-foo" in output
assert "package-bar" in output
assert "package-baz" in output
# Should not contain the filtered-out build
assert "other-thing" not in output
# Should show filter prompt
assert "filter>" in output
assert status.search_term in output
def test_search_mode_with_navigation(self):
"""Test that navigation respects search filter"""
status, _, _ = create_build_status(total=4)
specs = [
MockSpec("package-a", "1.0"),
MockSpec("other-b", "2.0"),
MockSpec("package-c", "3.0"),
MockSpec("other-d", "4.0"),
]
for spec in specs:
status.add_build(spec, explicit=True, control_w_conn=MockConnection())
# Set search term to filter for "package"
status.search_term = "package"
# Start navigating, should only go through "package-a" and "package-c"
status.next()
assert status.tracked_build_id == specs[0].dag_hash() # package-a
status.next(1)
# Should skip other-b and go to package-c
assert status.tracked_build_id == specs[2].dag_hash() # package-c
status.next(1)
# Should wrap around to package-a
assert status.tracked_build_id == specs[0].dag_hash() # package-a
def test_search_input_enter_navigates_to_next(self):
"""Test that pressing enter in search mode navigates to next match"""
status, _, _ = create_build_status(total=3)
specs = add_mock_builds(status, 3)
# Enter search mode
status.enter_search()
for character in "pkg":
status.search_input(character)
# Press enter (should navigate to first match)
status.search_input("\r")
# Should have started following first matching build
assert status.overview_mode is False
assert status.tracked_build_id == specs[0].dag_hash()
def test_clearing_search_shows_all_builds(self):
"""Test that clearing search term shows all builds again"""
status, _, fake_stdout = create_build_status(total=3)
specs = [
MockSpec("package-a", "1.0"),
MockSpec("other-b", "2.0"),
MockSpec("package-c", "3.0"),
]
for spec in specs:
status.add_build(spec, explicit=True, control_w_conn=MockConnection())
# Enter search and type something
status.enter_search()
status.search_input("p")
status.search_input("a")
status.search_input("c")
assert status.search_term == "pac"
# Clear it with backspace
status.search_input("\x7f") # backspace
status.search_input("\x7f") # backspace
status.search_input("\x7f") # backspace
assert status.search_term == ""
# Update to render
status.update()
output = fake_stdout.getvalue()
# All builds should be visible now
assert "package-a" in output
assert "other-b" in output
assert "package-c" in output
| TestSearchFilteringIntegration |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 686572,
"end": 687054
} | class ____(sgqlc.types.Type):
"""Represents the language of a repository."""
__schema__ = github_schema
__field_names__ = ("cursor", "node", "size")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field(sgqlc.types.non_null("Language"), graphql_name="node")
size = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="size")
"""The number of bytes of code written in the language."""
| LanguageEdge |
python | langchain-ai__langchain | libs/core/tests/unit_tests/runnables/test_runnable.py | {
"start": 6205,
"end": 6420
} | class ____(Runnable[str, int]):
@override
def invoke(
self,
input: str,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> int:
return len(input)
| FakeRunnable |
python | pandas-dev__pandas | pandas/tests/io/test_common.py | {
"start": 941,
"end": 14404
} | class ____:
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_expand_user(self):
filename = "~/sometest"
expanded_name = icom._expand_user(filename)
assert expanded_name != filename
assert os.path.isabs(expanded_name)
assert os.path.expanduser(filename) == expanded_name
def test_expand_user_normal_path(self):
filename = "/somefolder/sometest"
expanded_name = icom._expand_user(filename)
assert expanded_name == filename
assert os.path.expanduser(filename) == expanded_name
def test_stringify_path_pathlib(self):
rel_path = icom.stringify_path(Path("."))
assert rel_path == "."
redundant_path = icom.stringify_path(Path("foo//bar"))
assert redundant_path == os.path.join("foo", "bar")
def test_stringify_path_fspath(self):
p = CustomFSPath("foo/bar.csv")
result = icom.stringify_path(p)
assert result == "foo/bar.csv"
def test_stringify_file_and_path_like(self, temp_file):
# GH 38125: do not stringify file objects that are also path-like
fsspec = pytest.importorskip("fsspec")
with fsspec.open(f"file://{temp_file}", mode="wb") as fsspec_obj:
assert fsspec_obj == icom.stringify_path(fsspec_obj)
@pytest.mark.parametrize("path_type", [str, CustomFSPath, Path])
def test_infer_compression_from_path(self, compression_format, path_type):
extension, expected = compression_format
path = path_type("foo/bar.csv" + extension)
compression = icom.infer_compression(path, compression="infer")
assert compression == expected
@pytest.mark.parametrize("path_type", [str, CustomFSPath, Path])
def test_get_handle_with_path(self, path_type):
with tempfile.TemporaryDirectory(dir=Path.home()) as tmp:
filename = path_type("~/" + Path(tmp).name + "/sometest")
with icom.get_handle(filename, "w") as handles:
assert Path(handles.handle.name).is_absolute()
assert os.path.expanduser(filename) == handles.handle.name
def test_get_handle_with_buffer(self):
with StringIO() as input_buffer:
with icom.get_handle(input_buffer, "r") as handles:
assert handles.handle == input_buffer
assert not input_buffer.closed
assert input_buffer.closed
# Test that BytesIOWrapper(get_handle) returns correct amount of bytes every time
def test_bytesiowrapper_returns_correct_bytes(self):
# Test latin1, ucs-2, and ucs-4 chars
data = """a,b,c
1,2,3
©,®,®
Look,a snake,🐍"""
with icom.get_handle(StringIO(data), "rb", is_text=False) as handles:
result = b""
chunksize = 5
while True:
chunk = handles.handle.read(chunksize)
# Make sure each chunk is correct amount of bytes
assert len(chunk) <= chunksize
if len(chunk) < chunksize:
# Can be less amount of bytes, but only at EOF
# which happens when read returns empty
assert len(handles.handle.read()) == 0
result += chunk
break
result += chunk
assert result == data.encode("utf-8")
# Test that pyarrow can handle a file opened with get_handle
def test_get_handle_pyarrow_compat(self):
pa_csv = pytest.importorskip("pyarrow.csv")
# Test latin1, ucs-2, and ucs-4 chars
data = """a,b,c
1,2,3
©,®,®
Look,a snake,🐍"""
expected = pd.DataFrame(
{"a": ["1", "©", "Look"], "b": ["2", "®", "a snake"], "c": ["3", "®", "🐍"]}
)
s = StringIO(data)
with icom.get_handle(s, "rb", is_text=False) as handles:
df = pa_csv.read_csv(handles.handle).to_pandas()
if pa_version_under19p0:
expected = expected.astype("object")
tm.assert_frame_equal(df, expected)
assert not s.closed
def test_iterator(self):
with pd.read_csv(StringIO(self.data1), chunksize=1) as reader:
result = pd.concat(reader, ignore_index=True)
expected = pd.read_csv(StringIO(self.data1))
tm.assert_frame_equal(result, expected)
# GH12153
with pd.read_csv(StringIO(self.data1), chunksize=1) as it:
first = next(it)
tm.assert_frame_equal(first, expected.iloc[[0]])
tm.assert_frame_equal(pd.concat(it), expected.iloc[1:])
@pytest.mark.skipif(WASM, reason="limited file system access on WASM")
@pytest.mark.parametrize(
"reader, module, error_class, fn_ext",
[
(pd.read_csv, "os", FileNotFoundError, "csv"),
(pd.read_fwf, "os", FileNotFoundError, "txt"),
(pd.read_excel, "xlrd", FileNotFoundError, "xlsx"),
(pd.read_feather, "pyarrow", OSError, "feather"),
(pd.read_hdf, "tables", FileNotFoundError, "h5"),
(pd.read_stata, "os", FileNotFoundError, "dta"),
(pd.read_sas, "os", FileNotFoundError, "sas7bdat"),
(pd.read_json, "os", FileNotFoundError, "json"),
(pd.read_pickle, "os", FileNotFoundError, "pickle"),
],
)
def test_read_non_existent(self, reader, module, error_class, fn_ext):
pytest.importorskip(module)
path = os.path.join(HERE, "data", "does_not_exist." + fn_ext)
msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
msg2 = rf"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
msg3 = "Expected object or value"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (
rf"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
rf"'.+does_not_exist\.{fn_ext}'"
)
msg6 = rf"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
msg7 = (
rf"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
)
msg8 = rf"Failed to open local file.+does_not_exist\.{fn_ext}"
with pytest.raises(
error_class,
match=rf"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})",
):
reader(path)
@pytest.mark.parametrize(
"method, module, error_class, fn_ext",
[
(pd.DataFrame.to_csv, "os", OSError, "csv"),
(pd.DataFrame.to_html, "os", OSError, "html"),
(pd.DataFrame.to_excel, "xlrd", OSError, "xlsx"),
(pd.DataFrame.to_feather, "pyarrow", OSError, "feather"),
(pd.DataFrame.to_parquet, "pyarrow", OSError, "parquet"),
(pd.DataFrame.to_stata, "os", OSError, "dta"),
(pd.DataFrame.to_json, "os", OSError, "json"),
(pd.DataFrame.to_pickle, "os", OSError, "pickle"),
],
)
# NOTE: Missing parent directory for pd.DataFrame.to_hdf is handled by PyTables
def test_write_missing_parent_directory(self, method, module, error_class, fn_ext):
pytest.importorskip(module)
dummy_frame = pd.DataFrame({"a": [1, 2, 3], "b": [2, 3, 4], "c": [3, 4, 5]})
path = os.path.join(HERE, "data", "missing_folder", "does_not_exist." + fn_ext)
with pytest.raises(
error_class,
match=r"Cannot save file into a non-existent directory: .*missing_folder",
):
method(dummy_frame, path)
@pytest.mark.skipif(WASM, reason="limited file system access on WASM")
@pytest.mark.parametrize(
"reader, module, error_class, fn_ext",
[
(pd.read_csv, "os", FileNotFoundError, "csv"),
(pd.read_table, "os", FileNotFoundError, "csv"),
(pd.read_fwf, "os", FileNotFoundError, "txt"),
(pd.read_excel, "xlrd", FileNotFoundError, "xlsx"),
(pd.read_feather, "pyarrow", OSError, "feather"),
(pd.read_hdf, "tables", FileNotFoundError, "h5"),
(pd.read_stata, "os", FileNotFoundError, "dta"),
(pd.read_sas, "os", FileNotFoundError, "sas7bdat"),
(pd.read_json, "os", FileNotFoundError, "json"),
(pd.read_pickle, "os", FileNotFoundError, "pickle"),
],
)
def test_read_expands_user_home_dir(
self, reader, module, error_class, fn_ext, monkeypatch
):
pytest.importorskip(module)
path = os.path.join("~", "does_not_exist." + fn_ext)
monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x))
msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
msg2 = rf"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
msg3 = "Unexpected character found when decoding 'false'"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (
rf"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
rf"'.+does_not_exist\.{fn_ext}'"
)
msg6 = rf"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
msg7 = (
rf"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
)
msg8 = rf"Failed to open local file.+does_not_exist\.{fn_ext}"
with pytest.raises(
error_class,
match=rf"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})",
):
reader(path)
@pytest.mark.parametrize(
"reader, module, path",
[
(pd.read_csv, "os", ("io", "data", "csv", "iris.csv")),
(pd.read_table, "os", ("io", "data", "csv", "iris.csv")),
(
pd.read_fwf,
"os",
("io", "data", "fixed_width", "fixed_width_format.txt"),
),
(pd.read_excel, "xlrd", ("io", "data", "excel", "test1.xlsx")),
(
pd.read_feather,
"pyarrow",
("io", "data", "feather", "feather-0_3_1.feather"),
),
(
pd.read_hdf,
"tables",
("io", "data", "legacy_hdf", "pytables_native2.h5"),
),
(pd.read_stata, "os", ("io", "data", "stata", "stata10_115.dta")),
(pd.read_sas, "os", ("io", "sas", "data", "test1.sas7bdat")),
(pd.read_json, "os", ("io", "json", "data", "tsframe_v012.json")),
(
pd.read_pickle,
"os",
("io", "data", "pickle", "categorical.0.25.0.pickle"),
),
],
)
def test_read_fspath_all(self, reader, module, path, datapath):
pytest.importorskip(module)
path = datapath(*path)
mypath = CustomFSPath(path)
result = reader(mypath)
expected = reader(path)
if path.endswith(".pickle"):
# categorical
tm.assert_categorical_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"writer_name, writer_kwargs, module",
[
("to_csv", {}, "os"),
("to_excel", {"engine": "openpyxl"}, "openpyxl"),
("to_feather", {}, "pyarrow"),
("to_html", {}, "os"),
("to_json", {}, "os"),
("to_latex", {}, "os"),
("to_pickle", {}, "os"),
("to_stata", {"time_stamp": pd.to_datetime("2019-01-01 00:00")}, "os"),
],
)
def test_write_fspath_all(self, writer_name, writer_kwargs, module, tmp_path):
if writer_name in ["to_latex"]: # uses Styler implementation
pytest.importorskip("jinja2")
string = str(tmp_path / "string")
fspath = str(tmp_path / "fspath")
df = pd.DataFrame({"A": [1, 2]})
pytest.importorskip(module)
mypath = CustomFSPath(fspath)
writer = getattr(df, writer_name)
writer(string, **writer_kwargs)
writer(mypath, **writer_kwargs)
with open(string, "rb") as f_str, open(fspath, "rb") as f_path:
if writer_name == "to_excel":
# binary representation of excel contains time creation
# data that causes flaky CI failures
result = pd.read_excel(f_str, **writer_kwargs)
expected = pd.read_excel(f_path, **writer_kwargs)
tm.assert_frame_equal(result, expected)
else:
result = f_str.read()
expected = f_path.read()
assert result == expected
def test_write_fspath_hdf5(self, tmp_path):
# Same test as write_fspath_all, except HDF5 files aren't
# necessarily byte-for-byte identical for a given dataframe, so we'll
# have to read and compare equality
pytest.importorskip("tables")
df = pd.DataFrame({"A": [1, 2]})
string = str(tmp_path / "string")
fspath = str(tmp_path / "fspath")
mypath = CustomFSPath(fspath)
df.to_hdf(mypath, key="bar")
df.to_hdf(string, key="bar")
result = pd.read_hdf(fspath, key="bar")
expected = pd.read_hdf(string, key="bar")
tm.assert_frame_equal(result, expected)
@pytest.fixture
def mmap_file(datapath):
return datapath("io", "data", "csv", "test_mmap.csv")
| TestCommonIOCapabilities |
python | getsentry__sentry | src/sentry/interfaces/security.py | {
"start": 3849,
"end": 5640
} | class ____(SecurityReport):
"""
A CSP violation report.
See also: https://www.w3.org/TR/CSP/#violation-events
>>> {
>>> "document_uri": "http://example.com/",
>>> "violated_directive": "style-src cdn.example.com",
>>> "blocked_uri": "http://example.com/style.css",
>>> "effective_directive": "style-src",
>>> }
"""
score = 1300
display_score = 1300
title = "CSP Report"
@classmethod
def to_python(cls, data, **kwargs):
data.setdefault("document_uri", None)
data.setdefault("violated_directive", None)
data.setdefault("blocked_uri", None)
data.setdefault("effective_directive", None)
return super().to_python(data, **kwargs)
def to_string(self, event) -> str:
return orjson.dumps(
{"csp-report": self.get_api_context()},
option=orjson.OPT_UTC_Z | orjson.OPT_NON_STR_KEYS,
).decode()
def to_email_html(self, event, **kwargs):
return render_to_string(
"sentry/partial/interfaces/csp_email.html", {"data": self.get_api_context()}
)
@cached_property
def normalized_blocked_uri(self):
return csp.normalize_value(self.blocked_uri)
@cached_property
def local_script_violation_type(self):
"""
If this is a locally-sourced script-src error, gives the type.
"""
if (
self.violated_directive
and self.effective_directive == "script-src"
and self.normalized_blocked_uri == csp.LOCAL
):
if "'unsafe-inline'" in self.violated_directive:
return "unsafe-inline"
elif "'unsafe-eval'" in self.violated_directive:
return "unsafe-eval"
return None
| Csp |
python | Textualize__textual | tests/test_widget_mount_point.py | {
"start": 97,
"end": 1295
} | class ____(Widget):
pass
async def test_find_dom_spot():
# Build up a "fake" DOM for an application.
screen = Widget(name="Screen")
header = Widget(name="Header", id="header")
body = Body(id="body")
content = [Content(id=f"item{n}") for n in range(1000)]
body._add_children(*content)
footer = Widget(name="Footer", id="footer")
screen._add_children(header, body, footer)
# Just as a quick double-check, make sure the main components are in
# their intended place.
assert list(screen._nodes) == [header, body, footer]
# Now check that we find what we're looking for in the places we expect
# to find them.
assert screen._find_mount_point(1) == (screen, 1)
assert screen._find_mount_point(body) == screen._find_mount_point(1)
assert screen._find_mount_point("Body") == screen._find_mount_point(body)
assert screen._find_mount_point("#body") == screen._find_mount_point(1)
# Finally, let's be sure that we get an error if, for some odd reason,
# we go looking for a widget that isn't actually part of the DOM we're
# looking in.
with pytest.raises(MountError):
_ = screen._find_mount_point(Widget())
| Body |
python | huggingface__transformers | src/transformers/models/flava/modeling_flava.py | {
"start": 18668,
"end": 20081
} | class ____(nn.Module):
"""
Image to Patch Embedding.
"""
def __init__(
self,
image_size: int = 224,
patch_size: Union[int, tuple[int, int]] = 16,
num_channels: int = 3,
embed_dim: int = 768,
):
super().__init__()
if not isinstance(image_size, collections.abc.Iterable):
image_size = (image_size, image_size)
if not isinstance(patch_size, collections.abc.Iterable):
patch_size = (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
if not interpolate_pos_encoding:
if height != self.image_size[0] or width != self.image_size[1]:
raise ValueError(
f"Input image size ({height}*{width}) doesn't match model"
f" ({self.image_size[0]}*{self.image_size[1]})."
)
x = self.projection(pixel_values).flatten(2).transpose(1, 2)
return x
| PatchEmbeddings |
python | redis__redis-py | tests/test_asyncio/test_pubsub.py | {
"start": 12170,
"end": 17354
} | class ____:
def setup_method(self, method):
self.message = None
def message_handler(self, message):
self.message = message
async def async_message_handler(self, message):
self.async_message = message
async def test_published_message_to_channel(self, r: redis.Redis, pubsub):
p = pubsub
await p.subscribe("foo")
assert await wait_for_message(p) == make_message("subscribe", "foo", 1)
assert await r.publish("foo", "test message") == 1
message = await wait_for_message(p)
assert isinstance(message, dict)
assert message == make_message("message", "foo", "test message")
async def test_published_message_to_pattern(self, r: redis.Redis, pubsub):
p = pubsub
await p.subscribe("foo")
await p.psubscribe("f*")
assert await wait_for_message(p) == make_message("subscribe", "foo", 1)
assert await wait_for_message(p) == make_message("psubscribe", "f*", 2)
# 1 to pattern, 1 to channel
assert await r.publish("foo", "test message") == 2
message1 = await wait_for_message(p)
message2 = await wait_for_message(p)
assert isinstance(message1, dict)
assert isinstance(message2, dict)
expected = [
make_message("message", "foo", "test message"),
make_message("pmessage", "foo", "test message", pattern="f*"),
]
assert message1 in expected
assert message2 in expected
assert message1 != message2
async def test_channel_message_handler(self, r: redis.Redis):
p = r.pubsub(ignore_subscribe_messages=True)
await p.subscribe(foo=self.message_handler)
assert await wait_for_message(p) is None
assert await r.publish("foo", "test message") == 1
assert await wait_for_message(p) is None
assert self.message == make_message("message", "foo", "test message")
await p.aclose()
async def test_channel_async_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
await p.subscribe(foo=self.async_message_handler)
assert await wait_for_message(p) is None
assert await r.publish("foo", "test message") == 1
assert await wait_for_message(p) is None
assert self.async_message == make_message("message", "foo", "test message")
await p.aclose()
async def test_channel_sync_async_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
await p.subscribe(foo=self.message_handler)
await p.subscribe(bar=self.async_message_handler)
assert await wait_for_message(p) is None
assert await r.publish("foo", "test message") == 1
assert await r.publish("bar", "test message 2") == 1
assert await wait_for_message(p) is None
assert self.message == make_message("message", "foo", "test message")
assert self.async_message == make_message("message", "bar", "test message 2")
await p.aclose()
@pytest.mark.onlynoncluster
async def test_pattern_message_handler(self, r: redis.Redis):
p = r.pubsub(ignore_subscribe_messages=True)
await p.psubscribe(**{"f*": self.message_handler})
assert await wait_for_message(p) is None
assert await r.publish("foo", "test message") == 1
assert await wait_for_message(p) is None
assert self.message == make_message(
"pmessage", "foo", "test message", pattern="f*"
)
await p.aclose()
async def test_unicode_channel_message_handler(self, r: redis.Redis):
p = r.pubsub(ignore_subscribe_messages=True)
channel = "uni" + chr(4456) + "code"
channels = {channel: self.message_handler}
await p.subscribe(**channels)
assert await wait_for_message(p) is None
assert await r.publish(channel, "test message") == 1
assert await wait_for_message(p) is None
assert self.message == make_message("message", channel, "test message")
await p.aclose()
@pytest.mark.onlynoncluster
# see: https://redis-py-cluster.readthedocs.io/en/stable/pubsub.html
# #known-limitations-with-pubsub
async def test_unicode_pattern_message_handler(self, r: redis.Redis):
p = r.pubsub(ignore_subscribe_messages=True)
pattern = "uni" + chr(4456) + "*"
channel = "uni" + chr(4456) + "code"
await p.psubscribe(**{pattern: self.message_handler})
assert await wait_for_message(p) is None
assert await r.publish(channel, "test message") == 1
assert await wait_for_message(p) is None
assert self.message == make_message(
"pmessage", channel, "test message", pattern=pattern
)
await p.aclose()
async def test_get_message_without_subscribe(self, r: redis.Redis, pubsub):
p = pubsub
with pytest.raises(RuntimeError) as info:
await p.get_message()
expect = (
"connection not set: did you forget to call subscribe() or psubscribe()?"
)
assert expect in info.exconly()
@pytest.mark.onlynoncluster
| TestPubSubMessages |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 58415,
"end": 61021
} | class ____(FieldValues):
"""
Valid and invalid values for `DurationField`.
"""
valid_inputs = {
'13': datetime.timedelta(seconds=13),
'3 08:32:01.000123': datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123),
'08:01': datetime.timedelta(minutes=8, seconds=1),
datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123),
3600: datetime.timedelta(hours=1),
'-999999999 00': datetime.timedelta(days=-999999999),
'999999999 00': datetime.timedelta(days=999999999),
}
invalid_inputs = {
'abc': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'],
'3 08:32 01.123': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'],
'-1000000000 00': ['The number of days must be between -999999999 and 999999999.'],
'1000000000 00': ['The number of days must be between -999999999 and 999999999.'],
}
outputs = {
datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): '3 08:32:01.000123',
}
field = serializers.DurationField()
def test_invalid_format(self):
with pytest.raises(ValueError) as exc_info:
serializers.DurationField(format='unknown')
assert str(exc_info.value) == (
"Unknown duration format provided, got 'unknown'"
" while expecting 'django', 'iso-8601' or `None`."
)
with pytest.raises(TypeError) as exc_info:
serializers.DurationField(format=123)
assert str(exc_info.value) == (
"duration format must be either str or `None`, not int"
)
def test_invalid_format_in_config(self):
field = serializers.DurationField()
with override_settings(REST_FRAMEWORK={'DURATION_FORMAT': 'unknown'}):
with pytest.raises(ValueError) as exc_info:
field.to_representation(datetime.timedelta(days=1))
assert str(exc_info.value) == (
"Unknown duration format provided, got 'unknown'"
" while expecting 'django', 'iso-8601' or `None`."
)
with override_settings(REST_FRAMEWORK={'DURATION_FORMAT': 123}):
with pytest.raises(TypeError) as exc_info:
field.to_representation(datetime.timedelta(days=1))
assert str(exc_info.value) == (
"duration format must be either str or `None`, not int"
)
| TestDurationField |
python | pydantic__pydantic | tests/mypy/outputs/mypy-plugin_ini/plugin_fail.py | {
"start": 5427,
"end": 5702
} | class ____(BaseModel, validate_by_name=True):
x: str = Field(..., alias=x_alias)
z: int
KwargsDynamicAliasModel(y='y', z=1)
# MYPY: error: Missing named argument "x" for "KwargsDynamicAliasModel" [call-arg]
KwargsDynamicAliasModel(x='y', z=1)
| KwargsDynamicAliasModel |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.