language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | scipy__scipy | scipy/signal/tests/test_windows.py | {
"start": 19103,
"end": 19558
} | class ____:
def test_basic(self, xp):
a = xp.asarray([0.5, 0.3, 0.2])
xp_assert_close(windows.general_cosine(5, a),
xp.asarray([0.4, 0.3, 1, 0.3, 0.4], dtype=xp.float64))
a = xp.asarray([0.5, 0.3, 0.2])
xp_assert_close(windows.general_cosine(4, a, sym=False),
xp.asarray([0.4, 0.3, 1, 0.3], dtype=xp.float64))
@make_xp_test_case(windows.general_hamming)
| TestGeneralCosine |
python | huggingface__transformers | src/transformers/models/sam3_tracker/modular_sam3_tracker.py | {
"start": 5486,
"end": 5556
} | class ____(Sam2TwoWayTransformer):
pass
| Sam3TrackerTwoWayTransformer |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_sha1.py | {
"start": 469,
"end": 1561
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_sha1"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
def matches_sha1_regex(x):
return bool(re.match(SHA1_REGEX, str(x)))
return column.apply(lambda x: matches_sha1_regex(x) if x else False)
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidSha1 |
python | tensorflow__tensorflow | tensorflow/core/function/trace_type/default_types.py | {
"start": 17570,
"end": 22455
} | class ____(trace.TraceType):
"""Represents a class annotated by attr.s."""
def __init__(self,
type_name: str,
attribute_names: PythonTuple[str],
attributes: PythonTuple[trace.TraceType],
placeholder_type: Optional[Type[Any]] = None):
self.named_attributes = NamedTuple(type_name, attribute_names, attributes)
self._placeholder_type = placeholder_type
@classmethod
def from_type_and_attributes(
cls, attrs_type: Any,
attributes: PythonTuple[trace.TraceType]) -> "Attrs":
return Attrs(attrs_type.__name__,
tuple(attr.name for attr in attrs_type.__attrs_attrs__),
attributes, attrs_type)
def is_subtype_of(self, other: trace.TraceType) -> bool:
if not isinstance(other, Attrs):
return False
return self.named_attributes.is_subtype_of(other.named_attributes)
def most_specific_common_supertype(
self, others: Sequence[trace.TraceType]) -> Optional["Attrs"]:
"""See base class."""
if not all(isinstance(other, Attrs) for other in others):
return None
supertyped_attributes = (
self.named_attributes.most_specific_common_supertype(
[other.named_attributes for other in others]
)
)
if supertyped_attributes is None:
return None
return Attrs(self.named_attributes.type_name,
self.named_attributes.attribute_names,
supertyped_attributes.attributes.components,
self._placeholder_type)
@classmethod
def experimental_type_proto(cls) -> Type[default_types_pb2.SerializedAttrs]:
return default_types_pb2.SerializedAttrs
@classmethod
def experimental_from_proto(
cls, proto: default_types_pb2.SerializedAttrs) -> "Attrs":
return Attrs(
proto.named_attributes.type_name,
tuple(proto.named_attributes.attribute_names),
Tuple.experimental_from_proto(
proto.named_attributes.attributes).components)
def experimental_as_proto(self) -> default_types_pb2.SerializedAttrs:
return default_types_pb2.SerializedAttrs(
named_attributes=self.named_attributes.experimental_as_proto())
def placeholder_value(self, placeholder_context) -> Any:
if self._placeholder_type is None:
# We don't need to trace after serialization so it is not needed but we
# can generate a placeholder type using the description if ever needed.
raise ValueError("Can not generate placeholder value for Attrs with"
" unspecified placeholder_type. Note: placeholder_type "
"is lost during serialization.")
attribute_placeholders = [
attribute.placeholder_value(placeholder_context)
for attribute in self.named_attributes.attributes.components
]
return self._placeholder_type(*attribute_placeholders)
def to_tensors(self, value: Any):
assert util.is_attrs(value)
flattened_values = []
for attribute_name, attribute_type in zip(
self.named_attributes.attribute_names,
self.named_attributes.attributes.components):
attribute_value = getattr(value, attribute_name)
flattened_values.extend(attribute_type.to_tensors(attribute_value))
return flattened_values
def from_tensors(self, tensors):
if self._placeholder_type is None:
raise ValueError("Packing serialized NamedTuples is not supported.")
return self._placeholder_type(
*[
c.from_tensors(tensors)
for c in self.named_attributes.attributes.components
]
)
def flatten(self) -> PythonList[trace.TraceType]:
flattened_types = []
for component in self.named_attributes.attributes.components:
flattened_types.extend(component.flatten())
return flattened_types
def cast(self, value: Any, casting_context) -> Any:
assert util.is_attrs(value)
attr_names = self.named_attributes.attribute_names
casted_values, was_casted = util.cast_and_return_whether_casted(
self.named_attributes.attributes.components,
[getattr(value, name) for name in attr_names],
casting_context,
)
if was_casted:
return self._placeholder_type(*casted_values)
else:
return value
def __hash__(self) -> int:
return hash(self.named_attributes)
def __eq__(self, other: Any) -> bool:
if not isinstance(other, trace.TraceType):
return NotImplemented
if not isinstance(other, Attrs):
return False
return self.named_attributes == other.named_attributes
def __repr__(self) -> str:
name_component_zip = zip(
self.named_attributes.attribute_names,
self.named_attributes.attributes.components,
)
paired = [f"[{n!r}, {c!r}]" for n, c in name_component_zip]
return f"{self.named_attributes.type_name}[{', '.join(paired)}]"
| Attrs |
python | tensorflow__tensorflow | tensorflow/python/training/basic_loops_test.py | {
"start": 1153,
"end": 3076
} | class ____(test.TestCase):
def testBasicTrainLoop(self):
logdir = _test_dir("basic_train_loop")
# Counts the number of calls.
num_calls = [0]
def train_fn(unused_sess, sv, y, a):
num_calls[0] += 1
self.assertEqual("y", y)
self.assertEqual("A", a)
if num_calls[0] == 3:
sv.request_stop()
with ops.Graph().as_default():
sv = supervisor.Supervisor(logdir=logdir)
basic_loops.basic_train_loop(
sv, train_fn, args=(sv, "y"), kwargs={"a": "A"})
self.assertEqual(3, num_calls[0])
def testBasicTrainLoopExceptionAborts(self):
logdir = _test_dir("basic_train_loop_exception_aborts")
def train_fn(unused_sess):
train_fn.counter += 1
if train_fn.counter == 3:
raise RuntimeError("Failed")
# Function attribute use to count the number of calls.
train_fn.counter = 0
with ops.Graph().as_default():
sv = supervisor.Supervisor(logdir=logdir)
with self.assertRaisesRegex(RuntimeError, "Failed"):
basic_loops.basic_train_loop(sv, train_fn)
def testBasicTrainLoopRetryOnAborted(self):
logdir = _test_dir("basic_train_loop_exception_aborts")
class AbortAndRetry:
def __init__(self):
self.num_calls = 0
self.retries_left = 2
def train_fn(self, unused_sess):
self.num_calls += 1
if self.num_calls % 3 == 2:
self.retries_left -= 1
if self.retries_left > 0:
raise errors_impl.AbortedError(None, None, "Aborted here")
else:
raise RuntimeError("Failed Again")
with ops.Graph().as_default():
sv = supervisor.Supervisor(logdir=logdir)
aar = AbortAndRetry()
with self.assertRaisesRegex(RuntimeError, "Failed Again"):
basic_loops.basic_train_loop(sv, aar.train_fn)
self.assertEqual(0, aar.retries_left)
if __name__ == "__main__":
test.main()
| BasicTrainLoopTest |
python | chroma-core__chroma | chromadb/telemetry/product/events.py | {
"start": 8381,
"end": 8665
} | class ____(ProductTelemetryEvent):
collection_uuid: str
delete_amount: int
def __init__(self, collection_uuid: str, delete_amount: int):
super().__init__()
self.collection_uuid = collection_uuid
self.delete_amount = delete_amount
| CollectionDeleteEvent |
python | pypa__warehouse | tests/unit/admin/views/test_banners.py | {
"start": 2424,
"end": 4813
} | class ____:
def test_serialize_form_and_banner(self, db_request):
banner = BannerFactory.create()
db_request.matchdict["banner_id"] = banner.id
result = views.edit_banner(db_request)
assert len(result) == 2
assert isinstance(result["form"], views.BannerForm)
assert result["form"].data["name"] == banner.name
assert result["banner"] == banner
def test_404_if_banner_does_not_exist(self, db_request):
db_request.matchdict["banner_id"] = str(uuid.uuid4())
with pytest.raises(HTTPNotFound):
views.edit_banner(db_request)
def test_update_banner(self, db_request, banner_data):
banner = BannerFactory.create(fa_icon="custom")
assert banner.is_live
form = views.BannerForm(MultiDict(), banner)
data = form.data.copy()
data["name"] = "New Name"
data["end"] = str(data["end"])
data.pop("fa_icon") # do not send fa icon within post data
db_request.matchdict["banner_id"] = banner.id
db_request.method = "POST"
db_request.POST = MultiDict(data)
db_request.current_route_path = pretend.call_recorder(
lambda: f"/admin/banners/{banner.id}/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
resp = views.edit_banner(db_request)
db_banner = db_request.db.query(Banner).filter(Banner.id == banner.id).one()
assert resp.status_code == 303
assert resp.location == f"/admin/banners/{banner.id}/"
assert db_banner.name == "New Name"
assert db_banner.fa_icon == "custom" # keep previous value
assert db_request.session.flash.calls == [
pretend.call("Banner updated", queue="success")
]
def test_form_errors_if_invalid_post_data(self, db_request):
banner = BannerFactory.create()
form = views.BannerForm(MultiDict(), banner)
data = form.data.copy()
data["name"] = "New name"
data["end"] = "" # date is required
db_request.matchdict["banner_id"] = banner.id
db_request.method = "POST"
db_request.POST = MultiDict(data)
result = views.edit_banner(db_request)
assert "end" in result["form"].errors
assert "New name" == result["form"].data["name"]
| TestEditBanner |
python | pydantic__pydantic | tests/mypy/modules/plugin_fail_baseConfig.py | {
"start": 1169,
"end": 1308
} | class ____(BaseModel):
class Config:
from_attributes: Any = {} # not sensible, but should still be handled gracefully
| BadConfig1 |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 30162,
"end": 30780
} | class ____(BaseModel):
"""
Serializer for a xcom item.
"""
key: Annotated[str, Field(title="Key")]
timestamp: Annotated[datetime, Field(title="Timestamp")]
logical_date: Annotated[datetime | None, Field(title="Logical Date")] = None
map_index: Annotated[int, Field(title="Map Index")]
task_id: Annotated[str, Field(title="Task Id")]
dag_id: Annotated[str, Field(title="Dag Id")]
run_id: Annotated[str, Field(title="Run Id")]
dag_display_name: Annotated[str, Field(title="Dag Display Name")]
task_display_name: Annotated[str, Field(title="Task Display Name")]
| XComResponse |
python | pypa__setuptools | setuptools/_vendor/autocommand/autoparse.py | {
"start": 1289,
"end": 1396
} | class ____(AutocommandError):
'''kwarg Error: autocommand can't handle a **kwargs parameter'''
| KWArgError |
python | spack__spack | lib/spack/spack/repo.py | {
"start": 59833,
"end": 60665
} | class ____:
"""Abstract base class for repository data."""
def __init__(self, name: Optional[str]) -> None:
self.name = name
@property
def _maybe_name(self) -> str:
"""Return the name if it exists, otherwise an empty string."""
return f"{self.name}: " if self.name else ""
def initialize(self, fetch: bool = True, git: MaybeExecutable = None) -> None:
return None
def update(self, git: MaybeExecutable = None, remote: str = "origin") -> None:
return None
def construct(
self, cache: spack.util.file_cache.FileCache, overrides: Optional[Dict[str, Any]] = None
) -> Dict[str, Union[Repo, Exception]]:
"""Construct Repo instances from the descriptor."""
raise RuntimeError("construct() must be implemented in subclasses")
| RepoDescriptor |
python | getsentry__sentry | src/sentry/search/eap/columns.py | {
"start": 13408,
"end": 14975
} | class ____(AggregateDefinition):
def __post_init__(self) -> None:
validate_trace_metric_aggregate_arguments(self.arguments)
def resolve(
self,
alias: str,
search_type: constants.SearchType,
resolved_arguments: ResolvedArguments,
snuba_params: SnubaParams,
query_result_cache: dict[str, EAPResponse],
search_config: SearchResolverConfig,
) -> ResolvedAggregate:
if not isinstance(resolved_arguments[0], AttributeKey):
raise InvalidSearchQuery(
"Trace metric aggregates expect argument 0 to be of type AttributeArgumentDefinition"
)
resolved_attribute = resolved_arguments[0]
if self.attribute_resolver is not None:
resolved_attribute = self.attribute_resolver(resolved_attribute)
metric_name, metric_type, metric_unit = extract_trace_metric_aggregate_arguments(
resolved_arguments
)
return ResolvedTraceMetricAggregate(
public_alias=alias,
internal_name=self.internal_function,
search_type=search_type,
internal_type=self.internal_type,
processor=self.processor,
extrapolation_mode=resolve_extrapolation_mode(
search_config, self.extrapolation_mode_override
),
argument=resolved_attribute,
metric_name=metric_name,
metric_type=metric_type,
metric_unit=metric_unit,
)
@dataclass(kw_only=True)
| TraceMetricAggregateDefinition |
python | huggingface__transformers | examples/modular-transformers/modeling_test_detr.py | {
"start": 8043,
"end": 10384
} | class ____(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than
torchvision.models.resnet[18,34,50,101] produce nans.
"""
def __init__(self, n):
super().__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x):
# move reshapes to the beginning
# to make it user-friendly
weight = self.weight.reshape(1, -1, 1, 1)
bias = self.bias.reshape(1, -1, 1, 1)
running_var = self.running_var.reshape(1, -1, 1, 1)
running_mean = self.running_mean.reshape(1, -1, 1, 1)
epsilon = 1e-5
scale = weight * (running_var + epsilon).rsqrt()
bias = bias - running_mean * scale
return x * scale + bias
def replace_batch_norm(model):
r"""
Recursively replace all `torch.nn.BatchNorm2d` with `TestDetrFrozenBatchNorm2d`.
Args:
model (torch.nn.Module):
input model
"""
for name, module in model.named_children():
if isinstance(module, nn.BatchNorm2d):
new_module = TestDetrFrozenBatchNorm2d(module.num_features)
if module.weight.device != torch.device("meta"):
new_module.weight.data.copy_(module.weight)
new_module.bias.data.copy_(module.bias)
new_module.running_mean.data.copy_(module.running_mean)
new_module.running_var.data.copy_(module.running_var)
model._modules[name] = new_module
if len(list(module.children())) > 0:
replace_batch_norm(module)
| TestDetrFrozenBatchNorm2d |
python | streamlit__streamlit | lib/tests/streamlit/commands/execution_control_test.py | {
"start": 959,
"end": 9637
} | class ____(unittest.TestCase):
def test_returns_empty_list_if_scope_is_app(self):
assert _new_fragment_id_queue(None, scope="app") == []
def test_raises_exception_if_no_fragment_id_queue(self):
ctx = MagicMock()
ctx.fragment_ids_this_run = []
with pytest.raises(StreamlitAPIException):
_new_fragment_id_queue(ctx, scope="fragment")
def test_asserts_if_curr_id_not_in_queue(self):
ctx = MagicMock()
ctx.fragment_ids_this_run = ["some_fragment_id"]
ctx.current_fragment_id = "some_other_fragment_id"
with pytest.raises(
RuntimeError,
match=r"Could not find current_fragment_id in fragment_id_queue. This should never happen.",
):
_new_fragment_id_queue(ctx, scope="fragment")
def test_drops_items_in_queue_until_curr_id(self):
ctx = MagicMock()
ctx.fragment_ids_this_run = [
"id1",
"id2",
"id3",
"curr_id",
"id4",
"id5",
]
ctx.current_fragment_id = "curr_id"
assert _new_fragment_id_queue(ctx, scope="fragment") == [
"curr_id",
"id4",
"id5",
]
@patch("streamlit.commands.execution_control.get_script_run_ctx")
def test_st_rerun_is_fragment_scoped_rerun_flag_false(patched_get_script_run_ctx):
ctx = MagicMock()
patched_get_script_run_ctx.return_value = ctx
rerun(scope="app")
ctx.script_requests.request_rerun.assert_called_with(
RerunData(
query_string=ctx.query_string,
page_script_hash=ctx.page_script_hash,
fragment_id_queue=[],
is_fragment_scoped_rerun=False,
cached_message_hashes=ctx.cached_message_hashes,
context_info=ctx.context_info,
)
)
@patch(
"streamlit.commands.execution_control._new_fragment_id_queue",
MagicMock(return_value=["some_fragment_ids"]),
)
@patch("streamlit.commands.execution_control.get_script_run_ctx")
def test_st_rerun_is_fragment_scoped_rerun_flag_true(patched_get_script_run_ctx):
ctx = MagicMock()
patched_get_script_run_ctx.return_value = ctx
rerun(scope="fragment")
ctx.script_requests.request_rerun.assert_called_with(
RerunData(
query_string=ctx.query_string,
page_script_hash=ctx.page_script_hash,
fragment_id_queue=["some_fragment_ids"],
is_fragment_scoped_rerun=True,
cached_message_hashes=ctx.cached_message_hashes,
context_info=ctx.context_info,
)
)
def test_st_rerun_invalid_scope_throws_error():
with pytest.raises(StreamlitAPIException):
rerun(scope="foo")
@patch("streamlit.commands.execution_control.get_script_run_ctx")
def test_st_switch_page_context_info(patched_get_script_run_ctx):
"""Test that context_info is passed to RerunData in st.switch_page."""
ctx = MagicMock()
ctx.pages_manager = MagicMock() # Ensure pages_manager is present
ctx.script_requests = MagicMock()
ctx.main_script_path = "/some/path/your_app.py"
ctx.query_string = ""
ctx.page_script_hash = "some_hash" # This is for the current page, not the target
ctx.cached_message_hashes = MagicMock()
ctx.context_info = {"test_key": "test_value"} # Set a specific context_info
ctx.session_state = MagicMock()
query_params_cm = MagicMock()
mock_query_params = MagicMock()
query_params_cm.__enter__.return_value = mock_query_params
query_params_cm.__exit__.return_value = False
ctx.session_state.query_params.return_value = query_params_cm
patched_get_script_run_ctx.return_value = ctx
# Mock the StreamlitPage object and its _script_hash attribute
mock_page = MagicMock(spec=StreamlitPage)
mock_page._script_hash = "target_page_hash"
with patch(
"streamlit.commands.execution_control.get_main_script_directory",
return_value="/some/path",
):
switch_page(mock_page)
ctx.script_requests.request_rerun.assert_called_once()
call_args = ctx.script_requests.request_rerun.call_args[0][0]
assert isinstance(call_args, RerunData)
assert call_args.page_script_hash == "target_page_hash"
assert call_args.context_info == {"test_key": "test_value"}
mock_query_params.clear.assert_called_once_with()
mock_query_params.from_dict.assert_not_called()
@patch("streamlit.commands.execution_control.get_script_run_ctx")
def test_st_switch_page_applies_query_params(patched_get_script_run_ctx):
"""Test that providing query_params sets them before rerunning."""
ctx = MagicMock()
ctx.query_string = ""
ctx.cached_message_hashes = set()
ctx.context_info = {"foo": "bar"}
ctx.script_requests = MagicMock()
ctx.session_state = MagicMock()
query_params_cm = MagicMock()
mock_query_params = MagicMock()
query_params_cm.__enter__.return_value = mock_query_params
query_params_cm.__exit__.return_value = False
ctx.session_state.query_params.return_value = query_params_cm
def _from_dict_side_effect(value):
assert value == {"team": "streamlit"}
ctx.query_string = "team=streamlit"
mock_query_params.from_dict.side_effect = _from_dict_side_effect
mocked_page = MagicMock(spec=StreamlitPage)
mocked_page._script_hash = "target_page_hash"
patched_get_script_run_ctx.return_value = ctx
switch_page(mocked_page, query_params={"team": "streamlit"})
mock_query_params.from_dict.assert_called_once_with({"team": "streamlit"})
mock_query_params.clear.assert_not_called()
ctx.script_requests.request_rerun.assert_called_once()
rerun_arg = ctx.script_requests.request_rerun.call_args[0][0]
assert isinstance(rerun_arg, RerunData)
assert rerun_arg.query_string == "team=streamlit"
assert rerun_arg.page_script_hash == "target_page_hash"
@patch("streamlit.commands.execution_control.get_script_run_ctx")
def test_st_switch_page_applies_iterable_query_params(patched_get_script_run_ctx):
"""Test that tuple-based query_params are accepted."""
ctx = MagicMock()
ctx.query_string = ""
ctx.cached_message_hashes = set()
ctx.context_info = {}
ctx.script_requests = MagicMock()
ctx.session_state = MagicMock()
query_params_cm = MagicMock()
mock_query_params = MagicMock()
query_params_cm.__enter__.return_value = mock_query_params
query_params_cm.__exit__.return_value = False
ctx.session_state.query_params.return_value = query_params_cm
query_param_items = [
("foo", "bar"),
("stream", ["lit", "rocks"]),
]
def _from_dict_side_effect(value):
assert value == query_param_items
ctx.query_string = "foo=bar&stream=lit&stream=rocks"
mock_query_params.from_dict.side_effect = _from_dict_side_effect
mocked_page = MagicMock(spec=StreamlitPage)
mocked_page._script_hash = "target_page_hash"
patched_get_script_run_ctx.return_value = ctx
switch_page(mocked_page, query_params=query_param_items)
mock_query_params.from_dict.assert_called_once_with(query_param_items)
mock_query_params.clear.assert_not_called()
ctx.script_requests.request_rerun.assert_called_once()
rerun_arg = ctx.script_requests.request_rerun.call_args[0][0]
assert isinstance(rerun_arg, RerunData)
assert rerun_arg.query_string == "foo=bar&stream=lit&stream=rocks"
assert rerun_arg.page_script_hash == "target_page_hash"
@patch("streamlit.commands.execution_control.get_script_run_ctx")
def test_st_switch_page_rejects_invalid_query_params(patched_get_script_run_ctx):
"""Test that invalid query_params types raise a StreamlitAPIException."""
ctx = MagicMock()
ctx.session_state = MagicMock()
ctx.script_requests = MagicMock()
ctx.query_string = ""
ctx.cached_message_hashes = set()
ctx.context_info = {}
query_params_cm = MagicMock()
mock_query_params = MagicMock()
query_params_cm.__enter__.return_value = mock_query_params
query_params_cm.__exit__.return_value = False
ctx.session_state.query_params.return_value = query_params_cm
patched_get_script_run_ctx.return_value = ctx
mocked_page = MagicMock(spec=StreamlitPage)
mocked_page._script_hash = "target_page_hash"
with pytest.raises(StreamlitAPIException, match=r"`query_params` must be"):
switch_page(mocked_page, query_params="not valid") # type: ignore[arg-type]
ctx.script_requests.request_rerun.assert_not_called()
mock_query_params.clear.assert_not_called()
mock_query_params.from_dict.assert_not_called()
| NewFragmentIdQueueTest |
python | doocs__leetcode | solution/1000-1099/1021.Remove Outermost Parentheses/Solution2.py | {
"start": 0,
"end": 304
} | class ____:
def removeOuterParentheses(self, s: str) -> str:
ans = []
cnt = 0
for c in s:
if c == '(':
cnt += 1
if cnt > 1:
ans.append(c)
if c == ')':
cnt -= 1
return ''.join(ans)
| Solution |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/interfaces.py | {
"start": 7876,
"end": 8018
} | class ____(GxDatasourceWarning):
pass
BatchMetadata: TypeAlias = Dict[str, Any]
@pydantic_dc.dataclass(frozen=True)
| GxSerializationWarning |
python | pyca__cryptography | src/cryptography/hazmat/primitives/padding.py | {
"start": 423,
"end": 1018
} | class ____(metaclass=abc.ABCMeta):
@abc.abstractmethod
def update(self, data: utils.Buffer) -> bytes:
"""
Pads the provided bytes and returns any available data as bytes.
"""
@abc.abstractmethod
def finalize(self) -> bytes:
"""
Finalize the padding, returns bytes.
"""
def _byte_padding_check(block_size: int) -> None:
if not (0 <= block_size <= 2040):
raise ValueError("block_size must be in range(0, 2041).")
if block_size % 8 != 0:
raise ValueError("block_size must be a multiple of 8.")
| PaddingContext |
python | google__jax | jax/_src/source_info_util.py | {
"start": 2685,
"end": 3601
} | class ____:
stack: tuple[Scope | Transform, ...] = ()
def extend(self, name: str) -> NameStack:
return NameStack((*self.stack, Scope(name)))
def transform(self, transform_name: str) -> NameStack:
return NameStack((*self.stack, Transform(transform_name)))
def __getitem__(self, idx: slice) -> NameStack:
return NameStack(self.stack[idx])
def __len__(self):
return len(self.stack)
def __add__(self, other: NameStack) -> NameStack:
return NameStack(self.stack + other.stack)
def __radd__(self, other: NameStack) -> NameStack:
return NameStack(other.stack + self.stack)
def __str__(self) -> str:
scope: list[str] = []
for elem in self.stack[::-1]:
elem.wrap(scope)
return '/'.join(reversed(scope))
def new_name_stack(name: str = '') -> NameStack:
name_stack = NameStack()
if name:
name_stack = name_stack.extend(name)
return name_stack
| NameStack |
python | celery__celery | t/unit/worker/test_control.py | {
"start": 1985,
"end": 3287
} | class ____:
def test_stop(self):
parent = Mock()
g = gPidbox(parent)
stopped = g._node_stopped = Mock()
shutdown = g._node_shutdown = Mock()
close_chan = g._close_channel = Mock()
g.stop(parent)
shutdown.set.assert_called_with()
stopped.wait.assert_called_with()
close_chan.assert_called_with(parent)
assert g._node_stopped is None
assert g._node_shutdown is None
close_chan.reset()
g.stop(parent)
close_chan.assert_called_with(parent)
def test_resets(self):
parent = Mock()
g = gPidbox(parent)
g._resets = 100
g.reset()
assert g._resets == 101
def test_loop(self):
parent = Mock()
conn = self.app.connection_for_read()
parent.connection_for_read.return_value = conn
drain = conn.drain_events = Mock()
g = gPidbox(parent)
parent.connection = Mock()
do_reset = g._do_reset = Mock()
call_count = [0]
def se(*args, **kwargs):
if call_count[0] > 2:
g._node_shutdown.set()
g.reset()
call_count[0] += 1
drain.side_effect = se
g.loop(parent)
assert do_reset.call_count == 4
| test_Pidbox_green |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-zep/llama_index/vector_stores/zep/base.py | {
"start": 659,
"end": 11778
} | class ____(BasePydanticVectorStore):
"""
Zep Vector Store for storing and retrieving embeddings.
Zep supports both normalized and non-normalized embeddings. Cosine similarity is
used to compute distance and the returned score is normalized to be between 0 and 1.
Args:
collection_name (str): Name of the Zep collection in which to store embeddings.
api_url (str): URL of the Zep API.
api_key (str, optional): Key for the Zep API. Defaults to None.
collection_description (str, optional): Description of the collection.
Defaults to None.
collection_metadata (dict, optional): Metadata of the collection.
Defaults to None.
embedding_dimensions (int, optional): Dimensions of the embeddings.
Defaults to None.
is_auto_embedded (bool, optional): Whether the embeddings are auto-embedded.
Defaults to False.
Examples:
`pip install llama-index-vector-stores-zep`
```python
from llama_index.vector_stores.zep import ZepVectorStore
vector_store = ZepVectorStore(
api_url="<api_url>",
api_key="<api_key>",
collection_name="<unique_collection_name>", # Can either be an existing collection or a new one
embedding_dimensions=1536, # Optional, required if creating a new collection
)
```
"""
stores_text: bool = True
flat_metadata: bool = False
_client: ZepClient = PrivateAttr()
_collection: DocumentCollection = PrivateAttr()
def __init__(
self,
collection_name: str,
api_url: str,
api_key: Optional[str] = None,
collection_description: Optional[str] = None,
collection_metadata: Optional[Dict[str, Any]] = None,
embedding_dimensions: Optional[int] = None,
is_auto_embedded: bool = False,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__()
self._client = ZepClient(base_url=api_url, api_key=api_key)
collection: Union[DocumentCollection, None] = None
try:
collection = self._client.document.get_collection(name=collection_name)
except zep_python.NotFoundError:
if embedding_dimensions is None:
raise ValueError(
"embedding_dimensions must be specified if collection does not"
" exist"
)
logger.info(
f"Collection {collection_name} does not exist, "
f"will try creating one with dimensions={embedding_dimensions}"
)
collection = self._client.document.add_collection(
name=collection_name,
embedding_dimensions=embedding_dimensions,
is_auto_embedded=is_auto_embedded,
description=collection_description,
metadata=collection_metadata,
)
assert collection is not None
self._collection = collection
@classmethod
def class_name(cls) -> str:
return "ZepVectorStore"
@property
def client(self) -> Any:
"""Get client."""
return self._client
def _prepare_documents(
self, nodes: List[BaseNode]
) -> Tuple[List["ZepDocument"], List[str]]:
docs: List["ZepDocument"] = []
ids: List[str] = []
for node in nodes:
metadata_dict: Dict[str, Any] = node_to_metadata_dict(
node, remove_text=True, flat_metadata=self.flat_metadata
)
if len(node.get_content()) == 0:
raise ValueError("No content to add to Zep")
docs.append(
ZepDocument(
document_id=node.node_id,
content=node.get_content(metadata_mode=MetadataMode.NONE),
embedding=node.get_embedding(),
metadata=metadata_dict,
)
)
ids.append(node.node_id)
return docs, ids
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""
Add nodes to the collection.
Args:
nodes (List[BaseNode]): List of nodes with embeddings.
Returns:
List[str]: List of IDs of the added documents.
"""
if not isinstance(self._collection, DocumentCollection):
raise ValueError("Collection not initialized")
if self._collection.is_auto_embedded:
raise ValueError("Collection is auto embedded, cannot add embeddings")
docs, ids = self._prepare_documents(nodes)
self._collection.add_documents(docs)
return ids
async def async_add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Asynchronously add nodes to the collection.
Args:
nodes (List[BaseNode]): List of nodes with embeddings.
Returns:
List[str]: List of IDs of the added documents.
"""
if not isinstance(self._collection, DocumentCollection):
raise ValueError("Collection not initialized")
if self._collection.is_auto_embedded:
raise ValueError("Collection is auto embedded, cannot add embeddings")
docs, ids = self._prepare_documents(nodes)
await self._collection.aadd_documents(docs)
return ids
def delete(self, ref_doc_id: Optional[str] = None, **delete_kwargs: Any) -> None: # type: ignore
"""
Delete a document from the collection.
Args:
ref_doc_id (Optional[str]): ID of the document to delete.
Not currently supported.
delete_kwargs: Must contain "uuid" key with UUID of the document to delete.
"""
if not isinstance(self._collection, DocumentCollection):
raise ValueError("Collection not initialized")
if ref_doc_id and len(ref_doc_id) > 0:
raise NotImplementedError(
"Delete by ref_doc_id not yet implemented for Zep."
)
if "uuid" in delete_kwargs:
self._collection.delete_document(uuid=delete_kwargs["uuid"])
else:
raise ValueError("uuid must be specified")
async def adelete(
self, ref_doc_id: Optional[str] = None, **delete_kwargs: Any
) -> None: # type: ignore
"""
Asynchronously delete a document from the collection.
Args:
ref_doc_id (Optional[str]): ID of the document to delete.
Not currently supported.
delete_kwargs: Must contain "uuid" key with UUID of the document to delete.
"""
if not isinstance(self._collection, DocumentCollection):
raise ValueError("Collection not initialized")
if ref_doc_id and len(ref_doc_id) > 0:
raise NotImplementedError(
"Delete by ref_doc_id not yet implemented for Zep."
)
if "uuid" in delete_kwargs:
await self._collection.adelete_document(uuid=delete_kwargs["uuid"])
else:
raise ValueError("uuid must be specified")
def _parse_query_result(
self, results: List["ZepDocument"]
) -> VectorStoreQueryResult:
similarities: List[float] = []
ids: List[str] = []
nodes: List[TextNode] = []
for d in results:
node = metadata_dict_to_node(d.metadata or {})
node.set_content(d.content)
nodes.append(node)
if d.score is None:
d.score = 0.0
similarities.append(d.score)
if d.document_id is None:
d.document_id = ""
ids.append(d.document_id)
return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
def _to_zep_filters(self, filters: MetadataFilters) -> Dict[str, Any]:
"""Convert filters to Zep filters. Filters are ANDed together."""
filter_conditions: List[Dict[str, Any]] = []
for f in filters.legacy_filters():
filter_conditions.append({"jsonpath": f'$[*] ? (@.{f.key} == "{f.value}")'})
return {"where": {"and": filter_conditions}}
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""
Query the index for the top k most similar nodes to the given query.
Args:
query (VectorStoreQuery): Query object containing either a query string
or a query embedding.
Returns:
VectorStoreQueryResult: Result of the query, containing the most similar
nodes, their similarities, and their IDs.
"""
if not isinstance(self._collection, DocumentCollection):
raise ValueError("Collection not initialized")
if query.query_embedding is None and query.query_str is None:
raise ValueError("query must have one of query_str or query_embedding")
# If we have an embedding, we shouldn't use the query string
# Zep does not allow both to be set
if query.query_embedding:
query.query_str = None
metadata_filters = None
if query.filters is not None:
metadata_filters = self._to_zep_filters(query.filters)
results = self._collection.search(
text=query.query_str,
embedding=query.query_embedding,
metadata=metadata_filters,
limit=query.similarity_top_k,
)
return self._parse_query_result(results)
async def aquery(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""
Asynchronously query the index for the top k most similar nodes to the
given query.
Args:
query (VectorStoreQuery): Query object containing either a query string or
a query embedding.
Returns:
VectorStoreQueryResult: Result of the query, containing the most similar
nodes, their similarities, and their IDs.
"""
if not isinstance(self._collection, DocumentCollection):
raise ValueError("Collection not initialized")
if query.query_embedding is None and query.query_str is None:
raise ValueError("query must have one of query_str or query_embedding")
# If we have an embedding, we shouldn't use the query string
# Zep does not allow both to be set
if query.query_embedding:
query.query_str = None
metadata_filters = None
if query.filters is not None:
metadata_filters = self._to_zep_filters(query.filters)
results = await self._collection.asearch(
text=query.query_str,
embedding=query.query_embedding,
metadata=metadata_filters,
limit=query.similarity_top_k,
)
return self._parse_query_result(results)
| ZepVectorStore |
python | pypa__warehouse | warehouse/events/models.py | {
"start": 5073,
"end": 9005
} | class ____:
Event: typing.ClassVar[type]
@declared_attr
def events(cls): # noqa: N805
cls.Event = type(
f"{cls.__name__}Event",
(Event, db.Model),
dict(
__tablename__=f"{cls.__name__.lower()}_events",
__table_args__=(
Index(f"ix_{cls.__name__.lower()}_events_source_id", "source_id"),
),
source_id=mapped_column(
UUID(as_uuid=True),
ForeignKey(
f"{cls.__tablename__}.id",
deferrable=True,
initially="DEFERRED",
ondelete="CASCADE",
),
nullable=False,
),
source=orm.relationship(
cls,
back_populates="events",
order_by=f"desc({cls.__name__}Event.time)",
),
),
)
return orm.relationship(
cls.Event,
cascade="all, delete-orphan",
passive_deletes=True,
lazy="dynamic",
back_populates="source",
order_by=f"desc({cls.__name__}Event.time)",
)
def record_event(self, *, tag, request: Request, additional=None):
"""Records an Event record on the associated model."""
# Get-or-create a new IpAddress object
ip_address = request.ip_address
# Add `request.ip_address.geoip_info` data to `Event.additional`
if ip_address.geoip_info is not None:
additional = additional or {}
additional["geoip_info"] = ip_address.geoip_info
if user_agent := request.headers.get("User-Agent"):
try:
parsed_user_agent = linehaul_user_agent_parser.parse(user_agent)
if (
parsed_user_agent is not None
and parsed_user_agent.installer is not None
and parsed_user_agent.installer.name == "Browser"
):
parsed_user_agent = user_agent_parser.Parse(user_agent)
additional = additional or {}
additional["user_agent_info"] = {
"installer": "Browser",
# See https://github.com/pypi/linehaul-cloud-function/issues/203
"device": parsed_user_agent["device"]["family"], # noqa: E501
"os": parsed_user_agent["os"]["family"],
"user_agent": parsed_user_agent["user_agent"][
"family"
], # noqa: E501
}
else:
additional = additional or {}
additional["user_agent_info"] = {
"installer": (
parsed_user_agent.installer.name
if parsed_user_agent and parsed_user_agent.installer
else None
),
"implementation": (
parsed_user_agent.implementation.name
if parsed_user_agent and parsed_user_agent.implementation
else None
),
"system": (
parsed_user_agent.system.name
if parsed_user_agent and parsed_user_agent.system
else None
),
}
except linehaul_user_agent_parser.UnknownUserAgentError:
pass
event = self.Event(
source=self,
tag=tag,
ip_address=ip_address,
additional=additional,
)
request.db.add(event)
return event
| HasEvents |
python | Netflix__metaflow | metaflow/plugins/airflow/airflow_utils.py | {
"start": 6460,
"end": 7270
} | class ____:
EXTERNAL_TASK_SENSOR = "ExternalTaskSensor"
S3_SENSOR = "S3KeySensor"
@classmethod
def get_supported_sensors(cls):
return list(cls.__dict__.values())
def run_id_creator(val):
# join `[dag-id,run-id]` of airflow dag.
return hashlib.md5("-".join([str(x) for x in val]).encode("utf-8")).hexdigest()[
:RUN_HASH_ID_LEN
]
def task_id_creator(val):
# join `[dag-id,run-id]` of airflow dag.
return hashlib.md5("-".join([str(x) for x in val]).encode("utf-8")).hexdigest()[
:TASK_ID_HASH_LEN
]
def id_creator(val, hash_len):
# join `[dag-id,run-id]` of airflow dag.
return hashlib.md5("-".join([str(x) for x in val]).encode("utf-8")).hexdigest()[
:hash_len
]
def json_dump(val):
return json.dumps(val)
| SensorNames |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_hparams.py | {
"start": 25732,
"end": 26747
} | class ____(BoringModel):
def __init__(self, **kwargs):
super().__init__()
self.save_hyperparameters()
@pytest.mark.parametrize("cls", [RuntimeParamChangeModelSaving])
def test_init_arg_with_runtime_change(tmp_path, cls):
"""Test that we save/export only the initial hparams, no other runtime change allowed."""
model = cls(running_arg=123)
assert model.hparams.running_arg == 123
model.hparams.running_arg = -1
assert model.hparams.running_arg == -1
trainer = Trainer(
default_root_dir=tmp_path,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=1,
logger=TensorBoardLogger(tmp_path),
enable_progress_bar=False,
enable_checkpointing=False,
)
trainer.fit(model)
path_yaml = os.path.join(trainer.logger.log_dir, trainer.logger.NAME_HPARAMS_FILE)
hparams = load_hparams_from_yaml(path_yaml)
assert hparams.get("running_arg") == 123
| RuntimeParamChangeModelSaving |
python | getsentry__sentry | tests/acceptance/test_emails.py | {
"start": 2757,
"end": 3853
} | class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
create_default_projects()
# This email address is required to match FIXTURES.
self.user = self.create_user("foo@example.com")
self.login_as(self.user)
def test_emails(self) -> None:
for url, name in EMAILS:
# HTML output is captured as a snapshot
self.browser.get(build_url(url, "html"))
self.browser.wait_until("#preview")
# Text output is asserted against static fixture files
self.browser.get(build_url(url, "txt"))
self.browser.wait_until("#preview")
elem = self.browser.find_element(by=By.CSS_SELECTOR, value="#preview pre")
text_src = elem.get_attribute("innerHTML")
# Avoid relying on IDs as this can cause flakey tests
text_src = redact_ips(redact_ids(replace_amp(text_src)))
fixture_src = read_txt_email_fixture(name)
assert redact_notification_uuid(fixture_src) == redact_notification_uuid(text_src)
| EmailTestCase |
python | viewflow__viewflow | viewflow/jsonstore.py | {
"start": 6699,
"end": 6763
} | class ____(JSONFieldMixin, fields.FloatField):
pass
| FloatField |
python | PyCQA__pylint | pylint/checkers/deprecated.py | {
"start": 864,
"end": 11549
} | class ____(BaseChecker):
"""A mixin implementing logic for checking deprecated symbols.
A class implementing mixin must define "deprecated-method" Message.
"""
DEPRECATED_ATTRIBUTE_MESSAGE: dict[str, MessageDefinitionTuple] = {
"W4906": (
"Using deprecated attribute %r",
"deprecated-attribute",
"The attribute is marked as deprecated and will be removed in the future.",
{"shared": True},
),
}
DEPRECATED_MODULE_MESSAGE: dict[str, MessageDefinitionTuple] = {
"W4901": (
"Deprecated module %r",
"deprecated-module",
"A module marked as deprecated is imported.",
{"old_names": [("W0402", "old-deprecated-module")], "shared": True},
),
}
DEPRECATED_METHOD_MESSAGE: dict[str, MessageDefinitionTuple] = {
"W4902": (
"Using deprecated method %s()",
"deprecated-method",
"The method is marked as deprecated and will be removed in the future.",
{"old_names": [("W1505", "old-deprecated-method")], "shared": True},
),
}
DEPRECATED_ARGUMENT_MESSAGE: dict[str, MessageDefinitionTuple] = {
"W4903": (
"Using deprecated argument %s of method %s()",
"deprecated-argument",
"The argument is marked as deprecated and will be removed in the future.",
{"old_names": [("W1511", "old-deprecated-argument")], "shared": True},
),
}
DEPRECATED_CLASS_MESSAGE: dict[str, MessageDefinitionTuple] = {
"W4904": (
"Using deprecated class %s of module %s",
"deprecated-class",
"The class is marked as deprecated and will be removed in the future.",
{"old_names": [("W1512", "old-deprecated-class")], "shared": True},
),
}
DEPRECATED_DECORATOR_MESSAGE: dict[str, MessageDefinitionTuple] = {
"W4905": (
"Using deprecated decorator %s()",
"deprecated-decorator",
"The decorator is marked as deprecated and will be removed in the future.",
{"old_names": [("W1513", "old-deprecated-decorator")], "shared": True},
),
}
@utils.only_required_for_messages("deprecated-attribute")
def visit_attribute(self, node: nodes.Attribute) -> None:
"""Called when an `Attribute` node is visited."""
self.check_deprecated_attribute(node)
@utils.only_required_for_messages(
"deprecated-method",
"deprecated-argument",
"deprecated-class",
"deprecated-module",
)
def visit_call(self, node: nodes.Call) -> None:
"""Called when a :class:`nodes.Call` node is visited."""
self.check_deprecated_class_in_call(node)
for inferred in infer_all(node.func):
# Calling entry point for deprecation check logic.
self.check_deprecated_method(node, inferred)
if (
isinstance(inferred, nodes.FunctionDef)
and inferred.qname() == "builtins.__import__"
and len(node.args) == 1
and (mod_path_node := utils.safe_infer(node.args[0]))
and isinstance(mod_path_node, nodes.Const)
):
self.check_deprecated_module(node, mod_path_node.value)
@utils.only_required_for_messages(
"deprecated-module",
"deprecated-class",
)
def visit_import(self, node: nodes.Import) -> None:
"""Triggered when an import statement is seen."""
for name in (name for name, _ in node.names):
self.check_deprecated_module(node, name)
if "." in name:
# Checking deprecation for import module with class
mod_name, class_name = name.split(".", 1)
self.check_deprecated_class(node, mod_name, (class_name,))
def deprecated_decorators(self) -> Iterable[str]:
"""Callback returning the deprecated decorators.
Returns:
collections.abc.Container of deprecated decorator names.
"""
return ()
@utils.only_required_for_messages("deprecated-decorator")
def visit_decorators(self, node: nodes.Decorators) -> None:
"""Triggered when a decorator statement is seen."""
children = list(node.get_children())
if not children:
return
if isinstance(children[0], nodes.Call):
inferred = safe_infer(children[0].func)
else:
inferred = safe_infer(children[0])
if not isinstance(inferred, (nodes.ClassDef, nodes.FunctionDef)):
return
qname = inferred.qname()
if qname in self.deprecated_decorators():
self.add_message("deprecated-decorator", node=node, args=qname)
@utils.only_required_for_messages("deprecated-module", "deprecated-class")
def visit_importfrom(self, node: nodes.ImportFrom) -> None:
"""Triggered when a from statement is seen."""
basename = get_import_name(node, node.modname)
assert basename is not None, "Module name should not be None"
self.check_deprecated_module(node, basename)
class_names = (name for name, _ in node.names)
self.check_deprecated_class(node, basename, class_names)
def deprecated_methods(self) -> Container[str]:
"""Callback returning the deprecated methods/functions.
Returns:
collections.abc.Container of deprecated function/method names.
"""
return ()
def deprecated_arguments(self, method: str) -> Iterable[tuple[int | None, str]]:
"""Callback returning the deprecated arguments of method/function.
Args:
method (str): name of function/method checked for deprecated arguments
Returns:
collections.abc.Iterable in form:
((POSITION1, PARAM1), (POSITION2: PARAM2) ...)
where
* POSITIONX - position of deprecated argument PARAMX in function definition.
If argument is keyword-only, POSITIONX should be None.
* PARAMX - name of the deprecated argument.
E.g. suppose function:
.. code-block:: python
def bar(arg1, arg2, arg3, arg4, arg5='spam')
with deprecated arguments `arg2` and `arg4`. `deprecated_arguments` should return:
.. code-block:: python
((1, 'arg2'), (3, 'arg4'))
"""
# pylint: disable=unused-argument
return ()
def deprecated_modules(self) -> Iterable[str]:
"""Callback returning the deprecated modules.
Returns:
collections.abc.Container of deprecated module names.
"""
return ()
def deprecated_classes(self, module: str) -> Iterable[str]:
"""Callback returning the deprecated classes of module.
Args:
module (str): name of module checked for deprecated classes
Returns:
collections.abc.Container of deprecated class names.
"""
# pylint: disable=unused-argument
return ()
def deprecated_attributes(self) -> Iterable[str]:
"""Callback returning the deprecated attributes."""
return ()
def check_deprecated_attribute(self, node: nodes.Attribute) -> None:
"""Checks if the attribute is deprecated."""
inferred_expr = safe_infer(node.expr)
if not isinstance(inferred_expr, (nodes.ClassDef, Instance, nodes.Module)):
return
attribute_qname = ".".join((inferred_expr.qname(), node.attrname))
for deprecated_name in self.deprecated_attributes():
if attribute_qname == deprecated_name:
self.add_message(
"deprecated-attribute",
node=node,
args=(attribute_qname,),
confidence=INFERENCE,
)
def check_deprecated_module(self, node: nodes.Import, mod_path: str | None) -> None:
"""Checks if the module is deprecated."""
for mod_name in self.deprecated_modules():
if mod_path == mod_name or (
mod_path and mod_path.startswith(mod_name + ".")
):
self.add_message("deprecated-module", node=node, args=mod_path)
def check_deprecated_method(self, node: nodes.Call, inferred: nodes.NodeNG) -> None:
"""Executes the checker for the given node.
This method should be called from the checker implementing this mixin.
"""
# Reject nodes which aren't of interest to us.
if not isinstance(inferred, ACCEPTABLE_NODES):
return
match node.func:
case nodes.Attribute(attrname=func_name) | nodes.Name(name=func_name):
pass
case _:
# Not interested in other nodes.
return
qnames = {inferred.qname(), func_name}
if any(name in self.deprecated_methods() for name in qnames):
self.add_message("deprecated-method", node=node, args=(func_name,))
return
num_of_args = len(node.args)
kwargs = {kw.arg for kw in node.keywords} if node.keywords else {}
deprecated_arguments = (self.deprecated_arguments(qn) for qn in qnames)
for position, arg_name in chain(*deprecated_arguments):
if arg_name in kwargs:
# function was called with deprecated argument as keyword argument
self.add_message(
"deprecated-argument", node=node, args=(arg_name, func_name)
)
elif position is not None and position < num_of_args:
# function was called with deprecated argument as positional argument
self.add_message(
"deprecated-argument", node=node, args=(arg_name, func_name)
)
def check_deprecated_class(
self, node: nodes.NodeNG, mod_name: str, class_names: Iterable[str]
) -> None:
"""Checks if the class is deprecated."""
for class_name in class_names:
if class_name in self.deprecated_classes(mod_name):
self.add_message(
"deprecated-class", node=node, args=(class_name, mod_name)
)
def check_deprecated_class_in_call(self, node: nodes.Call) -> None:
"""Checks if call the deprecated class."""
match node.func:
case nodes.Attribute(expr=nodes.Name(name=mod_name), attrname=class_name):
self.check_deprecated_class(node, mod_name, (class_name,))
| DeprecatedMixin |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-to-divide-array-into-subarrays.py | {
"start": 3880,
"end": 5348
} | class ____(object):
def __init__(self):
self.__skiplist = SkipList()
def add(self, k, m):
self.__skiplist.add([k, m, 0])
z = self.__skiplist.find([k, m, 0])
x = y = z
z = z.nexts[0]
while self.__intersect(y, z):
z = self.__skiplist.remove(z)
if x != self.__skiplist.begin():
x = x.prevs[0]
if self.__intersect(x, y):
y = self.__skiplist.remove(y)
self.__intersect(x, y)
y = x
while y != self.__skiplist.begin():
x = x.prevs[0]
if x.val[2] < y.val[2]:
break
y = self.__skiplist.remove(y)
self.__intersect(x, y)
y = x
def query(self, x):
it = self.__skiplist.lower_bound(x, cmp=lambda x, y: x[2] < y)
return it.val[0]*x + it.val[1]
def __intersect(self, x, y):
if y == self.__skiplist.end():
x.val[2] = float("inf")
return False
if x.val[0] == y.val[0]:
x.val[2] = float("inf") if x.val[1] > y.val[1] else float("-inf")
else:
x.val[2] = (y.val[1]-x.val[1])//(x.val[0]-y.val[0])
return x.val[2] >= y.val[2]
def __iter__(self):
return iter(self.__skiplist)
def __len__(self):
return len(self.__skiplist)
def __str__(self):
return str(self.__skiplist)
# prefix sum, dp, convex hull trick
| LineContainer |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 2174,
"end": 2553
} | class ____(BaseModel):
type: Literal["ConstantBackoffStrategy"]
backoff_time_in_seconds: Union[float, str] = Field(
...,
description="Backoff time in seconds.",
examples=[30, 30.5, "{{ config['backoff_time'] }}"],
title="Backoff Time",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
| ConstantBackoffStrategy |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_facets_performance.py | {
"start": 5006,
"end": 8779
} | class ____(
OrganizationEventsFacetsPerformanceEndpointBase
):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, organization: Organization) -> Response:
try:
snuba_params, aggregate_column, filter_query = self._setup(request, organization)
except NoProjects:
return Response([])
tag_key = request.GET.get("tagKey")
num_buckets_per_key_s = request.GET.get("numBucketsPerKey")
per_page_s = request.GET.get("per_page", DEFAULT_TAG_KEY_LIMIT)
if not num_buckets_per_key_s:
raise ParseError(
detail="'numBucketsPerKey' must be provided for the performance histogram."
)
try:
per_page = int(per_page_s)
num_buckets_per_key = int(num_buckets_per_key_s)
except ValueError:
raise ParseError(detail="Bucket and tag key per_pages must be numeric.")
if per_page * num_buckets_per_key > 500:
raise ParseError(
detail="The number of total buckets ('per_page' * 'numBucketsPerKey') cannot exceed 500"
)
if not tag_key:
raise ParseError(detail="'tagKey' must be provided when using histograms.")
if tag_key in TAG_ALIASES:
tag_key = TAG_ALIASES[tag_key]
def data_fn(offset, limit, raw_limit):
with sentry_sdk.start_span(op="discover.endpoint", name="discover_query"):
referrer = "api.organization-events-facets-performance-histogram"
top_tags = query_top_tags(
tag_key=tag_key,
limit=limit,
filter_query=filter_query,
aggregate_column=aggregate_column,
snuba_params=snuba_params,
orderby=self.get_orderby(request),
offset=offset,
referrer=referrer,
)
if not top_tags:
return {"tags": [], "histogram": {"data": []}}
# Only pass exactly the number of tags so histogram fetches correct number of rows
histogram_top_tags = top_tags[0:raw_limit]
histogram = query_facet_performance_key_histogram(
top_tags=histogram_top_tags,
tag_key=tag_key,
filter_query=filter_query,
aggregate_column=aggregate_column,
referrer=referrer,
snuba_params=snuba_params,
limit=raw_limit,
num_buckets_per_key=num_buckets_per_key,
)
if not histogram:
return {"tags": top_tags, "histogram": {"data": []}}
for row in histogram["data"]:
row["tags_key"] = tagstore.backend.get_standardized_key(row["tags_key"])
return {"tags": top_tags, "histogram": histogram}
def on_results(data):
return {
"tags": self.handle_results_with_meta(
request, organization, snuba_params.project_ids, {"data": data["tags"]}
),
"histogram": self.handle_results_with_meta(
request, organization, snuba_params.project_ids, data["histogram"]
),
}
with handle_query_errors():
return self.paginate(
request=request,
paginator=HistogramPaginator(data_fn=data_fn),
on_results=on_results,
default_per_page=DEFAULT_TAG_KEY_LIMIT,
max_per_page=50,
)
| OrganizationEventsFacetsPerformanceHistogramEndpoint |
python | automl__auto-sklearn | autosklearn/util/progress_bar.py | {
"start": 144,
"end": 3024
} | class ____(Thread):
"""A Thread that displays a tqdm progress bar in the console.
Treat this class as an ordinary thread. So to display a progress bar,
call start() on an instance of this class. To wait for the thread to
terminate call join(), which will max out the progress bar,
therefore terminate this thread immediately.
Parameters
----------
total : int
The total amount that should be reached by the progress bar once it finishes.
update_interval : float, default=1.0
Specifies how frequently the progress bar is updated (in seconds).
disable : bool, default=False
Turns on or off the progress bar. If True, this thread does not get
initialized and won't be started if start() is called.
tqdm_kwargs : Any, optional
Keyword arguments that are passed into tqdm's constructor. Refer to:
`tqdm <https://tqdm.github.io/docs/tqdm/>`_ for a list of parameters that
tqdm accepts. Note that 'postfix' cannot be specified in the kwargs since it is
already passed into tqdm by this class.
Examples
--------
.. code:: python
progress_bar = ProgressBar(
total=10,
desc="Executing code that runs for 10 seconds",
colour="green",
)
# colour is a tqdm parameter passed as a tqdm_kwargs
try:
progress_bar.start()
# some code that runs for 10 seconds
except SomeException:
# something went wrong
finally:
progress_bar.join()
# perform some cleanup
"""
def __init__(
self,
total: int,
update_interval: float = 1.0,
disable: bool = False,
**tqdm_kwargs: Any,
):
self.disable = disable
if not disable:
super().__init__(name="_progressbar_")
self.total = total
self.update_interval = update_interval
self.terminated: bool = False
self.tqdm_kwargs = tqdm_kwargs
def start(self) -> None:
"""Start a new thread that calls the run() method."""
if not self.disable:
super().start()
def run(self) -> None:
"""Display a tqdm progress bar in the console."""
if not self.disable:
for _ in trange(
self.total,
postfix=f"The total time budget for this task is "
f"{datetime.timedelta(seconds=self.total)}",
**self.tqdm_kwargs,
):
if not self.terminated:
time.sleep(self.update_interval)
def join(self, timeout: float | None = None) -> None:
"""Maxes out the progress bar and thereby terminating this thread."""
if not self.disable:
self.terminated = True
super().join(timeout)
| ProgressBar |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/errors.py | {
"start": 4412,
"end": 4489
} | class ____(OAuth2Error):
error = 'missing_token_type'
| MissingTokenTypeError |
python | PrefectHQ__prefect | src/prefect/exceptions.py | {
"start": 8248,
"end": 8423
} | class ____(PrefectSignal):
"""
Raised when the API sends an 'ABORT' instruction during state proposal.
Indicates that the run should exit immediately.
"""
| Abort |
python | explosion__spaCy | spacy/lang/sq/__init__.py | {
"start": 152,
"end": 251
} | class ____(Language):
lang = "sq"
Defaults = AlbanianDefaults
__all__ = ["Albanian"]
| Albanian |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchClass5.py | {
"start": 115,
"end": 137
} | class ____:
a: int
| A |
python | pyparsing__pyparsing | pyparsing/util.py | {
"start": 3908,
"end": 5001
} | class ____:
"""
A memoizing mapping that retains `capacity` deleted items
The memo tracks retained items by their access order; once `capacity` items
are retained, the least recently used item is discarded.
"""
def __init__(self, capacity):
self._capacity = capacity
self._active = {}
self._memory = {}
def __getitem__(self, key):
try:
return self._active[key]
except KeyError:
self._memory[key] = self._memory.pop(key)
return self._memory[key]
def __setitem__(self, key, value):
self._memory.pop(key, None)
self._active[key] = value
def __delitem__(self, key):
try:
value = self._active.pop(key)
except KeyError:
pass
else:
oldest_keys = list(self._memory)[: -(self._capacity + 1)]
for key_to_delete in oldest_keys:
self._memory.pop(key_to_delete)
self._memory[key] = value
def clear(self):
self._active.clear()
self._memory.clear()
| LRUMemo |
python | python__mypy | mypy/semanal_pass1.py | {
"start": 538,
"end": 5584
} | class ____(TraverserVisitor):
"""Analyze reachability of blocks and imports and other local things.
This runs before semantic analysis, so names have not been bound. Imports are
also not resolved yet, so we can only access the current module.
This determines static reachability of blocks and imports due to version and
platform checks, among others.
The main entry point is 'visit_file'.
Reachability of imports needs to be determined very early in the build since
this affects which modules will ultimately be processed.
Consider this example:
import sys
def do_stuff() -> None:
if sys.version_info >= (3, 10):
import xyz # Only available in Python 3.10+
xyz.whatever()
...
The block containing 'import xyz' is unreachable in Python 3 mode. The import
shouldn't be processed in Python 3 mode, even if the module happens to exist.
"""
def visit_file(self, file: MypyFile, fnam: str, mod_id: str, options: Options) -> None:
self.platform = options.platform
self.cur_mod_id = mod_id
self.cur_mod_node = file
self.options = options
self.is_global_scope = True
self.skipped_lines: set[int] = set()
for i, defn in enumerate(file.defs):
defn.accept(self)
if isinstance(defn, AssertStmt) and assert_will_always_fail(defn, options):
# We've encountered an assert that's always false,
# e.g. assert sys.platform == 'lol'. Truncate the
# list of statements. This mutates file.defs too.
if i < len(file.defs) - 1:
next_def, last = file.defs[i + 1], file.defs[-1]
if last.end_line is not None:
# We are on a Python version recent enough to support end lines.
self.skipped_lines |= set(range(next_def.line, last.end_line + 1))
file.imports = [
i for i in file.imports if (i.line, i.column) <= (defn.line, defn.column)
]
del file.defs[i + 1 :]
break
file.skipped_lines = self.skipped_lines
def visit_func_def(self, node: FuncDef) -> None:
old_global_scope = self.is_global_scope
self.is_global_scope = False
super().visit_func_def(node)
self.is_global_scope = old_global_scope
file_node = self.cur_mod_node
if (
self.is_global_scope
and file_node.is_stub
and node.name == "__getattr__"
and file_node.is_package_init_file()
):
# __init__.pyi with __getattr__ means that any submodules are assumed
# to exist, even if there is no stub. Note that we can't verify that the
# return type is compatible, since we haven't bound types yet.
file_node.is_partial_stub_package = True
def visit_class_def(self, node: ClassDef) -> None:
old_global_scope = self.is_global_scope
self.is_global_scope = False
super().visit_class_def(node)
self.is_global_scope = old_global_scope
def visit_import_from(self, node: ImportFrom) -> None:
node.is_top_level = self.is_global_scope
super().visit_import_from(node)
def visit_import_all(self, node: ImportAll) -> None:
node.is_top_level = self.is_global_scope
super().visit_import_all(node)
def visit_import(self, node: Import) -> None:
node.is_top_level = self.is_global_scope
super().visit_import(node)
def visit_if_stmt(self, s: IfStmt) -> None:
infer_reachability_of_if_statement(s, self.options)
for expr in s.expr:
expr.accept(self)
for node in s.body:
node.accept(self)
if s.else_body:
s.else_body.accept(self)
def visit_block(self, b: Block) -> None:
if b.is_unreachable:
if b.end_line is not None:
# We are on a Python version recent enough to support end lines.
self.skipped_lines |= set(range(b.line, b.end_line + 1))
return
super().visit_block(b)
def visit_match_stmt(self, s: MatchStmt) -> None:
infer_reachability_of_match_statement(s, self.options)
for guard in s.guards:
if guard is not None:
guard.accept(self)
for body in s.bodies:
body.accept(self)
# The remaining methods are an optimization: don't visit nested expressions
# of common statements, since they can have no effect.
def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
pass
def visit_expression_stmt(self, s: ExpressionStmt) -> None:
pass
def visit_return_stmt(self, s: ReturnStmt) -> None:
pass
def visit_for_stmt(self, s: ForStmt) -> None:
s.body.accept(self)
if s.else_body is not None:
s.else_body.accept(self)
| SemanticAnalyzerPreAnalysis |
python | MongoEngine__mongoengine | mongoengine/base/fields.py | {
"start": 21897,
"end": 22677
} | class ____(BaseField):
"""A field wrapper around MongoDB's ObjectIds."""
def to_python(self, value):
try:
if not isinstance(value, ObjectId):
value = ObjectId(value)
except Exception:
pass
return value
def to_mongo(self, value):
if isinstance(value, ObjectId):
return value
try:
return ObjectId(str(value))
except Exception as e:
self.error(str(e))
def prepare_query_value(self, op, value):
if value is None:
return value
return self.to_mongo(value)
def validate(self, value):
try:
ObjectId(str(value))
except Exception:
self.error("Invalid ObjectID")
| ObjectIdField |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/cursor.py | {
"start": 42013,
"end": 42699
} | class ____(NoCursorFetchStrategy):
"""Cursor strategy for a DML result that has no open cursor.
This is a result set that does not return rows, i.e. for an INSERT,
UPDATE, DELETE that does not include RETURNING.
"""
__slots__ = ()
def _non_result(
self,
result: CursorResult[Unpack[TupleAny]],
default: Any,
err: Optional[BaseException] = None,
) -> Any:
# we only expect to have a _NoResultMetaData() here right now.
assert not result._metadata.returns_rows
result._metadata._we_dont_return_rows(err) # type: ignore[union-attr]
_NO_CURSOR_DML = NoCursorDMLFetchStrategy()
| NoCursorDMLFetchStrategy |
python | numba__numba | numba/tests/doc_examples/test_structref_usage.py | {
"start": 501,
"end": 1022
} | class ____(types.StructRef):
def preprocess_fields(self, fields):
# This method is called by the type constructor for additional
# preprocessing on the fields.
# Here, we don't want the struct to take Literal types.
return tuple((name, types.unliteral(typ)) for name, typ in fields)
# Define a Python type that can be use as a proxy to the StructRef
# allocated inside Numba. Users can construct the StructRef via
# the constructor for this type in python code and jit-code.
| MyStructType |
python | pytorch__pytorch | torch/utils/data/datapipes/datapipe.py | {
"start": 974,
"end": 1419
} | class ____(list[_T]):
def __init__(self, items: Iterable[_T]) -> None:
items = list(items)
super().__init__(items)
self.items = items
def as_str(self, indent: str = "") -> str:
return indent + "[" + ", ".join(str(i) for i in iter(self)) + "]"
def __iter__(self) -> Iterator[_T]:
yield from super().__iter__()
def raw_iterator(self) -> Iterator[_T]:
yield from self.items
| DataChunk |
python | pandas-dev__pandas | pandas/tests/io/formats/test_console.py | {
"start": 83,
"end": 2435
} | class ____:
"""
Used to add a side effect when accessing the 'encoding' property. If the
side effect is a str in nature, the value will be returned. Otherwise, the
side effect should be an exception that will be raised.
"""
def __init__(self, encoding) -> None:
super().__init__()
self.val = encoding
@property
def encoding(self):
return self.raise_or_return(self.val)
@staticmethod
def raise_or_return(val):
if isinstance(val, str):
return val
else:
raise val
@pytest.mark.parametrize("empty,filled", [["stdin", "stdout"], ["stdout", "stdin"]])
def test_detect_console_encoding_from_stdout_stdin(monkeypatch, empty, filled):
# Ensures that when sys.stdout.encoding or sys.stdin.encoding is used when
# they have values filled.
# GH 21552
with monkeypatch.context() as context:
context.setattr(f"sys.{empty}", MockEncoding(""))
context.setattr(f"sys.{filled}", MockEncoding(filled))
assert detect_console_encoding() == filled
@pytest.mark.parametrize("encoding", [AttributeError, OSError, "ascii"])
def test_detect_console_encoding_fallback_to_locale(monkeypatch, encoding):
# GH 21552
with monkeypatch.context() as context:
context.setattr("locale.getpreferredencoding", lambda: "foo")
context.setattr("sys.stdout", MockEncoding(encoding))
assert detect_console_encoding() == "foo"
@pytest.mark.parametrize(
"std,locale",
[
["ascii", "ascii"],
["ascii", locale.Error],
[AttributeError, "ascii"],
[AttributeError, locale.Error],
[OSError, "ascii"],
[OSError, locale.Error],
],
)
def test_detect_console_encoding_fallback_to_default(monkeypatch, std, locale):
# When both the stdout/stdin encoding and locale preferred encoding checks
# fail (or return 'ascii', we should default to the sys default encoding.
# GH 21552
with monkeypatch.context() as context:
context.setattr(
"locale.getpreferredencoding", lambda: MockEncoding.raise_or_return(locale)
)
context.setattr("sys.stdout", MockEncoding(std))
context.setattr("sys.getdefaultencoding", lambda: "sysDefaultEncoding")
assert detect_console_encoding() == "sysDefaultEncoding"
| MockEncoding |
python | google__jax | tests/pallas/tpu_all_gather_test.py | {
"start": 2150,
"end": 4469
} | class ____(jtu.JaxTestCase):
def setUp(self):
if not jtu.test_device_matches(["tpu"]):
self.skipTest("Need TPU devices")
if not jtu.is_device_tpu(version=5, variant="e"):
# TODO(sharadmv,apaszke): expand support to more versions
self.skipTest("Currently only supported on TPU v5e")
super().setUp()
@hp.given(hps.booleans(), _array_shapes(), _array_dtypes())
def test_all_gather_1d_mesh(self, is_vmem, shape, dtype):
if jax.device_count() < 2:
self.skipTest("Need more devices")
memory_space = pltpu.VMEM if is_vmem else pltpu.ANY
mesh_shape = (jax.device_count(),)
mesh = jax.sharding.Mesh(
mesh_utils.create_device_mesh(mesh_shape, jax.devices()), ["x"]
)
leading, *rest = shape
shape = (mesh.shape["x"] * leading, *rest)
x = random.normal(random.key(0), shape, dtype=jnp.float32).astype(dtype)
x_sharded = jax.device_put(x, jax.sharding.NamedSharding(mesh, P("x")))
y = all_gather.all_gather(x_sharded, mesh=mesh, axis_name="x",
memory_space=memory_space)
np.testing.assert_array_equal(y, x)
@hp.given(hps.booleans(), _array_shapes(), _array_dtypes(),
hps.sampled_from(["x", "y"]))
def test_all_gather_2d_mesh(self, is_vmem, shape, dtype,
axis_name):
if jax.device_count() < 2:
self.skipTest("Need more devices")
if jax.device_count() % 2:
self.skipTest("Need an even number of devices")
memory_space = pltpu.VMEM if is_vmem else pltpu.ANY
mesh_shape = (2, jax.device_count() // 2)
mesh = jax.sharding.Mesh(
mesh_utils.create_device_mesh(mesh_shape, jax.devices()), ["x", "y"]
)
if axis_name == "x":
sharding = jax.sharding.NamedSharding(mesh, P("x", None))
else:
sharding = jax.sharding.NamedSharding(mesh, P("y", None))
leading, *rest = shape
shape = (mesh.shape[axis_name] * leading, *rest)
x = random.normal(random.key(0), shape, dtype=jnp.float32).astype(dtype)
x_sharded = jax.device_put(x, sharding)
y = all_gather.all_gather(x_sharded, mesh=mesh, axis_name=axis_name,
memory_space=memory_space)
np.testing.assert_array_equal(y, x)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| AllGatherTest |
python | matplotlib__matplotlib | lib/matplotlib/transforms.py | {
"start": 84268,
"end": 87003
} | class ____(Affine2DBase):
"""
A composite transform formed by applying transform *a* then transform *b*.
This version is an optimization that handles the case where both *a*
and *b* are 2D affines.
"""
def __init__(self, a, b, **kwargs):
"""
Create a new composite transform that is the result of
applying `Affine2DBase` *a* then `Affine2DBase` *b*.
You will generally not call this constructor directly but write ``a +
b`` instead, which will automatically choose the best kind of composite
transform instance to create.
"""
if not a.is_affine or not b.is_affine:
raise ValueError("'a' and 'b' must be affine transforms")
if a.output_dims != b.input_dims:
raise ValueError("The output dimension of 'a' must be equal to "
"the input dimensions of 'b'")
self.input_dims = a.input_dims
self.output_dims = b.output_dims
super().__init__(**kwargs)
self._a = a
self._b = b
self.set_children(a, b)
self._mtx = None
@property
def depth(self):
return self._a.depth + self._b.depth
def _iter_break_from_left_to_right(self):
for left, right in self._a._iter_break_from_left_to_right():
yield left, right + self._b
for left, right in self._b._iter_break_from_left_to_right():
yield self._a + left, right
__str__ = _make_str_method("_a", "_b")
def get_matrix(self):
# docstring inherited
if self._invalid:
self._mtx = np.dot(
self._b.get_matrix(),
self._a.get_matrix())
self._inverted = None
self._invalid = 0
return self._mtx
def composite_transform_factory(a, b):
"""
Create a new composite transform that is the result of applying
transform a then transform b.
Shortcut versions of the blended transform are provided for the
case where both child transforms are affine, or one or the other
is the identity transform.
Composite transforms may also be created using the '+' operator,
e.g.::
c = a + b
"""
# check to see if any of a or b are IdentityTransforms. We use
# isinstance here to guarantee that the transforms will *always*
# be IdentityTransforms. Since TransformWrappers are mutable,
# use of equality here would be wrong.
if isinstance(a, IdentityTransform):
return b
elif isinstance(b, IdentityTransform):
return a
elif isinstance(a, Affine2D) and isinstance(b, Affine2D):
return CompositeAffine2D(a, b)
return CompositeGenericTransform(a, b)
| CompositeAffine2D |
python | huggingface__transformers | src/transformers/models/mask2former/modeling_mask2former.py | {
"start": 92480,
"end": 93048
} | class ____(nn.Module):
def __init__(self, in_dim: int, out_dim: int, activation: nn.Module) -> None:
super().__init__()
self.layers = [nn.Linear(in_dim, out_dim), activation]
# Maintain submodule indexing as if part of a Sequential block
for i, layer in enumerate(self.layers):
self.add_module(str(i), layer)
def forward(self, input: Tensor) -> Tensor:
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
| Mask2FormerPredictionBlock |
python | pydantic__pydantic | pydantic/functional_validators.py | {
"start": 21692,
"end": 30227
} | class ____(Protocol):
"""A `@model_validator` decorated function signature. This is used when `mode='before'`."""
def __call__( # noqa: D102
self,
cls: Any,
# this can be a dict, a model instance
# or anything else that gets passed to validate_python
# thus validators _must_ handle all cases
value: Any,
info: core_schema.ValidationInfo[Any],
/,
) -> Any: ...
ModelAfterValidatorWithoutInfo = Callable[[_ModelType], _ModelType]
"""A `@model_validator` decorated function signature. This is used when `mode='after'` and the function does not
have info argument.
"""
ModelAfterValidator = Callable[[_ModelType, core_schema.ValidationInfo[Any]], _ModelType]
"""A `@model_validator` decorated function signature. This is used when `mode='after'`."""
_AnyModelWrapValidator = Union[ModelWrapValidator[_ModelType], ModelWrapValidatorWithoutInfo[_ModelType]]
_AnyModelBeforeValidator = Union[
FreeModelBeforeValidator, ModelBeforeValidator, FreeModelBeforeValidatorWithoutInfo, ModelBeforeValidatorWithoutInfo
]
_AnyModelAfterValidator = Union[ModelAfterValidator[_ModelType], ModelAfterValidatorWithoutInfo[_ModelType]]
@overload
def model_validator(
*,
mode: Literal['wrap'],
) -> Callable[
[_AnyModelWrapValidator[_ModelType]], _decorators.PydanticDescriptorProxy[_decorators.ModelValidatorDecoratorInfo]
]: ...
@overload
def model_validator(
*,
mode: Literal['before'],
) -> Callable[
[_AnyModelBeforeValidator], _decorators.PydanticDescriptorProxy[_decorators.ModelValidatorDecoratorInfo]
]: ...
@overload
def model_validator(
*,
mode: Literal['after'],
) -> Callable[
[_AnyModelAfterValidator[_ModelType]], _decorators.PydanticDescriptorProxy[_decorators.ModelValidatorDecoratorInfo]
]: ...
def model_validator(
*,
mode: Literal['wrap', 'before', 'after'],
) -> Any:
"""!!! abstract "Usage Documentation"
[Model Validators](../concepts/validators.md#model-validators)
Decorate model methods for validation purposes.
Example usage:
```python
from typing_extensions import Self
from pydantic import BaseModel, ValidationError, model_validator
class Square(BaseModel):
width: float
height: float
@model_validator(mode='after')
def verify_square(self) -> Self:
if self.width != self.height:
raise ValueError('width and height do not match')
return self
s = Square(width=1, height=1)
print(repr(s))
#> Square(width=1.0, height=1.0)
try:
Square(width=1, height=2)
except ValidationError as e:
print(e)
'''
1 validation error for Square
Value error, width and height do not match [type=value_error, input_value={'width': 1, 'height': 2}, input_type=dict]
'''
```
For more in depth examples, see [Model Validators](../concepts/validators.md#model-validators).
Args:
mode: A required string literal that specifies the validation mode.
It can be one of the following: 'wrap', 'before', or 'after'.
Returns:
A decorator that can be used to decorate a function to be used as a model validator.
"""
def dec(f: Any) -> _decorators.PydanticDescriptorProxy[Any]:
# auto apply the @classmethod decorator. NOTE: in V3, do not apply the conversion for 'after' validators:
f = _decorators.ensure_classmethod_based_on_signature(f)
if mode == 'after' and isinstance(f, classmethod):
warnings.warn(
category=PydanticDeprecatedSince212,
message=(
"Using `@model_validator` with mode='after' on a classmethod is deprecated. Instead, use an instance method. "
f'See the documentation at https://docs.pydantic.dev/{version_short()}/concepts/validators/#model-after-validator.'
),
stacklevel=2,
)
dec_info = _decorators.ModelValidatorDecoratorInfo(mode=mode)
return _decorators.PydanticDescriptorProxy(f, dec_info)
return dec
AnyType = TypeVar('AnyType')
if TYPE_CHECKING:
# If we add configurable attributes to IsInstance, we'd probably need to stop hiding it from type checkers like this
InstanceOf = Annotated[AnyType, ...] # `IsInstance[Sequence]` will be recognized by type checkers as `Sequence`
else:
@dataclasses.dataclass(**_internal_dataclass.slots_true)
class InstanceOf:
'''Generic type for annotating a type that is an instance of a given class.
Example:
```python
from pydantic import BaseModel, InstanceOf
class Foo:
...
class Bar(BaseModel):
foo: InstanceOf[Foo]
Bar(foo=Foo())
try:
Bar(foo=42)
except ValidationError as e:
print(e)
"""
[
β {
β β 'type': 'is_instance_of',
β β 'loc': ('foo',),
β β 'msg': 'Input should be an instance of Foo',
β β 'input': 42,
β β 'ctx': {'class': 'Foo'},
β β 'url': 'https://errors.pydantic.dev/0.38.0/v/is_instance_of'
β }
]
"""
```
'''
@classmethod
def __class_getitem__(cls, item: AnyType) -> AnyType:
return Annotated[item, cls()]
@classmethod
def __get_pydantic_core_schema__(cls, source: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
from pydantic import PydanticSchemaGenerationError
# use the generic _origin_ as the second argument to isinstance when appropriate
instance_of_schema = core_schema.is_instance_schema(_generics.get_origin(source) or source)
try:
# Try to generate the "standard" schema, which will be used when loading from JSON
original_schema = handler(source)
except PydanticSchemaGenerationError:
# If that fails, just produce a schema that can validate from python
return instance_of_schema
else:
# Use the "original" approach to serialization
instance_of_schema['serialization'] = core_schema.wrap_serializer_function_ser_schema(
function=lambda v, h: h(v), schema=original_schema
)
return core_schema.json_or_python_schema(python_schema=instance_of_schema, json_schema=original_schema)
__hash__ = object.__hash__
if TYPE_CHECKING:
SkipValidation = Annotated[AnyType, ...] # SkipValidation[list[str]] will be treated by type checkers as list[str]
else:
@dataclasses.dataclass(**_internal_dataclass.slots_true)
class SkipValidation:
"""If this is applied as an annotation (e.g., via `x: Annotated[int, SkipValidation]`), validation will be
skipped. You can also use `SkipValidation[int]` as a shorthand for `Annotated[int, SkipValidation]`.
This can be useful if you want to use a type annotation for documentation/IDE/type-checking purposes,
and know that it is safe to skip validation for one or more of the fields.
Because this converts the validation schema to `any_schema`, subsequent annotation-applied transformations
may not have the expected effects. Therefore, when used, this annotation should generally be the final
annotation applied to a type.
"""
def __class_getitem__(cls, item: Any) -> Any:
return Annotated[item, SkipValidation()]
@classmethod
def __get_pydantic_core_schema__(cls, source: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
with warnings.catch_warnings():
warnings.simplefilter('ignore', ArbitraryTypeWarning)
original_schema = handler(source)
metadata = {'pydantic_js_annotation_functions': [lambda _c, h: h(original_schema)]}
return core_schema.any_schema(
metadata=metadata,
serialization=core_schema.wrap_serializer_function_ser_schema(
function=lambda v, h: h(v), schema=original_schema
),
)
__hash__ = object.__hash__
_FromTypeT = TypeVar('_FromTypeT')
| ModelBeforeValidator |
python | numba__numba | numba/tests/test_parallel_backend.py | {
"start": 11571,
"end": 13910
} | class ____(TestInSubprocess, TestParallelBackendBase):
"""
This is quite contrived, for each test in the TestParallelBackend tests it
generates a test that will run the TestParallelBackend test in a new python
process with an environment modified to ensure a specific threadsafe backend
is used. This is with view of testing the backends independently and in an
isolated manner such that if they hang/crash/have issues, it doesn't kill
the test suite.
"""
_DEBUG = False
@classmethod
def _inject(cls, p, name, backend, backend_guard):
themod = cls.__module__
thecls = TestParallelBackend.__name__
methname = "test_" + p + '_' + name
injected_method = '%s.%s.%s' % (themod, thecls, methname)
def test_template(self):
o, e = self.run_test_in_separate_process(injected_method, backend)
if self._DEBUG:
print('stdout:\n "%s"\n stderr:\n "%s"' % (o, e))
# If the test was skipped in the subprocess, then mark this as a
# skipped test.
m = re.search(r"\.\.\. skipped '(.*?)'", e)
if m is not None:
self.skipTest(m.group(1))
self.assertIn('OK', e)
self.assertTrue('FAIL' not in e)
self.assertTrue('ERROR' not in e)
injected_test = "test_%s_%s_%s" % (p, name, backend)
# Mark as long_running
setattr(cls, injected_test,
tag('long_running')(backend_guard(test_template)))
@classmethod
def generate(cls):
for backend, backend_guard in cls.backends.items():
for p in cls.parallelism:
for name in cls.runners.keys():
# handle known problem cases...
# GNU OpenMP is not fork safe
if (p in ('multiprocessing_fork', 'random') and
backend == 'omp' and
sys.platform.startswith('linux')):
continue
# workqueue is not thread safe
if (p in ('threading', 'random') and
backend == 'workqueue'):
continue
cls._inject(p, name, backend, backend_guard)
TestSpecificBackend.generate()
| TestSpecificBackend |
python | huggingface__transformers | tests/models/zoedepth/test_image_processing_zoedepth.py | {
"start": 3847,
"end": 10576
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = ZoeDepthImageProcessor if is_vision_available() else None
fast_image_processing_class = ZoeDepthImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = ZoeDepthImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "ensure_multiple_of"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "rescale_factor"))
self.assertTrue(hasattr(image_processing, "do_pad"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 18, "width": 18})
for image_processing_class in self.image_processor_list:
modified_dict = self.image_processor_dict
modified_dict["size"] = 42
image_processor = image_processing_class(**modified_dict)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def test_ensure_multiple_of(self):
# Test variable by turning off all other variables which affect the size, size which is not multiple of 32
image = np.zeros((489, 640, 3))
size = {"height": 380, "width": 513}
multiple = 32
for image_processor_class in self.image_processor_list:
image_processor = image_processor_class(
do_pad=False, ensure_multiple_of=multiple, size=size, keep_aspect_ratio=False
)
pixel_values = image_processor(image, return_tensors="pt").pixel_values
self.assertEqual(list(pixel_values.shape), [1, 3, 384, 512])
self.assertTrue(pixel_values.shape[2] % multiple == 0)
self.assertTrue(pixel_values.shape[3] % multiple == 0)
# Test variable by turning off all other variables which affect the size, size which is already multiple of 32
image = np.zeros((511, 511, 3))
height, width = 512, 512
size = {"height": height, "width": width}
multiple = 32
for image_processor_class in self.image_processor_list:
image_processor = image_processor_class(
do_pad=False, ensure_multiple_of=multiple, size=size, keep_aspect_ratio=False
)
pixel_values = image_processor(image, return_tensors="pt").pixel_values
self.assertEqual(list(pixel_values.shape), [1, 3, height, width])
self.assertTrue(pixel_values.shape[2] % multiple == 0)
self.assertTrue(pixel_values.shape[3] % multiple == 0)
def test_keep_aspect_ratio(self):
# Test `keep_aspect_ratio=True` by turning off all other variables which affect the size
height, width = 489, 640
image = np.zeros((height, width, 3))
size = {"height": 512, "width": 512}
for image_processor_class in self.image_processor_list:
image_processor = image_processor_class(
do_pad=False, keep_aspect_ratio=True, size=size, ensure_multiple_of=1
)
pixel_values = image_processor(image, return_tensors="pt").pixel_values
# As can be seen, the image is resized to the maximum size that fits in the specified size
self.assertEqual(list(pixel_values.shape), [1, 3, 512, 670])
# Test `keep_aspect_ratio=False` by turning off all other variables which affect the size
for image_processor_class in self.image_processor_list:
image_processor = image_processor_class(
do_pad=False, keep_aspect_ratio=False, size=size, ensure_multiple_of=1
)
pixel_values = image_processor(image, return_tensors="pt").pixel_values
# As can be seen, the size is respected
self.assertEqual(list(pixel_values.shape), [1, 3, size["height"], size["width"]])
# Test `keep_aspect_ratio=True` with `ensure_multiple_of` set
image = np.zeros((489, 640, 3))
size = {"height": 511, "width": 511}
multiple = 32
for image_processor_class in self.image_processor_list:
image_processor = image_processor_class(size=size, keep_aspect_ratio=True, ensure_multiple_of=multiple)
pixel_values = image_processor(image, return_tensors="pt").pixel_values
self.assertEqual(list(pixel_values.shape), [1, 3, 512, 672])
self.assertTrue(pixel_values.shape[2] % multiple == 0)
self.assertTrue(pixel_values.shape[3] % multiple == 0)
# extend this test to check if removal of padding works fine!
def test_post_processing_equivalence(self):
outputs = self.image_processor_tester.prepare_depth_outputs()
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
source_sizes = [outputs.predicted_depth.shape[1:]] * self.image_processor_tester.batch_size
target_sizes = [
torch.Size([outputs.predicted_depth.shape[1] // 2, *(outputs.predicted_depth.shape[2:])])
] * self.image_processor_tester.batch_size
processed_fast = image_processor_fast.post_process_depth_estimation(
outputs,
source_sizes=source_sizes,
target_sizes=target_sizes,
)
processed_slow = image_processor_slow.post_process_depth_estimation(
outputs,
source_sizes=source_sizes,
target_sizes=target_sizes,
)
for pred_fast, pred_slow in zip(processed_fast, processed_slow):
depth_fast = pred_fast["predicted_depth"]
depth_slow = pred_slow["predicted_depth"]
torch.testing.assert_close(depth_fast, depth_slow, atol=1e-1, rtol=1e-3)
self.assertLessEqual(torch.mean(torch.abs(depth_fast.float() - depth_slow.float())).item(), 5e-3)
| ZoeDepthImageProcessingTest |
python | django__django | tests/middleware/tests.py | {
"start": 35024,
"end": 43236
} | class ____(SimpleTestCase):
"""
Tests the GZipMiddleware.
"""
short_string = b"This string is too short to be worth compressing."
compressible_string = b"a" * 500
incompressible_string = b"".join(
int2byte(random.randint(0, 255)) for _ in range(500)
)
sequence = [b"a" * 500, b"b" * 200, b"a" * 300]
sequence_unicode = ["a" * 500, "Γ©" * 200, "a" * 300]
request_factory = RequestFactory()
def setUp(self):
self.req = self.request_factory.get("/")
self.req.META["HTTP_ACCEPT_ENCODING"] = "gzip, deflate"
self.req.META["HTTP_USER_AGENT"] = (
"Mozilla/5.0 (Windows NT 5.1; rv:9.0.1) Gecko/20100101 Firefox/9.0.1"
)
self.resp = HttpResponse()
self.resp.status_code = 200
self.resp.content = self.compressible_string
self.resp["Content-Type"] = "text/html; charset=UTF-8"
def get_response(self, request):
return self.resp
@staticmethod
def decompress(gzipped_string):
# Use zlib to ensure gzipped_string contains exactly one gzip stream.
return zlib.decompress(gzipped_string, zlib.MAX_WBITS | 16)
@staticmethod
def get_mtime(gzipped_string):
with gzip.GzipFile(mode="rb", fileobj=BytesIO(gzipped_string)) as f:
f.read() # must read the data before accessing the header
return f.mtime
def test_compress_response(self):
"""
Compression is performed on responses with compressible content.
"""
r = GZipMiddleware(self.get_response)(self.req)
self.assertEqual(self.decompress(r.content), self.compressible_string)
self.assertEqual(r.get("Content-Encoding"), "gzip")
self.assertEqual(r.get("Content-Length"), str(len(r.content)))
def test_compress_streaming_response(self):
"""
Compression is performed on responses with streaming content.
"""
def get_stream_response(request):
resp = StreamingHttpResponse(self.sequence)
resp["Content-Type"] = "text/html; charset=UTF-8"
return resp
r = GZipMiddleware(get_stream_response)(self.req)
self.assertEqual(self.decompress(b"".join(r)), b"".join(self.sequence))
self.assertEqual(r.get("Content-Encoding"), "gzip")
self.assertFalse(r.has_header("Content-Length"))
async def test_compress_async_streaming_response(self):
"""
Compression is performed on responses with async streaming content.
"""
async def get_stream_response(request):
async def iterator():
for chunk in self.sequence:
yield chunk
resp = StreamingHttpResponse(iterator())
resp["Content-Type"] = "text/html; charset=UTF-8"
return resp
r = await GZipMiddleware(get_stream_response)(self.req)
self.assertEqual(
self.decompress(b"".join([chunk async for chunk in r])),
b"".join(self.sequence),
)
self.assertEqual(r.get("Content-Encoding"), "gzip")
self.assertFalse(r.has_header("Content-Length"))
def test_compress_streaming_response_unicode(self):
"""
Compression is performed on responses with streaming Unicode content.
"""
def get_stream_response_unicode(request):
resp = StreamingHttpResponse(self.sequence_unicode)
resp["Content-Type"] = "text/html; charset=UTF-8"
return resp
r = GZipMiddleware(get_stream_response_unicode)(self.req)
self.assertEqual(
self.decompress(b"".join(r)),
b"".join(x.encode() for x in self.sequence_unicode),
)
self.assertEqual(r.get("Content-Encoding"), "gzip")
self.assertFalse(r.has_header("Content-Length"))
def test_compress_file_response(self):
"""
Compression is performed on FileResponse.
"""
with open(__file__, "rb") as file1:
def get_response(req):
file_resp = FileResponse(file1)
file_resp["Content-Type"] = "text/html; charset=UTF-8"
return file_resp
r = GZipMiddleware(get_response)(self.req)
with open(__file__, "rb") as file2:
self.assertEqual(self.decompress(b"".join(r)), file2.read())
self.assertEqual(r.get("Content-Encoding"), "gzip")
self.assertIsNot(r.file_to_stream, file1)
def test_compress_non_200_response(self):
"""
Compression is performed on responses with a status other than 200
(#10762).
"""
self.resp.status_code = 404
r = GZipMiddleware(self.get_response)(self.req)
self.assertEqual(self.decompress(r.content), self.compressible_string)
self.assertEqual(r.get("Content-Encoding"), "gzip")
def test_no_compress_short_response(self):
"""
Compression isn't performed on responses with short content.
"""
self.resp.content = self.short_string
r = GZipMiddleware(self.get_response)(self.req)
self.assertEqual(r.content, self.short_string)
self.assertIsNone(r.get("Content-Encoding"))
def test_no_compress_compressed_response(self):
"""
Compression isn't performed on responses that are already compressed.
"""
self.resp["Content-Encoding"] = "deflate"
r = GZipMiddleware(self.get_response)(self.req)
self.assertEqual(r.content, self.compressible_string)
self.assertEqual(r.get("Content-Encoding"), "deflate")
def test_no_compress_incompressible_response(self):
"""
Compression isn't performed on responses with incompressible content.
"""
self.resp.content = self.incompressible_string
r = GZipMiddleware(self.get_response)(self.req)
self.assertEqual(r.content, self.incompressible_string)
self.assertIsNone(r.get("Content-Encoding"))
def test_compress_deterministic(self):
"""
Compression results are the same for the same content and don't
include a modification time (since that would make the results
of compression non-deterministic and prevent
ConditionalGetMiddleware from recognizing conditional matches
on gzipped content).
"""
class DeterministicGZipMiddleware(GZipMiddleware):
max_random_bytes = 0
r1 = DeterministicGZipMiddleware(self.get_response)(self.req)
r2 = DeterministicGZipMiddleware(self.get_response)(self.req)
self.assertEqual(r1.content, r2.content)
self.assertEqual(self.get_mtime(r1.content), 0)
self.assertEqual(self.get_mtime(r2.content), 0)
def test_random_bytes(self):
"""A random number of bytes is added to mitigate the BREACH attack."""
with mock.patch(
"django.utils.text.secrets.randbelow", autospec=True, return_value=3
):
r = GZipMiddleware(self.get_response)(self.req)
# The fourth byte of a gzip stream contains flags.
self.assertEqual(r.content[3], gzip.FNAME)
# A 3 byte filename "aaa" and a null byte are added.
self.assertEqual(r.content[10:14], b"aaa\x00")
self.assertEqual(self.decompress(r.content), self.compressible_string)
def test_random_bytes_streaming_response(self):
"""A random number of bytes is added to mitigate the BREACH attack."""
def get_stream_response(request):
resp = StreamingHttpResponse(self.sequence)
resp["Content-Type"] = "text/html; charset=UTF-8"
return resp
with mock.patch(
"django.utils.text.secrets.randbelow", autospec=True, return_value=3
):
r = GZipMiddleware(get_stream_response)(self.req)
content = b"".join(r)
# The fourth byte of a gzip stream contains flags.
self.assertEqual(content[3], gzip.FNAME)
# A 3 byte filename "aaa" and a null byte are added.
self.assertEqual(content[10:14], b"aaa\x00")
self.assertEqual(self.decompress(content), b"".join(self.sequence))
| GZipMiddlewareTest |
python | huggingface__transformers | src/transformers/models/sam_hq/modular_sam_hq.py | {
"start": 9810,
"end": 19997
} | class ____(nn.Module):
def __init__(self, config: SamHQMaskDecoderConfig):
super().__init__()
self.hidden_size = config.hidden_size
self.num_multimask_outputs = config.num_multimask_outputs
self.num_mask_tokens = config.num_multimask_outputs + 1
self.iou_token = nn.Embedding(1, self.hidden_size)
self.mask_tokens = nn.Embedding(self.num_mask_tokens, self.hidden_size)
self.transformer = SamHQTwoWayTransformer(config)
self.upscale_conv1 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 4, kernel_size=2, stride=2)
self.upscale_conv2 = nn.ConvTranspose2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=2, stride=2)
self.upscale_layer_norm = SamHQLayerNorm(self.hidden_size // 4, data_format="channels_first")
self.activation = nn.GELU()
mlps_list = []
for _ in range(self.num_mask_tokens):
mlps_list += [SamHQFeedForward(self.hidden_size, self.hidden_size, self.hidden_size // 8, 3)]
self.output_hypernetworks_mlps = nn.ModuleList(mlps_list)
self.iou_prediction_head = SamHQFeedForward(
self.hidden_size, config.iou_head_hidden_dim, self.num_mask_tokens, config.iou_head_depth
)
self.hq_token = nn.Embedding(1, self.hidden_size)
self.hq_mask_mlp = SamHQFeedForward(self.hidden_size, self.hidden_size, self.hidden_size // 8, 3)
self.num_mask_tokens = self.num_mask_tokens + 1
# Compress ViT features
self.compress_vit_conv1 = nn.ConvTranspose2d(config.vit_dim, self.hidden_size, kernel_size=2, stride=2)
self.compress_vit_norm = SamHQLayerNorm(self.hidden_size, data_format="channels_first")
self.compress_vit_conv2 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 8, kernel_size=2, stride=2)
# Embedding encoder
self.encoder_conv1 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 4, kernel_size=2, stride=2)
self.encoder_norm = SamHQLayerNorm(self.hidden_size // 4, data_format="channels_first")
self.encoder_conv2 = nn.ConvTranspose2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=2, stride=2)
# Embedding mask feature
self.mask_conv1 = nn.Conv2d(self.hidden_size // 8, self.hidden_size // 4, kernel_size=3, stride=1, padding=1)
self.mask_norm = SamHQLayerNorm(self.hidden_size // 4, data_format="channels_first")
self.mask_conv2 = nn.Conv2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=3, stride=1, padding=1)
def forward(
self,
image_embeddings: torch.Tensor,
image_positional_embeddings: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
multimask_output: bool,
hq_token_only: bool,
intermediate_embeddings: Optional[list[torch.Tensor]] = None,
attention_similarity: Optional[torch.Tensor] = None,
target_embedding: Optional[torch.Tensor] = None,
) -> SamHQMMaskDecoderOutputs:
"""
Predict high-quality masks given image and prompt embeddings.
Args:
image_embeddings (`torch.Tensor`):
The embeddings from the image encoder.
image_positional_embedding (`torch.Tensor`):
Positional encoding with the shape of image_embeddings.
sparse_prompt_embeddings (`torch.Tensor`):
The embeddings of the points and boxes.
dense_prompt_embeddings (`torch.Tensor`):
The embeddings of the mask inputs.
multimask_output (bool):
Whether to return multiple masks or a single mask.
hq_token_only (bool):
Whether to use only the high-quality token output or combine with SAM output.
intermediate_embeddings (`torch.Tensor`):
Intermediate embeddings from the vision encoder for feature fusion.
attention_similarity (`torch.Tensor`, *optional*):
Optional tensor for attention similarity computation.
target_embedding (`torch.Tensor`, *optional*):
Optional target embedding for transformer processing.
Returns:
`Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]`: A tuple of tensors containing:
- A tensor of shape `(batch_size, num_prompts, num_masks, height, width)` containing the output masks.
- A tensor of shape `(batch_size, num_prompts, num_masks)` containing the iou predictions for each mask.
- (Optional) A tuple containing attention tensors if output_attentions is True.
"""
batch_size, num_channels, height, width = image_embeddings.shape
point_batch_size = sparse_prompt_embeddings.shape[1] if sparse_prompt_embeddings is not None else 1
has_intermediate = intermediate_embeddings is not None and len(intermediate_embeddings) > 0
if has_intermediate:
vit_features = intermediate_embeddings[0].permute(0, 3, 1, 2).contiguous()
embed_encode = self.encoder_conv1(image_embeddings)
embed_encode = self.activation(self.encoder_norm(embed_encode))
embed_encode = self.encoder_conv2(embed_encode)
if has_intermediate:
compressed_vit_features = self.compress_vit_conv1(vit_features)
compressed_vit_features = self.activation(self.compress_vit_norm(compressed_vit_features))
compressed_vit_features = self.compress_vit_conv2(compressed_vit_features)
hq_features = embed_encode + compressed_vit_features
else:
hq_features = embed_encode
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight, self.hq_token.weight], dim=0)
output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1)
if sparse_prompt_embeddings is not None:
tokens = torch.cat([output_tokens, sparse_prompt_embeddings], dim=2)
else:
tokens = output_tokens
point_embeddings = tokens.to(self.iou_token.weight.dtype)
image_embeddings = image_embeddings + dense_prompt_embeddings
image_embeddings = image_embeddings.repeat_interleave(point_batch_size, 0)
image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0)
point_embedding, iou_token_out = self.transformer(
point_embeddings=point_embeddings,
image_embeddings=image_embeddings,
image_positional_embeddings=image_positional_embeddings,
attention_similarity=attention_similarity,
target_embedding=target_embedding,
)
iou_token_out = point_embedding[:, :, 0, :]
mask_tokens_out = point_embedding[:, :, 1 : (1 + self.num_mask_tokens), :]
image_embeddings = image_embeddings.transpose(2, 3).reshape(
batch_size * point_batch_size, num_channels, height, width
)
upscaled_embedding = self.upscale_conv1(image_embeddings)
upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding))
upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding))
upscaled_embedding_hq = self.mask_conv1(upscaled_embedding)
upscaled_embedding_hq = self.activation(self.mask_norm(upscaled_embedding_hq))
upscaled_embedding_hq = self.mask_conv2(upscaled_embedding_hq)
if hq_features.shape[0] == 1:
hq_features = hq_features.repeat(batch_size * point_batch_size, 1, 1, 1)
elif hq_features.shape[0] == batch_size and batch_size * point_batch_size != batch_size:
hq_features = hq_features.repeat_interleave(point_batch_size, 0)
upscaled_embedding_hq = upscaled_embedding_hq + hq_features
hyper_in_list = []
for mask_token_index in range(self.num_mask_tokens):
if mask_token_index < self.num_mask_tokens - 1:
current_mlp = self.output_hypernetworks_mlps[mask_token_index]
else:
current_mlp = self.hq_mask_mlp
hyper_in_list += [current_mlp(mask_tokens_out[:, :, mask_token_index, :])]
hyper_in = torch.stack(hyper_in_list, dim=2)
_, num_channels, height, width = upscaled_embedding.shape
upscaled_embedding = upscaled_embedding.reshape(batch_size, point_batch_size, num_channels, height * width)
upscaled_embedding_hq = upscaled_embedding_hq.reshape(
batch_size, point_batch_size, num_channels, height * width
)
masks_sam = (hyper_in[:, :, : self.num_mask_tokens - 1] @ upscaled_embedding).reshape(
batch_size, point_batch_size, -1, height, width
)
masks_hq = (hyper_in[:, :, self.num_mask_tokens - 1 :] @ upscaled_embedding_hq).reshape(
batch_size, point_batch_size, -1, height, width
)
masks = torch.cat([masks_sam, masks_hq], dim=2)
iou_pred = self.iou_prediction_head(iou_token_out)
if multimask_output:
mask_slice = slice(1, self.num_mask_tokens - 1)
iou_pred = iou_pred[:, :, mask_slice]
# Sort the IoU scores in descending order and get indices
iou_pred_sorted, sort_indices = torch.sort(iou_pred, dim=2, descending=True)
# Reorder the masks according to sorted scores
masks_sam = masks[:, :, mask_slice, :, :]
masks_sam = torch.gather(
masks_sam,
2,
sort_indices[..., None, None].expand(-1, -1, -1, masks_sam.shape[3], masks_sam.shape[4]),
)
# Update iou_pred with sorted scores
iou_pred = iou_pred_sorted
else:
mask_slice = slice(0, 1)
iou_pred = iou_pred[:, :, mask_slice]
masks_sam = masks[:, :, mask_slice, :, :]
masks_hq = masks[:, :, slice(self.num_mask_tokens - 1, self.num_mask_tokens), :, :]
if hq_token_only:
masks = masks_hq
else:
masks = masks_sam + masks_hq
return masks, iou_pred
| SamHQMaskDecoder |
python | django__django | django/db/models/fields/reverse_related.py | {
"start": 10656,
"end": 12533
} | class ____(ForeignObjectRel):
"""
Used by ManyToManyField to store information about the relation.
``_meta.get_fields()`` returns this class to provide access to the field
flags for the reverse relation.
"""
def __init__(
self,
field,
to,
related_name=None,
related_query_name=None,
limit_choices_to=None,
symmetrical=True,
through=None,
through_fields=None,
db_constraint=True,
):
super().__init__(
field,
to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
)
if through and not db_constraint:
raise ValueError("Can't supply a through model and db_constraint=False")
self.through = through
if through_fields and not through:
raise ValueError("Cannot specify through_fields without a through model")
self.through_fields = through_fields
self.symmetrical = symmetrical
self.db_constraint = db_constraint
@property
def identity(self):
return (
*super().identity,
self.through,
make_hashable(self.through_fields),
self.db_constraint,
)
def get_related_field(self):
"""
Return the field in the 'to' object to which this relationship is tied.
Provided for symmetry with ManyToOneRel.
"""
opts = self.through._meta
if self.through_fields:
field = opts.get_field(self.through_fields[0])
else:
for field in opts.fields:
rel = getattr(field, "remote_field", None)
if rel and rel.model == self.model:
break
return field.foreign_related_fields[0]
| ManyToManyRel |
python | huggingface__transformers | src/transformers/models/imagegpt/image_processing_imagegpt.py | {
"start": 2552,
"end": 15643
} | class ____(BaseImageProcessor):
r"""
Constructs a ImageGPT image processor. This image processor can be used to resize images to a smaller resolution
(such as 32x32 or 64x64), normalize them and finally color quantize them to obtain sequences of "pixel values"
(color clusters).
Args:
clusters (`np.ndarray` or `list[list[int]]`, *optional*):
The color clusters to use, of shape `(n_clusters, 3)` when color quantizing. Can be overridden by `clusters`
in `preprocess`.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's dimensions to `(size["height"], size["width"])`. Can be overridden by
`do_resize` in `preprocess`.
size (`dict[str, int]` *optional*, defaults to `{"height": 256, "width": 256}`):
Size of the image after resizing. Can be overridden by `size` in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image pixel value to between [-1, 1]. Can be overridden by `do_normalize` in
`preprocess`.
do_color_quantize (`bool`, *optional*, defaults to `True`):
Whether to color quantize the image. Can be overridden by `do_color_quantize` in `preprocess`.
"""
model_input_names = ["pixel_values"]
valid_kwargs = ImageGPTImageProcessorKwargs
def __init__(
self,
# clusters is a first argument to maintain backwards compatibility with the old ImageGPTImageProcessor
clusters: Optional[Union[list[list[int]], np.ndarray]] = None,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_normalize: bool = True,
do_color_quantize: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 256, "width": 256}
size = get_size_dict(size)
self.clusters = np.array(clusters) if clusters is not None else None
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_normalize = do_normalize
self.do_color_quantize = do_color_quantize
# Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BILINEAR,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
output_size = (size["height"], size["width"])
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def normalize(
self,
image: np.ndarray,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Normalizes an images' pixel values to between [-1, 1].
Args:
image (`np.ndarray`):
Image to normalize.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
image = rescale(image=image, scale=1 / 127.5, data_format=data_format, input_data_format=input_data_format)
image = image - 1
return image
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_normalize: Optional[bool] = None,
do_color_quantize: Optional[bool] = None,
clusters: Optional[Union[list[list[int]], np.ndarray]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_normalize=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
has an effect if `do_resize` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image
do_color_quantize (`bool`, *optional*, defaults to `self.do_color_quantize`):
Whether to color quantize the image.
clusters (`np.ndarray` or `list[list[int]]`, *optional*, defaults to `self.clusters`):
Clusters used to quantize the image of shape `(n_clusters, 3)`. Only has an effect if
`do_color_quantize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
Only has an effect if `do_color_quantize` is set to `False`.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size)
resample = resample if resample is not None else self.resample
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
clusters = clusters if clusters is not None else self.clusters
clusters = np.array(clusters)
images = make_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
# Here, normalize() is using a constant factor to divide pixel values.
# hence, the method does not need image_mean and image_std.
validate_preprocess_arguments(
do_resize=do_resize,
size=size,
resample=resample,
)
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True.")
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_normalize and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If you wish to do this, "
"make sure to set `do_normalize` to `False` and that pixel values are between [-1, 1].",
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]
if do_normalize:
images = [self.normalize(image=image, input_data_format=input_data_format) for image in images]
if do_color_quantize:
images = [to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
images = np.array(images)
images = color_quantize(images, clusters).reshape(images.shape[:-1])
# flatten to (batch_size, height*width)
batch_size = images.shape[0]
images = images.reshape(batch_size, -1)
# We need to convert back to a list of images to keep consistent behaviour across processors.
images = list(images)
data = {"input_ids": images}
else:
images = [to_channel_dimension_format(image, data_format, input_data_format) for image in images]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
def to_dict(self):
output = super().to_dict()
# Ensure clusters are JSON/equality friendly
if output.get("clusters") is not None and isinstance(output["clusters"], np.ndarray):
output["clusters"] = output["clusters"].tolist()
# Need to set missing keys from slow processor to match the expected behavior in save/load tests compared to fast processor
missing_keys = ["image_mean", "image_std", "rescale_factor", "do_rescale"]
for key in missing_keys:
if key in output:
output[key] = None
return output
__all__ = ["ImageGPTImageProcessor"]
| ImageGPTImageProcessor |
python | huggingface__transformers | src/transformers/models/cohere2_vision/modular_cohere2_vision.py | {
"start": 3308,
"end": 3389
} | class ____(AyaVisionModelOutputWithPast):
pass
| Cohere2VisionModelOutputWithPast |
python | openai__openai-python | src/openai/types/responses/response_mcp_list_tools_completed_event.py | {
"start": 211,
"end": 637
} | class ____(BaseModel):
item_id: str
"""The ID of the MCP tool call item that produced this output."""
output_index: int
"""The index of the output item that was processed."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.mcp_list_tools.completed"]
"""The type of the event. Always 'response.mcp_list_tools.completed'."""
| ResponseMcpListToolsCompletedEvent |
python | ray-project__ray | doc/source/serve/doc_code/monitoring/logging_config.py | {
"start": 496,
"end": 805
} | class ____:
def __call__(self) -> int:
return "hello world"
serve.run(Model.bind(), logging_config=LoggingConfig(encoding="JSON"))
resp = requests.get("http://localhost:8000/")
# __serve_run_json_end__
# __level_start__
@serve.deployment(logging_config=LoggingConfig(log_level="DEBUG"))
| Model |
python | sanic-org__sanic | scripts/release.py | {
"start": 1554,
"end": 10635
} | class ____:
def __init__(self):
self._old_path = path.dirname(path.abspath(__file__))
self._new_path = path.dirname(self._old_path)
def __enter__(self):
chdir(self._new_path)
def __exit__(self, exc_type, exc_val, exc_tb):
chdir(self._old_path)
def _run_shell_command(command: list):
try:
process = Popen(
command, stderr=PIPE, stdout=PIPE, stdin=PIPE, shell=True
)
output, error = process.communicate()
return_code = process.returncode
return output.decode("utf-8"), error, return_code
except Exception:
return None, None, -1
def _fetch_default_calendar_release_version():
return datetime.now().strftime("%y.%m.0")
def _fetch_current_version(config_file: str) -> str:
if path.isfile(config_file):
config_parser = RawConfigParser()
with open(config_file) as cfg:
config_parser.read_file(cfg)
return (
config_parser.get("version", "current_version")
or _fetch_default_calendar_release_version()
)
else:
return _fetch_default_calendar_release_version()
def _change_micro_version(current_version: str):
version_string = current_version.split(".")
version_string[-1] = str((int(version_string[-1]) + 1))
return ".".join(version_string)
def _get_new_version(
config_file: str = "./setup.cfg",
current_version: str = None,
micro_release: bool = False,
):
if micro_release:
if current_version:
return _change_micro_version(current_version)
elif config_file:
return _change_micro_version(_fetch_current_version(config_file))
else:
return _fetch_default_calendar_release_version()
else:
return _fetch_default_calendar_release_version()
def _get_current_tag(git_command_name="get_tag"):
global GIT_COMMANDS
command = GIT_COMMANDS.get(git_command_name)
out, err, ret = _run_shell_command(command)
if str(out):
return str(out).split("\n")[0]
else:
return None
def _update_release_version_for_sanic(
current_version, new_version, config_file, generate_changelog
):
config_parser = RawConfigParser()
with open(config_file) as cfg:
config_parser.read_file(cfg)
config_parser.set("version", "current_version", new_version)
version_files = config_parser.get("version", "files")
current_version_line = config_parser.get(
"version", "current_version_pattern"
).format(current_version=current_version)
new_version_line = config_parser.get(
"version", "new_version_pattern"
).format(new_version=new_version)
for version_file in version_files.split(","):
with open(version_file) as init_file:
data = init_file.read()
new_data = data.replace(current_version_line, new_version_line)
with open(version_file, "w") as init_file:
init_file.write(new_data)
with open(config_file, "w") as config:
config_parser.write(config)
if generate_changelog:
towncrier.__main(
draft=False,
directory=path.dirname(path.abspath(__file__)),
project_name=None,
project_version=new_version,
project_date=None,
answer_yes=True,
)
command = GIT_COMMANDS.get("commit_version_change")
command[0] = command[0].format(
new_version=new_version, current_version=current_version
)
_, err, ret = _run_shell_command(command)
if int(ret) != 0:
print(
"Failed to Commit Version upgrade changes to Sanic: {}".format(
err.decode("utf-8")
)
)
sys.exit(1)
def _generate_change_log(current_version: str = None):
global GIT_COMMANDS
command = GIT_COMMANDS.get("get_change_log")
command[0] = command[0].format(current_version=current_version)
output, error, ret = _run_shell_command(command=command)
if not str(output):
print("Unable to Fetch Change log details to update the Release Note")
sys.exit(1)
commit_details = OrderedDict()
commit_details["authors"] = {}
commit_details["commits"] = []
for line in str(output).split("\n"):
commit, author, description = line.split(":::")
if "GitHub" not in author:
commit_details["authors"][author] = 1
commit_details["commits"].append(" - ".join([commit, description]))
return commit_details
def _generate_markdown_document(
milestone, release_name, current_version, release_version
):
global JINJA_RELEASE_NOTE_TEMPLATE
release_name = release_name or release_version
change_log = _generate_change_log(current_version=current_version)
return JINJA_RELEASE_NOTE_TEMPLATE.render(
release_name=release_name,
milestone=milestone,
changelogs=change_log["commits"],
authors=change_log["authors"].keys(),
)
def _tag_release(new_version, current_version, milestone, release_name, token):
global GIT_COMMANDS
global RELEASE_NOTE_UPDATE_URL
for command_name in ["create_new_tag", "push_tag"]:
command = GIT_COMMANDS.get(command_name)
command[0] = command[0].format(
new_version=new_version, current_version=current_version
)
out, error, ret = _run_shell_command(command=command)
if int(ret) != 0:
print("Failed to execute the command: {}".format(command[0]))
sys.exit(1)
change_log = _generate_markdown_document(
milestone, release_name, current_version, new_version
)
body = {"name": release_name or new_version, "body": change_log}
headers = {"content-type": "application/json"}
response = patch(
RELEASE_NOTE_UPDATE_URL.format(new_version=new_version, token=token),
data=dumps(body),
headers=headers,
)
response.raise_for_status()
def release(args: Namespace):
current_tag = _get_current_tag()
current_version = _fetch_current_version(args.config)
if current_tag and current_version not in current_tag:
print(
"Tag mismatch between what's in git and what was provided by "
"--current-version. Existing: {}, Give: {}".format(
current_tag, current_version
)
)
sys.exit(1)
new_version = args.release_version or _get_new_version(
args.config, current_version, args.micro_release
)
_update_release_version_for_sanic(
current_version=current_version,
new_version=new_version,
config_file=args.config,
generate_changelog=args.generate_changelog,
)
if args.tag_release:
_tag_release(
current_version=current_version,
new_version=new_version,
milestone=args.milestone,
release_name=args.release_name,
token=args.token,
)
if __name__ == "__main__":
cli = ArgumentParser(description="Sanic Release Manager")
cli.add_argument(
"--release-version",
"-r",
help="New Version to use for Release",
default=_fetch_default_calendar_release_version(),
required=False,
)
cli.add_argument(
"--current-version",
"-cv",
help="Current Version to default in case if you don't want to "
"use the version configuration files",
default=None,
required=False,
)
cli.add_argument(
"--config",
"-c",
help="Configuration file used for release",
default="./setup.cfg",
required=False,
)
cli.add_argument(
"--token",
"-t",
help="Git access token with necessary access to Huge Sanic Org",
required=False,
)
cli.add_argument(
"--milestone",
"-ms",
help="Git Release milestone information to include in release note",
required=False,
)
cli.add_argument(
"--release-name",
"-n",
help="Release Name to use if any",
required=False,
)
cli.add_argument(
"--micro-release",
"-m",
help="Micro Release with patches only",
default=False,
action="store_true",
required=False,
)
cli.add_argument(
"--tag-release",
help="Tag a new release for Sanic",
default=False,
action="store_true",
required=False,
)
cli.add_argument(
"--generate-changelog",
help="Generate changelog for Sanic as part of release",
default=False,
action="store_true",
required=False,
)
args = cli.parse_args()
if args.tag_release:
for key, value in {
"--token/-t": args.token,
"--milestone/-m": args.milestone,
}.items():
if not value:
print(f"{key} is mandatory while using --tag-release")
sys.exit(1)
with Directory():
release(args)
| Directory |
python | tensorflow__tensorflow | tensorflow/python/summary/writer/event_file_writer_v2.py | {
"start": 1014,
"end": 5678
} | class ____(object):
"""Writes `Event` protocol buffers to an event file via the graph.
The `EventFileWriterV2` class is backed by the summary file writer in the v2
summary API (currently in tf.contrib.summary), so it uses a shared summary
writer resource and graph ops to write events.
As with the original EventFileWriter, this class will asynchronously write
Event protocol buffers to the backing file. The Event file is encoded using
the tfrecord format, which is similar to RecordIO.
"""
def __init__(self, session, logdir, max_queue=10, flush_secs=120,
filename_suffix=''):
"""Creates an `EventFileWriterV2` and an event file to write to.
On construction, this calls `tf.contrib.summary.create_file_writer` within
the graph from `session.graph` to look up a shared summary writer resource
for `logdir` if one exists, and create one if not. Creating the summary
writer resource in turn creates a new event file in `logdir` to be filled
with `Event` protocol buffers passed to `add_event`. Graph ops to control
this writer resource are added to `session.graph` during this init call;
stateful methods on this class will call `session.run()` on these ops.
Note that because the underlying resource is shared, it is possible that
other parts of the code using the same session may interact independently
with the resource, e.g. by flushing or even closing it. It is the caller's
responsibility to avoid any undesirable sharing in this regard.
The remaining arguments to the constructor (`flush_secs`, `max_queue`, and
`filename_suffix`) control the construction of the shared writer resource
if one is created. If an existing resource is reused, these arguments have
no effect. See `tf.contrib.summary.create_file_writer` for details.
Args:
session: A `tf.compat.v1.Session`. Session that will hold shared writer
resource. The writer ops will be added to session.graph during this
init call.
logdir: A string. Directory where event file will be written.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
filename_suffix: A string. Every event file's name is suffixed with
`filename_suffix`.
"""
self._session = session
self._logdir = logdir
self._closed = False
gfile.MakeDirs(self._logdir)
with self._session.graph.as_default():
with ops.name_scope('filewriter'):
file_writer = summary_ops_v2.create_file_writer(
logdir=self._logdir,
max_queue=max_queue,
flush_millis=flush_secs * 1000,
filename_suffix=filename_suffix)
with summary_ops_v2.always_record_summaries(), file_writer.as_default():
self._event_placeholder = array_ops.placeholder_with_default(
constant_op.constant('unused', dtypes.string),
shape=[])
self._add_event_op = summary_ops_v2.import_event(
self._event_placeholder)
self._init_op = file_writer.init() # pylint: disable=assignment-from-no-return
self._flush_op = file_writer.flush() # pylint: disable=assignment-from-no-return
self._close_op = file_writer.close() # pylint: disable=assignment-from-no-return
self._session.run(self._init_op)
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self._logdir
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
if self._closed:
self._closed = False
self._session.run(self._init_op)
def add_event(self, event):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
"""
if not self._closed:
event_pb = event.SerializeToString()
self._session.run(
self._add_event_op, feed_dict={self._event_placeholder: event_pb})
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
self._session.run(self._flush_op)
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
if not self._closed:
self.flush()
self._session.run(self._close_op)
self._closed = True
| EventFileWriterV2 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 218163,
"end": 218666
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of DeleteProjectV2Workflow"""
__schema__ = github_schema
__field_names__ = ("workflow_id", "client_mutation_id")
workflow_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="workflowId")
"""The ID of the workflow to be removed."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| DeleteProjectV2WorkflowInput |
python | has2k1__plotnine | plotnine/scales/scale_identity.py | {
"start": 1667,
"end": 1897
} | class ____(
MapTrainMixin, scale_continuous[Literal["legend"] | None]
):
"""
No alpha scaling
"""
_aesthetics = ["alpha"]
_: KW_ONLY
guide: Literal["legend"] | None = None
@dataclass
| scale_alpha_identity |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/aot_autograd_result.py | {
"start": 10950,
"end": 21243
} | class ____(Generic[TForward, TBackward]):
"""A single result from AOT Autograd compilation, genericized by Forward and Backward types.
A TForward is always an InductorOutput of some sort, which represents the
forward graph of the compile.
A TBackward is an InductorOutput + metadata about the backward, useful for specific
backward-only wrappers. This type is encapsulated by GenericCompiledBackward.
Each AOTAutogradResult is essentially parameterized by 1. the method of loading
from the cache (either Bundled or UnBundled), and 2. The type of the output. For now,
the only type of output we support is Python Wrapper output, i.e. OutputCode.CompiledFxGraph,
but the same technique works for C++ wrapper code; we'd just add an extra InductorOutput type.
"""
# Forward and Backward info
compiled_fw: TForward
compiled_bw: Optional[TBackward]
# Code of the joint graph using print_readable()
# Used for logging purposes
aot_joint_graph_str: Optional[str]
aot_forward_graph_str: Optional[str]
aot_backward_graph_str: Optional[str]
# Runtime_metadata saved right before compilation
runtime_metadata: ViewAndMutationMeta
# Wrappers that run after each aot_dispatch_* function
dispatch_wrappers: list[CompilerWrapper]
# Used by AOTSubclassWrapper
maybe_subclass_meta: Optional[SubclassMeta]
num_fw_outs_saved_for_bw: Optional[int]
# Used by RuntimeWrapper
indices_of_inps_to_detach: list[int]
# Time taken to trace/compile the forward
# forward_time_taken includes AOTAutograd tracing time + inductor compilation time
# backward_time_taken is essentially just the time inductor took to compile
forward_time_taken_ns: int
backward_time_taken_ns: int
# Used by standalone_compile
sanitized_aot_config: AOTConfig
guards_expr: Optional[str]
# Used by Compiled Autograd
serialized_bw_module: Optional[SerializedGraphModule]
def pre_save(self):
"""
Perform any preparations to make the result ready for serialization.
"""
self.compiled_fw.pre_save()
if self.compiled_bw is not None:
self.compiled_bw.pre_save()
# Turn result into the original callable
def wrap_post_compile(
self,
args: list[torch.Tensor],
aot_config: AOTConfig,
fx_config: _CompileFxKwargs,
) -> Callable:
"""
This function takes a result and carefully reconstructs the original callable
that AOTAutograd returned the first time it was run. It does this by running the various
post compile steps that AOTAutograd runs on its compiled artifact after running the fw/bw compilers.
In the inference path, this consists of the Subclass, FunctionalzedRngRuntime, and RuntimeWrappers.
In the autograd path, this consists of AOTAutogradDispatch.post_compile.
The steps here should match exactly the steps that are run in aot_dispatch_base and aot_dispatch_autograd.
Notably absent from the cached path are:
- DebugAssertWrapper
- FakifiedOutWrapper
Which we'll handle separately later on, if necessary.
"""
from torch._dynamo.utils import CompileEventLogger, dynamo_timed
# Log the output of AOTAutogradCache
if aot_config.enable_log:
# TODO: maybe also log to aot_graphs_log
# Unfortunately aot_graphs_log uses
# slightly different formatting though
if self.aot_joint_graph_str is not None:
torch._logging.trace_structured(
"aot_joint_graph", payload_fn=lambda: self.aot_joint_graph_str
)
if self.aot_forward_graph_str is not None:
from torchgen.utils import dataclass_repr
torch._logging.trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "aot_forward_graph_fw_metadata",
"encoding": "string",
},
payload_fn=lambda: dataclass_repr(self.runtime_metadata),
)
if self.maybe_subclass_meta is not None:
torch._logging.trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "aot_forward_graph_fw_subclass_metadata",
"encoding": "string",
},
payload_fn=lambda: dataclass_repr(self.maybe_subclass_meta),
)
# It's called an inference graph if not running with autograd
name = (
"aot_forward_graph"
if self.aot_backward_graph_str is not None
else "aot_inference_graph"
)
torch._logging.trace_structured(
name, payload_fn=lambda: self.aot_forward_graph_str
)
if self.aot_backward_graph_str is not None:
torch._logging.trace_structured(
"aot_backward_graph", payload_fn=lambda: self.aot_backward_graph_str
)
with dynamo_timed("AOTAutogradCache.inductor_load"):
compiled_fw_func = self.compiled_fw.load(args)
compiled_bw_func = None
if self.compiled_bw is not None:
compiled_bw_func = self.compiled_bw.load(args)
needs_autograd = True
CompileEventLogger.try_add_pt2_compile(
"backend_compile", dispatch_mode="autograd"
)
# Now that we've loaded forward and backward, call post compile on both
# This avoids setting things like BoxedBools in fx_config until
# after both forward and backward cache hit
fw_fx_config: _CompileFxKwargs = {
**fx_config,
"is_backward": False,
}
bw_fx_config: _CompileFxKwargs = {
**fx_config,
"is_backward": True,
}
compiled_fw_func = self.compiled_fw.post_compile(
compiled_fw_func, fw_fx_config
)
compiled_bw_func = self.compiled_bw.post_compile(
compiled_bw_func, bw_fx_config
)
else:
inference_fx_config: _CompileFxKwargs = {
**fx_config,
"is_backward": False,
}
needs_autograd = False
CompileEventLogger.try_add_pt2_compile(
"backend_compile", dispatch_mode="inference"
)
compiled_fw_func = self.compiled_fw.post_compile(
compiled_fw_func, inference_fx_config
)
# Wrap the forward function in post compile wrappers
compiled_fw_func = AOTDispatchSubclassWrapper(
trace_joint=needs_autograd,
fw_only=None,
maybe_subclass_meta=self.maybe_subclass_meta,
num_fw_outs_saved_for_bw=self.num_fw_outs_saved_for_bw,
).post_compile(
compiled_fw_func, aot_config, runtime_metadata=self.runtime_metadata
)
req_subclass_dispatch = self.maybe_subclass_meta is not None
CompileEventLogger.try_add_pt2_compile(
"backend_compile", requires_subclass_dispatch=req_subclass_dispatch
)
# In autograd case, functionalizedRngWrapper should not modify outs
return_new_outs = not needs_autograd
compiled_fw_func = FunctionalizedRngRuntimeWrapper(
return_new_outs=return_new_outs
).post_compile(
compiled_fw_func, aot_config, runtime_metadata=self.runtime_metadata
)
compiled_fw_func._boxed_call = True
disable_amp = torch._C._is_any_autocast_enabled()
if needs_autograd:
assert self.compiled_bw is not None
cached_lazy_backward = None
if self.serialized_bw_module is not None:
cached_lazy_backward = CachedAutogradLazyBackwardCompileInfo(
self.serialized_bw_module.deserialize
)
# This function is run on both cache miss and cache hit, either here
# or in aot_dispatch_autograd. On a cache hit,
# 1. the bw is already compiled
# 2. we don't need to save to the cache again
# so those corresponding arguments are set to None.
compiled_function = AOTDispatchAutograd.post_compile(
compiled_fw_func,
compiled_bw_func,
self.maybe_subclass_meta,
self.compiled_bw.num_symints_saved_for_bw_,
self.compiled_bw.backward_state_indices,
disable_amp,
self.indices_of_inps_to_detach,
cached_lazy_backward,
aot_config,
fw_metadata=self.runtime_metadata,
try_save_cache_entry=None,
)
else:
compiled_function = RuntimeWrapper(
indices_of_inps_to_detach=self.indices_of_inps_to_detach,
trace_joint=False,
disable_amp=disable_amp,
).post_compile(
compiled_fw_func, aot_config, runtime_metadata=self.runtime_metadata
)
# Add serialization function back onto object
compiled_function, _ = post_compile(
self.dispatch_wrappers,
compiled_function,
aot_config,
runtime_metadata=self.runtime_metadata,
)
# Now that we're pretty sure it's a successful load, add guards
# to the existing shape environment from the cache
if self.guards_expr:
from .autograd_cache import AOTAutogradCache
symints = AOTAutogradCache._filter_backed_symints(args)
check = bool(AOTAutogradCache.evaluate_guards(self.guards_expr, symints))
assert check is True
return compiled_function
| GenericAOTAutogradResult |
python | python__mypy | mypy/nodes.py | {
"start": 82372,
"end": 82749
} | class ____(Expression):
"""List literal expression [...]."""
__slots__ = ("items",)
__match_args__ = ("items",)
items: list[Expression]
def __init__(self, items: list[Expression]) -> None:
super().__init__()
self.items = items
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_list_expr(self)
| ListExpr |
python | django__django | django/template/base.py | {
"start": 41508,
"end": 43731
} | class ____(Node):
child_nodelists = ()
def __init__(self, filter_expression):
self.filter_expression = filter_expression
def __repr__(self):
return "<Variable Node: %s>" % self.filter_expression
def render(self, context):
try:
output = self.filter_expression.resolve(context)
except UnicodeDecodeError:
# Unicode conversion can fail sometimes for reasons out of our
# control (e.g. exception rendering). In that case, we fail
# quietly.
return ""
return render_value_in_context(output, context)
# Regex for token keyword arguments
kwarg_re = _lazy_re_compile(r"(?:(\w+)=)?(.+)")
def token_kwargs(bits, parser, support_legacy=False):
"""
Parse token keyword arguments and return a dictionary of the arguments
retrieved from the ``bits`` token list.
`bits` is a list containing the remainder of the token (split by spaces)
that is to be checked for arguments. Valid arguments are removed from this
list.
`support_legacy` - if True, the legacy format ``1 as foo`` is accepted.
Otherwise, only the standard ``foo=1`` format is allowed.
There is no requirement for all remaining token ``bits`` to be keyword
arguments, so return the dictionary as soon as an invalid argument format
is reached.
"""
if not bits:
return {}
match = kwarg_re.match(bits[0])
kwarg_format = match and match[1]
if not kwarg_format:
if not support_legacy:
return {}
if len(bits) < 3 or bits[1] != "as":
return {}
kwargs = {}
while bits:
if kwarg_format:
match = kwarg_re.match(bits[0])
if not match or not match[1]:
return kwargs
key, value = match.groups()
del bits[:1]
else:
if len(bits) < 3 or bits[1] != "as":
return kwargs
key, value = bits[2], bits[0]
del bits[:3]
kwargs[key] = parser.compile_filter(value)
if bits and not kwarg_format:
if bits[0] != "and":
return kwargs
del bits[:1]
return kwargs
| VariableNode |
python | huggingface__transformers | src/transformers/models/chinese_clip/configuration_chinese_clip.py | {
"start": 10374,
"end": 17984
} | class ____(PreTrainedConfig):
r"""
[`ChineseCLIPConfig`] is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used
to instantiate Chinese-CLIP model according to the specified arguments, defining the text model and vision model
configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the
Chinese-CLIP [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16)
architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`ChineseCLIPTextConfig`].
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`ChineseCLIPVisionConfig`].
projection_dim (`int`, *optional*, defaults to 512):
Dimensionality of text and vision projection layers.
logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
The initial value of the *logit_scale* parameter. Default is used as per the original ChineseCLIP
implementation.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import ChineseCLIPConfig, ChineseCLIPModel
>>> # Initializing a ChineseCLIPConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
>>> configuration = ChineseCLIPConfig()
>>> # Initializing a ChineseCLIPModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
>>> model = ChineseCLIPModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a ChineseCLIPConfig from a ChineseCLIPTextConfig and a ChineseCLIPVisionConfig
>>> # Initializing a ChineseCLIPTextConfig and ChineseCLIPVisionConfig configuration
>>> config_text = ChineseCLIPTextConfig()
>>> config_vision = ChineseCLIPVisionConfig()
>>> config = ChineseCLIPConfig(text_config=config_text, vision_config=config_vision)
```"""
model_type = "chinese_clip"
sub_configs = {"text_config": ChineseCLIPTextConfig, "vision_config": ChineseCLIPVisionConfig}
def __init__(
self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
):
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
text_config_dict = kwargs.pop("text_config_dict", None)
vision_config_dict = kwargs.pop("vision_config_dict", None)
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
text_config = {}
# This is the complete result when using `text_config_dict`.
_text_config_dict = ChineseCLIPTextConfig(**text_config_dict).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key != "transformers_version":
# If specified in `text_config_dict`
if key in text_config_dict:
message = (
f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
f'The value `text_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
message = (
f"`text_config_dict` is provided which will be used to initialize `ChineseCLIPTextConfig`. "
f'The value `text_config["{key}"]` will be overridden.'
)
logger.info(message)
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict)
if vision_config_dict is not None:
if vision_config is None:
vision_config = {}
# This is the complete result when using `vision_config_dict`.
_vision_config_dict = ChineseCLIPVisionConfig(**vision_config_dict).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_vision_config_dict["id2label"] = {
str(key): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key != "transformers_version":
# If specified in `vision_config_dict`
if key in vision_config_dict:
message = (
f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
f'values. The value `vision_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
message = (
f"`vision_config_dict` is provided which will be used to initialize "
f'`ChineseCLIPVisionConfig`. The value `vision_config["{key}"]` will be overridden.'
)
logger.info(message)
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict)
if text_config is None:
text_config = ChineseCLIPTextConfig()
logger.info("`text_config` is `None`. initializing the `ChineseCLIPTextConfig` with default values.")
elif isinstance(text_config, dict):
text_config = ChineseCLIPTextConfig(**text_config)
if vision_config is None:
vision_config = ChineseCLIPVisionConfig()
logger.info("`vision_config` is `None`. initializing the `ChineseCLIPVisionConfig` with default values.")
elif isinstance(vision_config, dict):
vision_config = ChineseCLIPVisionConfig(**vision_config)
self.text_config = text_config
self.vision_config = vision_config
self.projection_dim = projection_dim
self.logit_scale_init_value = logit_scale_init_value
self.initializer_factor = 1.0
self.initializer_range = 0.02
super().__init__(**kwargs)
__all__ = ["ChineseCLIPConfig", "ChineseCLIPTextConfig", "ChineseCLIPVisionConfig"]
| ChineseCLIPConfig |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 274585,
"end": 276237
} | class ____(Request):
"""
Set the script requirements for a task
:param task: Task ID
:type task: str
:param requirements: A JSON object containing requirements strings by key
:type requirements: dict
"""
_service = "tasks"
_action = "set_requirements"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"requirements": {
"description": "A JSON object containing requirements strings by key",
"type": "object",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "requirements"],
"type": "object",
}
def __init__(self, task: str, requirements: dict, **kwargs: Any) -> None:
super(SetRequirementsRequest, self).__init__(**kwargs)
self.task = task
self.requirements = requirements
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("requirements")
def requirements(self) -> dict:
return self._property_requirements
@requirements.setter
def requirements(self, value: dict) -> None:
if value is None:
self._property_requirements = None
return
self.assert_isinstance(value, "requirements", (dict,))
self._property_requirements = value
| SetRequirementsRequest |
python | numba__numba | numba/tests/test_chained_assign.py | {
"start": 119,
"end": 2480
} | class ____(MemoryLeakMixin, unittest.TestCase):
def test_chain1(self):
from numba.tests.chained_assign_usecases import chain1
args = [
[np.arange(2)],
[np.arange(4, dtype=np.double)],
]
self._test_template(chain1, args)
def test_chain2(self):
from numba.tests.chained_assign_usecases import chain2
args = [
[3],
[3.0],
]
self._test_template(chain2, args)
def test_unpack1(self):
from numba.tests.chained_assign_usecases import unpack1
args = [
[1, 3.0],
[1.0, 3],
]
self._test_template(unpack1, args)
def test_unpack2(self):
from numba.tests.chained_assign_usecases import unpack2
args = [
[np.array([2]), np.array([4.0])],
[np.array([2.0]), np.array([4])],
]
self._test_template(unpack2, args)
def test_chain3(self):
from numba.tests.chained_assign_usecases import chain3
args = [
[np.array([0]), np.array([1.5])],
[np.array([0.5]), np.array([1])],
]
self._test_template(chain3, args)
def test_unpack3(self):
from numba.tests.chained_assign_usecases import unpack3
args = [
[np.array([1])],
[np.array([1.0])],
]
self._test_template(unpack3, args)
def test_unpack4(self):
from numba.tests.chained_assign_usecases import unpack4
args = [
[np.array([1])],
[np.array([1.0])],
]
self._test_template(unpack4, args)
def test_unpack5(self):
from numba.tests.chained_assign_usecases import unpack5
args = [
[np.array([2])],
[np.array([2.0])],
]
self._test_template(unpack5, args)
def test_unpack6(self):
from numba.tests.chained_assign_usecases import unpack6
args1 = 3.0, 2
args2 = 3.0, 2.0
self._test_template(unpack6, [args1, args2])
def _test_template(self, pyfunc, argcases):
cfunc = jit(pyfunc)
for args in argcases:
a1 = copy.deepcopy(args)
a2 = copy.deepcopy(args)
np.testing.assert_allclose(pyfunc(*a1), cfunc(*a2))
if __name__ == '__main__':
unittest.main()
| TestChainedAssign |
python | pola-rs__polars | py-polars/src/polars/functions/col.py | {
"start": 4550,
"end": 12580
} | class ____:
"""
Create Polars column expressions.
Notes
-----
An instance of this class is exported under the name `col`. It can be used as
though it were a function by calling, for example, `pl.col("foo")`.
See the :func:`__call__` method for further documentation.
This helper class enables an alternative syntax for creating a column expression
through attribute lookup. For example `col.foo` creates an expression equal to
`col("foo")`. See the :func:`__getattr__` method for further documentation.
The function call syntax is considered the idiomatic way of constructing a column
expression. The alternative attribute syntax can be useful for quick prototyping as
it can save some keystrokes, but has drawbacks in both expressiveness and
readability.
Examples
--------
>>> from polars import col
>>> df = pl.DataFrame(
... {
... "foo": [1, 2],
... "bar": [3, 4],
... }
... )
Create a new column expression using the standard syntax:
>>> df.with_columns(baz=(col("foo") * col("bar")) / 2)
shape: (2, 3)
βββββββ¬ββββββ¬ββββββ
β foo β bar β baz β
β --- β --- β --- β
β i64 β i64 β f64 β
βββββββͺββββββͺββββββ‘
β 1 β 3 β 1.5 β
β 2 β 4 β 4.0 β
βββββββ΄ββββββ΄ββββββ
Use attribute lookup to create a new column expression:
>>> df.with_columns(baz=(col.foo + col.bar))
shape: (2, 3)
βββββββ¬ββββββ¬ββββββ
β foo β bar β baz β
β --- β --- β --- β
β i64 β i64 β i64 β
βββββββͺββββββͺββββββ‘
β 1 β 3 β 4 β
β 2 β 4 β 6 β
βββββββ΄ββββββ΄ββββββ
"""
def __call__(
self,
name: (
str
| PolarsDataType
| PythonDataType
| Iterable[str]
| Iterable[PolarsDataType | PythonDataType]
),
*more_names: str | PolarsDataType | PythonDataType,
) -> Expr:
"""
Create one or more expressions representing columns in a DataFrame.
Parameters
----------
name
The name or datatype of the column(s) to represent.
Accepts regular expression input; regular expressions
should start with `^` and end with `$`.
*more_names
Additional names or datatypes of columns to represent,
specified as positional arguments.
See Also
--------
first
last
nth
Examples
--------
Pass a single column name to represent that column.
>>> df = pl.DataFrame(
... {
... "ham": [1, 2],
... "hamburger": [11, 22],
... "foo": [2, 1],
... "bar": ["a", "b"],
... }
... )
>>> df.select(pl.col("foo"))
shape: (2, 1)
βββββββ
β foo β
β --- β
β i64 β
βββββββ‘
β 2 β
β 1 β
βββββββ
Use dot syntax to save keystrokes for quick prototyping.
>>> from polars import col as c
>>> df.select(c.foo + c.ham)
shape: (2, 1)
βββββββ
β foo β
β --- β
β i64 β
βββββββ‘
β 3 β
β 3 β
βββββββ
Use the wildcard `*` to represent all columns.
>>> df.select(pl.col("*"))
shape: (2, 4)
βββββββ¬ββββββββββββ¬ββββββ¬ββββββ
β ham β hamburger β foo β bar β
β --- β --- β --- β --- β
β i64 β i64 β i64 β str β
βββββββͺββββββββββββͺββββββͺββββββ‘
β 1 β 11 β 2 β a β
β 2 β 22 β 1 β b β
βββββββ΄ββββββββββββ΄ββββββ΄ββββββ
>>> df.select(pl.col("*").exclude("ham"))
shape: (2, 3)
βββββββββββββ¬ββββββ¬ββββββ
β hamburger β foo β bar β
β --- β --- β --- β
β i64 β i64 β str β
βββββββββββββͺββββββͺββββββ‘
β 11 β 2 β a β
β 22 β 1 β b β
βββββββββββββ΄ββββββ΄ββββββ
Regular expression input is supported.
>>> df.select(pl.col("^ham.*$"))
shape: (2, 2)
βββββββ¬ββββββββββββ
β ham β hamburger β
β --- β --- β
β i64 β i64 β
βββββββͺββββββββββββ‘
β 1 β 11 β
β 2 β 22 β
βββββββ΄ββββββββββββ
Multiple columns can be represented by passing a list of names.
>>> df.select(pl.col(["hamburger", "foo"]))
shape: (2, 2)
βββββββββββββ¬ββββββ
β hamburger β foo β
β --- β --- β
β i64 β i64 β
βββββββββββββͺββββββ‘
β 11 β 2 β
β 22 β 1 β
βββββββββββββ΄ββββββ
Or use positional arguments to represent multiple columns in the same way.
>>> df.select(pl.col("hamburger", "foo"))
shape: (2, 2)
βββββββββββββ¬ββββββ
β hamburger β foo β
β --- β --- β
β i64 β i64 β
βββββββββββββͺββββββ‘
β 11 β 2 β
β 22 β 1 β
βββββββββββββ΄ββββββ
Easily select all columns that match a certain data type by passing that
datatype.
>>> df.select(pl.col(pl.String))
shape: (2, 1)
βββββββ
β bar β
β --- β
β str β
βββββββ‘
β a β
β b β
βββββββ
>>> df.select(pl.col(pl.Int64, pl.Float64))
shape: (2, 3)
βββββββ¬ββββββββββββ¬ββββββ
β ham β hamburger β foo β
β --- β --- β --- β
β i64 β i64 β i64 β
βββββββͺββββββββββββͺββββββ‘
β 1 β 11 β 2 β
β 2 β 22 β 1 β
βββββββ΄ββββββββββββ΄ββββββ
"""
return _create_col(name, *more_names)
def __getattr__(self, name: str) -> Expr:
"""
Create a column expression using attribute syntax.
Note that this syntax does not support passing data
types or multiple column names.
Parameters
----------
name
The name of the column to represent.
Examples
--------
>>> from polars import col as c
>>> df = pl.DataFrame(
... {
... "foo": [1, 2],
... "bar": [3, 4],
... }
... )
>>> df.select(c.foo + c.bar)
shape: (2, 1)
βββββββ
β foo β
β --- β
β i64 β
βββββββ‘
β 4 β
β 6 β
βββββββ
"""
# detect if "name" has been mangled by class scoping
# (this can only happen if the colname starts with a double-underscore)
if re.match(r"^_\w+__", name):
import inspect
frame = inspect.currentframe()
while frame is not None:
if (frame := frame.f_back) is not None and ( # type: ignore[union-attr]
_have_qualname or "self" in frame.f_locals
):
# if we are inside class scope confirm the col has been mangled
# with the *specific* class name associated with that scope
if object_name := _get_class_objname(frame):
if name.startswith(
mangled_prefix := f"_{object_name}"
) and isinstance(frame.f_globals.get(object_name), type):
name = name.removeprefix(mangled_prefix)
break
# help autocomplete work with IPython
with contextlib.suppress(AttributeError):
if name.startswith("__wrapped__"):
return getattr(type(self), name)
return _create_col(name)
if not sys.version_info >= (3, 11):
def __getstate__(self) -> Any:
return self.__dict__
def __setstate__(self, state: Any) -> None:
self.__dict__ = state
col: Col = Col()
| Col |
python | pytorch__pytorch | test/dynamo/test_functions.py | {
"start": 1476,
"end": 2766
} | class ____(collections.OrderedDict):
pass
clip01 = functools.partial(torch.clip, min=0.0, max=1.0)
def constant3(a, b):
return a - b + (1.0 + 2)
def call(f, *args, **kwargs):
return f(*args, **kwargs)
_variable = 0
def update_global(x):
global _variable
_variable += 1
# Check that updated global variable value is picked up
return x * _variable
def pos_only_fn(*args, **kwargs):
return _pos_only_fn(*args, **kwargs)
def _pos_only_fn(a, b=3, /, **kwargs):
return (
a * b + kwargs.get("a", -13) * kwargs.get("b", 42),
"a" in kwargs,
"b" in kwargs,
)
@contextlib.contextmanager
def update_global_ctx(x):
try:
yield update_global(x)
finally:
pass
def func_with_default(a, b, some_default_arg=True):
if some_default_arg:
return a - b
def make_test(fn=None, expected_frame_count=1):
if fn is None:
return lambda fn: make_test(fn, expected_frame_count=expected_frame_count)
nargs = len(inspect.signature(fn).parameters)
def test_fn(self):
return torch._dynamo.testing.standard_test(
self,
fn=fn,
nargs=nargs,
expected_frame_count=expected_frame_count,
)
return test_fn
| CustomDictSubclass |
python | joke2k__faker | tests/providers/test_person.py | {
"start": 38717,
"end": 40159
} | class ____(unittest.TestCase):
"""Tests person in the ig_NG (Igbo - Nigeria) locale"""
def setUp(self):
self.fake = Faker("ig_NG")
Faker.seed(0)
def test_name(self):
name = self.fake.name()
self.assertIsInstance(name, str)
name = self.fake.name_female()
self.assertIsInstance(name, str)
name = self.fake.name_male()
self.assertIsInstance(name, str)
def test_first_name(self):
name = self.fake.first_name()
self.assertIsInstance(name, str)
assert name in IgNgProvider.first_names
name = self.fake.first_name_female()
self.assertIsInstance(name, str)
assert name in IgNgProvider.first_names
assert name in IgNgProvider.first_names_female
name = self.fake.first_name_male()
self.assertIsInstance(name, str)
assert name in IgNgProvider.first_names
assert name in IgNgProvider.first_names_male
def test_last_name(self):
assert hasattr(IgNgProvider, "last_names")
name = self.fake.last_name()
self.assertIsInstance(name, str)
assert name in IgNgProvider.last_names
name = self.fake.last_name_female()
self.assertIsInstance(name, str)
assert name in IgNgProvider.last_names
name = self.fake.last_name_male()
self.assertIsInstance(name, str)
assert name in IgNgProvider.last_names
| TestIgNG |
python | apache__airflow | providers/apache/spark/tests/unit/apache/spark/decorators/test_pyspark.py | {
"start": 1641,
"end": 6030
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="pyspark_local",
conn_type="spark",
host="spark://none",
extra="",
)
)
create_connection_without_db(
Connection(
conn_id="spark-connect",
conn_type="spark",
host="sc://localhost",
extra="",
)
)
create_connection_without_db(
Connection(
conn_id="spark-connect-auth",
conn_type="spark_connect",
host="sc://localhost",
password="1234",
login="connect",
extra={
"use_ssl": True,
},
)
)
@pytest.mark.db_test
@mock.patch("pyspark.SparkConf")
@mock.patch("pyspark.sql.SparkSession")
def test_pyspark_decorator_with_connection(self, spark_mock, conf_mock, dag_maker):
config = FakeConfig()
builder = mock.MagicMock()
spark_mock.builder.config.return_value = builder
builder.getOrCreate.return_value = builder
builder.sparkContext.return_value = builder
conf_mock.return_value = config
@task.pyspark(conn_id="pyspark_local", config_kwargs={"spark.executor.memory": "2g"})
def f(spark, sc):
import random
assert spark is not None
assert sc is not None
return [random.random() for _ in range(100)]
with dag_maker():
f()
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
ti.run()
assert len(ti.xcom_pull()) == 100
assert config.get("spark.master") == "spark://none"
assert config.get("spark.executor.memory") == "2g"
assert config.get("spark.remote") is None
assert config.get("spark.app.name")
spark_mock.builder.config.assert_called_once_with(conf=conf_mock())
@pytest.mark.db_test
@mock.patch("pyspark.SparkConf")
@mock.patch("pyspark.sql.SparkSession")
def test_simple_pyspark_decorator(self, spark_mock, conf_mock, dag_maker):
config = FakeConfig()
conf_mock.return_value = config
e = 2
@task.pyspark
def f():
return e
with dag_maker():
f()
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
ti.run()
assert ti.xcom_pull() == e
assert config.get("spark.master") == "local[*]"
spark_mock.builder.config.assert_called_once_with(conf=conf_mock())
@pytest.mark.db_test
@mock.patch("pyspark.SparkConf")
@mock.patch("pyspark.sql.SparkSession")
def test_spark_connect(self, spark_mock, conf_mock, dag_maker):
config = FakeConfig()
conf_mock.return_value = config
@task.pyspark(conn_id="spark-connect")
def f(spark, sc):
assert spark is not None
assert sc is None
return True
with dag_maker():
f()
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
ti.run()
assert ti.xcom_pull()
assert config.get("spark.remote") == "sc://localhost"
assert config.get("spark.master") is None
assert config.get("spark.app.name")
spark_mock.builder.config.assert_called_once_with(conf=conf_mock())
@pytest.mark.db_test
@mock.patch("pyspark.SparkConf")
@mock.patch("pyspark.sql.SparkSession")
def test_spark_connect_auth(self, spark_mock, conf_mock, dag_maker):
config = FakeConfig()
conf_mock.return_value = config
@task.pyspark(conn_id="spark-connect-auth")
def f(spark, sc):
assert spark is not None
assert sc is None
return True
with dag_maker():
f()
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
ti.run()
assert ti.xcom_pull()
assert config.get("spark.remote") == "sc://localhost/;user_id=connect;token=1234;use_ssl=True"
assert config.get("spark.master") is None
assert config.get("spark.app.name")
| TestPysparkDecorator |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 23529,
"end": 23623
} | class ____(Interface):
"""Request type interface attached to all request objects"""
| IRequest |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_to_dict.py | {
"start": 322,
"end": 18815
} | class ____:
def test_to_dict_timestamp(self):
# GH#11247
# split/records producing np.datetime64 rather than Timestamps
# on datetime64[ns] dtypes only
tsmp = Timestamp("20130101")
test_data = DataFrame({"A": [tsmp, tsmp], "B": [tsmp, tsmp]})
test_data_mixed = DataFrame({"A": [tsmp, tsmp], "B": [1, 2]})
expected_records = [{"A": tsmp, "B": tsmp}, {"A": tsmp, "B": tsmp}]
expected_records_mixed = [{"A": tsmp, "B": 1}, {"A": tsmp, "B": 2}]
assert test_data.to_dict(orient="records") == expected_records
assert test_data_mixed.to_dict(orient="records") == expected_records_mixed
expected_series = {
"A": Series([tsmp, tsmp], name="A"),
"B": Series([tsmp, tsmp], name="B"),
}
expected_series_mixed = {
"A": Series([tsmp, tsmp], name="A"),
"B": Series([1, 2], name="B"),
}
tm.assert_dict_equal(test_data.to_dict(orient="series"), expected_series)
tm.assert_dict_equal(
test_data_mixed.to_dict(orient="series"), expected_series_mixed
)
expected_split = {
"index": [0, 1],
"data": [[tsmp, tsmp], [tsmp, tsmp]],
"columns": ["A", "B"],
}
expected_split_mixed = {
"index": [0, 1],
"data": [[tsmp, 1], [tsmp, 2]],
"columns": ["A", "B"],
}
tm.assert_dict_equal(test_data.to_dict(orient="split"), expected_split)
tm.assert_dict_equal(
test_data_mixed.to_dict(orient="split"), expected_split_mixed
)
def test_to_dict_index_not_unique_with_index_orient(self):
# GH#22801
# Data loss when indexes are not unique. Raise ValueError.
df = DataFrame({"a": [1, 2], "b": [0.5, 0.75]}, index=["A", "A"])
msg = "DataFrame index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
df.to_dict(orient="index")
def test_to_dict_invalid_orient(self):
df = DataFrame({"A": [0, 1]})
msg = "orient 'xinvalid' not understood"
with pytest.raises(ValueError, match=msg):
df.to_dict(orient="xinvalid")
@pytest.mark.parametrize("orient", ["d", "l", "r", "sp", "s", "i"])
def test_to_dict_short_orient_raises(self, orient):
# GH#32515
df = DataFrame({"A": [0, 1]})
with pytest.raises(ValueError, match="not understood"):
df.to_dict(orient=orient)
@pytest.mark.parametrize("mapping", [dict, defaultdict(list), OrderedDict])
def test_to_dict(self, mapping):
# orient= should only take the listed options
# see GH#32515
test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}}
# GH#16122
recons_data = DataFrame(test_data).to_dict(into=mapping)
for k, v in test_data.items():
for k2, v2 in v.items():
assert v2 == recons_data[k][k2]
recons_data = DataFrame(test_data).to_dict("list", into=mapping)
for k, v in test_data.items():
for k2, v2 in v.items():
assert v2 == recons_data[k][int(k2) - 1]
recons_data = DataFrame(test_data).to_dict("series", into=mapping)
for k, v in test_data.items():
for k2, v2 in v.items():
assert v2 == recons_data[k][k2]
recons_data = DataFrame(test_data).to_dict("split", into=mapping)
expected_split = {
"columns": ["A", "B"],
"index": ["1", "2", "3"],
"data": [[1.0, "1"], [2.0, "2"], [np.nan, "3"]],
}
tm.assert_dict_equal(recons_data, expected_split)
recons_data = DataFrame(test_data).to_dict("records", into=mapping)
expected_records = [
{"A": 1.0, "B": "1"},
{"A": 2.0, "B": "2"},
{"A": np.nan, "B": "3"},
]
assert isinstance(recons_data, list)
assert len(recons_data) == 3
for left, right in zip(recons_data, expected_records):
tm.assert_dict_equal(left, right)
# GH#10844
recons_data = DataFrame(test_data).to_dict("index")
for k, v in test_data.items():
for k2, v2 in v.items():
assert v2 == recons_data[k2][k]
df = DataFrame(test_data)
df["duped"] = df[df.columns[0]]
recons_data = df.to_dict("index")
comp_data = test_data.copy()
comp_data["duped"] = comp_data[df.columns[0]]
for k, v in comp_data.items():
for k2, v2 in v.items():
assert v2 == recons_data[k2][k]
@pytest.mark.parametrize("mapping", [list, defaultdict, []])
def test_to_dict_errors(self, mapping):
# GH#16122
df = DataFrame(np.random.default_rng(2).standard_normal((3, 3)))
msg = "|".join(
[
"unsupported type: <class 'list'>",
r"to_dict\(\) only accepts initialized defaultdicts",
]
)
with pytest.raises(TypeError, match=msg):
df.to_dict(into=mapping)
def test_to_dict_not_unique_warning(self):
# GH#16927: When converting to a dict, if a column has a non-unique name
# it will be dropped, throwing a warning.
df = DataFrame([[1, 2, 3]], columns=["a", "a", "b"])
with tm.assert_produces_warning(UserWarning, match="columns will be omitted"):
df.to_dict()
@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize(
"orient,expected",
[
("list", {"A": [2, 5], "B": [3, 6]}),
("dict", {"A": {0: 2, 1: 5}, "B": {0: 3, 1: 6}}),
],
)
def test_to_dict_not_unique(self, orient, expected):
# GH#54824: This is to make sure that dataframes with non-unique column
# would have uniform behavior throughout different orients
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "A", "B"])
result = df.to_dict(orient)
assert result == expected
# orient - orient argument to to_dict function
# item_getter - function for extracting value from
# the resulting dict using column name and index
@pytest.mark.parametrize(
"orient,item_getter",
[
("dict", lambda d, col, idx: d[col][idx]),
("records", lambda d, col, idx: d[idx][col]),
("list", lambda d, col, idx: d[col][idx]),
("split", lambda d, col, idx: d["data"][idx][d["columns"].index(col)]),
("index", lambda d, col, idx: d[idx][col]),
],
)
def test_to_dict_box_scalars(self, orient, item_getter):
# GH#14216, GH#23753
# make sure that we are boxing properly
df = DataFrame({"a": [1, 2], "b": [0.1, 0.2]})
result = df.to_dict(orient=orient)
assert isinstance(item_getter(result, "a", 0), int)
assert isinstance(item_getter(result, "b", 0), float)
def test_to_dict_tz(self):
# GH#18372 When converting to dict with orient='records' columns of
# datetime that are tz-aware were not converted to required arrays
data = [
(datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=timezone.utc),),
(datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=timezone.utc),),
]
df = DataFrame(list(data), columns=["d"])
result = df.to_dict(orient="records")
expected = [
{"d": Timestamp("2017-11-18 21:53:00.219225+0000", tz=timezone.utc)},
{"d": Timestamp("2017-11-18 22:06:30.061810+0000", tz=timezone.utc)},
]
tm.assert_dict_equal(result[0], expected[0])
tm.assert_dict_equal(result[1], expected[1])
@pytest.mark.parametrize(
"into, expected",
[
(
dict,
{
0: {"int_col": 1, "float_col": 1.0},
1: {"int_col": 2, "float_col": 2.0},
2: {"int_col": 3, "float_col": 3.0},
},
),
(
OrderedDict,
OrderedDict(
[
(0, {"int_col": 1, "float_col": 1.0}),
(1, {"int_col": 2, "float_col": 2.0}),
(2, {"int_col": 3, "float_col": 3.0}),
]
),
),
(
defaultdict(dict),
defaultdict(
dict,
{
0: {"int_col": 1, "float_col": 1.0},
1: {"int_col": 2, "float_col": 2.0},
2: {"int_col": 3, "float_col": 3.0},
},
),
),
],
)
def test_to_dict_index_dtypes(self, into, expected):
# GH#18580
# When using to_dict(orient='index') on a dataframe with int
# and float columns only the int columns were cast to float
df = DataFrame({"int_col": [1, 2, 3], "float_col": [1.0, 2.0, 3.0]})
result = df.to_dict(orient="index", into=into)
cols = ["int_col", "float_col"]
result = DataFrame.from_dict(result, orient="index")[cols]
expected = DataFrame.from_dict(expected, orient="index")[cols]
tm.assert_frame_equal(result, expected)
def test_to_dict_numeric_names(self):
# GH#24940
df = DataFrame({str(i): [i] for i in range(5)})
result = set(df.to_dict("records")[0].keys())
expected = set(df.columns)
assert result == expected
def test_to_dict_wide(self):
# GH#24939
df = DataFrame({(f"A_{i:d}"): [i] for i in range(256)})
result = df.to_dict("records")[0]
expected = {f"A_{i:d}": i for i in range(256)}
assert result == expected
@pytest.mark.parametrize(
"data,dtype",
(
([True, True, False], bool),
[
[
datetime(2018, 1, 1),
datetime(2019, 2, 2),
datetime(2020, 3, 3),
],
Timestamp,
],
[[1.0, 2.0, 3.0], float],
[[1, 2, 3], int],
[["X", "Y", "Z"], str],
),
)
def test_to_dict_orient_dtype(self, data, dtype):
# GH22620 & GH21256
df = DataFrame({"a": data})
d = df.to_dict(orient="records")
assert all(type(record["a"]) is dtype for record in d)
@pytest.mark.parametrize(
"data,expected_dtype",
(
[np.uint64(2), int],
[np.int64(-9), int],
[np.float64(1.1), float],
[np.bool_(True), bool],
[np.datetime64("2005-02-25"), Timestamp],
),
)
def test_to_dict_scalar_constructor_orient_dtype(self, data, expected_dtype):
# GH22620 & GH21256
df = DataFrame({"a": data}, index=[0])
d = df.to_dict(orient="records")
result = type(d[0]["a"])
assert result is expected_dtype
def test_to_dict_mixed_numeric_frame(self):
# GH 12859
df = DataFrame({"a": [1.0], "b": [9.0]})
result = df.reset_index().to_dict("records")
expected = [{"index": 0, "a": 1.0, "b": 9.0}]
assert result == expected
@pytest.mark.parametrize(
"index",
[
None,
Index(["aa", "bb"]),
Index(["aa", "bb"], name="cc"),
MultiIndex.from_tuples([("a", "b"), ("a", "c")]),
MultiIndex.from_tuples([("a", "b"), ("a", "c")], names=["n1", "n2"]),
],
)
@pytest.mark.parametrize(
"columns",
[
["x", "y"],
Index(["x", "y"]),
Index(["x", "y"], name="z"),
MultiIndex.from_tuples([("x", 1), ("y", 2)]),
MultiIndex.from_tuples([("x", 1), ("y", 2)], names=["z1", "z2"]),
],
)
def test_to_dict_orient_tight(self, index, columns):
df = DataFrame.from_records(
[[1, 3], [2, 4]],
columns=columns,
index=index,
)
roundtrip = DataFrame.from_dict(df.to_dict(orient="tight"), orient="tight")
tm.assert_frame_equal(df, roundtrip)
@pytest.mark.parametrize(
"orient",
["dict", "list", "split", "records", "index", "tight"],
)
@pytest.mark.parametrize(
"data,expected_types",
(
(
{
"a": [np.int64(1), 1, np.int64(3)],
"b": [np.float64(1.0), 2.0, np.float64(3.0)],
"c": [np.float64(1.0), 2, np.int64(3)],
"d": [np.float64(1.0), "a", np.int64(3)],
"e": [np.float64(1.0), ["a"], np.int64(3)],
"f": [np.float64(1.0), ("a",), np.int64(3)],
},
{
"a": [int, int, int],
"b": [float, float, float],
"c": [float, float, float],
"d": [float, str, int],
"e": [float, list, int],
"f": [float, tuple, int],
},
),
(
{
"a": [1, 2, 3],
"b": [1.1, 2.2, 3.3],
},
{
"a": [int, int, int],
"b": [float, float, float],
},
),
( # Make sure we have one df which is all object type cols
{
"a": [1, "hello", 3],
"b": [1.1, "world", 3.3],
},
{
"a": [int, str, int],
"b": [float, str, float],
},
),
),
)
def test_to_dict_returns_native_types(self, orient, data, expected_types):
# GH 46751
# Tests we get back native types for all orient types
df = DataFrame(data)
result = df.to_dict(orient)
if orient == "dict":
assertion_iterator = (
(i, key, value)
for key, index_value_map in result.items()
for i, value in index_value_map.items()
)
elif orient == "list":
assertion_iterator = (
(i, key, value)
for key, values in result.items()
for i, value in enumerate(values)
)
elif orient in {"split", "tight"}:
assertion_iterator = (
(i, key, result["data"][i][j])
for i in result["index"]
for j, key in enumerate(result["columns"])
)
elif orient == "records":
assertion_iterator = (
(i, key, value)
for i, record in enumerate(result)
for key, value in record.items()
)
elif orient == "index":
assertion_iterator = (
(i, key, value)
for i, record in result.items()
for key, value in record.items()
)
for i, key, value in assertion_iterator:
assert value == data[key][i]
assert type(value) is expected_types[key][i]
@pytest.mark.parametrize("orient", ["dict", "list", "series", "records", "index"])
def test_to_dict_index_false_error(self, orient):
# GH#46398
df = DataFrame({"col1": [1, 2], "col2": [3, 4]}, index=["row1", "row2"])
msg = "'index=False' is only valid when 'orient' is 'split' or 'tight'"
with pytest.raises(ValueError, match=msg):
df.to_dict(orient=orient, index=False)
@pytest.mark.parametrize(
"orient, expected",
[
("split", {"columns": ["col1", "col2"], "data": [[1, 3], [2, 4]]}),
(
"tight",
{
"columns": ["col1", "col2"],
"data": [[1, 3], [2, 4]],
"column_names": [None],
},
),
],
)
def test_to_dict_index_false(self, orient, expected):
# GH#46398
df = DataFrame({"col1": [1, 2], "col2": [3, 4]}, index=["row1", "row2"])
result = df.to_dict(orient=orient, index=False)
tm.assert_dict_equal(result, expected)
@pytest.mark.parametrize(
"orient, expected",
[
("dict", {"a": {0: 1, 1: None}}),
("list", {"a": [1, None]}),
("split", {"index": [0, 1], "columns": ["a"], "data": [[1], [None]]}),
(
"tight",
{
"index": [0, 1],
"columns": ["a"],
"data": [[1], [None]],
"index_names": [None],
"column_names": [None],
},
),
("records", [{"a": 1}, {"a": None}]),
("index", {0: {"a": 1}, 1: {"a": None}}),
],
)
def test_to_dict_na_to_none(self, orient, expected):
# GH#50795
df = DataFrame({"a": [1, NA]}, dtype="Int64")
result = df.to_dict(orient=orient)
assert result == expected
def test_to_dict_masked_native_python(self):
# GH#34665
df = DataFrame({"a": Series([1, 2], dtype="Int64"), "B": 1})
result = df.to_dict(orient="records")
assert isinstance(result[0]["a"], int)
df = DataFrame({"a": Series([1, NA], dtype="Int64"), "B": 1})
result = df.to_dict(orient="records")
assert isinstance(result[0]["a"], int)
def test_to_dict_tight_no_warning_with_duplicate_column(self):
# GH#58281
df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "A"])
with tm.assert_produces_warning(None):
result = df.to_dict(orient="tight")
expected = {
"index": [0, 1, 2],
"columns": ["A", "A"],
"data": [[1, 2], [3, 4], [5, 6]],
"index_names": [None],
"column_names": [None],
}
assert result == expected
@pytest.mark.parametrize(
"val", [Timestamp(2020, 1, 1), Timedelta(1), Period("2020"), Interval(1, 2)]
)
def test_to_dict_list_pd_scalars(val):
# GH 54824
df = DataFrame({"a": [val]})
result = df.to_dict(orient="list")
expected = {"a": [val]}
assert result == expected
| TestDataFrameToDict |
python | apache__airflow | airflow-core/src/airflow/utils/context.py | {
"start": 3644,
"end": 5324
} | class ____(OutletEventAccessorsSDK):
"""
Lazy mapping of outlet asset event accessors.
:meta private:
"""
@staticmethod
def _get_asset_from_db(name: str | None = None, uri: str | None = None) -> Asset:
if name:
with create_session() as session:
asset = session.scalar(
select(AssetModel).where(AssetModel.name == name, AssetModel.active.has())
)
elif uri:
with create_session() as session:
asset = session.scalar(
select(AssetModel).where(AssetModel.uri == uri, AssetModel.active.has())
)
else:
raise ValueError("Either name or uri must be provided")
if asset is None:
raise ValueError("No active asset found with either name or uri.")
return asset.to_public()
def context_merge(context: Context, *args: Any, **kwargs: Any) -> None:
"""
Merge parameters into an existing context.
Like ``dict.update()`` , this take the same parameters, and updates
``context`` in-place.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private:
"""
if not context:
context = Context()
context.update(*args, **kwargs)
def context_copy_partial(source: Context, keys: Container[str]) -> Context:
"""
Create a context by copying items under selected keys in ``source``.
:meta private:
"""
new = {k: v for k, v in source.items() if k in keys}
return cast("Context", new)
| OutletEventAccessors |
python | modin-project__modin | modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/virtual_partition.py | {
"start": 1273,
"end": 8937
} | class ____(PandasDataframeAxisPartition):
"""
The class implements the interface in ``PandasDataframeAxisPartition``.
Parameters
----------
list_of_partitions : Union[list, PandasOnUnidistDataframePartition]
List of ``PandasOnUnidistDataframePartition`` and
``PandasOnUnidistDataframeVirtualPartition`` objects, or a single
``PandasOnUnidistDataframePartition``.
get_ip : bool, default: False
Whether to get node IP addresses to conforming partitions or not.
full_axis : bool, default: True
Whether or not the virtual partition encompasses the whole axis.
call_queue : list, optional
A list of tuples (callable, args, kwargs) that contains deferred calls.
length : unidist.ObjectRef or int, optional
Length, or reference to length, of wrapped ``pandas.DataFrame``.
width : unidist.ObjectRef or int, optional
Width, or reference to width, of wrapped ``pandas.DataFrame``.
"""
_PARTITIONS_METADATA_LEN = 3 # (length, width, ip)
partition_type = PandasOnUnidistDataframePartition
axis = None
# these variables are intentionally initialized at runtime (see #6023)
_DEPLOY_AXIS_FUNC = None
_DEPLOY_SPLIT_FUNC = None
_DRAIN_FUNC = None
@classmethod
def _get_deploy_axis_func(cls): # noqa: GL08
if cls._DEPLOY_AXIS_FUNC is None:
cls._DEPLOY_AXIS_FUNC = UnidistWrapper.put(
PandasDataframeAxisPartition.deploy_axis_func
)
return cls._DEPLOY_AXIS_FUNC
@classmethod
def _get_deploy_split_func(cls): # noqa: GL08
if cls._DEPLOY_SPLIT_FUNC is None:
cls._DEPLOY_SPLIT_FUNC = UnidistWrapper.put(
PandasDataframeAxisPartition.deploy_splitting_func
)
return cls._DEPLOY_SPLIT_FUNC
@classmethod
def _get_drain_func(cls): # noqa: GL08
if cls._DRAIN_FUNC is None:
cls._DRAIN_FUNC = UnidistWrapper.put(PandasDataframeAxisPartition.drain)
return cls._DRAIN_FUNC
@property
def list_of_ips(self):
"""
Get the IPs holding the physical objects composing this partition.
Returns
-------
List
A list of IPs as ``unidist.ObjectRef`` or str.
"""
# Defer draining call queue until we get the ip address
result = [None] * len(self.list_of_block_partitions)
for idx, partition in enumerate(self.list_of_block_partitions):
partition.drain_call_queue()
result[idx] = partition.ip(materialize=False)
return result
@classmethod
@_inherit_docstrings(PandasDataframeAxisPartition.deploy_splitting_func)
def deploy_splitting_func(
cls,
axis,
func,
f_args,
f_kwargs,
num_splits,
*partitions,
extract_metadata=False,
):
return _deploy_unidist_func.options(
num_returns=(
num_splits * (1 + cls._PARTITIONS_METADATA_LEN)
if extract_metadata
else num_splits
),
).remote(
cls._get_deploy_split_func(),
axis,
func,
f_args,
f_kwargs,
num_splits,
*partitions,
extract_metadata=extract_metadata,
)
@classmethod
def deploy_axis_func(
cls,
axis,
func,
f_args,
f_kwargs,
num_splits,
maintain_partitioning,
*partitions,
min_block_size,
lengths=None,
manual_partition=False,
max_retries=None,
):
"""
Deploy a function along a full axis.
Parameters
----------
axis : {0, 1}
The axis to perform the function along.
func : callable
The function to perform.
f_args : list or tuple
Positional arguments to pass to ``func``.
f_kwargs : dict
Keyword arguments to pass to ``func``.
num_splits : int
The number of splits to return (see ``split_result_of_axis_func_pandas``).
maintain_partitioning : bool
If True, keep the old partitioning if possible.
If False, create a new partition layout.
*partitions : iterable
All partitions that make up the full axis (row or column).
min_block_size : int
Minimum number of rows/columns in a single split.
lengths : list, optional
The list of lengths to shuffle the object.
manual_partition : bool, default: False
If True, partition the result with `lengths`.
max_retries : int, default: None
The max number of times to retry the func.
Returns
-------
list
A list of ``unidist.ObjectRef``-s.
"""
return _deploy_unidist_func.options(
num_returns=(num_splits if lengths is None else len(lengths))
* (1 + cls._PARTITIONS_METADATA_LEN),
**({"max_retries": max_retries} if max_retries is not None else {}),
).remote(
cls._get_deploy_axis_func(),
axis,
func,
f_args,
f_kwargs,
num_splits,
maintain_partitioning,
*partitions,
manual_partition=manual_partition,
min_block_size=min_block_size,
lengths=lengths,
)
@classmethod
def deploy_func_between_two_axis_partitions(
cls,
axis,
func,
f_args,
f_kwargs,
num_splits,
len_of_left,
other_shape,
*partitions,
min_block_size,
):
"""
Deploy a function along a full axis between two data sets.
Parameters
----------
axis : {0, 1}
The axis to perform the function along.
func : callable
The function to perform.
f_args : list or tuple
Positional arguments to pass to ``func``.
f_kwargs : dict
Keyword arguments to pass to ``func``.
num_splits : int
The number of splits to return (see ``split_result_of_axis_func_pandas``).
len_of_left : int
The number of values in `partitions` that belong to the left data set.
other_shape : np.ndarray
The shape of right frame in terms of partitions, i.e.
(other_shape[i-1], other_shape[i]) will indicate slice to restore i-1 axis partition.
*partitions : iterable
All partitions that make up the full axis (row or column) for both data sets.
min_block_size : int
Minimum number of rows/columns in a single split.
Returns
-------
list
A list of ``unidist.ObjectRef``-s.
"""
return _deploy_unidist_func.options(
num_returns=num_splits * (1 + cls._PARTITIONS_METADATA_LEN)
).remote(
PandasDataframeAxisPartition.deploy_func_between_two_axis_partitions,
axis,
func,
f_args,
f_kwargs,
num_splits,
len_of_left,
other_shape,
*partitions,
min_block_size=min_block_size,
)
def wait(self):
"""Wait completing computations on the object wrapped by the partition."""
self.drain_call_queue()
futures = self.list_of_blocks
UnidistWrapper.wait(futures)
@_inherit_docstrings(PandasOnUnidistDataframeVirtualPartition)
| PandasOnUnidistDataframeVirtualPartition |
python | google__jax | tests/pallas/gpu_pallas_distributed_test.py | {
"start": 1877,
"end": 10739
} | class ____(TestCase):
def test_remote_dma_basic(self):
if jax.process_index() > 2:
return # Only 2 processes needed.
def kernel(x_ref, y_ref, ready_sem, recv_sem):
other_dev_id = 1 - lax.axis_index('x')
y_ref[...] = x_ref[...]
pl.semaphore_signal(ready_sem, device_id=other_dev_id)
pl.semaphore_wait(ready_sem)
neighbor_ptr = plgpu.remote_ref(y_ref, other_dev_id)
neighbor_ptr[...] = x_ref[...]
pl.semaphore_signal(recv_sem, device_id=other_dev_id)
pl.semaphore_wait(recv_sem)
x = jnp.arange(2 * 8 * 128.0, dtype=jnp.float32).reshape((2 * 8, 128))
def body(x):
return pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=plgpu.GMEM)],
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
scratch_shapes=[
plgpu.SemaphoreType.REGULAR,
plgpu.SemaphoreType.REGULAR,
],
)(x)
devices = jax.devices()[:2]
mesh = jax.sharding.Mesh(devices, ['x'])
y = jax.jit(
jax.shard_map(
body, mesh=mesh, in_specs=P('x'), out_specs=P('x'), check_vma=False,
)
)(x)
expected = x[8:] if jax.process_index() == 0 else x[:8]
np.testing.assert_allclose(y.addressable_shards[0].data, expected)
@parameterized.parameters(('x',), ('y',))
def test_remote_dma_2d_mesh(self, axis):
if jax.process_count() < 4:
self.skipTest('Test requires at least 4 devices (and processes).')
if jax.process_index() > 4:
return # Only 4 processes needed.
def kernel(x_ref, y_ref, recv_sem):
other_dev_id = {axis: 1 - lax.axis_index(axis)}
other_y_ref = plgpu.remote_ref(y_ref, other_dev_id)
other_y_ref[...] = x_ref[...]
pl.semaphore_signal(recv_sem, device_id=other_dev_id)
pl.semaphore_wait(recv_sem)
x = jnp.arange(2 * 8 * 128.0, dtype=jnp.float32).reshape((2 * 8, 128))
def body(x):
return pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=plgpu.GMEM)],
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
scratch_shapes=[plgpu.SemaphoreType.REGULAR],
)(x)
devices = jax.devices()[:4]
mesh = jax.sharding.Mesh(np.asarray(devices).reshape(2, 2), ['x', 'y'])
y = jax.jit(
jax.shard_map(
body, mesh=mesh, in_specs=P(axis), out_specs=P(axis), check_vma=False,
)
)(x)
expected = x[8:] if jax.process_index() == 0 else x[:8]
np.testing.assert_allclose(y.addressable_shards[0].data, expected)
def test_wait_twice(self):
if jax.process_index() > 2:
return # Only 2 processes needed.
def kernel(y_ref, sem):
other_dev_id = 1 - lax.axis_index('x')
pl.semaphore_signal(sem, 2, device_id=other_dev_id)
pl.semaphore_wait(sem)
pl.semaphore_wait(sem)
y_ref[...] = jnp.ones_like(y_ref)
kernel_call = pl.pallas_call(
kernel,
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
scratch_shapes=[plgpu.SemaphoreType.REGULAR],
)
devices = jax.devices()[:2]
mesh = jax.sharding.Mesh(devices, ['x'])
y = jax.jit(
jax.shard_map(
kernel_call, mesh=mesh, in_specs=(), out_specs=P(None), check_vma=False,
)
)()
np.testing.assert_allclose(y, jnp.ones_like(y))
def test_wait_nodec(self):
if jax.process_index() > 2:
return # Only 2 processes needed.
def kernel(y_ref, sem):
other_dev_id = 1 - lax.axis_index('x')
pl.semaphore_signal(sem, 2, device_id=other_dev_id)
pl.semaphore_wait(sem, decrement=False)
pl.semaphore_wait(sem, 2, decrement=False)
pl.semaphore_wait(sem, 2)
y_ref[...] = jnp.ones_like(y_ref)
kernel_call = pl.pallas_call(
kernel,
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
scratch_shapes=[plgpu.SemaphoreType.REGULAR],
)
devices = jax.devices()[:2]
mesh = jax.sharding.Mesh(devices, ['x'])
y = jax.jit(
jax.shard_map(
kernel_call, mesh=mesh, in_specs=(), out_specs=P(None), check_vma=False,
)
)()
np.testing.assert_allclose(y, jnp.ones_like(y))
def test_signal_parallel(self):
if jax.process_index() > 2:
return # Only 2 processes needed.
def kernel(y_ref, sem, sem2):
other_dev_id = 1 - lax.axis_index('x')
plgpu.semaphore_signal_parallel(
plgpu.SemaphoreSignal(sem, device_id=other_dev_id),
plgpu.SemaphoreSignal(sem2, device_id=other_dev_id),
)
pl.semaphore_wait(sem)
pl.semaphore_wait(sem2)
y_ref[...] = jnp.ones_like(y_ref)
kernel_call = pl.pallas_call(
kernel,
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
scratch_shapes=[plgpu.SemaphoreType.REGULAR] * 2,
)
devices = jax.devices()[:2]
mesh = jax.sharding.Mesh(devices, ['x'])
y = jax.jit(
jax.shard_map(
kernel_call, mesh=mesh, in_specs=(), out_specs=P(None), check_vma=False,
)
)()
np.testing.assert_allclose(y, jnp.ones_like(y))
def test_semaphore_signal_collective_axes(self):
if jax.process_index() > 2:
return # Only 2 processes needed.
def kernel(y_ref, sem):
plgpu.semaphore_signal_multicast(sem, collective_axes='x')
# Wait for the multicast signal (each device gets signaled by all devices)
pl.semaphore_wait(sem, 2) # Wait for signals from both devices
y_ref[...] = jnp.ones_like(y_ref)
kernel_call = pl.pallas_call(
kernel,
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
scratch_shapes=[plgpu.SemaphoreType.REGULAR],
)
devices = jax.devices()[:2]
mesh = jax.sharding.Mesh(devices, ['x'])
y = jax.jit(
jax.shard_map(
kernel_call, mesh=mesh, in_specs=(), out_specs=P(None), check_vma=False,
)
)()
np.testing.assert_allclose(y, jnp.ones_like(y))
def test_permuted_mesh(self):
def kernel(y_ref, sem):
other_dev_id = 1 - lax.axis_index('x')
pl.semaphore_signal(sem, 1, device_id=other_dev_id)
pl.semaphore_wait(sem)
kernel_call = pl.pallas_call(
kernel,
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
scratch_shapes=[plgpu.SemaphoreType.REGULAR],
)
mesh = jax.sharding.Mesh(jax.devices()[::-1], ['x']) # Reverse the devices.
f = jax.jit(
jax.shard_map(
kernel_call, mesh=mesh, in_specs=(), out_specs=P(None), check_vma=False,
)
)
msg = (
'Mosaic GPU only supports meshes with device ordering that follows'
' row-major device ids.'
)
with self.assertRaisesRegex(NotImplementedError, msg):
f()
@parameterized.parameters(False, True)
def test_copy_tma(self, use_dict):
if jax.process_index() > 2:
return # Only 2 processes needed.
def kernel(y_ref, smem_ref, sem):
dev_id = lax.axis_index("y")
other_dev_id = 1 - dev_id
if use_dict:
ids = lambda x, y: dict(x=x, y=y)
else:
ids = lambda x, y: (x, y)
# Device ID must be an int32.
zero = jnp.int32(0)
@pl.when(dev_id == zero)
def _store():
output = plgpu.layout_cast(lax.broadcasted_iota(jnp.int32, (128, 128), 1), plgpu.Layout.WGMMA)
smem_ref[...] = output
plgpu.copy_smem_to_gmem(smem_ref, plgpu.remote_ref(y_ref, ids(zero, dev_id)))
plgpu.copy_smem_to_gmem(smem_ref, plgpu.remote_ref(y_ref, ids(zero, other_dev_id)))
plgpu.wait_smem_to_gmem(0)
pl.semaphore_signal(sem, 1, device_id=ids(zero, other_dev_id))
pl.semaphore_wait(sem)
transforms = (plgpu.TilingTransform((8, 32)), plgpu.SwizzleTransform(128))
kernel_call = pl.pallas_call(
kernel,
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
out_shape=jax.ShapeDtypeStruct((128, 128), jnp.int32),
scratch_shapes=[
plgpu.SMEM((128, 128), jnp.int32, transforms=transforms),
plgpu.SemaphoreType.REGULAR,
],
)
mesh = jtu.create_mesh((1, 2), ("x", "y"))
y = jax.jit(
jax.shard_map(
kernel_call, mesh=mesh, in_specs=(), out_specs=P("y"), check_vma=False,
)
)()
y = multihost_utils.process_allgather(y, tiled=True)
ref = lax.broadcasted_iota(jnp.int32, (128, 128), 1)
np.testing.assert_array_equal(y, np.concat([ref, ref], axis=0))
| PallasCallRemoteDMATest |
python | openai__openai-python | src/openai/types/beta/threads/text.py | {
"start": 203,
"end": 319
} | class ____(BaseModel):
annotations: List[Annotation]
value: str
"""The data that makes up the text."""
| Text |
python | cherrypy__cherrypy | cherrypy/test/modfcgid.py | {
"start": 2756,
"end": 4582
} | class ____(helper.LocalSupervisor):
"""Server Controller for ModFCGI and CherryPy."""
using_apache = True
using_wsgi = True
template = conf_fcgid
def __str__(self):
"""Render a :class:`ModFCGISupervisor` instance as a string."""
return 'FCGI Server on %s:%s' % (self.host, self.port)
def start(self, modulename):
"""Spawn an Apache ``mod_fcgid`` supervisor process."""
cherrypy.server.httpserver = servers.FlupFCGIServer(
application=cherrypy.tree,
bindAddress=('127.0.0.1', 4000),
)
cherrypy.server.httpserver.bind_addr = ('127.0.0.1', 4000)
# For FCGI, we both start apache...
self.start_apache()
# ...and our local server
helper.LocalServer.start(self, modulename)
def start_apache(self):
"""Start Apache instance."""
fcgiconf = CONF_PATH
if not os.path.isabs(fcgiconf):
fcgiconf = os.path.join(curdir, fcgiconf)
# Write the Apache conf file.
with open(fcgiconf, 'wb') as f:
server = repr(os.path.join(curdir, 'fastcgi.pyc'))[1:-1]
output = self.template % {
'port': self.port,
'root': curdir,
'server': server,
}
output = ntob(output.replace('\r\n', '\n'))
f.write(output)
result = read_process(APACHE_PATH, '-k start -f %s' % fcgiconf)
if result:
print(result)
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
read_process(APACHE_PATH, '-k stop')
helper.LocalServer.stop(self)
def sync_apps(self):
"""Set up the FCGID request handler."""
cherrypy.server.httpserver.fcgiserver.application = self.get_app()
| ModFCGISupervisor |
python | celery__celery | celery/utils/objects.py | {
"start": 3003,
"end": 4215
} | class ____:
"""Attribute -> dict key descriptor.
The target object must support ``__getitem__``,
and optionally ``__setitem__``.
Example:
>>> from collections import defaultdict
>>> class Me(dict):
... deep = defaultdict(dict)
...
... foo = _getitem_property('foo')
... deep_thing = _getitem_property('deep.thing')
>>> me = Me()
>>> me.foo
None
>>> me.foo = 10
>>> me.foo
10
>>> me['foo']
10
>>> me.deep_thing = 42
>>> me.deep_thing
42
>>> me.deep
defaultdict(<type 'dict'>, {'thing': 42})
"""
def __init__(self, keypath, doc=None):
path, _, self.key = keypath.rpartition('.')
self.path = path.split('.') if path else None
self.__doc__ = doc
def _path(self, obj):
return (reduce(lambda d, k: d[k], [obj] + self.path) if self.path
else obj)
def __get__(self, obj, type=None):
if obj is None:
return type
return self._path(obj).get(self.key)
def __set__(self, obj, value):
self._path(obj)[self.key] = value
| getitem_property |
python | openai__openai-python | src/openai/types/beta/assistant.py | {
"start": 1074,
"end": 1235
} | class ____(BaseModel):
code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
file_search: Optional[ToolResourcesFileSearch] = None
| ToolResources |
python | spyder-ide__spyder | spyder/widgets/dock.py | {
"start": 7764,
"end": 10899
} | class ____(QDockWidget):
"""Subclass to override needed methods"""
# Attributes
ALLOWED_AREAS = Qt.AllDockWidgetAreas
LOCATION = Qt.LeftDockWidgetArea
FEATURES = QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetMovable
# Signals
sig_plugin_closed = Signal()
sig_title_bar_shown = Signal(bool)
def __init__(self, title, parent):
super().__init__(title, parent)
# Attributes
self.title = title
self.is_shown = False
# Set features
self.setFeatures(self.FEATURES)
# Widgets
self.main = parent
self.empty_titlebar = QWidget(self)
self.titlebar = DockTitleBar(self)
self.dock_tabbar = None # Needed for event filter
# Layout
# Prevent message on internal console
# See: https://bugreports.qt.io/browse/QTBUG-42986
layout = QHBoxLayout(self.empty_titlebar)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
self.empty_titlebar.setLayout(layout)
self.empty_titlebar.setMinimumSize(0, 0)
self.empty_titlebar.setMaximumSize(0, 0)
# Setup
self.set_title_bar()
self.remove_title_bar()
# Signals
# This installs the tab filter when dockwidgets are undocked and then
# docked to a new location.
self.dockLocationChanged.connect(
lambda area: self.install_tab_event_filter()
)
def closeEvent(self, event):
"""Send a signal on close so that the "Panes" menu can be updated."""
self.sig_plugin_closed.emit()
def install_tab_event_filter(self):
"""
Install an event filter to capture mouse events in the tabs of a
QTabBar holding tabified dockwidgets.
"""
# Avoid to run this before the dockwidget is visible
if not self.is_shown:
return
dock_tabbar = None
# This is necessary to catch an error when closing the app on macOS
# with PyQt 5.15
try:
tabbars = self.main.findChildren(QTabBar)
except RuntimeError:
tabbars = []
for tabbar in tabbars:
for tab in range(tabbar.count()):
title = tabbar.tabText(tab)
if title == self.title:
dock_tabbar = tabbar
break
if dock_tabbar is not None:
self.dock_tabbar = dock_tabbar
# Install filter only once per QTabBar
if getattr(self.dock_tabbar, 'filter', None) is None:
self.dock_tabbar.filter = TabFilter(self.dock_tabbar,
self.main)
self.dock_tabbar.installEventFilter(self.dock_tabbar.filter)
def remove_title_bar(self):
"""Set empty qwidget on title bar."""
self.sig_title_bar_shown.emit(False)
self.setTitleBarWidget(self.empty_titlebar)
def set_title_bar(self):
"""Set custom title bar."""
self.sig_title_bar_shown.emit(True)
self.setTitleBarWidget(self.titlebar)
| SpyderDockWidget |
python | PyCQA__pylint | tests/regrtest_data/dummy_plugin/dummy_plugin.py | {
"start": 84,
"end": 422
} | class ____(BaseChecker):
name = 'dummy_plugin'
msgs = {'I9061': ('Dummy short desc 01', 'dummy-message-01', 'Dummy long desc')}
options = (
('dummy_option_1', {
'type': 'string',
'metavar': '<string>',
'help': 'Dummy option 1',
'default': ''
}),
)
| DummyPlugin1 |
python | jd__tenacity | tenacity/__init__.py | {
"start": 17141,
"end": 17871
} | class ____(FutureGenericT):
"""Encapsulates a (future or past) attempted call to a target function."""
def __init__(self, attempt_number: int) -> None:
super().__init__()
self.attempt_number = attempt_number
@property
def failed(self) -> bool:
"""Return whether a exception is being held in this future."""
return self.exception() is not None
@classmethod
def construct(
cls, attempt_number: int, value: t.Any, has_exception: bool
) -> "Future":
"""Construct a new Future object."""
fut = cls(attempt_number)
if has_exception:
fut.set_exception(value)
else:
fut.set_result(value)
return fut
| Future |
python | astropy__astropy | astropy/modeling/functional_models.py | {
"start": 96591,
"end": 99430
} | class ____(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the model")
x_0 = Parameter(default=0, description="X position of maximum of Moffat model")
gamma = Parameter(default=1, description="Core width of Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach
<https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function."""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters."""
fac = 1 + (x - x_0) ** 2 / gamma**2
d_A = fac ** (-alpha)
d_x_0 = 2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma**2)
d_gamma = 2 * amplitude * alpha * (x - x_0) ** 2 * d_A / (fac * gamma**3)
d_alpha = -amplitude * d_A * np.log(fac)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"gamma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
| Moffat1D |
python | ray-project__ray | python/ray/_private/runtime_env/working_dir.py | {
"start": 4366,
"end": 8719
} | class ____(RuntimeEnvPlugin):
name = "working_dir"
# Note working_dir is not following the priority order of other plugins. Instead
# it's specially treated to happen before all other plugins.
priority = 5
def __init__(self, resources_dir: str, gcs_client: GcsClient):
self._resources_dir = os.path.join(resources_dir, "working_dir_files")
self._gcs_client = gcs_client
try_to_create_directory(self._resources_dir)
def delete_uri(
self, uri: str, logger: Optional[logging.Logger] = default_logger
) -> int:
"""Delete URI and return the number of bytes deleted."""
logger.info("Got request to delete working dir URI %s", uri)
local_dir = get_local_dir_from_uri(uri, self._resources_dir)
local_dir_size = get_directory_size_bytes(local_dir)
deleted = delete_package(uri, self._resources_dir)
if not deleted:
logger.warning(f"Tried to delete nonexistent URI: {uri}.")
return 0
return local_dir_size
def get_uris(self, runtime_env: "RuntimeEnv") -> List[str]: # noqa: F821
working_dir_uri = runtime_env.working_dir()
if working_dir_uri != "":
return [working_dir_uri]
return []
async def create(
self,
uri: Optional[str],
runtime_env: dict,
context: RuntimeEnvContext,
logger: logging.Logger = default_logger,
) -> int:
local_dir = await download_and_unpack_package(
uri,
self._resources_dir,
self._gcs_client,
logger=logger,
overwrite=True,
)
return get_directory_size_bytes(local_dir)
def modify_context(
self,
uris: List[str],
runtime_env_dict: Dict,
context: RuntimeEnvContext,
logger: Optional[logging.Logger] = default_logger,
):
if not uris:
return
# WorkingDirPlugin uses a single URI.
uri = uris[0]
local_dir = get_local_dir_from_uri(uri, self._resources_dir)
if not local_dir.exists():
raise ValueError(
f"Local directory {local_dir} for URI {uri} does "
"not exist on the cluster. Something may have gone wrong while "
"downloading or unpacking the working_dir."
)
if not _WIN32:
context.command_prefix += ["cd", str(local_dir), "&&"]
else:
# Include '/d' incase temp folder is on different drive than Ray install.
context.command_prefix += ["cd", "/d", f"{local_dir}", "&&"]
set_pythonpath_in_context(python_path=str(local_dir), context=context)
@contextmanager
def with_working_dir_env(self, uri):
"""
If uri is not None, add the local working directory to the environment variable
as "RAY_RUNTIME_ENV_CREATE_WORKING_DIR". This is useful for other plugins to
create their environment with reference to the working directory. For example
`pip -r ${RAY_RUNTIME_ENV_CREATE_WORKING_DIR}/requirements.txt`
The environment variable is removed after the context manager exits.
"""
if uri is None:
yield
else:
local_dir = get_local_dir_from_uri(uri, self._resources_dir)
if not local_dir.exists():
raise ValueError(
f"Local directory {local_dir} for URI {uri} does "
"not exist on the cluster. Something may have gone wrong while "
"downloading or unpacking the working_dir."
)
key = ray_constants.RAY_RUNTIME_ENV_CREATE_WORKING_DIR_ENV_VAR
prev = os.environ.get(key)
# Windows backslash paths are weird. When it's passed to the env var, and
# when Pip expands it, the backslashes are interpreted as escape characters
# and messes up the whole path. So we convert it to forward slashes.
# This works at least for all Python applications, including pip.
os.environ[key] = local_dir.as_posix()
try:
yield
finally:
if prev is None:
del os.environ[key]
else:
os.environ[key] = prev
| WorkingDirPlugin |
python | PrefectHQ__prefect | tests/server/models/test_flow_runs.py | {
"start": 12671,
"end": 13809
} | class ____:
async def test_read_flow_run(self, flow, session):
# create a flow run to read
flow_run = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
read_flow_run = await models.flow_runs.read_flow_run(
session=session, flow_run_id=flow_run.id
)
assert flow_run == read_flow_run
async def test_read_flow_run_with_job_variables(self, flow, session):
job_vars = {"foo": "bar"}
flow_run = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id, job_variables=job_vars),
)
read_flow_run = await models.flow_runs.read_flow_run(
session=session, flow_run_id=flow_run.id
)
assert read_flow_run.job_variables == job_vars
async def test_read_flow_run_returns_none_if_does_not_exist(self, session):
result = await models.flow_runs.read_flow_run(
session=session, flow_run_id=uuid4()
)
assert result is None
| TestReadFlowRun |
python | ray-project__ray | python/ray/train/tests/test_new_persistence.py | {
"start": 7464,
"end": 22637
} | class ____(tune.Trainable):
"""Implement (almost) the same thing as `train_fn` but as a class."""
def setup(self, config):
# Save some markers in the trial dir.
tmp_path = config.get("tmp_path")
self.fail_markers = {
i: tmp_path / f"fail_marker_{self.trial_id}_iter={i}"
for i in config.get("fail_iters", [])
}
setup_marker = tmp_path / f"setup_marker_{self.trial_id}"
if not setup_marker.exists():
for marker in self.fail_markers.values():
marker.touch()
setup_marker.touch()
self.save_as_dict = config.get("save_checkpoint_as_dict", False)
def step(self) -> dict:
if self.iteration in self.fail_markers:
marker = self.fail_markers[self.iteration]
if marker.exists():
marker.unlink()
raise RuntimeError(f"Failing on iter={self.iteration}")
# Save an artifact in the local trial dir.
artifact_file_name = f"artifact-iter={self.iteration}.txt"
with open(artifact_file_name, "w") as f:
f.write(f"{self.iteration}")
return {
"score": 1,
"done": self.iteration >= self.config.get("num_iterations") - 1,
"should_checkpoint": True,
}
def save_checkpoint(self, temp_checkpoint_dir) -> str:
if self.save_as_dict:
return {"dummy": "data"}
(Path(temp_checkpoint_dir) / "checkpoint.pkl").write_text("dummy")
return temp_checkpoint_dir
def load_checkpoint(self, checkpoint_dict_or_path):
print("Loading state from:", checkpoint_dict_or_path)
print("At iteration =", self.iteration)
if self.save_as_dict:
assert checkpoint_dict_or_path == {"dummy": "data"}
else:
assert (
Path(checkpoint_dict_or_path) / "checkpoint.pkl"
).read_text() == "dummy"
def _resume_from_checkpoint(
checkpoint: Checkpoint,
expected_state: dict,
storage_path: Optional[str] = None,
storage_filesystem: Optional[pyarrow.fs.FileSystem] = None,
):
print(f"\nStarting run with `resume_from_checkpoint`: {checkpoint}\n")
def assert_fn(config):
checkpoint_to_check = train.get_checkpoint()
with checkpoint_to_check.as_directory() as checkpoint_dir:
with open(os.path.join(checkpoint_dir, "checkpoint.pkl"), "rb") as f:
state = pickle.load(f)
print("Loaded state from `resume_from_checkpoint`:", state)
print("Expected state:", expected_state)
assert state == expected_state, (state, expected_state)
dummy_ckpt = tempfile.mkdtemp()
with open(os.path.join(dummy_ckpt, "dummy.txt"), "w") as f:
f.write("data")
train.report({"dummy": 1}, checkpoint=Checkpoint.from_directory(dummy_ckpt))
trainer = DataParallelTrainer(
assert_fn,
scaling_config=train.ScalingConfig(num_workers=2),
run_config=train.RunConfig(
name="test_resume_from_checkpoint",
storage_path=storage_path,
storage_filesystem=storage_filesystem,
),
resume_from_checkpoint=checkpoint,
)
result = trainer.fit()
# Make sure that the checkpoint indexing starts from scratch.
assert Path(
result.checkpoint.path
).name == StorageContext._make_checkpoint_dir_name(0)
# Clean up this run's experiment directory immediately after.
_delete_fs_path(result.filesystem, Path(result.path).parent.as_posix())
def _assert_storage_contents(
local_inspect_dir: Path,
exp_name: str,
checkpoint_config: train.CheckpointConfig,
trainable_name: str,
test_trainer: bool,
no_checkpoint_ranks: List[int] = None,
constants: type = TestConstants,
):
no_checkpoint_ranks = no_checkpoint_ranks or []
# Second, inspect the contents of the storage path
storage_path_ls = list(local_inspect_dir.glob("*"))
assert len(storage_path_ls) == 1 # Only expect 1 experiment dir
exp_dir = storage_path_ls[0]
assert exp_dir.name == exp_name
# Files synced by the driver
assert len(list(exp_dir.glob("tuner.pkl"))) == 1
if test_trainer:
assert len(list(exp_dir.glob("trainer.pkl"))) == 1
# 2 copies of these files:
# 1 for the initial run, and 1 for the manually restored run.
assert len(list(exp_dir.glob("basic-variant-state-*"))) == 2
assert len(list(exp_dir.glob("experiment_state-*"))) == 2
# Files synced by the worker
assert (
len(list(exp_dir.glob(f"{trainable_name}*"))) == 1
if test_trainer
else constants.NUM_TRIALS
)
for trial_dir in exp_dir.glob(f"{trainable_name}*"):
# If set, expect num_to_keep. Otherwise, expect to see all of them.
expected_num_checkpoints = (
checkpoint_config.num_to_keep or constants.NUM_ITERATIONS
)
assert len(list(trial_dir.glob("checkpoint_*"))) == expected_num_checkpoints
checkpoint_idxs = sorted(
[
_get_checkpoint_index(checkpoint_dir.name)
for checkpoint_dir in trial_dir.glob("checkpoint_*")
]
)
# Ex: If num_to_keep=2 out of 6 total checkpoints,
# expect checkpoint_004 and checkpoint_005.
assert checkpoint_idxs == list(
range(
constants.NUM_ITERATIONS - expected_num_checkpoints,
constants.NUM_ITERATIONS,
)
)
for checkpoint_dir in trial_dir.glob("checkpoint_*"):
# 1 shared checkpoint.pkl file, written by the trainable / all workers.
assert (
len(list(checkpoint_dir.glob("checkpoint.pkl"))) == 1
# NOTE: Dict checkpoint is only for the ClassTrainable.
or len(list(checkpoint_dir.glob(_DICT_CHECKPOINT_FILE_NAME))) == 1
)
if test_trainer:
# 1 checkpoint shard per worker.
# Unless the worker did not report a checkpoint (no_checkpoint_ranks).
assert {
_get_checkpoint_shard_rank(checkpoint_shard.name)
for checkpoint_shard in checkpoint_dir.glob(
"checkpoint_shard-*.pkl"
)
} == {
i
for i in range(constants.NUM_WORKERS)
if i not in no_checkpoint_ranks
}
if test_trainer:
expected_num_artifacts = constants.NUM_ITERATIONS * constants.NUM_WORKERS
else:
expected_num_artifacts = constants.NUM_ITERATIONS
assert len(list(trial_dir.glob("artifact-*"))) == expected_num_artifacts
# NOTE: This result file is synced by the driver.
assert len(list(trial_dir.glob(EXPR_RESULT_FILE))) == 1
@pytest.mark.parametrize("trainable", [train_fn, ClassTrainable])
@pytest.mark.parametrize("storage_path_type", ["nfs", "cloud", "custom_fs"])
@pytest.mark.parametrize(
"checkpoint_config",
[train.CheckpointConfig(), train.CheckpointConfig(num_to_keep=2)],
)
def test_tuner(
tmp_path,
trainable,
storage_path_type,
checkpoint_config: train.CheckpointConfig,
):
"""End-to-end test that the new persistence mode works with the Tuner API.
This test covers many `storage_path_type` options:
- storage_path=None --> save locally to the default local path (e.g., ~/ray_results)
- storage_path="nfs" --> save locally to a fake NFS path
- storage_path="cloud" --> save to a mock S3 bucket
- storage_path="custom_fs" --> save to a custom pyarrow filesystem
- The custom fs is a local filesystem that appends a path prefix to every path.
This is the expected output at the storage path:
{storage_path}/{exp_name}
βββ tuner.pkl <- Driver artifacts (global experiment state)
βββ basic-variant-state.json
βββ experiment_state.json
βββ train_fn_a2b9e_00000_0_...
β βββ artifact-iter=0.txt <- Trial artifacts
β βββ ...
β βββ checkpoint_000000 <- Trial checkpoints
β β βββ checkpoint.pkl
β βββ ...
β βββ events.out.tfevents... <- Driver artifacts (trial results)
β βββ params.json
β βββ params.pkl
β βββ progress.csv
β βββ result.json
βββ train_fn_a2b9e_00001_1_...
βββ ... <- Same as above
"""
exp_name = f"tuner_persistence_test-{uuid.uuid4().hex}"
with _resolve_storage_type(storage_path_type, tmp_path) as (
storage_path,
storage_filesystem,
):
run_config = train.RunConfig(
storage_path=storage_path,
storage_filesystem=storage_filesystem,
name=exp_name,
verbose=0,
failure_config=train.FailureConfig(max_failures=1),
checkpoint_config=checkpoint_config,
sync_config=train.SyncConfig(sync_artifacts=True),
)
tuner = tune.Tuner(
trainable,
param_space={
"num_iterations": TestConstants.NUM_ITERATIONS,
"fail_iters": [2, 4],
# NOTE: This param is only used in the ClassTrainable.
"save_checkpoint_as_dict": tune.grid_search([True, False]),
"tmp_path": tmp_path,
},
run_config=run_config,
# 2 samples (from the grid search). Run 1 at at time to test actor reuse
tune_config=tune.TuneConfig(num_samples=1, max_concurrent_trials=1),
)
result_grid = tuner.fit()
assert result_grid.errors
restored_tuner = tune.Tuner.restore(
path=str(URI(run_config.storage_path) / exp_name),
trainable=trainable,
storage_filesystem=storage_filesystem,
resume_errored=True,
)
result_grid = restored_tuner.fit()
assert not result_grid.errors
local_inspect_dir, storage_fs_path = _get_local_inspect_dir(
root_local_path=tmp_path,
storage_path=run_config.storage_path,
storage_filesystem=storage_filesystem,
)
# First, check that the ResultGrid returns the correct paths.
print(result_grid)
experiment_fs_path = result_grid.experiment_path
assert isinstance(result_grid.filesystem, pyarrow.fs.FileSystem), result_grid
assert experiment_fs_path == os.path.join(storage_fs_path, exp_name)
assert len(result_grid) == TestConstants.NUM_TRIALS
for result in result_grid:
trial_fs_path = result.path
assert isinstance(result.filesystem, pyarrow.fs.FileSystem), result
assert trial_fs_path.startswith(experiment_fs_path)
for checkpoint, _ in result.best_checkpoints:
assert checkpoint.path.startswith(trial_fs_path)
# Next, inspect the storage path contents.
_assert_storage_contents(
local_inspect_dir,
exp_name,
checkpoint_config,
trainable_name=trainable.__name__,
test_trainer=False,
)
@pytest.mark.parametrize("storage_path_type", ["nfs", "cloud", "custom_fs"])
@pytest.mark.parametrize(
"checkpoint_config",
[
train.CheckpointConfig(),
train.CheckpointConfig(
num_to_keep=1,
checkpoint_score_attribute=TestConstants.SCORE_KEY,
checkpoint_score_order="max",
),
],
)
def test_trainer(
tmp_path, storage_path_type, checkpoint_config: train.CheckpointConfig
):
"""Same end-to-end test as `test_tuner`, but also includes a
`DataParallelTrainer(resume_from_checkpoint)` test at the end.
{storage_path}/{exp_name}
βββ experiment_state-2023-07-28_10-00-38.json <- Initial exp state
βββ basic-variant-state-2023-07-28_10-00-38.json
βββ experiment_state-2023-07-28_10-01-38.json <- Restored exp state
βββ basic-variant-state-2023-07-28_10-01-38.json
βββ trainer.pkl
βββ tuner.pkl
βββ DataParallelTrainer_46367_00000_0_...
βββ events.out.tfevents...
βββ params.json
βββ params.pkl
βββ progress.csv
βββ result.json
βββ checkpoint_000000
β βββ checkpoint.pkl <- Shared checkpoint file
β βββ checkpoint_shard-rank=0.pkl <- Worker checkpoint shards
β βββ checkpoint_shard-rank=1.pkl
βββ ...
βββ artifact-rank=0-iter=0.txt <- Worker artifacts
βββ artifact-rank=1-iter=0.txt
βββ ...
βββ artifact-rank=0-iter=1.txt
βββ artifact-rank=1-iter=1.txt
βββ ...
"""
exp_name = f"trainer_persistence_test-{uuid.uuid4().hex}"
no_checkpoint_ranks = [0]
with _resolve_storage_type(storage_path_type, tmp_path) as (
storage_path,
storage_filesystem,
):
run_config = train.RunConfig(
storage_path=storage_path,
storage_filesystem=storage_filesystem,
name=exp_name,
verbose=0,
checkpoint_config=checkpoint_config,
failure_config=train.FailureConfig(max_failures=1),
sync_config=train.SyncConfig(sync_artifacts=True),
)
trainer = DataParallelTrainer(
train_fn,
train_loop_config={
"in_trainer": True,
"num_iterations": TestConstants.NUM_ITERATIONS,
"fail_iters": [2, 4],
# Test that global rank 0 is not required to checkpoint.
"no_checkpoint_ranks": no_checkpoint_ranks,
},
scaling_config=train.ScalingConfig(num_workers=TestConstants.NUM_WORKERS),
run_config=run_config,
)
print("\nStarting initial run.\n")
with pytest.raises(TrainingFailedError):
result = trainer.fit()
print("\nStarting manually restored run.\n")
restored_trainer = DataParallelTrainer.restore(
path=str(URI(run_config.storage_path) / exp_name),
storage_filesystem=storage_filesystem,
)
result = restored_trainer.fit()
_resume_from_checkpoint(
result.checkpoint,
expected_state={"iter": TestConstants.NUM_ITERATIONS - 1},
)
local_inspect_dir, storage_fs_path = _get_local_inspect_dir(
root_local_path=tmp_path,
storage_path=run_config.storage_path,
storage_filesystem=storage_filesystem,
)
# First, inspect that the result object returns the correct paths.
print(result)
trial_fs_path = result.path
assert trial_fs_path.startswith(storage_fs_path)
for checkpoint, _ in result.best_checkpoints:
assert checkpoint.path.startswith(trial_fs_path)
_assert_storage_contents(
local_inspect_dir,
exp_name,
checkpoint_config,
trainable_name="DataParallelTrainer",
test_trainer=True,
no_checkpoint_ranks=no_checkpoint_ranks,
)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| ClassTrainable |
python | readthedocs__readthedocs.org | readthedocs/api/v3/permissions.py | {
"start": 1788,
"end": 2022
} | class ____(BasePermission):
def has_permission(self, request, view):
organization = view._get_parent_organization()
if view.is_admin_member(request.user, organization):
return True
| IsOrganizationAdminMember |
python | pypa__warehouse | warehouse/organizations/models.py | {
"start": 6281,
"end": 7749
} | class ____(db.Model):
__tablename__ = "organization_oidc_issuers"
__table_args__ = (
Index("organization_oidc_issuers_issuer_url_idx", "issuer_url"),
Index("organization_oidc_issuers_organization_id_idx", "organization_id"),
UniqueConstraint(
"organization_id",
"issuer_type",
"issuer_url",
name="_organization_oidc_issuers_org_type_url_uc",
),
)
__repr__ = make_repr("organization_id", "issuer_type", "issuer_url")
organization_id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True),
ForeignKey("organizations.id", onupdate="CASCADE", ondelete="CASCADE"),
)
issuer_type: Mapped[OIDCIssuerType] = mapped_column(
Enum(OIDCIssuerType, values_callable=lambda x: [e.value for e in x]),
comment="Type of OIDC issuer",
)
issuer_url: Mapped[str] = mapped_column(
comment="Custom OIDC issuer URL (e.g., https://gitlab.company.com)",
)
created: Mapped[datetime_now] = mapped_column(
comment="Datetime when the issuer was added",
)
created_by_id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True),
ForeignKey("users.id"),
comment="Admin user who created the issuer mapping",
)
organization: Mapped[Organization] = relationship(
back_populates="oidc_issuers", lazy=False
)
created_by: Mapped[User] = relationship(lazy=False)
| OrganizationOIDCIssuer |
python | PyCQA__pylint | tests/functional/a/arguments_differ.py | {
"start": 3792,
"end": 4028
} | class ____(Mixed):
def mixed(self, first, *args, third, **kwargs):
"""
Acceptable use of vararg in subclass because it does not violate LSP.
"""
super().mixed(first, *args, third, **kwargs)
| MixedChild2 |
python | dagster-io__dagster | python_modules/libraries/dagster-powerbi/dagster_powerbi/components/power_bi_workspace/component.py | {
"start": 11971,
"end": 12541
} | class ____(
create_component_translator_cls(PowerBIWorkspaceComponent, DagsterPowerBITranslator),
ComponentTranslator[PowerBIWorkspaceComponent],
):
def __init__(self, component: PowerBIWorkspaceComponent):
self._component = component
def get_asset_spec(self, data: PowerBITranslatorData) -> AssetSpec:
base_asset_spec = super().get_asset_spec(data)
if self.component.translation is None:
return base_asset_spec
else:
return self.component.translation(base_asset_spec, data)
| PowerBIComponentTranslator |
python | tiangolo__fastapi | scripts/sponsors.py | {
"start": 1309,
"end": 1396
} | class ____(BaseModel):
sponsorshipsAsMaintainer: SponsorshipAsMaintainer
| SponsorsUser |
python | modin-project__modin | modin/config/envvars.py | {
"start": 22398,
"end": 23033
} | class ____(EnvironmentVariable, type=bool):
"""
Whether to show progress when switching between backends.
When enabled, progress messages are displayed during backend switches to inform users
about data transfer operations. When disabled, backend switches occur silently.
"""
varname = "MODIN_BACKEND_SWITCH_PROGRESS"
default = True
@classmethod
def enable(cls) -> None:
"""Enable backend switch progress display."""
cls.put(True)
@classmethod
def disable(cls) -> None:
"""Disable backend switch progress display."""
cls.put(False)
| ShowBackendSwitchProgress |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 5695,
"end": 5966
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.scale = torch.randn(1, 10)
@property
def scale_alias(self):
return self.scale
def forward(self, x):
return x * self.scale_alias
| ModuleProperty |
python | doocs__leetcode | solution/2500-2599/2556.Disconnect Path in a Binary Matrix by at Most One Flip/Solution.py | {
"start": 0,
"end": 484
} | class ____:
def isPossibleToCutPath(self, grid: List[List[int]]) -> bool:
def dfs(i, j):
if i >= m or j >= n or grid[i][j] == 0:
return False
grid[i][j] = 0
if i == m - 1 and j == n - 1:
return True
return dfs(i + 1, j) or dfs(i, j + 1)
m, n = len(grid), len(grid[0])
a = dfs(0, 0)
grid[0][0] = grid[-1][-1] = 1
b = dfs(0, 0)
return not (a and b)
| Solution |
python | GoogleCloudPlatform__python-docs-samples | compute/client_library/sgs.py | {
"start": 2017,
"end": 11157
} | class ____:
"""
This class represents a piece of code that can be used as part of a code snippet.
Each ingredient has a name. It is made of a list of imports that it'll require and
text that will be pasted into the snippet.
"""
simple_imports: list[ImportItem] = field(default_factory=list)
imports_from: list[tuple[str, ImportItem]] = field(default_factory=list)
text: str = ""
name: str = ""
def __repr__(self):
return f"<Ingredient: {self.name}>"
IGNORED_OUTPUT_FILES = (
re.compile(r".*noxfile\.py$"),
re.compile(r".*noxfile_config\.py$"),
re.compile(r".*README\.md$"),
re.compile(r".*requirements\.txt$"),
re.compile(r".*requirements-test\.txt$"),
re.compile(r".*?/tests/.*"),
re.compile(r".*?/__pycache__/.*"),
re.compile(r".*?sponge_log.xml.*"),
)
def parse_imports(script: str) -> tuple[list[ImportItem], list[tuple[str, ImportItem]]]:
"""
Reads a Python script file and analyzes it to extract information
about the various things it imports. Returns a pair of lists containing
information about the "simple imports" (`import abc as xyz`) and "imports from"
(`from collections import deque as ...`).
"""
parsed_script = ast.parse(script)
simple_imports = []
imports_from = []
for node in parsed_script.body:
if isinstance(node, ast.Import):
for alias in node.names:
simple_imports.append(ImportItem(name=alias.name, asname=alias.asname))
elif isinstance(node, ast.ImportFrom):
for alias in node.names:
imports_from.append(
(node.module, ImportItem(name=alias.name, asname=alias.asname))
)
return simple_imports, imports_from
def load_ingredient(path: Path) -> Ingredient:
ingredient_lines = []
in_ingredient = False
ingredient_name = ""
with path.open() as file:
file_content = file.read()
# Read imports
simple_imports, imports_from = parse_imports(file_content)
# Read the script
for line in file_content.splitlines(keepends=True):
if in_ingredient and INGREDIENTS_END.match(line):
break
elif in_ingredient:
ingredient_lines.append(line)
elif INGREDIENTS_START.match(line):
ingredient_name = INGREDIENTS_START.match(line).group(1)
in_ingredient = True
else:
if in_ingredient:
warnings.warn(
f"The ingredient in {path} has no closing tag.", SyntaxWarning
)
return Ingredient(
name=ingredient_name,
text="".join(ingredient_lines),
simple_imports=simple_imports,
imports_from=imports_from,
)
def load_ingredients(path: Path) -> dict:
ingredients = {}
for ipath in path.iterdir():
if ipath.is_dir():
ingredients.update(load_ingredients(ipath))
elif ipath.is_file():
if "__pycache__" in str(ipath.absolute()):
continue
ingredient = load_ingredient(ipath)
ingredients[ingredient.name] = ingredient
return ingredients
def load_recipe(path: Path) -> str:
with path.open() as file:
return file.read()
def load_recipes(path: Path) -> dict:
recipes = {}
for ipath in path.iterdir():
if ipath.is_dir():
recipes.update(load_recipes(ipath))
elif ipath.is_file():
recipes[ipath.absolute()] = load_recipe(ipath)
return recipes
def render_recipe(recipe: str, ingredients: dict) -> str:
"""
Replace all `# IMPORTS` and `# INGREDIENT <name>` occurrences in
the provided recipe, producing a script ready to be saved to a file.
"""
ingredients_used = []
file_lines = recipe.splitlines()
# Scan the file to used ingredients
for line in file_lines:
match = INGREDIENT_FILL.match(line)
if match:
ingredients_used.append(ingredients[match.group(1)])
simple_imports_used = set()
for ingredient in ingredients_used:
for simple_import in ingredient.simple_imports:
simple_imports_used.add(simple_import)
from_imports_used = defaultdict(set)
for ingredient in ingredients_used:
for import_from in ingredient.imports_from:
from_imports_used[import_from[0]].add(import_from[1])
import_lines = set()
for simple_import in simple_imports_used:
if simple_import.asname:
import_lines.add(f"import {simple_import.name} as {simple_import.asname}")
else:
import_lines.add(f"import {simple_import.name}")
for module, from_imports in from_imports_used.items():
names = set()
for from_import in from_imports:
if from_import.asname:
name = f"{from_import.name} as {from_import.asname}"
else:
name = from_import.name
names.add(name)
names = ", ".join(names)
import_lines.add(f"from {module} import {names}")
import_lines = isort.code(
"\n".join(import_lines), config=isort.Config(profile="google")
)
output_file = []
header_added = False
for line in file_lines:
if IMPORTS_FILL.search(line):
output_file.append(import_lines)
elif INGREDIENT_FILL.search(line):
match = INGREDIENT_FILL.search(line)
output_file.append(ingredients[match.group(1)].text)
elif REGION_START.search(line):
# The string has to be broken up, so that the snippet
# machine doesn't recognize it as a valid start of a region
output_file.append(REGION_START.sub("# [" + "START \\1]", line))
elif REGION_END.search(line):
# The string has to be broken up, so that the snippet
# machine doesn't recognize it as a valid start of a region
output_file.append(REGION_END.sub("# [" + "END \\1]", line))
else:
output_file.append(line)
continue
if not header_added:
end = output_file[-1]
output_file[-1] = ""
output_file.append(HEADER)
output_file.append("")
output_file.append(end)
header_added = True
if output_file and not output_file[-1].endswith("\n"):
output_file.append("")
return os.linesep.join(output_file)
def save_rendered_recipe(
    recipe_path: Path,
    rendered_recipe: str,
    output_dir: Path = DEFAULT_OUTPUT_PATH,
    recipes_path: Path = RECIPES_PATH,
) -> Path:
    """Write a rendered recipe into the output tree and auto-format it.

    The output file mirrors the recipe's location inside ``recipes_path``,
    re-rooted under ``output_dir``.

    Args:
        recipe_path: Path of the source recipe file.
        rendered_recipe: Fully rendered source code to write out.
        output_dir: Root directory for generated files.
        recipes_path: Root of the recipes tree, used to compute the recipe's
            relative location.

    Returns:
        The path of the file that was written.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    output_path = output_dir / recipe_path.relative_to(recipes_path)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    with output_path.open(mode="w") as out_file:
        out_file.write(rendered_recipe)
    # Formatting is cosmetic; if black isn't installed, keep the unformatted
    # file instead of aborting the whole generation run after it was written.
    try:
        subprocess.run(
            ["black", str(output_path)],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    except FileNotFoundError:
        print(f"Warning: 'black' not found; skipping formatting of {output_path}")
    return output_path
def generate(
    args: argparse.Namespace,
    ingredients_path: Path = INGREDIENTS_PATH,
    recipes_path: Path = RECIPES_PATH,
):
    """Render every recipe into ``args.output_dir`` and report the results.

    After generation, scans the output directory for entries that were neither
    produced by this run nor matched by ``IGNORED_OUTPUT_FILES`` and lists
    them, so stale leftovers can be spotted and cleaned up.

    Args:
        args: Parsed CLI arguments; only ``args.output_dir`` is used.
        ingredients_path: Root of the ingredients tree.
        recipes_path: Root of the recipes tree.
    """
    ingredients = load_ingredients(ingredients_path)
    recipes = load_recipes(recipes_path)
    updated_paths = set()
    for path, recipe in recipes.items():
        rendered = render_recipe(recipe, ingredients)
        out = save_rendered_recipe(
            path.absolute(),
            rendered,
            recipes_path=recipes_path.absolute(),
            output_dir=Path(args.output_dir),
        )
        updated_paths.add(str(out))
    print("Generated files:")
    for file in sorted(updated_paths):
        print(f" - {repr(file)}")
    all_files = glob.glob(f"{args.output_dir}/**", recursive=True)
    unknown_files = set()
    for file in all_files:
        if file in updated_paths:
            continue
        if any(pattern.match(file) for pattern in IGNORED_OUTPUT_FILES):
            continue
        pfile = Path(file)
        # Don't report non-empty dirs. Note: a bare `pfile.iterdir()` is a
        # generator and therefore always truthy, which silently skipped every
        # directory (even empty ones); any() actually checks for contents.
        if pfile.is_dir() and any(pfile.iterdir()):
            continue
        unknown_files.add(file)
    if unknown_files:
        print("Found following unknown files: ")
        for file in sorted(unknown_files):
            print(f" - {repr(file)}")
def verify(args: argparse.Namespace):
    """Placeholder for the ``verify`` subcommand; currently a no-op.

    Args:
        args: Parsed CLI arguments (unused for now).
    """
    # TODO: Needs to check if the files are up to date. Will be used to auto-check every commit.
    pass
def parse_arguments():
    """Build the CLI parser and parse ``sys.argv``.

    Returns:
        argparse.Namespace whose ``func`` attribute holds the handler of the
        chosen subcommand (``generate`` or ``verify``).
    """
    parser = argparse.ArgumentParser(
        description="Generates full code snippets from their recipes."
    )
    # required=True makes argparse reject a missing subcommand with a clean
    # usage error; previously main() crashed with AttributeError because the
    # namespace had no `func` attribute when no subcommand was given.
    subparsers = parser.add_subparsers(dest="command", required=True)
    gen_parser = subparsers.add_parser("generate", help="Generates the code samples.")
    gen_parser.set_defaults(func=generate)
    gen_parser.add_argument("--output_dir", default=DEFAULT_OUTPUT_PATH)
    verify_parser = subparsers.add_parser(
        "verify", help="Verify if the generated samples match the sources."
    )
    verify_parser.set_defaults(func=verify)
    return parser.parse_args()
def main():
    """CLI entry point: parse arguments and dispatch to the chosen handler."""
    parsed = parse_arguments()
    handler = parsed.func
    handler(parsed)


if __name__ == "__main__":
    main()
| Ingredient |
python | sphinx-doc__sphinx | sphinx/util/docutils.py | {
"start": 12891,
"end": 13729
} | class ____(Reporter):
@classmethod
def from_reporter(
cls: type[LoggingReporter], reporter: Reporter
) -> LoggingReporter:
"""Create an instance of LoggingReporter from other reporter object."""
return cls(
reporter.source,
reporter.report_level,
reporter.halt_level,
reporter.debug_flag,
reporter.error_handler,
)
def __init__(
self,
source: str,
report_level: int = Reporter.WARNING_LEVEL,
halt_level: int = Reporter.SEVERE_LEVEL,
debug: bool = False,
error_handler: str = 'backslashreplace',
) -> None:
stream = WarningStream()
super().__init__(
source, report_level, halt_level, stream, debug, error_handler=error_handler
)
| LoggingReporter |
python | getsentry__sentry | tests/sentry/rules/filters/test_issue_category.py | {
"start": 290,
"end": 1715
} | class ____(RuleTestCase):
rule_cls = IssueCategoryFilter
def test_valid_input_values(self) -> None:
event = self.get_event()
self.assertPasses(self.get_rule(data={"value": 1}), event)
self.assertPasses(self.get_rule(data={"value": str(GroupCategory.ERROR.value)}), event)
self.assertPasses(self.get_rule(data={"value": GroupCategory.ERROR.value}), event)
def test_no_group_does_not_pass(self) -> None:
event = self.get_event()
event.group_id = None
event.groups = None
self.assertDoesNotPass(self.get_rule(data={"value": GroupCategory.ERROR.value}), event)
def test_fail_on_invalid_data(self) -> None:
event = self.get_event()
data_cases = [
{"value": None},
{},
{"value": GroupCategory.ERROR.name},
{"value": "ERROR"},
{"value": "error"},
]
for data_case in data_cases:
rule = self.get_rule(data=data_case)
self.assertDoesNotPass(rule, event)
def test_group_event(self) -> None:
event = self.get_event()
assert event.group is not None
group_event = event.for_group(event.group)
self.assertPasses(self.get_rule(data={"value": GroupCategory.ERROR.value}), event)
self.assertPasses(self.get_rule(data={"value": GroupCategory.ERROR.value}), group_event)
| IssueCategoryFilterErrorTest |
python | yaml__pyyaml | examples/yaml-highlight/yaml_hl.py | {
"start": 1183,
"end": 4434
} | class ____:
def __init__(self, options):
config = yaml.full_load(file(options.config, 'rb').read())
self.style = config[options.style]
if options.input:
self.input = file(options.input, 'rb')
else:
self.input = sys.stdin
if options.output:
self.output = file(options.output, 'wb')
else:
self.output = sys.stdout
def highlight(self):
input = self.input.read()
if input.startswith(codecs.BOM_UTF16_LE):
input = unicode(input, 'utf-16-le')
elif input.startswith(codecs.BOM_UTF16_BE):
input = unicode(input, 'utf-16-be')
else:
input = unicode(input, 'utf-8')
substitutions = self.style.substitutions
tokens = yaml.scan(input)
events = yaml.parse(input)
markers = []
number = 0
for token in tokens:
number += 1
if token.start_mark.index != token.end_mark.index:
cls = token.__class__
if (cls, -1) in substitutions:
markers.append([token.start_mark.index, +2, number, substitutions[cls, -1]])
if (cls, +1) in substitutions:
markers.append([token.end_mark.index, -2, number, substitutions[cls, +1]])
number = 0
for event in events:
number += 1
cls = event.__class__
if (cls, -1) in substitutions:
markers.append([event.start_mark.index, +1, number, substitutions[cls, -1]])
if (cls, +1) in substitutions:
markers.append([event.end_mark.index, -1, number, substitutions[cls, +1]])
markers.sort()
markers.reverse()
chunks = []
position = len(input)
for index, weight1, weight2, substitution in markers:
if index < position:
chunk = input[index:position]
for substring, replacement in self.style.replaces:
chunk = chunk.replace(substring, replacement)
chunks.append(chunk)
position = index
chunks.append(substitution)
chunks.reverse()
result = u''.join(chunks)
if self.style.header:
self.output.write(self.style.header)
self.output.write(result.encode('utf-8'))
if self.style.footer:
self.output.write(self.style.footer)
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('-s', '--style', dest='style', default='ascii',
help="specify the highlighting style", metavar='STYLE')
parser.add_option('-c', '--config', dest='config',
default=os.path.join(os.path.dirname(sys.argv[0]), 'yaml_hl.cfg'),
help="set an alternative configuration file", metavar='CONFIG')
parser.add_option('-i', '--input', dest='input', default=None,
help="set the input file (default: stdin)", metavar='FILE')
parser.add_option('-o', '--output', dest='output', default=None,
help="set the output file (default: stdout)", metavar='FILE')
(options, args) = parser.parse_args()
hl = YAMLHighlight(options)
hl.highlight()
| YAMLHighlight |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 44917,
"end": 46586
} | class ____(Request):
"""
Delete metadata from model
:param model: ID of the model
:type model: str
:param keys: The list of metadata keys to delete
:type keys: Sequence[str]
"""
_service = "models"
_action = "delete_metadata"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"keys": {
"description": "The list of metadata keys to delete",
"items": {"type": "string"},
"type": "array",
},
"model": {"description": "ID of the model", "type": "string"},
},
"required": ["model", "keys"],
"type": "object",
}
def __init__(self, model: str, keys: List[str], **kwargs: Any) -> None:
super(DeleteMetadataRequest, self).__init__(**kwargs)
self.model = model
self.keys = keys
@schema_property("model")
def model(self) -> str:
return self._property_model
@model.setter
def model(self, value: str) -> None:
if value is None:
self._property_model = None
return
self.assert_isinstance(value, "model", six.string_types)
self._property_model = value
@schema_property("keys")
def keys(self) -> List[str]:
return self._property_keys
@keys.setter
def keys(self, value: List[str]) -> None:
if value is None:
self._property_keys = None
return
self.assert_isinstance(value, "keys", (list, tuple))
self.assert_isinstance(value, "keys", six.string_types, is_array=True)
self._property_keys = value
| DeleteMetadataRequest |
python | django__django | tests/template_tests/syntax_tests/test_multiline.py | {
"start": 132,
"end": 360
} | class ____(SimpleTestCase):
@setup({"multiline01": multiline_string})
def test_multiline01(self):
output = self.engine.render_to_string("multiline01")
self.assertEqual(output, multiline_string)
| MultilineTests |
python | celery__celery | t/unit/tasks/test_canvas.py | {
"start": 74155,
"end": 74536
} | class ____(CanvasCase):
def test_is_None(self):
assert maybe_signature(None, app=self.app) is None
def test_is_dict(self):
assert isinstance(maybe_signature(dict(self.add.s()), app=self.app),
Signature)
def test_when_sig(self):
s = self.add.s()
assert maybe_signature(s, app=self.app) is s
| test_maybe_signature |
python | explosion__spaCy | spacy/lang/nl/__init__.py | {
"start": 410,
"end": 696
} | class ____(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
prefixes = TOKENIZER_PREFIXES
infixes = TOKENIZER_INFIXES
suffixes = TOKENIZER_SUFFIXES
lex_attr_getters = LEX_ATTRS
syntax_iterators = SYNTAX_ITERATORS
stop_words = STOP_WORDS
| DutchDefaults |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.