language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/instigation.py | {
"start": 29778,
"end": 29940
} | class ____(graphene.ObjectType):
results = non_null_list(GrapheneInstigationState)
class Meta:
name = "InstigationStates"
| GrapheneInstigationStates |
python | huggingface__transformers | tests/models/owlvit/test_modeling_owlvit.py | {
"start": 20030,
"end": 22418
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (OwlViTForObjectDetection,) if is_torch_available() else ()
test_resize_embeddings = False
test_attention_outputs = False
def setUp(self):
self.model_tester = OwlViTForObjectDetectionTester(self)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="OwlViTModel does not have input/output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="Test_forward_signature is tested in individual model tests")
def test_forward_signature(self):
pass
@unittest.skip(reason="OWL-ViT does not support training yet")
def test_training(self):
pass
@unittest.skip(reason="OWL-ViT does not support training yet")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "google/owlvit-base-patch32"
model = OwlViTForObjectDetection.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@require_vision
@require_torch
| OwlViTForObjectDetectionTest |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 37886,
"end": 38864
} | class ____(PrefectFilterBaseModel):
"""Filter by `Deployment.name`."""
any_: Optional[list[str]] = Field(
default=None,
description="A list of deployment names to include",
examples=[["my-deployment-1", "my-deployment-2"]],
)
like_: Optional[str] = Field(
default=None,
description=(
"A case-insensitive partial match. For example, "
" passing 'marvin' will match "
"'marvin', 'sad-Marvin', and 'marvin-robot'."
),
examples=["marvin"],
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.Deployment.name.in_(self.any_))
if self.like_ is not None:
filters.append(db.Deployment.name.ilike(f"%{self.like_}%"))
return filters
| DeploymentFilterName |
python | mlflow__mlflow | mlflow/dspy/wrapper.py | {
"start": 555,
"end": 5477
} | class ____(PythonModel):
"""MLflow PyFunc wrapper class for Dspy models.
This wrapper serves two purposes:
- It stores the Dspy model along with dspy global settings, which are required for seamless
saving and loading.
- It provides a `predict` method so that it can be loaded as an MLflow pyfunc, which is
used at serving time.
"""
def __init__(
self,
model: "dspy.Module",
dspy_settings: dict[str, Any],
model_config: dict[str, Any] | None = None,
):
self.model = model
self.dspy_settings = dspy_settings
self.model_config = model_config or {}
self.output_schema: Schema | None = None
def predict(self, inputs: Any, params: dict[str, Any] | None = None):
import dspy
converted_inputs = self._get_model_input(inputs)
with dspy.context(**self.dspy_settings):
if isinstance(converted_inputs, dict):
# We pass a dict as keyword args and don't allow DSPy models
# to receive a single dict.
result = self.model(**converted_inputs)
else:
result = self.model(converted_inputs)
if isinstance(result, dspy.Prediction):
return result.toDict()
else:
return result
def predict_stream(self, inputs: Any, params=None):
import dspy
converted_inputs = self._get_model_input(inputs)
self._validate_streaming()
stream_listeners = [
dspy.streaming.StreamListener(signature_field_name=spec.name)
for spec in self.output_schema
]
stream_model = dspy.streamify(
self.model,
stream_listeners=stream_listeners,
async_streaming=False,
include_final_prediction_in_output_stream=False,
)
if isinstance(converted_inputs, dict):
outputs = stream_model(**converted_inputs)
else:
outputs = stream_model(converted_inputs)
with dspy.context(**self.dspy_settings):
for output in outputs:
if is_dataclass(output):
yield asdict(output)
elif isinstance(output, dspy.Prediction):
yield output.toDict()
else:
yield output
def _get_model_input(self, inputs: Any) -> str | dict[str, Any]:
"""Convert the PythonModel input into the DSPy program input
Examples of expected conversions:
- str -> str
- dict -> dict
- np.ndarray with one element -> single element
- pd.DataFrame with one row and string column -> single row dict
- pd.DataFrame with one row and non-string column -> single element
- list -> raises an exception
- np.ndarray with more than one element -> raises an exception
- pd.DataFrame with more than one row -> raises an exception
"""
import numpy as np
import pandas as pd
supported_input_types = (np.ndarray, pd.DataFrame, str, dict)
if not isinstance(inputs, supported_input_types):
raise MlflowException(
f"`inputs` must be one of: {[x.__name__ for x in supported_input_types]}, but "
f"received type: {type(inputs)}.",
INVALID_PARAMETER_VALUE,
)
if isinstance(inputs, pd.DataFrame):
if len(inputs) != 1:
raise MlflowException(
_INVALID_SIZE_MESSAGE,
INVALID_PARAMETER_VALUE,
)
if all(isinstance(col, str) for col in inputs.columns):
inputs = inputs.to_dict(orient="records")[0]
else:
inputs = inputs.values[0]
if isinstance(inputs, np.ndarray):
if len(inputs) != 1:
raise MlflowException(
_INVALID_SIZE_MESSAGE,
INVALID_PARAMETER_VALUE,
)
inputs = inputs[0]
return inputs
def _validate_streaming(
self,
):
if Version(importlib.metadata.version("dspy")) <= Version("2.6.23"):
raise MlflowException(
"Streaming API is only supported in dspy 2.6.24 or later. "
"Please upgrade your dspy version."
)
if self.output_schema is None:
raise MlflowException(
"Output schema of the DSPy model is not set. Please log your DSPy "
"model with `signature` or `input_example` to use streaming API."
)
if any(spec.type != DataType.string for spec in self.output_schema):
raise MlflowException(
f"All output fields must be string to use streaming API. Got {self.output_schema}."
)
| DspyModelWrapper |
python | bokeh__bokeh | src/bokeh/models/tiles.py | {
"start": 5747,
"end": 6864
} | class ____(MercatorTileSource):
''' Has the same default tile origin as the ``WMTSTileSource`` but requested
tiles use a ``{XMIN}``, ``{YMIN}``, ``{XMAX}``, ``{YMAX}`` e.g.
``http://your.custom.tile.service?bbox={XMIN},{YMIN},{XMAX},{YMAX}``.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
use_latlon = Bool(default=False, help="""
Flag which indicates option to output ``{XMIN}``, ``{YMIN}``, ``{XMAX}``, ``{YMAX}`` in meters or latitude and longitude.
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| BBoxTileSource |
python | getsentry__sentry | tests/sentry/workflow_engine/handlers/detector/test_stateful.py | {
"start": 3736,
"end": 5992
} | class ____(TestCase):
def setUp(self) -> None:
self.group_key: DetectorGroupKey = None
self.detector = self.create_detector(
name="Stateful Detector",
project=self.project,
)
self.handler = MockDetectorStateHandler(
detector=self.detector,
thresholds={
Level.HIGH: 2,
},
)
def test_increment_detector_thresholds(self) -> None:
state = self.handler.state_manager.get_state_data([self.group_key])[self.group_key]
self.handler._increment_detector_thresholds(state, Level.HIGH, self.group_key)
self.handler.state_manager.commit_state_updates()
updated_state = self.handler.state_manager.get_state_data([self.group_key])[self.group_key]
# Default to all states since the detector is not configured with any.
assert updated_state.counter_updates == {
**{level: 1 for level in self.handler._thresholds},
Level.OK: None,
}
def test_increment_detector_thresholds__medium(self) -> None:
state = self.handler.state_manager.get_state_data([self.group_key])[self.group_key]
self.handler._increment_detector_thresholds(state, Level.MEDIUM, self.group_key)
self.handler.state_manager.commit_state_updates()
updated_state = self.handler.state_manager.get_state_data([self.group_key])[self.group_key]
# All states, lower than high, should be incremented by 1, except for OK
assert updated_state.counter_updates == {
Level.HIGH: None,
Level.OK: None,
}
def test_increment_detector_thresholds_low(self) -> None:
state = self.handler.state_manager.get_state_data([self.group_key])[self.group_key]
self.handler._increment_detector_thresholds(state, Level.LOW, self.group_key)
self.handler.state_manager.commit_state_updates()
updated_state = self.handler.state_manager.get_state_data([self.group_key])[self.group_key]
# The detector doesn't increment LOW because it's not configured
assert updated_state.counter_updates == {
Level.OK: None,
Level.HIGH: None,
}
| TestStatefulDetectorIncrementThresholds |
python | numba__llvmlite | llvmlite/ir/types.py | {
"start": 17716,
"end": 18490
} | class ____(BaseStructType):
"""
The type of "literal" structs, i.e. structs with a literally-defined
type (by contrast with IdentifiedStructType).
"""
null = 'zeroinitializer'
def __init__(self, elems, packed=False):
"""
*elems* is a sequence of types to be used as members.
*packed* controls the use of packed layout.
"""
self.elements = tuple(elems)
self.packed = packed
def _to_string(self):
return self.structure_repr()
def __eq__(self, other):
if isinstance(other, LiteralStructType):
return (self.elements == other.elements
and self.packed == other.packed)
def __hash__(self):
return hash(LiteralStructType)
| LiteralStructType |
python | vyperlang__vyper | vyper/venom/passes/branch_optimization.py | {
"start": 585,
"end": 2776
} | class ____(IRPass):
"""
This pass optimizes branches inverting jnz instructions where appropriate
"""
cfg: CFGAnalysis
liveness: LivenessAnalysis
dfg: DFGAnalysis
def _optimize_branches(self) -> None:
fn = self.function
for bb in fn.get_basic_blocks():
term_inst = bb.instructions[-1]
if term_inst.opcode != "jnz":
continue
fst, snd = self.cfg.cfg_out(bb)
fst_liveness = self.liveness.live_vars_at(fst.instructions[0])
snd_liveness = self.liveness.live_vars_at(snd.instructions[0])
# heuristic(!) to decide if we should flip the labels or not
cost_a, cost_b = len(fst_liveness), len(snd_liveness)
cond = term_inst.operands[0]
prev_inst = self.dfg.get_producing_instruction(cond)
assert prev_inst is not None
# heuristic: remove the iszero and swap branches
if cost_a >= cost_b and prev_inst.opcode == "iszero":
new_cond = prev_inst.operands[0]
new_operands = [new_cond, term_inst.operands[2], term_inst.operands[1]]
self.updater.update(term_inst, term_inst.opcode, new_operands)
# heuristic: add an iszero and swap branches
elif cost_a > cost_b or (cost_a >= cost_b and prefer_iszero(prev_inst)):
tmp = self.updater.add_before(term_inst, "iszero", [term_inst.operands[0]])
assert tmp is not None # help mypy
new_cond = tmp
new_operands = [new_cond, term_inst.operands[2], term_inst.operands[1]]
self.updater.update(term_inst, term_inst.opcode, new_operands)
def run_pass(self):
self.liveness = self.analyses_cache.request_analysis(LivenessAnalysis)
self.cfg = self.analyses_cache.request_analysis(CFGAnalysis)
self.dfg = self.analyses_cache.request_analysis(DFGAnalysis)
self.updater = InstUpdater(self.dfg)
self._optimize_branches()
self.analyses_cache.invalidate_analysis(LivenessAnalysis)
self.analyses_cache.invalidate_analysis(CFGAnalysis)
| BranchOptimizationPass |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1013411,
"end": 1013911
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of UnminimizeComment"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "unminimized_comment")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
unminimized_comment = sgqlc.types.Field(Minimizable, graphql_name="unminimizedComment")
"""The comment that was unminimized."""
| UnminimizeCommentPayload |
python | pandas-dev__pandas | pandas/tests/dtypes/test_inference.py | {
"start": 61196,
"end": 66809
} | class ____:
def test_is_number(self):
assert is_number(True)
assert is_number(1)
assert is_number(1.1)
assert is_number(1 + 3j)
assert is_number(np.int64(1))
assert is_number(np.float64(1.1))
assert is_number(np.complex128(1 + 3j))
assert is_number(np.nan)
assert not is_number(None)
assert not is_number("x")
assert not is_number(datetime(2011, 1, 1))
assert not is_number(np.datetime64("2011-01-01"))
assert not is_number(Timestamp("2011-01-01"))
assert not is_number(Timestamp("2011-01-01", tz="US/Eastern"))
assert not is_number(timedelta(1000))
assert not is_number(Timedelta("1 days"))
# questionable
assert not is_number(np.bool_(False))
assert is_number(np.timedelta64(1, "D"))
def test_is_bool(self):
assert is_bool(True)
assert is_bool(False)
assert is_bool(np.bool_(False))
assert not is_bool(1)
assert not is_bool(1.1)
assert not is_bool(1 + 3j)
assert not is_bool(np.int64(1))
assert not is_bool(np.float64(1.1))
assert not is_bool(np.complex128(1 + 3j))
assert not is_bool(np.nan)
assert not is_bool(None)
assert not is_bool("x")
assert not is_bool(datetime(2011, 1, 1))
assert not is_bool(np.datetime64("2011-01-01"))
assert not is_bool(Timestamp("2011-01-01"))
assert not is_bool(Timestamp("2011-01-01", tz="US/Eastern"))
assert not is_bool(timedelta(1000))
assert not is_bool(np.timedelta64(1, "D"))
assert not is_bool(Timedelta("1 days"))
def test_is_integer(self):
assert is_integer(1)
assert is_integer(np.int64(1))
assert not is_integer(True)
assert not is_integer(1.1)
assert not is_integer(1 + 3j)
assert not is_integer(False)
assert not is_integer(np.bool_(False))
assert not is_integer(np.float64(1.1))
assert not is_integer(np.complex128(1 + 3j))
assert not is_integer(np.nan)
assert not is_integer(None)
assert not is_integer("x")
assert not is_integer(datetime(2011, 1, 1))
assert not is_integer(np.datetime64("2011-01-01"))
assert not is_integer(Timestamp("2011-01-01"))
assert not is_integer(Timestamp("2011-01-01", tz="US/Eastern"))
assert not is_integer(timedelta(1000))
assert not is_integer(Timedelta("1 days"))
assert not is_integer(np.timedelta64(1, "D"))
def test_is_float(self):
assert is_float(1.1)
assert is_float(np.float64(1.1))
assert is_float(np.nan)
assert not is_float(True)
assert not is_float(1)
assert not is_float(1 + 3j)
assert not is_float(False)
assert not is_float(np.bool_(False))
assert not is_float(np.int64(1))
assert not is_float(np.complex128(1 + 3j))
assert not is_float(None)
assert not is_float("x")
assert not is_float(datetime(2011, 1, 1))
assert not is_float(np.datetime64("2011-01-01"))
assert not is_float(Timestamp("2011-01-01"))
assert not is_float(Timestamp("2011-01-01", tz="US/Eastern"))
assert not is_float(timedelta(1000))
assert not is_float(np.timedelta64(1, "D"))
assert not is_float(Timedelta("1 days"))
def test_is_datetime_dtypes(self):
ts = pd.date_range("20130101", periods=3, unit="ns")
tsa = pd.date_range("20130101", periods=3, tz="US/Eastern", unit="ns")
msg = "is_datetime64tz_dtype is deprecated"
assert is_datetime64_dtype("datetime64")
assert is_datetime64_dtype("datetime64[ns]")
assert is_datetime64_dtype(ts)
assert not is_datetime64_dtype(tsa)
assert not is_datetime64_ns_dtype("datetime64")
assert is_datetime64_ns_dtype("datetime64[ns]")
assert is_datetime64_ns_dtype(ts)
assert is_datetime64_ns_dtype(tsa)
assert is_datetime64_any_dtype("datetime64")
assert is_datetime64_any_dtype("datetime64[ns]")
assert is_datetime64_any_dtype(ts)
assert is_datetime64_any_dtype(tsa)
with tm.assert_produces_warning(Pandas4Warning, match=msg):
assert not is_datetime64tz_dtype("datetime64")
assert not is_datetime64tz_dtype("datetime64[ns]")
assert not is_datetime64tz_dtype(ts)
assert is_datetime64tz_dtype(tsa)
@pytest.mark.parametrize("tz", ["US/Eastern", "UTC"])
def test_is_datetime_dtypes_with_tz(self, tz):
dtype = f"datetime64[ns, {tz}]"
assert not is_datetime64_dtype(dtype)
msg = "is_datetime64tz_dtype is deprecated"
with tm.assert_produces_warning(DeprecationWarning, match=msg):
assert is_datetime64tz_dtype(dtype)
assert is_datetime64_ns_dtype(dtype)
assert is_datetime64_any_dtype(dtype)
def test_is_timedelta(self):
assert is_timedelta64_dtype("timedelta64")
assert is_timedelta64_dtype("timedelta64[ns]")
assert not is_timedelta64_ns_dtype("timedelta64")
assert is_timedelta64_ns_dtype("timedelta64[ns]")
tdi = TimedeltaIndex([1e14, 2e14], dtype="timedelta64[ns]")
assert is_timedelta64_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi.astype("timedelta64[ns]"))
assert not is_timedelta64_ns_dtype(Index([], dtype=np.float64))
assert not is_timedelta64_ns_dtype(Index([], dtype=np.int64))
| TestNumberScalar |
python | huggingface__transformers | src/transformers/models/xlm/modeling_xlm.py | {
"start": 52966,
"end": 57628
} | class ____(XLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = XLMModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
cache: Optional[dict[str, torch.Tensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, QuestionAnsweringModelOutput]:
r"""
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = transformer_outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + transformer_outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring
| XLMForQuestionAnsweringSimple |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 58737,
"end": 60354
} | class ____(BaseModel):
class Config:
extra = Extra.allow
type: Literal["DeclarativeStream"]
retriever: Union[CustomRetriever, SimpleRetriever] = Field(
...,
description="Component used to coordinate how records are extracted across stream slices and request pages.",
title="Retriever",
)
incremental_sync: Optional[Union[CustomIncrementalSync, DatetimeBasedCursor]] = Field(
None,
description="Component used to fetch data incrementally based on a time field in the data.",
title="Incremental Sync",
)
name: Optional[str] = Field("", description="The stream name.", example=["Users"], title="Name")
primary_key: Optional[PrimaryKey] = Field("", description="The primary key of the stream.", title="Primary Key")
schema_loader: Optional[Union[InlineSchemaLoader, JsonFileSchemaLoader, CustomSchemaLoader]] = Field(
None,
description="Component used to retrieve the schema for the current stream.",
title="Schema Loader",
)
transformations: Optional[List[Union[AddFields, CustomTransformation, RemoveFields]]] = Field(
None,
description="A list of transformations to be applied to each output record.",
title="Transformations",
)
state_migrations: Optional[List[Union[LegacyToPerPartitionStateMigration, CustomStateMigration]]] = Field(
[],
description="Array of state migrations to be applied on the input state",
title="State Migrations",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
| DeclarativeStream |
python | conda__conda | conda/cli/helpers.py | {
"start": 1762,
"end": 20566
} | class ____(_StoreAction):
"""
Used to validate match specs of packages
"""
@staticmethod
def _validate_no_denylist_channels(packages_specs):
"""
Ensure the packages do not contain denylist_channels
"""
from ..base.context import validate_channels
from ..models.match_spec import MatchSpec
if not isinstance(packages_specs, (list, tuple)):
packages_specs = [packages_specs]
validate_channels(
channel
for spec in map(MatchSpec, packages_specs)
if (channel := spec.get_exact_value("channel"))
)
def __call__(self, parser, namespace, values, option_string=None):
self._validate_no_denylist_channels(values)
super().__call__(parser, namespace, values, option_string)
def add_parser_create_install_update(p, prefix_required=False):
from ..common.constants import NULL
add_parser_prefix(p, prefix_required)
channel_options = add_parser_channels(p)
solver_mode_options = add_parser_solver_mode(p)
package_install_options = add_parser_package_install_options(p)
add_parser_networking(p)
output_and_prompt_options = add_output_and_prompt_options(p)
output_and_prompt_options.add_argument(
"--download-only",
action="store_true",
default=NULL,
help="Solve an environment and ensure package caches are populated, but exit "
"prior to unlinking and linking packages into the prefix.",
)
add_parser_show_channel_urls(output_and_prompt_options)
add_parser_pscheck(p)
add_parser_known(p)
# Add the file kwarg. We don't use {action="store", nargs='*'} as we don't
# want to gobble up all arguments after --file.
p.add_argument(
# "-f", # FUTURE: 26.3: Enable this after deprecating alias in --force
"--file",
default=[],
action="append",
help="Read package versions from the given file. Repeated file "
"specifications can be passed (e.g. --file=file1 --file=file2).",
)
p.add_argument(
"packages",
metavar="package_spec",
action=_ValidatePackages,
nargs="*",
help="List of packages to install or update in the conda environment.",
)
return solver_mode_options, package_install_options, channel_options
def add_parser_pscheck(p: ArgumentParser) -> None:
p.add_argument("--force-pscheck", action="store_true", help=SUPPRESS)
def add_parser_show_channel_urls(p: ArgumentParser | _ArgumentGroup) -> None:
from ..common.constants import NULL
p.add_argument(
"--show-channel-urls",
action="store_true",
dest="show_channel_urls",
default=NULL,
help="Show channel urls. "
"Overrides the value given by `conda config --show show_channel_urls`.",
)
p.add_argument(
"--no-show-channel-urls",
action="store_false",
dest="show_channel_urls",
help=SUPPRESS,
)
def add_parser_help(p: ArgumentParser) -> None:
"""
So we can use consistent capitalization and periods in the help. You must
use the add_help=False argument to ArgumentParser or add_parser to use
this. Add this first to be consistent with the default argparse output.
"""
p.add_argument(
"-h",
"--help",
action=_HelpAction,
help="Show this help message and exit.",
)
def add_parser_prefix(
p: ArgumentParser,
prefix_required: bool = False,
) -> _MutuallyExclusiveGroup:
target_environment_group = p.add_argument_group("Target Environment Specification")
npgroup = target_environment_group.add_mutually_exclusive_group(
required=prefix_required
)
add_parser_prefix_to_group(npgroup)
return npgroup
def add_parser_prefix_to_group(m: _MutuallyExclusiveGroup) -> None:
m.add_argument(
"-n",
"--name",
action="store",
help="Name of environment.",
metavar="ENVIRONMENT",
)
m.add_argument(
"-p",
"--prefix",
action="store",
help="Full path to environment location (i.e. prefix).",
metavar="PATH",
)
def add_parser_json(p: ArgumentParser) -> _ArgumentGroup:
from ..common.constants import NULL
output_and_prompt_options = p.add_argument_group(
"Output, Prompt, and Flow Control Options"
)
output_and_prompt_options.add_argument(
"--json",
action="store_true",
default=NULL,
help="Report all output as json. Suitable for using conda programmatically.",
)
output_and_prompt_options.add_argument(
"--console",
default=NULL,
help="Select the backend to use for normal output rendering.",
)
add_parser_verbose(output_and_prompt_options)
output_and_prompt_options.add_argument(
"-q",
"--quiet",
action="store_true",
default=NULL,
help="Do not display progress bar.",
)
return output_and_prompt_options
def add_output_and_prompt_options(p: ArgumentParser) -> _ArgumentGroup:
from ..common.constants import NULL
output_and_prompt_options = add_parser_json(p)
output_and_prompt_options.add_argument(
"-d",
"--dry-run",
action="store_true",
help="Only display what would have been done.",
)
output_and_prompt_options.add_argument(
"-y",
"--yes",
action="store_true",
default=NULL,
help="Sets any confirmation values to 'yes' automatically. "
"Users will not be asked to confirm any adding, deleting, backups, etc.",
)
return output_and_prompt_options
def add_parser_frozen_env(p: ArgumentParser):
from ..common.constants import NULL
p.add_argument(
"--override-frozen",
action="store_false",
default=NULL,
help="DANGEROUS. Use at your own risk. Ignore protections if the environment is frozen.",
dest="protect_frozen_envs",
)
def add_parser_channels(p: ArgumentParser) -> _ArgumentGroup:
from ..common.constants import NULL
channel_customization_options = p.add_argument_group("Channel Customization")
channel_customization_options.add_argument(
"-c",
"--channel",
# beware conda-build uses this (currently or in the past?)
# if ever renaming to "channels" consider removing context.channels alias to channel
dest="channel",
action="append",
help=(
"Additional channel to search for packages. These are URLs searched in the order "
"they are given (including local directories using the 'file://' syntax or "
"simply a path like '/home/conda/mychan' or '../mychan'). Then, the defaults "
"or channels from .condarc are searched (unless --override-channels is given). "
"You can use 'defaults' to get the default packages for conda. You can also "
"use any name and the .condarc channel_alias value will be prepended. The "
"default channel_alias is https://conda.anaconda.org/."
),
)
channel_customization_options.add_argument(
"--use-local",
action="store_true",
default=NULL,
help="Use locally built packages. Identical to '-c local'.",
)
channel_customization_options.add_argument(
"--override-channels",
action="store_true",
help="""Do not search default or .condarc channels. Requires --channel.""",
)
channel_customization_options.add_argument(
"--repodata-fn",
action="append",
dest="repodata_fns",
help=(
"Specify file name of repodata on the remote server where your channels "
"are configured or within local backups. Conda will try whatever you "
"specify, but will ultimately fall back to repodata.json if your specs are "
"not satisfiable with what you specify here. This is used to employ repodata "
"that is smaller and reduced in time scope. You may pass this flag more than "
"once. Leftmost entries are tried first, and the fallback to repodata.json "
"is added for you automatically. For more information, see "
"conda config --describe repodata_fns."
),
)
channel_customization_options.add_argument(
"--experimental",
action="append",
choices=["jlap", "lock"],
help="jlap: Download incremental package index data from repodata.jlap; implies 'lock'. "
"lock: use locking when reading, updating index (repodata.json) cache. Now enabled.",
)
channel_customization_options.add_argument(
"--no-lock",
action="store_true",
help="Disable locking when reading, updating index (repodata.json) cache. ",
)
channel_customization_options.add_argument(
"--repodata-use-zst",
action=BooleanOptionalAction,
dest="repodata_use_zst",
default=NULL,
help="Check for/do not check for repodata.json.zst. Enabled by default.",
)
return channel_customization_options
def add_parser_solver_mode(p: ArgumentParser) -> _ArgumentGroup:
    """Attach the "Solver Mode Modifiers" argument group to *p* and return it.

    Registers channel-priority flags plus a mutually exclusive pair of
    dependency modifiers (``--no-deps`` / ``--only-deps``).
    """
    from ..base.constants import DepsModifier
    from ..common.constants import NULL

    group = p.add_argument_group("Solver Mode Modifiers")
    # --no-deps and --only-deps contradict each other, so keep them exclusive.
    deps_group = group.add_mutually_exclusive_group()
    group.add_argument(
        "--strict-channel-priority",
        dest="channel_priority",
        action="store_const",
        const="strict",
        default=NULL,
        help="Packages in lower priority channels are not considered if a package "
        "with the same name appears in a higher priority channel.",
    )
    # Hidden legacy spelling; help is suppressed.
    group.add_argument(
        "--channel-priority",
        dest="channel_priority",
        action="store_true",
        default=NULL,
        help=SUPPRESS,
    )
    group.add_argument(
        "--no-channel-priority",
        dest="channel_priority",
        action="store_const",
        const="disabled",
        default=NULL,
        help="Package version takes precedence over channel priority. "
        "Overrides the value given by `conda config --show channel_priority`.",
    )
    deps_group.add_argument(
        "--no-deps",
        dest="deps_modifier",
        action="store_const",
        const=DepsModifier.NO_DEPS,
        default=NULL,
        help="Do not install, update, remove, or change dependencies. This WILL lead "
        "to broken environments and inconsistent behavior. Use at your own risk.",
    )
    deps_group.add_argument(
        "--only-deps",
        dest="deps_modifier",
        action="store_const",
        const=DepsModifier.ONLY_DEPS,
        default=NULL,
        help="Only install dependencies.",
    )
    group.add_argument(
        "--no-pin",
        dest="ignore_pinned",
        action="store_true",
        default=NULL,
        help="Ignore pinned file.",
    )
    return group
def add_parser_update_modifiers(solver_mode_options: ArgumentParser):
    """Register the mutually exclusive update-modifier flags.

    Every flag stores an ``UpdateModifier`` constant into the shared
    ``update_modifier`` destination; argparse guarantees at most one is given.
    """
    from ..base.constants import UpdateModifier
    from ..common.constants import NULL

    mutex_group = solver_mode_options.add_mutually_exclusive_group()
    mutex_group.add_argument(
        "--freeze-installed",
        "--no-update-deps",
        dest="update_modifier",
        action="store_const",
        const=UpdateModifier.FREEZE_INSTALLED,
        default=NULL,
        help="Do not update or change already-installed dependencies.",
    )
    mutex_group.add_argument(
        "--update-deps",
        dest="update_modifier",
        action="store_const",
        const=UpdateModifier.UPDATE_DEPS,
        default=NULL,
        help="Update dependencies that have available updates.",
    )
    mutex_group.add_argument(
        "-S",
        "--satisfied-skip-solve",
        dest="update_modifier",
        action="store_const",
        const=UpdateModifier.SPECS_SATISFIED_SKIP_SOLVE,
        default=NULL,
        help="Exit early and do not run the solver if the requested specs are satisfied. "
        "Also skips aggressive updates as configured by the "
        "'aggressive_update_packages' config setting. Use "
        "'conda config --describe aggressive_update_packages' to view your setting. "
        "--satisfied-skip-solve is similar to the default behavior of 'pip install'.",
    )
    mutex_group.add_argument(
        "--update-all",
        "--all",
        dest="update_modifier",
        action="store_const",
        const=UpdateModifier.UPDATE_ALL,
        default=NULL,
        help="Update all installed packages in the environment.",
    )
    mutex_group.add_argument(
        "--update-specs",
        dest="update_modifier",
        action="store_const",
        const=UpdateModifier.UPDATE_SPECS,
        default=NULL,
        help="Update based on provided specifications.",
    )
def add_parser_prune(p: ArgumentParser) -> None:
    """Register the hidden ``--prune`` flag on *p* (help is suppressed)."""
    from ..common.constants import NULL

    p.add_argument(
        "--prune",
        action="store_true",
        help=SUPPRESS,
        default=NULL,
    )
def add_parser_solver(p: ArgumentParser) -> None:
    """Register ``--solver`` on *p* to select an alternative solver backend.

    Valid choices are resolved lazily from the plugin manager.
    See ``context.solver`` for more info.
    """
    from ..base.context import context
    from ..common.constants import NULL

    solver_group = p.add_mutually_exclusive_group()
    solver_group.add_argument(
        "--solver",
        dest="solver",
        default=NULL,
        action=LazyChoicesAction,
        choices_func=context.plugin_manager.get_solvers,
        help="Choose which solver backend to use.",
    )
def add_parser_networking(p: ArgumentParser) -> _ArgumentGroup:
    """Attach the "Networking Options" argument group to *p* and return it."""
    from ..common.constants import NULL

    networking = p.add_argument_group("Networking Options")
    # NOTE(review): unlike the other flags in this group, -C defaults to
    # False rather than the NULL sentinel — confirm this is intentional.
    networking.add_argument(
        "-C",
        "--use-index-cache",
        action="store_true",
        default=False,
        help="Use cache of channel index files, even if it has expired. This is useful "
        "if you don't want conda to check whether a new version of the repodata "
        "file exists, which will save bandwidth.",
    )
    networking.add_argument(
        "-k",
        "--insecure",
        dest="ssl_verify",
        action="store_false",
        default=NULL,
        help='Allow conda to perform "insecure" SSL connections and transfers. '
        "Equivalent to setting 'ssl_verify' to 'false'.",
    )
    networking.add_argument(
        "--offline",
        action="store_true",
        default=NULL,
        help="Offline mode. Don't connect to the Internet.",
    )
    return networking
def add_parser_package_install_options(p: ArgumentParser) -> _ArgumentGroup:
    """Attach the "Package Linking and Install-time Options" group to *p*.

    Returns the created argument group.
    """
    from ..common.constants import NULL
    from ..deprecations import deprecated

    install_group = p.add_argument_group(
        "Package Linking and Install-time Options"
    )
    # Hidden, deprecated short alias; the action emits a deprecation notice.
    install_group.add_argument(
        "-f",
        dest="force",
        action=deprecated.action(
            "25.9",
            "26.3",
            _StoreTrueAction,
            addendum="Use `--force` instead.",
        ),
        default=NULL,
        help=SUPPRESS,
    )
    install_group.add_argument(
        "--force",
        action="store_true",
        default=NULL,
        help=SUPPRESS,
    )
    install_group.add_argument(
        "--copy",
        action="store_true",
        default=NULL,
        help="Install all packages using copies instead of hard- or soft-linking.",
    )
    install_group.add_argument(
        "--shortcuts",
        dest="shortcuts",
        action="store_true",
        default=NULL,
        help=SUPPRESS,
    )
    install_group.add_argument(
        "--no-shortcuts",
        dest="shortcuts",
        action="store_false",
        default=NULL,
        help="Don't install start menu shortcuts",
    )
    # NOTE(review): no explicit default here, unlike the sibling shortcut
    # flags — confirm the implicit None default is intended.
    install_group.add_argument(
        "--shortcuts-only",
        dest="shortcuts_only",
        action="append",
        help="Install shortcuts only for this package name. Can be used several times.",
    )
    return install_group
def add_parser_known(p: ArgumentParser) -> None:
    """Register the hidden ``--unknown`` flag on *p* (defaults to False)."""
    p.add_argument(
        "--unknown",
        dest="unknown",
        action="store_true",
        default=False,
        help=SUPPRESS,
    )
def add_parser_default_packages(p: ArgumentParser) -> None:
    """Register ``--no-default-packages`` on *p*.

    When given, ``create_default_packages`` from .condarc is ignored.
    """
    p.add_argument(
        "--no-default-packages",
        help="Ignore create_default_packages in the .condarc file.",
        action="store_true",
    )
def add_parser_platform(parser):
    """Register ``--subdir``/``--platform`` on *parser* to target a platform."""
    from ..base.constants import KNOWN_SUBDIRS
    from ..common.constants import NULL

    # "noarch" is not a real target platform, so exclude it from the choices.
    subdirs = [subdir for subdir in KNOWN_SUBDIRS if subdir != "noarch"]
    parser.add_argument(
        "--subdir",
        "--platform",
        dest="subdir",
        metavar="SUBDIR",
        choices=subdirs,
        default=NULL,
        help="Use packages built for this platform. "
        "The new environment will be configured to remember this choice. "
        "Should be formatted like 'osx-64', 'linux-32', 'win-64', and so on. "
        "Defaults to the current (native) platform.",
    )
def add_parser_verbose(parser: ArgumentParser | _ArgumentGroup) -> None:
    """Register verbosity flags (-v/--verbose, --debug, --trace) on *parser*."""
    from ..common.constants import NULL
    from .actions import NullCountAction

    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbosity",
        action=NullCountAction,
        default=NULL,
        help=(
            "Can be used multiple times. Once for detailed output, twice for INFO logging, "
            "thrice for DEBUG logging, four times for TRACE logging."
        ),
    )
    # Hidden flags for fixed logging levels; help is suppressed.
    parser.add_argument(
        "--debug",
        action="store_true",
        default=NULL,
        help=SUPPRESS,
    )
    parser.add_argument(
        "--trace",
        action="store_true",
        default=NULL,
        help=SUPPRESS,
    )
def add_parser_environment_specifier(p: ArgumentParser) -> None:
    """Register the experimental ``--environment-specifier`` flag on *p*.

    Valid choices are resolved lazily from the plugin manager.
    """
    from ..base.context import context
    from ..common.constants import NULL

    p.add_argument(
        "--environment-specifier",
        "--env-spec",  # for brevity
        default=NULL,
        action=LazyChoicesAction,
        choices_func=context.plugin_manager.get_environment_specifiers,
        help="(EXPERIMENTAL) Specify the environment specifier plugin to use.",
    )
def comma_separated_stripped(value: str) -> list[str]:
    """Argparse ``type=`` helper: split *value* on commas, stripping whitespace.

    >>> comma_separated_stripped("a, b ,c")
    ['a', 'b', 'c']
    """
    return list(map(str.strip, value.split(",")))
| _ValidatePackages |
python | pytorch__pytorch | torch/distributed/fsdp/_optim_utils.py | {
"start": 1564,
"end": 1882
} | class ____:
state: _FSDPState
handle: FlatParamHandle
param_indices: dict[str, int]
param_requires_grad: list[bool]
def sorted_items(dictionary: dict[str, Any]) -> Iterator[tuple[str, Any]]:
keys = sorted(dictionary.keys())
for k in keys:
yield k, dictionary[k]
@dataclass
| FSDPParamInfo |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_tagkey_values.py | {
"start": 1491,
"end": 19441
} | class ____(OrganizationTagKeyTestCase):
def test_simple(self) -> None:
self.store_event(
data={"timestamp": self.day_ago.isoformat(), "tags": {"fruit": "apple"}},
project_id=self.project.id,
)
self.store_event(
data={"timestamp": self.min_ago.isoformat(), "tags": {"fruit": "orange"}},
project_id=self.project.id,
)
self.store_event(
data={"timestamp": self.min_ago.isoformat(), "tags": {"some_tag": "some_value"}},
project_id=self.project.id,
)
self.store_event(
data={"timestamp": self.min_ago.isoformat(), "tags": {"fruit": "orange"}},
project_id=self.project.id,
)
url = reverse(
"sentry-api-0-organization-tagkey-values",
kwargs={"organization_id_or_slug": self.org.slug, "key": "fruit"},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
self.run_test("fruit", expected=[("orange", 2), ("apple", 1)])
def test_env(self) -> None:
env2 = self.create_environment()
self.store_event(
data={"timestamp": self.day_ago.isoformat(), "tags": {"fruit": "apple"}},
project_id=self.project.id,
)
self.store_event(
data={
"timestamp": self.day_ago.isoformat(),
"tags": {"fruit": "apple"},
"environment": self.environment.name,
},
project_id=self.project.id,
)
self.store_event(
data={
"timestamp": self.day_ago.isoformat(),
"tags": {"fruit": "apple"},
"environment": env2.name,
},
project_id=self.project.id,
)
self.store_event(
data={"timestamp": self.min_ago.isoformat(), "tags": {"fruit": "orange"}},
project_id=self.project.id,
)
self.run_test(
"fruit",
environment=self.environment.name,
expected=[("apple", 1)],
)
def test_env_with_order_by_count(self) -> None:
# this set of tags has count 5 and but very old
for minute in range(1, 6):
self.store_event(
data={
"timestamp": before_now(minutes=minute * 10).isoformat(),
"tags": {"fruit": "apple"},
"environment": self.environment.name,
},
project_id=self.project.id,
)
# this set of tags has count 4 and but more fresh
for minute in range(1, 5):
self.store_event(
data={
"timestamp": self.min_ago.isoformat(),
"tags": {"fruit": "orange"},
"environment": self.environment.name,
},
project_id=self.project.id,
)
# default test ignore count just use timestamp
self.run_test(
"fruit",
environment=self.environment.name,
expected=[("orange", 4), ("apple", 5)],
)
# check new sorting but count
self.run_test(
"fruit",
environment=self.environment.name,
expected=[("apple", 5), ("orange", 4)],
sort="-count",
)
def test_invalid_sort_field(self) -> None:
self.store_event(
data={"timestamp": self.day_ago.isoformat(), "tags": {"fruit": "apple"}},
project_id=self.project.id,
)
response = self.get_response("fruit", sort="invalid_field")
assert response.status_code == 400
assert response.data == {
"detail": "Invalid sort parameter. Please use one of: -last_seen or -count"
}
def test_semver_with_env(self) -> None:
env = self.create_environment(name="dev", project=self.project)
env1 = self.create_environment(name="prod", project=self.project)
self.create_release(version="test@1.0.0.0", environments=[env])
self.create_release(version="test@2.0.0.0")
self.run_test(
SEMVER_ALIAS,
qs_params={"query": "1.", "environment": [env.name]},
expected=[("1.0.0.0", None)],
)
self.run_test(
SEMVER_ALIAS, qs_params={"query": "1.", "environment": [env1.name]}, expected=[]
)
def test_bad_key(self) -> None:
response = self.get_response("fr uit")
assert response.status_code == 400, response.content
assert response.data == {"detail": 'Invalid tag key format for "fr uit"'}
def test_snuba_column(self) -> None:
self.store_event(
data={"timestamp": self.day_ago.isoformat(), "user": {"email": "foo@example.com"}},
project_id=self.project.id,
)
self.store_event(
data={"timestamp": self.min_ago.isoformat(), "user": {"email": "bar@example.com"}},
project_id=self.project.id,
)
self.store_event(
data={
"timestamp": before_now(seconds=10).isoformat(),
"user": {"email": "baz@example.com"},
},
project_id=self.project.id,
)
self.store_event(
data={
"timestamp": before_now(seconds=10).isoformat(),
"user": {"email": "baz@example.com"},
},
project_id=self.project.id,
)
self.run_test(
"user.email",
expected=[("baz@example.com", 2), ("bar@example.com", 1), ("foo@example.com", 1)],
)
def test_release(self) -> None:
self.store_event(
data={"timestamp": self.day_ago.isoformat(), "tags": {"sentry:release": "3.1.2"}},
project_id=self.project.id,
)
self.store_event(
data={"timestamp": self.min_ago.isoformat(), "tags": {"sentry:release": "4.1.2"}},
project_id=self.project.id,
)
self.store_event(
data={"timestamp": self.day_ago.isoformat(), "tags": {"sentry:release": "3.1.2"}},
project_id=self.project.id,
)
self.store_event(
data={
"timestamp": before_now(seconds=10).isoformat(),
"tags": {"sentry:release": "5.1.2"},
},
project_id=self.project.id,
)
self.run_test("release", expected=[("5.1.2", 1), ("4.1.2", 1), ("3.1.2", 2)])
def test_user_tag(self) -> None:
self.store_event(
data={"tags": {"sentry:user": "1"}, "timestamp": self.day_ago.isoformat()},
project_id=self.project.id,
)
self.store_event(
data={"tags": {"sentry:user": "2"}, "timestamp": self.min_ago.isoformat()},
project_id=self.project.id,
)
self.store_event(
data={"tags": {"sentry:user": "1"}, "timestamp": self.day_ago.isoformat()},
project_id=self.project.id,
)
self.store_event(
data={"tags": {"sentry:user": "3"}, "timestamp": before_now(seconds=10).isoformat()},
project_id=self.project.id,
)
self.run_test("user", expected=[("3", 1), ("2", 1), ("1", 2)])
def test_project_id(self) -> None:
other_org = self.create_organization()
other_project = self.create_project(organization=other_org)
self.store_event(data={"timestamp": self.day_ago.isoformat()}, project_id=self.project.id)
self.store_event(data={"timestamp": self.min_ago.isoformat()}, project_id=self.project.id)
self.store_event(data={"timestamp": self.day_ago.isoformat()}, project_id=other_project.id)
self.run_test("project.id", expected=[])
def test_project_name(self) -> None:
other_org = self.create_organization()
other_project = self.create_project(organization=other_org)
self.store_event(data={"timestamp": self.day_ago.isoformat()}, project_id=self.project.id)
self.store_event(data={"timestamp": self.min_ago.isoformat()}, project_id=self.project.id)
self.store_event(data={"timestamp": self.day_ago.isoformat()}, project_id=other_project.id)
# without the includeTransactions flag, this will continue to search the Events Dataset for the
# projects tag, which doesn't exist here
self.run_test("project", expected=[])
# with the includeTransactions flag, this will search in the Discover Dataset where project
# has special meaning to refer to the sentry project rather than the project tag
self.run_test(
"project", qs_params={"includeTransactions": "1"}, expected=[(self.project.slug, 2)]
)
def test_project_name_with_query(self) -> None:
other_project = self.create_project(organization=self.org, name="test1")
other_project2 = self.create_project(organization=self.org, name="test2")
self.create_project(organization=self.org, name="test3")
self.store_event(data={"timestamp": self.day_ago.isoformat()}, project_id=other_project.id)
self.store_event(data={"timestamp": self.min_ago.isoformat()}, project_id=other_project.id)
self.store_event(data={"timestamp": self.day_ago.isoformat()}, project_id=other_project2.id)
# without the includeTransactions flag, this will continue to search the Events Dataset for the
# projects tag, which doesn't exist here
self.run_test("project", qs_params={"query": "test"}, expected=[])
# with the includeTransactions flag, this will search in the Discover Dataset where project
# has special meaning to refer to the sentry project rather than the project tag
self.run_test(
"project",
qs_params={"includeTransactions": "1", "query": "test"},
expected=[("test1", 2), ("test2", 1)],
)
self.run_test(
"project",
qs_params={"includeTransactions": "1", "query": "1"},
expected=[("test1", 2)],
)
self.run_test(
"project", qs_params={"includeTransactions": "1", "query": "test3"}, expected=[]
)
self.run_test("project", qs_params={"includeTransactions": "1", "query": "z"}, expected=[])
def test_array_column(self) -> None:
for i in range(3):
self.store_event(
data={"timestamp": self.day_ago.isoformat()}, project_id=self.project.id
)
self.run_test("error.type", expected=[])
def test_no_projects(self) -> None:
self.run_test("fruit", expected=[])
def test_disabled_tag_keys(self) -> None:
self.store_event(
data={"timestamp": self.day_ago.isoformat(), "tags": {"fruit": "apple"}},
project_id=self.project.id,
)
self.run_test("id", expected=[])
self.run_test("id", qs_params={"query": "z"}, expected=[])
self.run_test("timestamp", expected=[])
self.run_test("timestamp", qs_params={"query": "z"}, expected=[])
self.run_test("time", expected=[])
self.run_test("time", qs_params={"query": "z"}, expected=[])
def test_group_id_tag(self) -> None:
self.store_event(
data={
"timestamp": (self.day_ago - timedelta(minutes=1)).isoformat(),
"tags": {"group_id": "not-a-group-id-but-a-string"},
},
project_id=self.project.id,
)
self.run_test("group_id", expected=[("not-a-group-id-but-a-string", 1)])
def test_user_display(self) -> None:
self.store_event(
data={
"timestamp": (self.day_ago - timedelta(minutes=1)).isoformat(),
"user": {"email": "foo@example.com", "ip_address": "127.0.0.1"},
},
project_id=self.project.id,
)
self.store_event(
data={
"timestamp": (self.day_ago - timedelta(minutes=2)).isoformat(),
"user": {"username": "bazz", "ip_address": "192.168.0.1"},
},
project_id=self.project.id,
)
self.store_event(
data={
"timestamp": (self.day_ago - timedelta(minutes=3)).isoformat(),
"user": {"ip_address": "127.0.0.1"},
},
project_id=self.project.id,
)
self.run_test(
"user.display",
qs_params={"includeTransactions": "1"},
expected=[("foo@example.com", 1), ("bazz", 1), ("127.0.0.1", 1)],
)
self.run_test(
"user.display",
qs_params={"includeTransactions": "1", "query": "foo"},
expected=[("foo@example.com", 1)],
)
self.run_test(
"user.display",
qs_params={"includeTransactions": "1", "query": "zz"},
expected=[("bazz", 1)],
)
self.run_test(
"user.display",
qs_params={"includeTransactions": "1", "query": "1"},
expected=[("127.0.0.1", 1)],
)
self.run_test(
"user.display", qs_params={"includeTransactions": "1", "query": "bar"}, expected=[]
)
def test_semver(self) -> None:
self.create_release(version="test@1.0.0.0")
self.create_release(version="test@2.0.0.0")
self.run_test(SEMVER_ALIAS, expected=[("2.0.0.0", None), ("1.0.0.0", None)])
self.run_test(SEMVER_ALIAS, query="1.", expected=[("1.0.0.0", None)])
self.run_test(SEMVER_ALIAS, query="test@1.", expected=[("test@1.0.0.0", None)])
self.run_test(
SEMVER_ALIAS, query="test", expected=[("test@2.0.0.0", None), ("test@1.0.0.0", None)]
)
def test_release_filter_for_all_releases(self) -> None:
self.create_release(version="aaa@1.0")
self.create_release(version="aab@1.0")
self.create_release(version="aba@1.0")
self.create_release(version="abc@1.0")
self.create_release(version="bac@1.0")
self.run_test(
RELEASE_ALIAS,
qs_params={"includeSessions": "1"},
expected=[
("aaa@1.0", None),
("aab@1.0", None),
("aba@1.0", None),
("abc@1.0", None),
("bac@1.0", None),
],
)
self.run_test(
RELEASE_ALIAS,
qs_params={"includeSessions": "1", "query": "a"},
expected=[("aaa@1.0", None), ("aab@1.0", None), ("aba@1.0", None), ("abc@1.0", None)],
)
self.run_test(
RELEASE_ALIAS,
qs_params={"includeSessions": "1", "query": "b"},
expected=[("bac@1.0", None)],
)
self.run_test(
RELEASE_ALIAS,
qs_params={"includeSessions": "1", "query": "aa"},
expected=[("aaa@1.0", None), ("aab@1.0", None)],
)
self.run_test(
RELEASE_ALIAS,
qs_params={"includeSessions": "1", "query": "aba"},
expected=[("aba@1.0", None)],
)
def test_release_filter_for_all_releases_with_env_and_project_filters(self) -> None:
proj2 = self.create_project()
env1 = self.create_environment(name="dev", project=self.project)
env2 = self.create_environment(name="prod", project=self.project)
env3 = self.create_environment(name="test", project=proj2)
self.create_release(version="aaa@1.0", environments=[env1, env2])
self.create_release(version="aab@1.0", environments=[env1])
self.create_release(version="aba@1.0", project=proj2, environments=[env3])
self.run_test(
RELEASE_ALIAS,
qs_params={"includeSessions": "1", "project": [self.project.id]},
expected=[("aaa@1.0", None), ("aab@1.0", None)],
)
self.run_test(
RELEASE_ALIAS,
qs_params={
"includeSessions": "1",
"project": [self.project.id],
"environment": [env1.name],
},
expected=[("aaa@1.0", None), ("aab@1.0", None)],
)
self.run_test(
RELEASE_ALIAS,
qs_params={
"includeSessions": "1",
"project": [self.project.id],
"environment": [env2.name],
},
expected=[("aaa@1.0", None)],
)
self.run_test(
RELEASE_ALIAS,
qs_params={
"includeSessions": "1",
"project": [self.project.id, proj2.id],
"environment": [env2.name, env3.name],
},
expected=[("aaa@1.0", None), ("aba@1.0", None)],
)
def test_simple_flags(self) -> None:
self.store_event(
data={
"contexts": {"flags": {"values": [{"flag": "abc", "result": True}]}},
"timestamp": before_now(seconds=1).isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"contexts": {"flags": {"values": [{"flag": "abc", "result": False}]}},
"timestamp": before_now(seconds=1).isoformat(),
},
project_id=self.project.id,
)
url = reverse(
"sentry-api-0-organization-tagkey-values",
kwargs={"organization_id_or_slug": self.org.slug, "key": "abc"},
)
response = self.client.get(url + "?useFlagsBackend=1")
assert response.status_code == 200
assert len(response.data) == 2
results = sorted(response.data, key=lambda i: i["value"])
assert results[0]["value"] == "false"
assert results[1]["value"] == "true"
assert results[0]["count"] == 1
assert results[1]["count"] == 1
| OrganizationTagKeyValuesTest |
python | walkccc__LeetCode | solutions/2579. Count Total Number of Colored Cells/2579.py | {
"start": 0,
"end": 86
} | class ____:
def coloredCells(self, n: int) -> int:
return n**2 + (n - 1)**2
| Solution |
python | django__django | django/contrib/gis/forms/fields.py | {
"start": 4196,
"end": 4255
} | class ____(GeometryField):
geom_type = "POINT"
| PointField |
python | xlwings__xlwings | tests/reports/test_report.py | {
"start": 11970,
"end": 15505
} | class ____(unittest.TestCase):
def tearDown(self):
xw.Book(this_dir / "output.xlsx").app.quit()
def test_df_filters(self):
wb = render_template(
this_dir / "template1.xlsx", this_dir / "output.xlsx", **data
)
self.assertEqual(
wb.sheets["df_filters"]["A1:E140"].value,
wb.sheets["df_filters"]["G1:K140"].value,
)
def test_df_filters_in_frames(self):
wb = render_template(
this_dir / "df_filter_frame.xlsx", this_dir / "output.xlsx", **data
)
self.assertEqual(
wb.sheets["Sheet1"]["A1:E10"].value, wb.sheets["expected"]["A1:E10"].value
)
self.assertEqual(
wb.sheets["Sheet1"]["A3"].color, wb.sheets["expected"]["A3"].color
)
self.assertEqual(
wb.sheets["Sheet1"]["A4:A5"].color, wb.sheets["expected"]["A4:A5"].color
)
self.assertIsNone(wb.sheets["Sheet1"]["A6"].color)
self.assertEqual(
wb.sheets["Sheet1"]["A7"].color, wb.sheets["expected"]["A7"].color
)
self.assertEqual(
wb.sheets["Sheet1"]["A8"].color, wb.sheets["expected"]["A8"].color
)
self.assertIsNone(wb.sheets["Sheet1"]["A7"].color)
self.assertIsNone(wb.sheets["Sheet1"]["A10"].color)
def test_df_filter_vmerge(self):
wb = render_template(
this_dir / "df_filter_frame.xlsx", this_dir / "output.xlsx", **data
)
self.assertEqual(
wb.sheets["Sheet1"]["A13:B21"].value, wb.sheets["expected"]["A13:B21"].value
)
def test_df_filter_formatter(self):
wb = render_template(
this_dir / "df_filter_frame.xlsx", this_dir / "output.xlsx", **data
)
self.assertEqual(
wb.sheets["Sheet1"]["A23:B23"].color, wb.sheets["expected"]["A23:B23"].color
)
self.assertEqual(
wb.sheets["Sheet1"]["A24:B24"].color, wb.sheets["expected"]["A24:B24"].color
)
self.assertEqual(
wb.sheets["Sheet1"]["A25:B25"].color, wb.sheets["expected"]["A25:B25"].color
)
def test_pic_filters(self):
wb = render_template(
this_dir / "template1.xlsx", this_dir / "output.xlsx", **data
)
self.assertEqual(wb.sheets["pic_filters"].pictures[0].width, 397)
self.assertEqual(wb.sheets["pic_filters"].pictures[0].height, 139)
self.assertEqual(wb.sheets["pic_filters"].pictures[1].width, 120)
self.assertEqual(int(wb.sheets["pic_filters"].pictures[1].height), 42)
self.assertEqual(int(wb.sheets["pic_filters"].pictures[2].width), 371)
self.assertEqual(wb.sheets["pic_filters"].pictures[2].height, 130)
self.assertEqual(int(wb.sheets["pic_filters"].pictures[3].width), 476)
self.assertEqual(int(wb.sheets["pic_filters"].pictures[3].height), 166)
def test_datetime_filters(self):
wb = render_template(
this_dir / "template1.xlsx", this_dir / "output.xlsx", **data
)
self.assertEqual(wb.sheets["dt"]["A1:A7"].value, wb.sheets["dt"]["E1:E7"].value)
self.assertEqual(wb.sheets["dt"].shapes["Rectangle 1"].text, "December 1, 2010")
def test_fontcolor_filter(self):
wb = render_template(
this_dir / "template1.xlsx", this_dir / "output.xlsx", **data
)
self.assertEqual(wb.sheets["Sheet1"]["Q1"].font.color, (255, 255, 255))
if __name__ == "__main__":
unittest.main()
| TestDataFrameFilters |
python | sympy__sympy | sympy/polys/matrices/exceptions.py | {
"start": 976,
"end": 1066
} | class ____(DMShapeError):
"""The matrix is not square"""
pass
| DMNonSquareMatrixError |
python | pyca__cryptography | tests/hazmat/primitives/decrepit/test_algorithms.py | {
"start": 8399,
"end": 8865
} | class ____:
test_cbc = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "IDEA"),
["idea-cbc.txt"],
lambda key, **kwargs: IDEA(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
IDEA(b"\x00" * 16), OFB(b"\x00" * 8)
),
skip_message="Does not support IDEA OFB",
)
| TestIDEAModeCBC |
python | pandas-dev__pandas | pandas/tests/indexes/datetimelike_/test_drop_duplicates.py | {
"start": 2705,
"end": 2999
} | class ____(DropDuplicates):
@pytest.fixture
def idx(self, freq_sample):
"""
Fixture to get TimedeltaIndex for 10 periods for different frequencies.
"""
return timedelta_range("1 day", periods=10, freq=freq_sample, name="idx")
| TestDropDuplicatesTimedeltaIndex |
python | pypa__packaging | src/packaging/specifiers.py | {
"start": 3537,
"end": 26668
} | class ____(BaseSpecifier):
"""This class abstracts handling of version specifiers.
.. tip::
It is generally not required to instantiate this manually. You should instead
prefer to work with :class:`SpecifierSet` instead, which can parse
comma-separated version specifiers (which is what package metadata contains).
"""
__slots__ = ("_prereleases", "_spec", "_spec_version")
_operator_regex_str = r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
"""
_version_regex_str = r"""
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s;)]* # The arbitrary version can be just about anything,
# we match everything except for whitespace, a
# semi-colon for marker support, and a closing paren
# since versions can be enclosed in them.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
# You cannot use a wild card and a pre-release, post-release, a dev or
# local version together so group them with a | and make them optional.
(?:
\.\* # Wild card syntax of .*
|
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
_regex = re.compile(
r"\s*" + _operator_regex_str + _version_regex_str + r"\s*",
re.VERBOSE | re.IGNORECASE,
)
_operators: Final = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
def __init__(self, spec: str = "", prereleases: bool | None = None) -> None:
"""Initialize a Specifier instance.
:param spec:
The string representation of a specifier which will be parsed and
normalized before use.
:param prereleases:
This tells the specifier if it should accept prerelease versions if
applicable or not. The default of ``None`` will autodetect it from the
given specifiers.
:raises InvalidSpecifier:
If the given specifier is invalid (i.e. bad syntax).
"""
match = self._regex.fullmatch(spec)
if not match:
raise InvalidSpecifier(f"Invalid specifier: {spec!r}")
self._spec: tuple[str, str] = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
# Specifier version cache
self._spec_version: tuple[str, Version] | None = None
def _get_spec_version(self, version: str) -> Version:
"""One element cache, as only one spec Version is needed per Specifier."""
if self._spec_version is not None and self._spec_version[0] == version:
return self._spec_version[1]
version_specifier = Version(version)
self._spec_version = (version, version_specifier)
return version_specifier
@property
def prereleases(self) -> bool:
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Only the "!=" operator does not imply prereleases when
# the version in the specifier is a prerelease.
operator, version = self._spec
if operator != "!=":
# The == specifier can include a trailing .*, if it does we
# want to remove before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release than this
# specifier allows pre-releases.
if Version(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value: bool | None) -> None:
self._prereleases = value
@property
def operator(self) -> str:
"""The operator of this specifier.
>>> Specifier("==1.2.3").operator
'=='
"""
return self._spec[0]
@property
def version(self) -> str:
"""The version of this specifier.
>>> Specifier("==1.2.3").version
'1.2.3'
"""
return self._spec[1]
def __repr__(self) -> str:
"""A representation of the Specifier that shows all internal state.
>>> Specifier('>=1.0.0')
<Specifier('>=1.0.0')>
>>> Specifier('>=1.0.0', prereleases=False)
<Specifier('>=1.0.0', prereleases=False)>
>>> Specifier('>=1.0.0', prereleases=True)
<Specifier('>=1.0.0', prereleases=True)>
"""
pre = (
f", prereleases={self.prereleases!r}"
if self._prereleases is not None
else ""
)
return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
def __str__(self) -> str:
    """A string representation of the Specifier that can be round-tripped.

    >>> str(Specifier('>=1.0.0'))
    '>=1.0.0'
    >>> str(Specifier('>=1.0.0', prereleases=False))
    '>=1.0.0'
    """
    op, ver = self._spec
    return f"{op}{ver}"
@property
def _canonical_spec(self) -> tuple[str, str]:
    """The (operator, version) pair with the version in canonical form.

    Used by __hash__ and __eq__ so that e.g. "==1.2.3" and "== 1.2.3.0"
    compare equal.
    """
    op, ver = self._spec
    # Arbitrary equality ("===") compares the raw string, so no
    # canonicalization is applied to it.
    if op == "===":
        return op, ver
    # "~=" is sensitive to the number of release segments, so trailing
    # zeroes must be preserved for it.
    keep_zeroes = op == "~="
    return op, canonicalize_version(ver, strip_trailing_zero=not keep_zeroes)
def __hash__(self) -> int:
    # Hash on the canonical form so hashing stays consistent with __eq__,
    # which also compares canonical specs.
    return hash(self._canonical_spec)
def __eq__(self, other: object) -> bool:
    """Whether or not the two Specifier-like objects are equal.

    :param other: The other object to check against.

    The value of :attr:`prereleases` is ignored.

    >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0")
    True
    >>> (Specifier("==1.2.3", prereleases=False) ==
    ...  Specifier("==1.2.3", prereleases=True))
    True
    >>> Specifier("==1.2.3") == "==1.2.3"
    True
    >>> Specifier("==1.2.3") == Specifier("==1.2.4")
    False
    >>> Specifier("==1.2.3") == Specifier("~=1.2.3")
    False
    """
    if isinstance(other, str):
        # Strings are coerced into Specifiers; unparsable ones simply
        # are not comparable.
        try:
            parsed = self.__class__(str(other))
        except InvalidSpecifier:
            return NotImplemented
    elif isinstance(other, self.__class__):
        parsed = other
    else:
        return NotImplemented
    return self._canonical_spec == parsed._canonical_spec
def _get_operator(self, op: str) -> CallableOperator:
    """Map an operator string (e.g. ">=") to its bound _compare_* method."""
    method_name = f"_compare_{self._operators[op]}"
    operator_callable: CallableOperator = getattr(self, method_name)
    return operator_callable
def _compare_compatible(self, prospective: Version, spec: str) -> bool:
    """Implement the "~=" (compatible release) operator.

    ~=2.2 is equivalent to ">=2.2, ==2.*", so the check is delegated to
    the >= and == comparators once the prefix to match has been derived.
    """
    # The prefix is everything but the last component of the spec
    # version, with suffix segments (dev/pre/post) ignored.
    release_parts = list(
        itertools.takewhile(_is_not_suffix, _version_split(spec))
    )
    prefix = _version_join(release_parts[:-1]) + ".*"
    ge = self._get_operator(">=")
    eq = self._get_operator("==")
    return ge(prospective, spec) and eq(prospective, prefix)
def _compare_equal(self, prospective: Version, spec: str) -> bool:
    """Implement "==", including the prefix-matching "==X.Y.*" form."""
    if not spec.endswith(".*"):
        # Plain equality: parse the spec and compare directly. When the
        # spec has no local segment, the prospective version's local
        # segment is ignored as well.
        spec_version = self._get_spec_version(spec)
        if not spec_version.local:
            prospective = _public_version(prospective)
        return prospective == spec_version
    # Prefix matching: the local segment of the prospective version is
    # always ignored.
    normalized_prospective = canonicalize_version(
        prospective.public, strip_trailing_zero=False
    )
    # Normalize the spec with the trailing ".*" removed, then split both
    # sides; _version_split inserts an implicit separator between a
    # release segment and a fused pre-release segment.
    normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False)
    spec_parts = _version_split(normalized_spec)
    prospective_parts = _version_split(normalized_prospective)
    # 0-pad the prospective version before truncating it so that the
    # comparison sees the correctly shortened form.
    padded_prospective, _ = _pad_version(prospective_parts, spec_parts)
    # The spec matches iff it is a prefix of the (padded) prospective
    # version.
    return padded_prospective[: len(spec_parts)] == spec_parts
def _compare_not_equal(self, prospective: Version, spec: str) -> bool:
    # "!=" is simply the negation of "==", including its prefix-matching
    # ("!=X.Y.*") form.
    return not self._compare_equal(prospective, spec)
def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool:
    """Implement "<=" on the public (local-segment-free) version."""
    # NB: Local version identifiers are NOT permitted in the version
    # specifier, so local version labels can be universally removed from
    # the prospective version.
    return _public_version(prospective) <= self._get_spec_version(spec)
def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool:
    """Implement ">=" on the public (local-segment-free) version."""
    # NB: Local version identifiers are NOT permitted in the version
    # specifier, so local version labels can be universally removed from
    # the prospective version.
    return _public_version(prospective) >= self._get_spec_version(spec)
def _compare_less_than(self, prospective: Version, spec_str: str) -> bool:
    """Implement "<" with the PEP 440 pre-release exclusion rule."""
    spec = self._get_spec_version(spec_str)
    # Short circuit: nothing more to check unless the ordering already
    # holds.
    if prospective >= spec:
        return False
    # Unless the specifier itself names a pre-release, a pre-release of
    # the very version named in the specifier is not accepted (e.g. <3.1
    # must not match 3.1.dev0, while it does match 3.0.dev0).
    if spec.is_prerelease or not prospective.is_prerelease:
        return True
    return Version(prospective.base_version) != _base_version(spec)
def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:
    """Implement ">" with the PEP 440 post-release/local exclusion rules."""
    spec = self._get_spec_version(spec_str)
    # Short circuit: nothing more to check unless the ordering already
    # holds.
    if prospective <= spec:
        return False
    same_release = Version(prospective.base_version) == _base_version(spec)
    # Unless the specifier itself names a post-release, a post-release of
    # the very version named in the specifier is not accepted (e.g. >3.1
    # must not match 3.1.post0, while it does match 3.2.post0).
    if same_release and prospective.is_postrelease and not spec.is_postrelease:
        return False
    # A local version of the named version is technically greater than it
    # but must not match either.
    if same_release and prospective.local is not None:
        return False
    return True
def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
    # "===" is a plain case-insensitive string comparison; no version
    # semantics (padding, canonicalization) are applied at all.
    return str(prospective).lower() == str(spec).lower()
def __contains__(self, item: str | Version) -> bool:
    """Return whether or not the item is contained in this specifier.

    :param item: The item to check for.

    This is used for the ``in`` operator and behaves the same as
    :meth:`contains` with no ``prereleases`` argument passed.

    >>> "1.2.3" in Specifier(">=1.2.3")
    True
    >>> Version("1.2.3") in Specifier(">=1.2.3")
    True
    >>> "1.0.0" in Specifier(">=1.2.3")
    False
    >>> "1.3.0a1" in Specifier(">=1.2.3")
    True
    >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)
    True
    """
    # Delegate to contains() with the default prerelease policy.
    return self.contains(item)
def contains(self, item: UnparsedVersion, prereleases: bool | None = None) -> bool:
    """Return whether or not the item is contained in this specifier.

    :param item:
        The item to check for, which can be a version string or a
        :class:`Version` instance.
    :param prereleases:
        Whether or not to match prereleases with this Specifier. If set to
        ``None`` (the default), it will follow the recommendation from
        :pep:`440` and match prereleases, as there are no other versions.

    >>> Specifier(">=1.2.3").contains("1.2.3")
    True
    >>> Specifier(">=1.2.3").contains(Version("1.2.3"))
    True
    >>> Specifier(">=1.2.3").contains("1.0.0")
    False
    >>> Specifier(">=1.2.3").contains("1.3.0a1")
    True
    >>> Specifier(">=1.2.3", prereleases=False).contains("1.3.0a1")
    False
    >>> Specifier(">=1.2.3").contains("1.3.0a1")
    True
    """
    # A single-item filter() pass applies both the operator comparison
    # and the prerelease policy; the item is "contained" iff it survives.
    return bool(list(self.filter([item], prereleases=prereleases)))
def filter(
    self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
) -> Iterator[UnparsedVersionVar]:
    """Filter items in the given iterable, that match the specifier.

    :param iterable:
        An iterable that can contain version strings and :class:`Version` instances.
        The items in the iterable will be filtered according to the specifier.
    :param prereleases:
        Whether or not to allow prereleases in the returned iterator. If set to
        ``None`` (the default), it will follow the recommendation from :pep:`440`
        and match prereleases if there are no other versions.

    >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
    ['1.3']
    >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))
    ['1.2.3', '1.3', <Version('1.4')>]
    >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))
    ['1.5a1']
    >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
    ['1.3', '1.5a1']
    >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
    ['1.3', '1.5a1']
    """
    collected_prereleases = []
    emitted = False
    # An explicit argument overrides the specifier's own prerelease policy.
    allow_prereleases = (
        self.prereleases if prereleases is None else prereleases
    )
    # Hoist the comparator and spec version out of the loop.
    compare = self._get_operator(self.operator)
    spec_version = self.version
    for candidate in iterable:
        parsed = _coerce_version(candidate)
        # Unparsable candidates never match.
        if parsed is None or not compare(parsed, spec_version):
            continue
        if allow_prereleases or not parsed.is_prerelease:
            # Non-prereleases (or anything, when prereleases are allowed)
            # are yielded immediately.
            emitted = True
            yield candidate
        elif prereleases is None and self._prereleases is not False:
            # Otherwise hold on to matching prereleases in case nothing
            # else matches.
            collected_prereleases.append(candidate)
    # PEP 440 fallback: if nothing was emitted and prereleases were not
    # explicitly forbidden, yield the held-back prereleases.
    if not emitted and prereleases is None and self._prereleases is not False:
        yield from collected_prereleases
# Splits a fused segment like "2rc1" into its numeric and pre-release parts.
_prefix_regex = re.compile(r"([0-9]+)((?:a|b|c|rc)[0-9]+)")


def _version_split(version: str) -> list[str]:
    """Split version into components.

    The split components are intended for version comparison. The logic does
    not attempt to retain the original version string, so joining the
    components back with :func:`_version_join` may not produce the original
    version string.
    """
    epoch, _, rest = version.rpartition("!")
    # The epoch is always the first component, defaulting to "0".
    parts: list[str] = [epoch if epoch else "0"]
    for segment in rest.split("."):
        fused = _prefix_regex.fullmatch(segment)
        if fused is None:
            parts.append(segment)
        else:
            # A segment like "1rc2" counts as the two components "1" and
            # "rc2", as if separated by an implicit dot.
            parts.extend(fused.groups())
    return parts
def _version_join(components: list[str]) -> str:
    """Join split version components into a version string.

    This function assumes the input came from :func:`_version_split`, where
    the first component must be the epoch (either empty or numeric), and all
    other components numeric.
    """
    epoch = components[0]
    rest = components[1:]
    return epoch + "!" + ".".join(rest)
def _is_not_suffix(segment: str) -> bool:
    """Return True unless *segment* is a dev/pre/post suffix component."""
    # str.startswith accepts a tuple of prefixes, checking them all at once.
    return not segment.startswith(("dev", "a", "b", "rc", "post"))
def _pad_version(left: list[str], right: list[str]) -> tuple[list[str], list[str]]:
    """Zero-pad the shorter release segment so both sides align.

    Only the leading run of purely numeric components (the release segment)
    is padded; any trailing suffix components are re-appended after it.
    """
    # The release segment is the leading all-digit run on each side.
    left_release = list(itertools.takewhile(str.isdigit, left))
    right_release = list(itertools.takewhile(str.isdigit, right))
    left_rest = left[len(left_release):]
    right_rest = right[len(right_release):]
    # Pad the shorter release segment with "0" components up to the
    # longer one's length.
    width = max(len(left_release), len(right_release))
    left_padding = ["0"] * (width - len(left_release))
    right_padding = ["0"] * (width - len(right_release))
    return (
        left_release + left_padding + left_rest,
        right_release + right_padding + right_rest,
    )
| Specifier |
python | vyperlang__vyper | vyper/semantics/environment.py | {
"start": 853,
"end": 1058
} | class ____(_EnvType):
_id = "msg"
_type_members = {
"data": BytesT(),
"gas": UINT256_T,
"mana": UINT256_T,
"sender": AddressT(),
"value": UINT256_T,
}
| _Msg |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 31270,
"end": 33203
} | class ____(NonStrictDataModel):
"""
:param task: Task ID
:type task: str
:param iterations:
:type iterations: Sequence[dict]
"""
_schema = {
"properties": {
"iterations": {
"items": {
"properties": {
"events": {
"items": {"description": "Plot event", "type": "object"},
"type": "array",
},
"iter": {"description": "Iteration number", "type": "integer"},
},
"type": "object",
},
"type": ["array", "null"],
},
"task": {"description": "Task ID", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(self, task: Optional[str] = None, iterations: Optional[List[dict]] = None, **kwargs: Any) -> None:
super(PlotsResponseTaskMetrics, self).__init__(**kwargs)
self.task = task
self.iterations = iterations
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("iterations")
def iterations(self) -> Optional[List[dict]]:
return self._property_iterations
@iterations.setter
def iterations(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_iterations = None
return
self.assert_isinstance(value, "iterations", (list, tuple))
self.assert_isinstance(value, "iterations", (dict,), is_array=True)
self._property_iterations = value
| PlotsResponseTaskMetrics |
python | astropy__astropy | astropy/table/index.py | {
"start": 9886,
"end": 11051
} | class ____(Protocol):
"""Protocol defining an index engine class"""
# Taken from soco.py
def __init__(self, data, row_index, unique=False): ...
# using *args and **kwargs here so existing implementations,
# while slightly inconsistent with one another, remain conform without change.
def add(self, key: tuple, *args: int | None, **kwargs: int | None) -> None: ...
# if we manage to remove the one implementation that's inconsistent with it,
# the actual signature should be:
# def add(self, key: tuple, row: int) -> None: ...
def find(self, key: tuple) -> Sequence[Integral]: ...
def remove(self, key: tuple, data: int) -> bool: ...
def shift_left(self, row: int) -> None: ...
def shift_right(self, row: int) -> None: ...
def items(self) -> list[tuple[Hashable, list[Integral]]]: ...
def sort(self) -> None: ...
def sorted_data(self) -> None: ...
def range(
self,
lower: tuple[Hashable, ...] | None,
upper: tuple[Hashable, ...] | None,
bounds: tuple[bool, bool],
) -> list[int]: ...
def replace_rows(self, row_map: Mapping[int, int]) -> None: ...
| IndexEngine |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/elements.py | {
"start": 41111,
"end": 64246
} | class ____(
roles.ColumnArgumentOrKeyRole,
roles.StatementOptionRole,
roles.WhereHavingRole,
roles.BinaryElementRole[_T],
roles.OrderByRole,
roles.ColumnsClauseRole,
roles.LimitOffsetRole,
roles.DMLColumnRole,
roles.DDLConstraintColumnRole,
roles.DDLExpressionRole,
SQLColumnExpression[_T],
DQLDMLClauseElement,
):
"""Represent a column-oriented SQL expression suitable for usage in the
"columns" clause, WHERE clause etc. of a statement.
While the most familiar kind of :class:`_expression.ColumnElement` is the
:class:`_schema.Column` object, :class:`_expression.ColumnElement`
serves as the basis
for any unit that may be present in a SQL expression, including
the expressions themselves, SQL functions, bound parameters,
literal expressions, keywords such as ``NULL``, etc.
:class:`_expression.ColumnElement`
is the ultimate base class for all such elements.
A wide variety of SQLAlchemy Core functions work at the SQL expression
level, and are intended to accept instances of
:class:`_expression.ColumnElement` as
arguments. These functions will typically document that they accept a
"SQL expression" as an argument. What this means in terms of SQLAlchemy
usually refers to an input which is either already in the form of a
:class:`_expression.ColumnElement` object,
or a value which can be **coerced** into
one. The coercion rules followed by most, but not all, SQLAlchemy Core
functions with regards to SQL expressions are as follows:
* a literal Python value, such as a string, integer or floating
point value, boolean, datetime, ``Decimal`` object, or virtually
any other Python object, will be coerced into a "literal bound
value". This generally means that a :func:`.bindparam` will be
produced featuring the given value embedded into the construct; the
resulting :class:`.BindParameter` object is an instance of
:class:`_expression.ColumnElement`.
The Python value will ultimately be sent
to the DBAPI at execution time as a parameterized argument to the
``execute()`` or ``executemany()`` methods, after SQLAlchemy
type-specific converters (e.g. those provided by any associated
:class:`.TypeEngine` objects) are applied to the value.
* any special object value, typically ORM-level constructs, which
feature an accessor called ``__clause_element__()``. The Core
expression system looks for this method when an object of otherwise
unknown type is passed to a function that is looking to coerce the
argument into a :class:`_expression.ColumnElement` and sometimes a
:class:`_expression.SelectBase` expression.
It is used within the ORM to
convert from ORM-specific objects like mapped classes and
mapped attributes into Core expression objects.
* The Python ``None`` value is typically interpreted as ``NULL``,
which in SQLAlchemy Core produces an instance of :func:`.null`.
A :class:`_expression.ColumnElement` provides the ability to generate new
:class:`_expression.ColumnElement`
objects using Python expressions. This means that Python operators
such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
and allow the instantiation of further :class:`_expression.ColumnElement`
instances
which are composed from other, more fundamental
:class:`_expression.ColumnElement`
objects. For example, two :class:`.ColumnClause` objects can be added
together with the addition operator ``+`` to produce
a :class:`.BinaryExpression`.
Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
of :class:`_expression.ColumnElement`:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.sql import column
>>> column("a") + column("b")
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print(column("a") + column("b"))
{printsql}a + b
.. seealso::
:class:`_schema.Column`
:func:`_expression.column`
"""
__visit_name__ = "column_element"
primary_key: bool = False
_is_clone_of: Optional[ColumnElement[_T]]
_is_column_element = True
_insert_sentinel: bool = False
_omit_from_statements = False
_is_collection_aggregate = False
foreign_keys: AbstractSet[ForeignKey] = frozenset()
@util.memoized_property
def _proxies(self) -> List[ColumnElement[Any]]:
return []
@util.non_memoized_property
def _tq_label(self) -> Optional[str]:
"""The named label that can be used to target
this column in a result set in a "table qualified" context.
This label is almost always the label used when
rendering <expr> AS <label> in a SELECT statement when using
the LABEL_STYLE_TABLENAME_PLUS_COL label style, which is what the
legacy ORM ``Query`` object uses as well.
For a regular Column bound to a Table, this is typically the label
<tablename>_<columnname>. For other constructs, different rules
may apply, such as anonymized labels and others.
.. versionchanged:: 1.4.21 renamed from ``._label``
"""
return None
key: Optional[str] = None
"""The 'key' that in some circumstances refers to this object in a
Python namespace.
This typically refers to the "key" of the column as present in the
``.c`` collection of a selectable, e.g. ``sometable.c["somekey"]`` would
return a :class:`_schema.Column` with a ``.key`` of "somekey".
"""
@HasMemoized.memoized_attribute
def _tq_key_label(self) -> Optional[str]:
"""A label-based version of 'key' that in some circumstances refers
to this object in a Python namespace.
_tq_key_label comes into play when a select() statement is constructed
with apply_labels(); in this case, all Column objects in the ``.c``
collection are rendered as <tablename>_<columnname> in SQL; this is
essentially the value of ._label. But to locate those columns in the
``.c`` collection, the name is along the lines of <tablename>_<key>;
that's the typical value of .key_label.
.. versionchanged:: 1.4.21 renamed from ``._key_label``
"""
return self._proxy_key
@property
def _key_label(self) -> Optional[str]:
"""legacy; renamed to _tq_key_label"""
return self._tq_key_label
@property
def _label(self) -> Optional[str]:
"""legacy; renamed to _tq_label"""
return self._tq_label
@property
def _non_anon_label(self) -> Optional[str]:
"""the 'name' that naturally applies this element when rendered in
SQL.
Concretely, this is the "name" of a column or a label in a
SELECT statement; ``<columnname>`` and ``<labelname>`` below:
.. sourcecode:: sql
SELECT <columnmame> FROM table
SELECT column AS <labelname> FROM table
Above, the two names noted will be what's present in the DBAPI
``cursor.description`` as the names.
If this attribute returns ``None``, it means that the SQL element as
written does not have a 100% fully predictable "name" that would appear
in the ``cursor.description``. Examples include SQL functions, CAST
functions, etc. While such things do return names in
``cursor.description``, they are only predictable on a
database-specific basis; e.g. an expression like ``MAX(table.col)`` may
appear as the string ``max`` on one database (like PostgreSQL) or may
appear as the whole expression ``max(table.col)`` on SQLite.
The default implementation looks for a ``.name`` attribute on the
object, as has been the precedent established in SQLAlchemy for many
years. An exception is made on the ``FunctionElement`` subclass
so that the return value is always ``None``.
.. versionadded:: 1.4.21
"""
return getattr(self, "name", None)
_render_label_in_columns_clause = True
"""A flag used by select._columns_plus_names that helps to determine
we are actually going to render in terms of "SELECT <col> AS <label>".
This flag can be returned as False for some Column objects that want
to be rendered as simple "SELECT <col>"; typically columns that don't have
any parent table and are named the same as what the label would be
in any case.
"""
_allow_label_resolve = True
"""A flag that can be flipped to prevent a column from being resolvable
by string label name.
The joined eager loader strategy in the ORM uses this, for example.
"""
_is_implicitly_boolean = False
_alt_names: Sequence[str] = ()
if TYPE_CHECKING:
def _ungroup(self) -> ColumnElement[_T]: ...
@overload
def self_group(self, against: None = None) -> ColumnElement[_T]: ...
@overload
def self_group(
self, against: Optional[OperatorType] = None
) -> ColumnElement[Any]: ...
def self_group(
self, against: Optional[OperatorType] = None
) -> ColumnElement[Any]:
if (
against in (operators.and_, operators.or_, operators._asbool)
and self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity
):
return AsBoolean(self, operators.is_true, operators.is_false)
elif against in (operators.any_op, operators.all_op):
return Grouping(self)
else:
return self
@overload
def _negate(self: ColumnElement[bool]) -> ColumnElement[bool]: ...
@overload
def _negate(self: ColumnElement[_T]) -> ColumnElement[_T]: ...
def _negate(self) -> ColumnElement[Any]:
if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
return AsBoolean(self, operators.is_false, operators.is_true)
else:
grouped = self.self_group(against=operators.inv)
assert isinstance(grouped, ColumnElement)
return UnaryExpression(
grouped,
operator=operators.inv,
)
type: TypeEngine[_T]
if not TYPE_CHECKING:
@util.memoized_property
def type(self) -> TypeEngine[_T]: # noqa: A001
# used for delayed setup of
# type_api
return type_api.NULLTYPE
@HasMemoized.memoized_attribute
def comparator(self) -> TypeEngine.Comparator[_T]:
try:
comparator_factory = self.type.comparator_factory
except AttributeError as err:
raise TypeError(
"Object %r associated with '.type' attribute "
"is not a TypeEngine class or object" % self.type
) from err
else:
return comparator_factory(self)
def __setstate__(self, state):
self.__dict__.update(state)
def __getattr__(self, key: str) -> Any:
try:
return getattr(self.comparator, key)
except AttributeError as err:
raise AttributeError(
"Neither %r object nor %r object has an attribute %r"
% (
type(self).__name__,
type(self.comparator).__name__,
key,
)
) from err
def operate(
self,
op: operators.OperatorType,
*other: Any,
**kwargs: Any,
) -> ColumnElement[Any]:
return op(self.comparator, *other, **kwargs) # type: ignore[no-any-return] # noqa: E501
def reverse_operate(
self, op: operators.OperatorType, other: Any, **kwargs: Any
) -> ColumnElement[Any]:
return op(other, self.comparator, **kwargs) # type: ignore[no-any-return] # noqa: E501
def _bind_param(
self,
operator: operators.OperatorType,
obj: Any,
type_: Optional[TypeEngine[_T]] = None,
expanding: bool = False,
) -> BindParameter[_T]:
return BindParameter(
None,
obj,
_compared_to_operator=operator,
type_=type_,
_compared_to_type=self.type,
unique=True,
expanding=expanding,
)
@property
def expression(self) -> ColumnElement[Any]:
"""Return a column expression.
Part of the inspection interface; returns self.
"""
return self
@property
def _select_iterable(self) -> _SelectIterable:
return (self,)
@util.memoized_property
def base_columns(self) -> FrozenSet[ColumnElement[Any]]:
return frozenset(c for c in self.proxy_set if not c._proxies)
@util.memoized_property
def proxy_set(self) -> FrozenSet[ColumnElement[Any]]:
"""set of all columns we are proxying
as of 2.0 this is explicitly deannotated columns. previously it was
effectively deannotated columns but wasn't enforced. annotated
columns should basically not go into sets if at all possible because
their hashing behavior is very non-performant.
"""
return frozenset([self._deannotate()]).union(
itertools.chain(*[c.proxy_set for c in self._proxies])
)
@util.memoized_property
def _expanded_proxy_set(self) -> FrozenSet[ColumnElement[Any]]:
return frozenset(_expand_cloned(self.proxy_set))
def _uncached_proxy_list(self) -> List[ColumnElement[Any]]:
"""An 'uncached' version of proxy set.
This list includes annotated columns which perform very poorly in
set operations.
"""
return [self] + list(
itertools.chain(*[c._uncached_proxy_list() for c in self._proxies])
)
def shares_lineage(self, othercolumn: ColumnElement[Any]) -> bool:
"""Return True if the given :class:`_expression.ColumnElement`
has a common ancestor to this :class:`_expression.ColumnElement`."""
return bool(self.proxy_set.intersection(othercolumn.proxy_set))
def _compare_name_for_result(self, other: ColumnElement[Any]) -> bool:
"""Return True if the given column element compares to this one
when targeting within a result row."""
return (
hasattr(other, "name")
and hasattr(self, "name")
and other.name == self.name
)
@HasMemoized.memoized_attribute
def _proxy_key(self) -> Optional[str]:
if self._annotations and "proxy_key" in self._annotations:
return cast(str, self._annotations["proxy_key"])
name = self.key
if not name:
# there's a bit of a seeming contradiction which is that the
# "_non_anon_label" of a column can in fact be an
# "_anonymous_label"; this is when it's on a column that is
# proxying for an anonymous expression in a subquery.
name = self._non_anon_label
if isinstance(name, _anonymous_label):
return None
else:
return name
@HasMemoized.memoized_attribute
def _expression_label(self) -> Optional[str]:
"""a suggested label to use in the case that the column has no name,
which should be used if possible as the explicit 'AS <label>'
where this expression would normally have an anon label.
this is essentially mostly what _proxy_key does except it returns
None if the column has a normal name that can be used.
"""
if getattr(self, "name", None) is not None:
return None
elif self._annotations and "proxy_key" in self._annotations:
return cast(str, self._annotations["proxy_key"])
else:
return None
def _make_proxy(
self,
selectable: FromClause,
*,
primary_key: ColumnSet,
foreign_keys: Set[KeyedColumnElement[Any]],
name: Optional[str] = None,
key: Optional[str] = None,
name_is_truncatable: bool = False,
compound_select_cols: Optional[Sequence[ColumnElement[Any]]] = None,
**kw: Any,
) -> typing_Tuple[str, ColumnClause[_T]]:
"""Create a new :class:`_expression.ColumnElement` representing this
:class:`_expression.ColumnElement` as it appears in the select list of
a descending selectable.
"""
if name is None:
name = self._anon_name_label
if key is None:
key = self._proxy_key
else:
key = name
assert key is not None
co: ColumnClause[_T] = ColumnClause(
(
coercions.expect(roles.TruncatedLabelRole, name)
if name_is_truncatable
else name
),
type_=getattr(self, "type", None),
_selectable=selectable,
)
co._propagate_attrs = selectable._propagate_attrs
if compound_select_cols:
co._proxies = list(compound_select_cols)
else:
co._proxies = [self]
if selectable._is_clone_of is not None:
co._is_clone_of = selectable._is_clone_of.columns.get(key)
return key, co
def cast(self, type_: _TypeEngineArgument[_OPT]) -> Cast[_OPT]:
"""Produce a type cast, i.e. ``CAST(<expression> AS <type>)``.
This is a shortcut to the :func:`_expression.cast` function.
.. seealso::
:ref:`tutorial_casts`
:func:`_expression.cast`
:func:`_expression.type_coerce`
"""
return Cast(self, type_)
def label(self, name: Optional[str]) -> Label[_T]:
"""Produce a column label, i.e. ``<columnname> AS <name>``.
This is a shortcut to the :func:`_expression.label` function.
If 'name' is ``None``, an anonymous label name will be generated.
"""
return Label(name, self, self.type)
def _anon_label(
self, seed: Optional[str], add_hash: Optional[int] = None
) -> _anonymous_label:
while self._is_clone_of is not None:
self = self._is_clone_of
# as of 1.4 anonymous label for ColumnElement uses hash(), not id(),
# as the identifier, because a column and its annotated version are
# the same thing in a SQL statement
hash_value = hash(self)
if add_hash:
# this path is used for disambiguating anon labels that would
# otherwise be the same name for the same element repeated.
# an additional numeric value is factored in for each label.
# shift hash(self) (which is id(self), typically 8 byte integer)
# 16 bits leftward. fill extra add_hash on right
assert add_hash < (2 << 15)
assert seed
hash_value = (hash_value << 16) | add_hash
# extra underscore is added for labels with extra hash
# values, to isolate the "deduped anon" namespace from the
# regular namespace. eliminates chance of these
# manufactured hash values overlapping with regular ones for some
# undefined python interpreter
seed = seed + "_"
if isinstance(seed, _anonymous_label):
# NOTE: the space after the hash is required
return _anonymous_label(f"{seed}%({hash_value} )s")
return _anonymous_label.safe_construct(hash_value, seed or "anon")
@util.memoized_property
def _anon_name_label(self) -> str:
"""Provides a constant 'anonymous label' for this ColumnElement.
This is a label() expression which will be named at compile time.
The same label() is returned each time ``anon_label`` is called so
that expressions can reference ``anon_label`` multiple times,
producing the same label name at compile time.
The compiler uses this function automatically at compile time
for expressions that are known to be 'unnamed' like binary
expressions and function calls.
.. versionchanged:: 1.4.9 - this attribute was not intended to be
public and is renamed to _anon_name_label. anon_name exists
for backwards compat
"""
name = getattr(self, "name", None)
return self._anon_label(name)
@util.memoized_property
def _anon_key_label(self) -> _anonymous_label:
"""Provides a constant 'anonymous key label' for this ColumnElement.
Compare to ``anon_label``, except that the "key" of the column,
if available, is used to generate the label.
This is used when a deduplicating key is placed into the columns
collection of a selectable.
.. versionchanged:: 1.4.9 - this attribute was not intended to be
public and is renamed to _anon_key_label. anon_key_label exists
for backwards compat
"""
return self._anon_label(self._proxy_key)
@property
@util.deprecated(
"1.4",
"The :attr:`_expression.ColumnElement.anon_label` attribute is now "
"private, and the public accessor is deprecated.",
)
def anon_label(self) -> str:
return self._anon_name_label
@property
@util.deprecated(
"1.4",
"The :attr:`_expression.ColumnElement.anon_key_label` attribute is "
"now private, and the public accessor is deprecated.",
)
def anon_key_label(self) -> str:
return self._anon_key_label
def _dedupe_anon_label_idx(self, idx: int) -> str:
"""label to apply to a column that is anon labeled, but repeated
in the SELECT, so that we have to make an "extra anon" label that
disambiguates it from the previous appearance.
these labels come out like "foo_bar_id__1" and have double underscores
in them.
"""
label = getattr(self, "name", None)
# current convention is that if the element doesn't have a
# ".name" (usually because it is not NamedColumn), we try to
# use a "table qualified" form for the "dedupe anon" label,
# based on the notion that a label like
# "CAST(casttest.v1 AS DECIMAL) AS casttest_v1__1" looks better than
# "CAST(casttest.v1 AS DECIMAL) AS anon__1"
if label is None:
return self._dedupe_anon_tq_label_idx(idx)
else:
return self._anon_label(label, add_hash=idx)
@util.memoized_property
def _anon_tq_label(self) -> _anonymous_label:
return self._anon_label(getattr(self, "_tq_label", None))
@util.memoized_property
def _anon_tq_key_label(self) -> _anonymous_label:
return self._anon_label(getattr(self, "_tq_key_label", None))
def _dedupe_anon_tq_label_idx(self, idx: int) -> _anonymous_label:
label = getattr(self, "_tq_label", None) or "anon"
return self._anon_label(label, add_hash=idx)
| ColumnElement |
python | ray-project__ray | python/ray/serve/tests/test_https_proxy.py | {
"start": 14961,
"end": 17031
} | class ____:
def test_https_with_custom_port(self, ssl_cert_and_key):
"""Test HTTPS on custom port."""
# Ensure Ray is shutdown before starting
try:
ray.shutdown()
except Exception:
pass
# Disable dashboard to prevent SSL conflicts and disable runtime env upload
ray.init(include_dashboard=False, runtime_env={"working_dir": None})
try:
serve.start(
http_options=HTTPOptions(
host="127.0.0.1",
port=8443,
ssl_keyfile=ssl_cert_and_key["key_path"],
ssl_certfile=ssl_cert_and_key["cert_path"],
)
)
@serve.deployment
def custom_port_handler():
return "custom port"
serve.run(custom_port_handler.bind())
response = requests.get(
"https://127.0.0.1:8443/custom_port_handler", verify=False
)
assert response.status_code == 200
assert response.text == "custom port"
finally:
try:
serve.shutdown()
except Exception:
pass
ray.shutdown()
def test_https_deployment_update(self, https_serve_instance):
"""Test deployment updates work correctly with HTTPS."""
@serve.deployment
def updatable():
return "version 1"
serve.run(updatable.bind())
# Test initial version
response = requests.get("https://localhost:8000/updatable", verify=False)
assert response.text == "version 1"
# Update deployment
@serve.deployment
def updatable():
return "version 2"
serve.run(updatable.bind())
# Test updated version
response = requests.get("https://localhost:8000/updatable", verify=False)
assert response.text == "version 2"
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| TestHTTPSIntegration |
python | getsentry__sentry | tests/snuba/api/endpoints/test_group_event_details.py | {
"start": 185,
"end": 5196
} | class ____(APITestCase, SnubaTestCase, PerformanceIssueTestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
project = self.create_project()
min_ago = before_now(minutes=1).isoformat()
two_min_ago = before_now(minutes=2).isoformat()
self.event1 = self.store_event(
data={
"event_id": "a" * 32,
"environment": "staging",
"fingerprint": ["group_1"],
"timestamp": two_min_ago,
},
project_id=project.id,
)
self.event2 = self.store_event(
data={
"event_id": "b" * 32,
"environment": "production",
"fingerprint": ["group_1"],
"timestamp": min_ago,
},
project_id=project.id,
)
self.group = Group.objects.first()
def test_snuba_no_environment_latest(self) -> None:
url = f"/api/0/issues/{self.group.id}/events/latest/"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["id"] == str(self.event2.event_id)
def test_snuba_no_environment_oldest(self) -> None:
url = f"/api/0/issues/{self.group.id}/events/oldest/"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["id"] == str(self.event1.event_id)
def test_snuba_no_environment_event_id(self) -> None:
url = f"/api/0/issues/{self.group.id}/events/{self.event1.event_id}/"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["id"] == str(self.event1.event_id)
def test_snuba_environment_latest(self) -> None:
url = f"/api/0/issues/{self.group.id}/events/latest/"
response = self.client.get(url, format="json", data={"environment": ["production"]})
assert response.status_code == 200
assert response.data["id"] == str(self.event2.event_id)
def test_snuba_environment_oldest(self) -> None:
url = f"/api/0/issues/{self.group.id}/events/oldest/"
response = self.client.get(url, format="json", data={"environment": ["production"]})
assert response.status_code == 200
assert response.data["id"] == str(self.event2.event_id)
def test_snuba_environment_event_id(self) -> None:
url = f"/api/0/issues/{self.group.id}/events/{self.event2.event_id}/"
response = self.client.get(url, format="json", data={"environment": ["production"]})
assert response.status_code == 200
assert response.data["id"] == str(self.event2.event_id)
def test_simple_latest(self) -> None:
url = f"/api/0/issues/{self.group.id}/events/latest/"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["eventID"] == str(self.event2.event_id)
def test_simple_oldest(self) -> None:
url = f"/api/0/issues/{self.group.id}/events/oldest/"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["id"] == str(self.event1.event_id)
def test_simple_event_id(self) -> None:
url = f"/api/0/issues/{self.group.id}/events/{self.event1.event_id}/"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["id"] == str(self.event1.event_id)
def test_perf_issue_latest(self) -> None:
event = self.create_performance_issue()
assert event.group is not None
url = f"/api/0/issues/{event.group.id}/events/latest/"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["eventID"] == event.event_id
def test_perf_issue_oldest(self) -> None:
event = self.create_performance_issue()
assert event.group is not None
url = f"/api/0/issues/{event.group.id}/events/oldest/"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["eventID"] == event.event_id
def test_perf_issue_event_id(self) -> None:
event = self.create_performance_issue()
assert event.group is not None
url = f"/api/0/issues/{event.group.id}/events/{event.event_id}/"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["eventID"] == event.event_id
def test_invalid_query(self) -> None:
event = self.create_performance_issue()
assert event.group is not None
url = f"/api/0/issues/{event.group.id}/events/{event.event_id}/"
response = self.client.get(url, format="json", data={"query": "release.version:foobar"})
assert response.status_code == 400
| GroupEventDetailsTest |
python | dagster-io__dagster | python_modules/dagster/dagster/components/testing/test_cases.py | {
"start": 763,
"end": 3890
} | class ____(NamedTuple):
name: str
attributes: dict[str, Any]
assertion: Callable[[AssetSpec], bool]
key_modifier: Optional[Callable[[AssetKey], AssetKey]] = None
test_cases = [
TranslationTestCase(
name="group_name",
attributes={"group_name": "group"},
assertion=lambda asset_spec: asset_spec.group_name == "group",
),
TranslationTestCase(
name="owners",
attributes={"owners": ["team:analytics"]},
assertion=lambda asset_spec: asset_spec.owners == ["team:analytics"],
),
TranslationTestCase(
name="tags",
attributes={"tags": {"foo": "bar"}},
assertion=lambda asset_spec: asset_spec.tags.get("foo") == "bar",
),
TranslationTestCase(
name="kinds",
attributes={"kinds": ["snowflake", "dbt"]},
assertion=lambda asset_spec: "snowflake" in asset_spec.kinds and "dbt" in asset_spec.kinds,
),
TranslationTestCase(
name="tags-and-kinds",
attributes={"tags": {"foo": "bar"}, "kinds": ["snowflake", "dbt"]},
assertion=lambda asset_spec: "snowflake" in asset_spec.kinds
and "dbt" in asset_spec.kinds
and asset_spec.tags.get("foo") == "bar",
),
TranslationTestCase(
name="code-version",
attributes={"code_version": "1"},
assertion=lambda asset_spec: asset_spec.code_version == "1",
),
TranslationTestCase(
name="description",
attributes={"description": "some description"},
assertion=lambda asset_spec: asset_spec.description == "some description",
),
TranslationTestCase(
name="metadata",
attributes={"metadata": {"foo": "bar"}},
assertion=lambda asset_spec: asset_spec.metadata.get("foo") == "bar",
),
TranslationTestCase(
name="deps",
attributes={"deps": ["nonexistent"]},
assertion=lambda asset_spec: len(asset_spec.deps) == 1
and asset_spec.deps[0].asset_key == AssetKey("nonexistent"),
),
TranslationTestCase(
name="automation_condition",
attributes={"automation_condition": "{{ automation_condition.eager() }}"},
assertion=lambda asset_spec: asset_spec.automation_condition is not None,
),
TranslationTestCase(
name="key",
attributes={"key": "{{ spec.key.to_user_string() + '_suffix' }}"},
assertion=lambda asset_spec: asset_spec.key.path[-1].endswith("_suffix"),
key_modifier=lambda key: AssetKey(path=list(key.path[:-1]) + [f"{key.path[-1]}_suffix"]),
),
TranslationTestCase(
name="key_prefix",
attributes={"key_prefix": "cool_prefix"},
assertion=lambda asset_spec: asset_spec.key.has_prefix(["cool_prefix"]),
key_modifier=lambda key: AssetKey(path=["cool_prefix"] + list(key.path)),
),
TranslationTestCase(
name="partitions_defs",
attributes={"partitions_def": {"type": "static", "partition_keys": ["foo", "bar"]}},
assertion=lambda asset_spec: isinstance(
asset_spec.partitions_def, StaticPartitionsDefinition
),
),
]
| TranslationTestCase |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI013.py | {
"start": 50,
"end": 101
} | class ____:
...
value: int
| OneAttributeClass2 |
python | getsentry__sentry | src/sentry/integrations/gitlab/integration.py | {
"start": 9245,
"end": 11825
} | class ____(forms.Form):
url = forms.CharField(
label=_("GitLab URL"),
help_text=_(
"The base URL for your GitLab instance, including the host and protocol. "
"Do not include the group path."
"<br>"
"If using gitlab.com, enter https://gitlab.com/"
),
widget=forms.TextInput(attrs={"placeholder": "https://gitlab.example.com"}),
)
group = forms.CharField(
label=_("GitLab Group Path"),
help_text=_(
"This can be found in the URL of your group's GitLab page."
"<br>"
"For example, if your group URL is "
"https://gitlab.com/my-group/my-subgroup, enter `my-group/my-subgroup`."
"<br>"
"If you are trying to integrate an entire self-managed GitLab instance, "
"leave this empty. Doing so will also allow you to select projects in "
"all group and user namespaces (such as users' personal repositories and forks)."
),
widget=forms.TextInput(attrs={"placeholder": _("my-group/my-subgroup")}),
required=False,
)
include_subgroups = forms.BooleanField(
label=_("Include Subgroups"),
help_text=_(
"Include projects in subgroups of the GitLab group."
"<br>"
"Not applicable when integrating an entire GitLab instance. "
"All groups are included for instance-level integrations."
),
widget=forms.CheckboxInput(),
required=False,
initial=False,
)
verify_ssl = forms.BooleanField(
label=_("Verify SSL"),
help_text=_(
"By default, we verify SSL certificates "
"when delivering payloads to your GitLab instance, "
"and request GitLab to verify SSL when it delivers "
"webhooks to Sentry."
),
widget=forms.CheckboxInput(),
required=False,
initial=True,
)
client_id = forms.CharField(
label=_("GitLab Application ID"),
widget=forms.TextInput(
attrs={
"placeholder": _("5832fc6e14300a0d962240a8144466eef4ee93ef0d218477e55f11cf12fc3737")
}
),
)
client_secret = forms.CharField(
label=_("GitLab Application Secret"),
widget=forms.PasswordInput(attrs={"placeholder": _("***********************")}),
)
def clean_url(self):
"""Strip off trailing / as they cause invalid URLs downstream"""
return self.cleaned_data["url"].rstrip("/")
| InstallationForm |
python | huggingface__transformers | src/transformers/models/clip/modeling_clip.py | {
"start": 16063,
"end": 19522
} | class ____(PreTrainedModel):
config: CLIPConfig
base_model_prefix = "clip"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": CLIPEncoderLayer,
"attentions": CLIPAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor
if isinstance(module, CLIPTextEmbeddings):
init.normal_(module.token_embedding.weight, mean=0.0, std=factor * 0.02)
init.normal_(module.position_embedding.weight, mean=0.0, std=factor * 0.02)
elif isinstance(module, CLIPVisionEmbeddings):
factor = self.config.initializer_factor
init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
elif isinstance(module, CLIPAttention):
factor = self.config.initializer_factor
in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
out_proj_std = (module.embed_dim**-0.5) * factor
init.normal_(module.q_proj.weight, std=in_proj_std)
init.normal_(module.k_proj.weight, std=in_proj_std)
init.normal_(module.v_proj.weight, std=in_proj_std)
init.normal_(module.out_proj.weight, std=out_proj_std)
elif isinstance(module, CLIPMLP):
factor = self.config.initializer_factor
in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
init.normal_(module.fc1.weight, std=fc_std)
init.normal_(module.fc2.weight, std=in_proj_std)
elif isinstance(module, CLIPModel):
init.normal_(
module.text_projection.weight,
std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
)
init.normal_(
module.visual_projection.weight,
std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
)
elif isinstance(module, CLIPVisionModelWithProjection):
init.normal_(
module.visual_projection.weight,
std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
)
elif isinstance(module, CLIPTextModelWithProjection):
init.normal_(
module.text_projection.weight,
std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
)
elif isinstance(module, CLIPForImageClassification):
init.normal_(
module.classifier.weight,
std=self.config.vision_config.hidden_size**-0.5 * self.config.initializer_factor,
)
if isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
if isinstance(module, nn.Linear) and module.bias is not None:
init.zeros_(module.bias)
| CLIPPreTrainedModel |
python | doocs__leetcode | solution/1800-1899/1844.Replace All Digits with Characters/Solution.py | {
"start": 0,
"end": 194
} | class ____:
def replaceDigits(self, s: str) -> str:
s = list(s)
for i in range(1, len(s), 2):
s[i] = chr(ord(s[i - 1]) + int(s[i]))
return ''.join(s)
| Solution |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 162540,
"end": 166900
} | class ____:
def test_lcm(self):
self._test_lcm_inner(np.int16)
self._test_lcm_inner(np.uint16)
def test_lcm_object(self):
self._test_lcm_inner(np.object_)
def test_gcd(self):
self._test_gcd_inner(np.int16)
self._test_lcm_inner(np.uint16)
def test_gcd_object(self):
self._test_gcd_inner(np.object_)
def _test_lcm_inner(self, dtype):
# basic use
a = np.array([12, 120], dtype=dtype)
b = np.array([20, 200], dtype=dtype)
assert_equal(np.lcm(a, b), [60, 600])
if not issubclass(dtype, np.unsignedinteger):
# negatives are ignored
a = np.array([12, -12, 12, -12], dtype=dtype)
b = np.array([20, 20, -20, -20], dtype=dtype)
assert_equal(np.lcm(a, b), [60] * 4)
# reduce
a = np.array([3, 12, 20], dtype=dtype)
assert_equal(np.lcm.reduce([3, 12, 20]), 60)
# broadcasting, and a test including 0
a = np.arange(6).astype(dtype)
b = 20
assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20])
def _test_gcd_inner(self, dtype):
# basic use
a = np.array([12, 120], dtype=dtype)
b = np.array([20, 200], dtype=dtype)
assert_equal(np.gcd(a, b), [4, 40])
if not issubclass(dtype, np.unsignedinteger):
# negatives are ignored
a = np.array([12, -12, 12, -12], dtype=dtype)
b = np.array([20, 20, -20, -20], dtype=dtype)
assert_equal(np.gcd(a, b), [4] * 4)
# reduce
a = np.array([15, 25, 35], dtype=dtype)
assert_equal(np.gcd.reduce(a), 5)
# broadcasting, and a test including 0
a = np.arange(6).astype(dtype)
b = 20
assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5])
def test_lcm_overflow(self):
# verify that we don't overflow when a*b does overflow
big = np.int32(np.iinfo(np.int32).max // 11)
a = 2 * big
b = 5 * big
assert_equal(np.lcm(a, b), 10 * big)
def test_gcd_overflow(self):
for dtype in (np.int32, np.int64):
# verify that we don't overflow when taking abs(x)
# not relevant for lcm, where the result is unrepresentable anyway
a = dtype(np.iinfo(dtype).min) # negative power of two
q = -(a // 4)
assert_equal(np.gcd(a, q * 3), q)
assert_equal(np.gcd(a, -q * 3), q)
def test_decimal(self):
from decimal import Decimal
a = np.array([1, 1, -1, -1]) * Decimal('0.20')
b = np.array([1, -1, 1, -1]) * Decimal('0.12')
assert_equal(np.gcd(a, b), 4 * [Decimal('0.04')])
assert_equal(np.lcm(a, b), 4 * [Decimal('0.60')])
def test_float(self):
# not well-defined on float due to rounding errors
assert_raises(TypeError, np.gcd, 0.3, 0.4)
assert_raises(TypeError, np.lcm, 0.3, 0.4)
def test_huge_integers(self):
# Converting to an array first is a bit different as it means we
# have an explicit object dtype:
assert_equal(np.array(2**200), 2**200)
# Special promotion rules should ensure that this also works for
# two Python integers (even if slow).
# (We do this for comparisons, as the result is always bool and
# we also special case array comparisons with Python integers)
np.equal(2**200, 2**200)
# But, we cannot do this when it would affect the result dtype:
with pytest.raises(OverflowError):
np.gcd(2**100, 3**100)
# Asking for `object` explicitly is fine, though:
assert np.gcd(2**100, 3**100, dtype=object) == 1
# As of now, the below work, because it is using arrays (which
# will be object arrays)
a = np.array(2**100 * 3**5)
b = np.array([2**100 * 5**7, 2**50 * 3**10])
assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5])
assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10])
def test_inf_and_nan(self):
inf = np.array([np.inf], dtype=np.object_)
assert_raises(ValueError, np.gcd, inf, 1)
assert_raises(ValueError, np.gcd, 1, inf)
assert_raises(ValueError, np.gcd, np.nan, inf)
assert_raises(TypeError, np.gcd, 4, float(np.inf))
| TestRationalFunctions |
python | streamlit__streamlit | lib/tests/streamlit/url_util_test.py | {
"start": 2394,
"end": 2895
} | class ____(unittest.TestCase):
def test_github_url_is_replaced(self):
for target, processed in GITHUB_URLS:
assert url_util.process_gitblob_url(target) == processed
def test_gist_url_is_replaced(self):
for target, processed in GIST_URLS:
assert url_util.process_gitblob_url(target) == processed
def test_nonmatching_url_is_not_replaced(self):
for url in INVALID_URLS:
assert url == url_util.process_gitblob_url(url)
| GitHubUrlTest |
python | huggingface__transformers | src/transformers/models/mpnet/modeling_mpnet.py | {
"start": 1691,
"end": 3974
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.padding_idx = 1
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, **kwargs):
if position_ids is None:
if input_ids is not None:
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
embeddings = inputs_embeds + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
| MPNetEmbeddings |
python | crytic__slither | slither/vyper_parsing/ast/types.py | {
"start": 2258,
"end": 2335
} | class ____(ASTNode):
test: ASTNode
msg: Optional[Str]
@dataclass
| Assert |
python | getsentry__sentry | src/sentry/monitors/endpoints/base.py | {
"start": 3204,
"end": 4412
} | class ____(ProjectEndpoint):
"""
Base endpoint class for monitors which will look up the monitor and
convert it to a Monitor object.
"""
permission_classes: tuple[type[BasePermission], ...] = (ProjectAlertRulePermission,)
def convert_args(
self,
request: Request,
monitor_id_or_slug: str,
*args,
**kwargs,
):
args, kwargs = super().convert_args(request, *args, **kwargs)
# Try lookup by slug
try:
kwargs["monitor"] = Monitor.objects.get(
project_id=kwargs["project"].id, slug=monitor_id_or_slug
)
return args, kwargs
except Monitor.DoesNotExist:
pass
# Try lookup by GUID if the monitor_id_or_slug looks like a UUID
try:
UUID(monitor_id_or_slug, version=4)
kwargs["monitor"] = Monitor.objects.get(
project_id=kwargs["project"].id, guid=monitor_id_or_slug
)
return args, kwargs
except (ValueError, Monitor.DoesNotExist):
# ValueError when the provided ID isn't a UUID
pass
raise ResourceDoesNotExist
| ProjectMonitorEndpoint |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 314147,
"end": 319154
} | class ____(Request):
"""
Adds a task into a queue.
Fails if task state is not 'created'.
Fails if the following parameters in the task were not filled:
* execution.script.repository
* execution.script.entrypoint
:param queue: Queue id. If not provided and no queue name is passed then task
is added to the default queue.
:type queue: str
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
:param queue_name: The name of the queue. If the queue does not exist then it
is auto-created. Cannot be used together with the queue id
:type queue_name: str
:param verify_watched_queue: If passed then check wheter there are any workers
watiching the queue
:type verify_watched_queue: bool
"""
_service = "tasks"
_action = "enqueue"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"queue": {
"description": (
"Queue id. If not provided and no queue name is passed then task is added to the default queue."
),
"type": ["string", "null"],
},
"queue_name": {
"description": (
"The name of the queue. If the queue does not exist then it is auto-created. Cannot be used "
"together with the queue id"
),
"type": "string",
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
"verify_watched_queue": {
"default": False,
"description": "If passed then check wheter there are any workers watiching the queue",
"type": "boolean",
},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task,
queue=None,
status_reason=None,
status_message=None,
queue_name=None,
verify_watched_queue=False,
**kwargs
):
super(EnqueueRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
self.status_reason = status_reason
self.status_message = status_message
self.queue_name = queue_name
self.verify_watched_queue = verify_watched_queue
@schema_property("queue")
def queue(self):
return self._property_queue
@queue.setter
def queue(self, value):
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("task")
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self):
return self._property_status_reason
@status_reason.setter
def status_reason(self, value):
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self):
return self._property_status_message
@status_message.setter
def status_message(self, value):
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
@schema_property("queue_name")
def queue_name(self):
return self._property_queue_name
@queue_name.setter
def queue_name(self, value):
if value is None:
self._property_queue_name = None
return
self.assert_isinstance(value, "queue_name", six.string_types)
self._property_queue_name = value
@schema_property("verify_watched_queue")
def verify_watched_queue(self):
return self._property_verify_watched_queue
@verify_watched_queue.setter
def verify_watched_queue(self, value):
if value is None:
self._property_verify_watched_queue = None
return
self.assert_isinstance(value, "verify_watched_queue", (bool,))
self._property_verify_watched_queue = value
| EnqueueRequest |
python | marshmallow-code__marshmallow | tests/test_schema.py | {
"start": 45920,
"end": 49150
} | class ____(Schema):
name = fields.String()
email = fields.Email()
age = fields.Integer()
def handle_error(self, error, data, *args, **kwargs):
raise CustomError("Something bad happened")
def test_load_with_custom_error_handler(self):
in_data = {"email": "invalid"}
class MySchema3(Schema):
email = fields.Email()
def handle_error(self, error, data, **kwargs):
assert type(error) is ValidationError
assert "email" in error.messages
assert isinstance(error.messages, dict)
assert list(error.messages.keys()) == ["email"]
assert data == in_data
raise CustomError("Something bad happened")
with pytest.raises(CustomError):
MySchema3().load(in_data)
def test_load_with_custom_error_handler_and_partially_valid_data(self):
in_data = {"email": "invalid", "url": "http://valid.com"}
class MySchema(Schema):
email = fields.Email()
url = fields.URL()
def handle_error(self, error, data, **kwargs):
assert type(error) is ValidationError
assert "email" in error.messages
assert isinstance(error.messages, dict)
assert list(error.messages.keys()) == ["email"]
assert data == in_data
raise CustomError("Something bad happened")
with pytest.raises(CustomError):
MySchema().load(in_data)
def test_custom_error_handler_with_validates_decorator(self):
in_data = {"num": -1}
class MySchema(Schema):
num = fields.Int()
@validates("num")
def validate_num(self, value):
if value < 0:
raise ValidationError("Must be greater than 0.")
def handle_error(self, error, data, **kwargs):
assert type(error) is ValidationError
assert "num" in error.messages
assert isinstance(error.messages, dict)
assert list(error.messages.keys()) == ["num"]
assert data == in_data
raise CustomError("Something bad happened")
with pytest.raises(CustomError):
MySchema().load(in_data)
def test_custom_error_handler_with_validates_schema_decorator(self):
in_data = {"num": -1}
class MySchema(Schema):
num = fields.Int()
@validates_schema
def validates_schema(self, data, **kwargs):
raise ValidationError("Invalid schema!")
def handle_error(self, error, data, **kwargs):
assert type(error) is ValidationError
assert isinstance(error.messages, dict)
assert list(error.messages.keys()) == ["_schema"]
assert data == in_data
raise CustomError("Something bad happened")
with pytest.raises(CustomError):
MySchema().load(in_data)
def test_validate_with_custom_error_handler(self):
with pytest.raises(CustomError):
MySchema().validate({"age": "notvalid", "email": "invalid"})
| MySchema |
python | vyperlang__vyper | vyper/codegen/stmt.py | {
"start": 742,
"end": 14126
} | class ____:
def __init__(self, node: vy_ast.VyperNode, context: Context) -> None:
self.stmt = node
self.context = context
fn_name = f"parse_{type(node).__name__}"
with tag_exceptions(node, fallback_exception_type=CodegenPanic, note=fn_name):
fn = getattr(self, fn_name)
with context.internal_memory_scope():
self.ir_node = fn()
assert isinstance(self.ir_node, IRnode), self.ir_node
self.ir_node.annotation = self.stmt.get("node_source_code")
self.ir_node.ast_source = self.stmt
def parse_Expr(self):
return Expr(self.stmt.value, self.context, is_stmt=True).ir_node
def parse_Pass(self):
return IRnode.from_list("pass")
def parse_Name(self):
if self.stmt.id == "vdb":
return IRnode("debugger")
else:
raise StructureException(f"Unsupported statement type: {type(self.stmt)}", self.stmt)
def parse_AnnAssign(self):
ltyp = self.stmt.target._metadata["type"]
varname = self.stmt.target.id
lhs = self.context.new_variable(varname, ltyp)
assert self.stmt.value is not None
rhs = Expr(self.stmt.value, self.context).ir_node
return make_setter(lhs, rhs)
def parse_Assign(self):
# Assignment (e.g. x[4] = y)
src = Expr(self.stmt.value, self.context).ir_node
dst = self._get_target(self.stmt.target)
ret = ["seq"]
if potential_overlap(dst, src):
# there is overlap between the lhs and rhs, and the type is
# complex - i.e., it spans multiple words. for safety, we
# copy to a temporary buffer before copying to the destination.
tmp = self.context.new_internal_variable(src.typ)
ret.append(make_setter(tmp, src))
src = tmp
ret.append(make_setter(dst, src))
return IRnode.from_list(ret)
def parse_If(self):
with self.context.block_scope():
test_expr = Expr.parse_value_expr(self.stmt.test, self.context)
body = ["if", test_expr, parse_body(self.stmt.body, self.context)]
if self.stmt.orelse:
with self.context.block_scope():
body.extend([parse_body(self.stmt.orelse, self.context)])
return IRnode.from_list(body)
def parse_Log(self):
event = self.stmt._metadata["type"]
if len(self.stmt.value.keywords) > 0:
# keyword arguments
to_compile = [arg.value for arg in self.stmt.value.keywords]
else:
# positional arguments
to_compile = self.stmt.value.args
args = [Expr(arg, self.context).ir_node for arg in to_compile]
topic_ir = []
data_ir = []
for arg, is_indexed in zip(args, event.indexed):
if is_indexed:
topic_ir.append(arg)
else:
data_ir.append(arg)
return events.ir_node_for_log(self.stmt, event, topic_ir, data_ir, self.context)
def _assert_reason(self, test_expr, msg):
# from parse_Raise: None passed as the assert condition
is_raise = test_expr is None
if isinstance(msg, vy_ast.Name) and msg.id == "UNREACHABLE":
if is_raise:
return IRnode.from_list(["invalid"], error_msg="raise unreachable")
else:
return IRnode.from_list(
["assert_unreachable", test_expr], error_msg="assert unreachable"
)
# set constant so that revert reason str is well behaved
try:
tmp = self.context.constancy
self.context.constancy = Constancy.Constant
msg_ir = Expr(msg, self.context).ir_node
finally:
self.context.constancy = tmp
msg_ir = wrap_value_for_external_return(msg_ir)
bufsz = 64 + msg_ir.typ.memory_bytes_required
buf = self.context.new_internal_variable(get_type_for_exact_size(bufsz))
# offset of bytes in (bytes,)
method_id = util.method_id_int("Error(string)")
# abi encode method_id + bytestring to `buf+32`, then
# write method_id to `buf` and get out of here
payload_buf = add_ofst(buf, 32)
bufsz -= 32 # reduce buffer by size of `method_id` slot
encoded_length = abi_encode(payload_buf, msg_ir, self.context, bufsz, returns_len=True)
with encoded_length.cache_when_complex("encoded_len") as (b1, encoded_length):
revert_seq = [
"seq",
["mstore", buf, method_id],
["revert", add_ofst(buf, 28), ["add", 4, encoded_length]],
]
revert_seq = b1.resolve(revert_seq)
if is_raise:
ir_node = revert_seq
else:
ir_node = ["if", ["iszero", test_expr], revert_seq]
return IRnode.from_list(ir_node, error_msg="user revert with reason")
def parse_Assert(self):
test_expr = Expr.parse_value_expr(self.stmt.test, self.context)
if self.stmt.msg:
return self._assert_reason(test_expr, self.stmt.msg)
else:
return IRnode.from_list(["assert", test_expr], error_msg="user assert")
def parse_Raise(self):
if self.stmt.exc:
return self._assert_reason(None, self.stmt.exc)
else:
return IRnode.from_list(["revert", 0, 0], error_msg="user raise")
def parse_For(self):
with self.context.block_scope():
if self.stmt.get("iter.func.id") == "range":
return self._parse_For_range()
else:
return self._parse_For_list()
def _parse_For_range(self):
assert "type" in self.stmt.target.target._metadata
target_type = self.stmt.target.target._metadata["type"]
range_call: vy_ast.Call = self.stmt.iter
assert isinstance(range_call, vy_ast.Call)
with self.context.range_scope():
args = [Expr.parse_value_expr(arg, self.context) for arg in range_call.args]
if len(args) == 1:
start = IRnode.from_list(0, typ=target_type)
end = args[0]
elif len(args) == 2:
start, end = args
else: # pragma: nocover
raise TypeCheckFailure("unreachable")
kwargs = {
s.arg: Expr.parse_value_expr(s.value, self.context) for s in range_call.keywords
}
# sanity check that the following `end - start` is a valid operation
assert start.typ == end.typ == target_type
with start.cache_when_complex("start") as (b1, start):
if "bound" in kwargs:
with end.cache_when_complex("end") as (b2, end):
# note: the check for rounds<=rounds_bound happens in asm
# generation for `repeat`.
clamped_start = clamp_le(start, end, target_type.is_signed)
rounds = b2.resolve(IRnode.from_list(["sub", end, clamped_start]))
rounds_bound = kwargs.pop("bound").int_value()
else:
rounds = end.int_value() - start.int_value()
rounds_bound = rounds
assert len(kwargs) == 0 # sanity check stray keywords
if rounds_bound < 1: # pragma: nocover
raise TypeCheckFailure("unreachable: unchecked 0 bound")
varname = self.stmt.target.target.id
i = IRnode.from_list(self.context.fresh_varname("range_ix"), typ=target_type)
iptr = self.context.new_variable(varname, target_type)
self.context.forvars[varname] = True
loop_body = ["seq"]
# store the current value of i so it is accessible to userland
loop_body.append(["mstore", iptr, i])
loop_body.append(parse_body(self.stmt.body, self.context))
del self.context.forvars[varname]
# NOTE: codegen for `repeat` inserts an assertion that
# (gt rounds_bound rounds). note this also covers the case where
# rounds < 0.
# if we ever want to remove that, we need to manually add the assertion
# where it makes sense.
loop = ["repeat", i, start, rounds, rounds_bound, loop_body]
return b1.resolve(IRnode.from_list(loop, error_msg="range() bounds check"))
def _parse_For_list(self):
with self.context.range_scope():
iter_list = Expr(self.stmt.iter, self.context).ir_node
target_type = self.stmt.target.target._metadata["type"]
assert target_type.compare_type(iter_list.typ.value_type)
# user-supplied name for loop variable
varname = self.stmt.target.target.id
loop_var = self.context.new_variable(varname, target_type)
i = IRnode.from_list(self.context.fresh_varname("for_list_ix"), typ=UINT256_T)
self.context.forvars[varname] = True
ret = ["seq"]
# if it's a list literal, force it to memory first
if not iter_list.is_pointer:
tmp_list = self.context.new_internal_variable(iter_list.typ)
ret.append(make_setter(tmp_list, iter_list))
iter_list = tmp_list
with iter_list.cache_when_complex("list_iter") as (b1, iter_list):
# set up the loop variable
e = get_element_ptr(iter_list, i, array_bounds_check=False)
body = ["seq", make_setter(loop_var, e), parse_body(self.stmt.body, self.context)]
repeat_bound = iter_list.typ.count
if isinstance(iter_list.typ, DArrayT):
array_len = get_dyn_array_count(iter_list)
else:
array_len = repeat_bound
ret.append(["repeat", i, 0, array_len, repeat_bound, body])
del self.context.forvars[varname]
return b1.resolve(IRnode.from_list(ret))
def parse_AugAssign(self):
target = self._get_target(self.stmt.target)
right = Expr.parse_value_expr(self.stmt.value, self.context)
if not target.typ._is_prim_word:
# because of this check, we do not need to check for
# make_setter references lhs<->rhs as in parse_Assign -
# single word load/stores are atomic.
raise TypeCheckFailure("unreachable")
for var in target.referenced_variables:
if var.typ._is_prim_word:
continue
# oob - GHSA-4w26-8p97-f4jp
if var in right.variable_writes or (
var.is_state_variable() and right.contains_writeable_call
):
raise CodegenPanic("unreachable")
with target.cache_when_complex("_loc") as (b, target):
left = IRnode.from_list(LOAD(target), typ=target.typ)
new_val = Expr.handle_binop(self.stmt.op, left, right, self.context)
return b.resolve(STORE(target, new_val))
def parse_Continue(self):
return IRnode.from_list("continue")
def parse_Break(self):
return IRnode.from_list("break")
def parse_Return(self):
ir_val = None
if self.stmt.value is not None:
ir_val = Expr(self.stmt.value, self.context).ir_node
return make_return_stmt(ir_val, self.stmt, self.context)
def _get_target(self, target):
_dbg_expr = target
if isinstance(target, vy_ast.Name) and target.id in self.context.forvars: # pragma: nocover
raise TypeCheckFailure(f"Failed constancy check\n{_dbg_expr}")
if isinstance(target, vy_ast.Tuple):
target = Expr(target, self.context).ir_node
items = target.args
if any(not writeable(self.context, item) for item in items): # pragma: nocover
raise TypeCheckFailure(f"Failed constancy check\n{_dbg_expr}")
return target
target = Expr.parse_pointer_expr(target, self.context)
if not writeable(self.context, target): # pragma: nocover
raise TypeCheckFailure(f"Failed constancy check\n{_dbg_expr}")
return target
# Parse a statement (usually one line of code but not always)
def parse_stmt(stmt, context):
return Stmt(stmt, context).ir_node
# check if a function body is "terminated"
# a function is terminated if it ends with a return stmt, OR,
# it ends with an if/else and both branches are terminated.
# (if not, we need to insert a terminator so that the IR is well-formed)
def _is_terminated(code):
last_stmt = code[-1]
if last_stmt.is_terminus:
return True
if isinstance(last_stmt, vy_ast.If):
if last_stmt.orelse:
return _is_terminated(last_stmt.body) and _is_terminated(last_stmt.orelse)
return False
# codegen a list of statements
def parse_body(code, context, ensure_terminated=False):
ir_node = ["seq"]
for stmt in code:
ir = parse_stmt(stmt, context)
ir_node.append(ir)
# force using the return routine / exit_to cleanup for end of function
if ensure_terminated and context.return_type is None and not _is_terminated(code):
ir_node.append(parse_stmt(vy_ast.Return(value=None), context))
# force zerovalent, even last statement
ir_node.append("pass") # CMC 2022-01-16 is this necessary?
return IRnode.from_list(ir_node)
| Stmt |
python | ray-project__ray | python/ray/dashboard/utils.py | {
"start": 13029,
"end": 14664
} | class ____(dict):
"""A dict with attribute-access."""
def __getattr__(self, key):
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError(key)
def __setattr__(self, key, value):
self.__setitem__(key, value)
"""
https://docs.python.org/3/library/json.html?highlight=json#json.JSONEncoder
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str | string |
+-------------------+---------------+
| int, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
"""
_json_compatible_types = {dict, list, tuple, str, int, float, bool, type(None), bytes}
def is_immutable(self):
raise TypeError("%r objects are immutable" % self.__class__.__name__)
def make_immutable(value, strict=True):
value_type = type(value)
if value_type is dict:
return ImmutableDict(value)
if value_type is list:
return ImmutableList(value)
if strict:
if value_type not in _json_compatible_types:
raise TypeError("Type {} can't be immutable.".format(value_type))
return value
| Bunch |
python | facebook__pyre-check | client/language_server/tests/protocol_test.py | {
"start": 2849,
"end": 6589
} | class ____(testslide.TestCase):
@setup.async_test
async def test_read_json_rpc(self) -> None:
async def assert_parses(input: str, expected: json_rpc.Request) -> None:
actual = await read_json_rpc(create_memory_text_reader(input))
self.assertEqual(actual, expected)
async def assert_not_parsed(
input: str, exception_type: Type[Exception] = json_rpc.ParseError
) -> None:
with self.assertRaises(exception_type):
await read_json_rpc(create_memory_text_reader(input))
await assert_not_parsed("", exception_type=ReadChannelClosedError)
await assert_not_parsed("derp")
await assert_not_parsed("Invalid-Header: \r\n\r\n{}")
await assert_not_parsed("Not-Content-Length: 42\r\n\r\n{}")
await assert_not_parsed("Content-Length: derp\r\n\r\n{}")
await assert_not_parsed(
'Content-Length: 4\r\n\r\n{"jsonrpc": "2.0", "id": 0, "method": "foo"}'
)
await assert_parses(
'Content-Length: 27\r\n\r\n{"jsonrpc": "2.0", "id": 0}'
'Content-Length: 44\r\n\r\n{"jsonrpc": "2.0", "id": 0, "method": "foo"}',
expected=json_rpc.Request(id=0, method="foo"),
)
await assert_parses(
'Content-Length: 44\r\n\r\n{"jsonrpc": "2.0", "id": 0, "method": "foo"}',
expected=json_rpc.Request(id=0, method="foo"),
)
await assert_parses(
'CONTENT-LENGTH: 44\r\n\r\n{"jsonrpc": "2.0", "id": 0, "method": "foo"}',
expected=json_rpc.Request(id=0, method="foo"),
)
await assert_parses(
(
"Content-Length: 44\r\n"
"Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n"
'\r\n{"jsonrpc": "2.0", "id": 0, "method": "foo"}'
),
expected=json_rpc.Request(id=0, method="foo"),
)
await assert_parses(
(
"Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n"
"Content-Length: 44\r\n"
'\r\n{"jsonrpc": "2.0", "id": 0, "method": "foo"}'
),
expected=json_rpc.Request(id=0, method="foo"),
)
@setup.async_test
async def test_write_lsp(self) -> None:
async def assert_write(response: json_rpc.Response, expected: str) -> None:
bytes_writer = MemoryBytesWriter()
await write_json_rpc(AsyncTextWriter(bytes_writer), response)
actual = bytes_writer.items()[0].decode("utf-8")
self.assertEqual(actual, expected)
await assert_write(
json_rpc.SuccessResponse(id=0, result=42),
expected=(
"Content-Length: 41\r\n\r\n"
+ json.dumps({"jsonrpc": "2.0", "id": 0, "result": 42})
),
)
await assert_write(
json_rpc.ErrorResponse(id=0, code=42, message="derp"),
expected=(
"Content-Length: 69\r\n\r\n"
+ json.dumps(
{
"jsonrpc": "2.0",
"id": 0,
"error": {"code": 42, "message": "derp"},
}
)
),
)
@setup.async_test
async def test_write_json_rpc_ignore_connection_error(self) -> None:
# This invocation should not raise
await write_json_rpc_ignore_connection_error(
AsyncTextWriter(ExceptionRaisingBytesWriter(ConnectionResetError())),
json_rpc.ErrorResponse(
id=None,
code=42,
message="dummy message",
),
)
| LSPInputOutputTest |
python | huggingface__transformers | src/transformers/models/cpmant/modeling_cpmant.py | {
"start": 21931,
"end": 28514
} | class ____(CpmAntPreTrainedModel):
def __init__(self, config: CpmAntConfig):
super().__init__(config)
self.encoder = CpmAntEncoder(config)
self.segment_embedding = nn.Embedding(config.segment_types, config.hidden_size)
self.input_embedding = nn.Embedding(
config.vocab_size + config.prompt_types * config.prompt_length, config.hidden_size
)
self.position_bias = CpmAntSegmentPositionEmbedding(config)
self.prompt_length = config.prompt_length
self.vocab_size = config.vocab_size
self.post_init()
def get_input_embeddings(self):
return self.input_embedding
def set_input_embeddings(self, embeddings, **kwargs):
self.input_embedding = embeddings
def _prepare_attention_mask(self, input_ids, span, context, length):
batch = input_ids.size(0)
seqlen = input_ids.size(1)
device = input_ids.device
directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(seqlen, device=device).view(-1, 1)
attention_mask = context[:, None, :] | (
context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
)
attention_mask = attention_mask & (span[:, None, :] == span[:, :, None])
# mask for left padding
mask_1d = (
torch.tensor(list(range(seqlen - self.prompt_length))[::-1], device=device)[None, :].repeat(batch, 1)
< length[:, None]
)
mask_1d = torch.cat((torch.ones(batch, self.prompt_length, device=device).bool(), mask_1d), dim=1)
attention_mask = mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
return attention_mask
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs,
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPast]:
r"""
input_ids (`torch.Tensor` of shape `(batch_size, seq_len)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`CPMAntTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
# add prompts ahead
if input_ids.dtype != torch.int32:
input_ids = input_ids.to(torch.int32)
dtype, device = input_ids.dtype, input_ids.device
segment = torch.where(input_ids != 0, 2, 0).to(dtype=dtype, device=device)
length = (segment != 0).sum(-1).to(dtype=dtype, device=device)
input_ids = torch.cat(
(
torch.arange(
self.prompt_length * 2 + self.vocab_size,
self.prompt_length * 3 + self.vocab_size,
dtype=dtype,
device=device,
).repeat(input_ids.size(0), 1),
input_ids,
),
dim=1,
)
batch, seq_length = input_ids.size()
segment = torch.cat((torch.zeros(batch, self.prompt_length, dtype=dtype, device=device), segment), dim=1)
context = torch.full((batch, seq_length), 1, dtype=dtype, device=device)
position = torch.arange(seq_length, dtype=dtype, device=device).repeat(batch, 1)
span = torch.full((batch, seq_length), 0, dtype=dtype, device=device)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
past_length = past_key_values.get_seq_length() if past_key_values is not None else 0
input_ids = input_ids.contiguous()
hidden_states = self.input_embedding(input_ids)
segment_states = self.segment_embedding(segment)
if past_length != 0:
segment_states = segment_states[:, -1:, :]
hidden_states = hidden_states + segment_states
attention_mask = self._prepare_attention_mask(input_ids, span, context, length)
position_bias = self.position_bias(position, position, segment, segment)
attention_mask = attention_mask[:, past_length:, :]
position_bias = position_bias[:, :, past_length:, :]
hidden_states = hidden_states[:, past_length:, :]
hidden_states, all_hidden_states, all_attentions = self.encoder(
hidden_states,
attention_mask,
position_bias,
output_attentions,
output_hidden_states,
past_key_values,
use_cache,
cache_position,
)
if past_length == 0:
hidden_states = hidden_states[:, self.prompt_length :, :]
# drop the prompt
if all_attentions is not None:
new_attentions = ()
for attention in all_attentions:
new_attentions += (attention[:, :, self.prompt_length :, self.prompt_length :],)
all_attentions = new_attentions
if all_hidden_states is not None:
new_hidden_states = ()
for hidden_state in all_hidden_states:
new_hidden_states += (hidden_state[:, self.prompt_length :, :],)
all_hidden_states = new_hidden_states
if not return_dict:
return tuple(
v for v in [hidden_states, past_key_values, all_hidden_states, all_attentions] if v is not None
)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
@auto_docstring(
custom_intro="""
The CPMAnt Model with a language modeling head on top (linear layer with weights tied to the input embeddings).
"""
)
| CpmAntModel |
python | python-visualization__folium | folium/features.py | {
"start": 65869,
"end": 67559
} | class ____(MacroElement):
"""
When one clicks on a Map that contains a ClickForMarker,
a Marker is created at the pointer's position.
Parameters
----------
popup: str or IFrame or Html, default None
Text to display in the markers' popups.
This can also be an Element like IFrame or Html.
If None, the popups will display the marker's latitude and longitude.
You can include the latitude and longitude with ${lat} and ${lng}.
Examples
--------
>>> ClickForMarker("<b>Lat:</b> ${lat}<br /><b>Lon:</b> ${lng}")
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
function newMarker(e){
var new_mark = L.marker().setLatLng(e.latlng).addTo({{this._parent.get_name()}});
new_mark.dragging.enable();
new_mark.on('dblclick', function(e){ {{this._parent.get_name()}}.removeLayer(e.target)})
var lat = e.latlng.lat.toFixed(4),
lng = e.latlng.lng.toFixed(4);
new_mark.bindPopup({{ this.popup }});
};
{{this._parent.get_name()}}.on('click', newMarker);
{% endmacro %}
"""
) # noqa
def __init__(self, popup: Union[IFrame, Html, str, None] = None):
super().__init__()
self._name = "ClickForMarker"
if isinstance(popup, Element):
popup = popup.render()
if popup:
self.popup = "`" + escape_backticks(popup) + "`" # type: ignore
else:
self.popup = '"Latitude: " + lat + "<br>Longitude: " + lng '
| ClickForMarker |
python | getsentry__sentry | src/sentry/services/eventstore/base.py | {
"start": 4452,
"end": 13273
} | class ____(Service):
__all__ = (
"minimal_columns",
"create_event",
"get_event_by_id",
"get_events",
"get_events_snql",
"get_unfetched_events",
"get_adjacent_event_ids",
"get_adjacent_event_ids_snql",
"bind_nodes",
"get_unfetched_transactions",
)
# The minimal list of columns we need to get from snuba to bootstrap an
# event. If the client is planning on loading the entire event body from
# nodestore anyway, we may as well only fetch the minimum from snuba to
# avoid duplicated work.
minimal_columns = {
Dataset.Events: [
Columns.EVENT_ID,
Columns.GROUP_ID,
Columns.PROJECT_ID,
Columns.TIMESTAMP,
Columns.TIMESTAMP_MS,
],
Dataset.Transactions: [
Columns.EVENT_ID,
Columns.GROUP_IDS,
Columns.PROJECT_ID,
Columns.TIMESTAMP,
],
Dataset.IssuePlatform: [
Columns.EVENT_ID,
Columns.GROUP_ID,
Columns.PROJECT_ID,
Columns.TIMESTAMP,
Columns.OCCURRENCE_ID,
],
}
def get_events(
self,
filter: Filter,
orderby: Sequence[str] | None = None,
limit: int = 100,
offset: int = 0,
referrer: str = "eventstore.get_events",
dataset: Dataset = Dataset.Events,
tenant_ids: Mapping[str, Any] | None = None,
) -> list[Event]:
"""
Fetches a list of events given a set of criteria.
Searches for error events, including security and default messages, but not for
transaction events. Returns an empty list if no events match the filter.
Arguments:
snuba_filter (Filter): Filter
orderby (Sequence[str]): List of fields to order by - default ['-time', '-event_id']
limit (int): Query limit - default 100
offset (int): Query offset - default 0
referrer (string): Referrer - default "eventstore.get_events"
"""
raise NotImplementedError
def get_events_snql(
self,
organization_id: int,
group_id: int,
start: datetime | None,
end: datetime | None,
conditions: Sequence[Condition],
orderby: Sequence[str],
limit: int = 100,
inner_limit: int | None = None,
offset: int = 0,
referrer: str = "eventstore.get_events_snql",
dataset: Dataset = Dataset.Events,
tenant_ids: Mapping[str, Any] | None = None,
) -> list[Event]:
raise NotImplementedError
def get_unfetched_events(
self,
filter: Filter,
orderby: Sequence[str] | None = None,
limit: int = 100,
offset: int = 0,
referrer: str = "eventstore.get_unfetched_events",
dataset: Dataset = Dataset.Events,
tenant_ids: Mapping[str, Any] | None = None,
) -> list[Event]:
"""
Same as get_events but returns events without their node datas loaded.
Only the event ID, projectID, groupID and timestamp field will be present without
an additional fetch to nodestore.
Used for fetching large volumes of events that do not need data loaded
from nodestore. Currently this is just used for event data deletions where
we just need the event IDs in order to process the deletions.
Arguments:
snuba_filter (Filter): Filter
orderby (Sequence[str]): List of fields to order by - default ['-time', '-event_id']
limit (int): Query limit - default 100
offset (int): Query offset - default 0
referrer (string): Referrer - default "eventstore.get_unfetched_events"
"""
raise NotImplementedError
@overload
def get_event_by_id(
self,
project_id: int,
event_id: str,
group_id: int | None = None,
tenant_ids: Mapping[str, Any] | None = None,
occurrence_id: str | None = None,
*,
skip_transaction_groupevent: Literal[True],
) -> Event | None: ...
@overload
def get_event_by_id(
self,
project_id: int,
event_id: str,
group_id: int | None = None,
tenant_ids: Mapping[str, Any] | None = None,
occurrence_id: str | None = None,
*,
skip_transaction_groupevent: bool = False,
) -> Event | GroupEvent | None: ...
def get_event_by_id(
self,
project_id: int,
event_id: str,
group_id: int | None = None,
tenant_ids: Mapping[str, Any] | None = None,
occurrence_id: str | None = None,
*,
skip_transaction_groupevent: bool = False,
) -> Event | GroupEvent | None:
"""
Gets a single event of any event type given a project_id and event_id.
Returns None if an event cannot be found.
Arguments:
project_id (int): Project ID
event_id (str): Event ID
group_id (Optional[int]): If the group ID for this event is already known, pass
it here to save one Snuba query.
"""
raise NotImplementedError
def get_adjacent_event_ids_snql(
self,
organization_id: int,
project_id: int,
group_id: int | None,
environments: Sequence[str],
event: Event | GroupEvent,
start: datetime | None = None,
end: datetime | None = None,
conditions: list[Any] | None = None,
) -> list[tuple[str, str] | None]:
raise NotImplementedError
def get_adjacent_event_ids(
self, event: Event | GroupEvent, filter: Filter
) -> tuple[tuple[str, str] | None, tuple[str, str] | None]:
"""
Gets the previous and next event IDs given a current event and some conditions/filters.
Returns a tuple of (project_id, event_id) for (prev_ids, next_ids)
Arguments:
event (Event): Event object
snuba_filter (Filter): Filter
"""
raise NotImplementedError
def create_event(
self,
*,
project_id: int,
event_id: str | None = None,
group_id: int | None = None,
data: Mapping[str, Any] | None = None,
) -> Event:
"""
Returns an Event from processed data
"""
return Event(
project_id=project_id,
event_id=event_id or str(uuid.uuid4()),
group_id=group_id,
data=data,
)
def bind_nodes(self, object_list: Sequence[Event]) -> None:
"""
For a list of Event objects, and a property name where we might find an
(unfetched) NodeData on those objects, fetch all the data blobs for
those NodeDatas with a single multi-get command to nodestore, and bind
the returned blobs to the NodeDatas.
It's not necessary to bind a single Event object since data will be lazily
fetched on any attempt to access a property.
"""
sentry_sdk.set_tag("eventstore.backend", "nodestore")
with sentry_sdk.start_span(op="eventstore.base.bind_nodes"):
object_node_list = [(i, i.data) for i in object_list if i.data.id]
# Remove duplicates from the list of nodes to be fetched
node_ids = list({n.id for _, n in object_node_list})
if not node_ids:
return
node_results = nodestore.backend.get_multi(node_ids)
for item, node in object_node_list:
data = node_results.get(node.id) or {}
node.bind_data(data, ref=node.get_ref(item))
def get_unfetched_transactions(
self,
snuba_filter: Filter,
orderby: Sequence[str] | None = None,
limit: int = 100,
offset: int = 0,
referrer: str = "eventstore.get_unfetched_transactions",
tenant_ids: Mapping[str, Any] | None = None,
) -> list[Event]:
"""
Same as get_unfetched_events but returns transactions.
Only the event ID, projectID and timestamp field will be present without
an additional fetch to nodestore.
Used for fetching large volumes of transactions that do not need data
loaded from nodestore. Currently this is just used for transaction
data deletions where we just need the transactions IDs in order to
process the deletions.
Arguments:
snuba_filter (Filter): Filter
orderby (Sequence[str]): List of fields to order by - default ['-time', '-event_id']
limit (int): Query limit - default 100
offset (int): Query offset - default 0
referrer (string): Referrer - default "eventstore.get_unfetched_transactions"
"""
raise NotImplementedError
| EventStorage |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/supervised_learning/regression.py | {
"start": 6886,
"end": 7849
} | class ____(Regression):
"""Also referred to as Tikhonov regularization. Linear regression model with a regularization factor.
Model that tries to balance the fit of the model with respect to the training data and the complexity
of the model. A large regularization factor with decreases the variance of the model.
Parameters:
-----------
reg_factor: float
The factor that will determine the amount of regularization and feature
shrinkage.
n_iterations: float
The number of training iterations the algorithm will tune the weights for.
learning_rate: float
The step length that will be used when updating the weights.
"""
def __init__(self, reg_factor, n_iterations=1000, learning_rate=0.001):
self.regularization = l2_regularization(alpha=reg_factor)
super(RidgeRegression, self).__init__(n_iterations,
learning_rate)
| RidgeRegression |
python | huggingface__transformers | tests/trainer/test_trainer.py | {
"start": 251531,
"end": 254114
} | class ____(unittest.TestCase):
def setUp(self):
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_hyperparameter_search(self):
def hp_space(trial):
return {
"method": "random",
"metric": {},
"parameters": {
"a": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
"b": {"distribution": "int_uniform", "min": 1, "max": 6},
},
}
def model_init(config):
if config is None:
a = 0
b = 0
else:
a = config["a"]
b = config["b"]
model_config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(model_config).to(torch_device)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
eval_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
num_train_epochs=4,
disable_tqdm=True,
load_best_model_at_end=True,
run_name="test",
model_init=model_init,
)
sweep_kwargs = {
"direction": "minimize",
"hp_space": hp_space,
"backend": "wandb",
"n_trials": 4,
}
best_run = trainer.hyperparameter_search(**sweep_kwargs)
self.assertIsNotNone(best_run.run_id)
self.assertIsNotNone(best_run.run_summary)
hp_keys = set(best_run.hyperparameters.keys())
self.assertSetEqual(hp_keys, {"a", "b", "assignments", "metric"})
# pretend restarting the process purged the environ
import os
del os.environ["WANDB_ENTITY"]
del os.environ["WANDB_PROJECT"]
sweep_kwargs["sweep_id"] = best_run.run_summary
updated_best_run = trainer.hyperparameter_search(**sweep_kwargs)
self.assertIsNotNone(updated_best_run.run_id)
self.assertEqual(updated_best_run.run_summary, best_run.run_summary)
updated_hp_keys = set(updated_best_run.hyperparameters.keys())
self.assertSetEqual(updated_hp_keys, {"a", "b", "assignments", "metric"})
| TrainerHyperParameterWandbIntegrationTest |
python | redis__redis-py | tests/test_asyncio/test_pubsub.py | {
"start": 18022,
"end": 22502
} | class ____:
"""These tests only validate that we get unicode values back"""
channel = "uni" + chr(4456) + "code"
pattern = "uni" + chr(4456) + "*"
data = "abc" + chr(4458) + "123"
def make_message(self, type, channel, data, pattern=None):
return {"type": type, "channel": channel, "pattern": pattern, "data": data}
def setup_method(self, method):
self.message = None
def message_handler(self, message):
self.message = message
@pytest_asyncio.fixture()
async def r(self, create_redis):
return await create_redis(decode_responses=True)
async def test_channel_subscribe_unsubscribe(self, pubsub):
p = pubsub
await p.subscribe(self.channel)
assert await wait_for_message(p) == self.make_message(
"subscribe", self.channel, 1
)
await p.unsubscribe(self.channel)
assert await wait_for_message(p) == self.make_message(
"unsubscribe", self.channel, 0
)
async def test_pattern_subscribe_unsubscribe(self, pubsub):
p = pubsub
await p.psubscribe(self.pattern)
assert await wait_for_message(p) == self.make_message(
"psubscribe", self.pattern, 1
)
await p.punsubscribe(self.pattern)
assert await wait_for_message(p) == self.make_message(
"punsubscribe", self.pattern, 0
)
async def test_channel_publish(self, r: redis.Redis, pubsub):
p = pubsub
await p.subscribe(self.channel)
assert await wait_for_message(p) == self.make_message(
"subscribe", self.channel, 1
)
await r.publish(self.channel, self.data)
assert await wait_for_message(p) == self.make_message(
"message", self.channel, self.data
)
@pytest.mark.onlynoncluster
async def test_pattern_publish(self, r: redis.Redis, pubsub):
p = pubsub
await p.psubscribe(self.pattern)
assert await wait_for_message(p) == self.make_message(
"psubscribe", self.pattern, 1
)
await r.publish(self.channel, self.data)
assert await wait_for_message(p) == self.make_message(
"pmessage", self.channel, self.data, pattern=self.pattern
)
async def test_channel_message_handler(self, r: redis.Redis):
p = r.pubsub(ignore_subscribe_messages=True)
await p.subscribe(**{self.channel: self.message_handler})
assert await wait_for_message(p) is None
await r.publish(self.channel, self.data)
assert await wait_for_message(p) is None
assert self.message == self.make_message("message", self.channel, self.data)
# test that we reconnected to the correct channel
self.message = None
await p.connection.disconnect()
assert await wait_for_message(p) is None # should reconnect
new_data = self.data + "new data"
await r.publish(self.channel, new_data)
assert await wait_for_message(p) is None
assert self.message == self.make_message("message", self.channel, new_data)
await p.aclose()
async def test_pattern_message_handler(self, r: redis.Redis):
p = r.pubsub(ignore_subscribe_messages=True)
await p.psubscribe(**{self.pattern: self.message_handler})
assert await wait_for_message(p) is None
await r.publish(self.channel, self.data)
assert await wait_for_message(p) is None
assert self.message == self.make_message(
"pmessage", self.channel, self.data, pattern=self.pattern
)
# test that we reconnected to the correct pattern
self.message = None
await p.connection.disconnect()
assert await wait_for_message(p) is None # should reconnect
new_data = self.data + "new data"
await r.publish(self.channel, new_data)
assert await wait_for_message(p) is None
assert self.message == self.make_message(
"pmessage", self.channel, new_data, pattern=self.pattern
)
await p.aclose()
async def test_context_manager(self, r: redis.Redis):
async with r.pubsub() as pubsub:
await pubsub.subscribe("foo")
assert pubsub.connection is not None
assert pubsub.connection is None
assert pubsub.channels == {}
assert pubsub.patterns == {}
await pubsub.aclose()
@pytest.mark.onlynoncluster
| TestPubSubAutoDecoding |
python | lazyprogrammer__machine_learning_examples | unsupervised_class2/gaussian_nb.py | {
"start": 599,
"end": 1845
} | class ____(object):
def fit(self, X, Y, smoothing=1e-2):
self.gaussians = dict()
self.priors = dict()
labels = set(Y)
for c in labels:
current_x = X[Y == c]
self.gaussians[c] = {
'mean': current_x.mean(axis=0),
'var': current_x.var(axis=0) + smoothing,
}
self.priors[c] = float(len(Y[Y == c])) / len(Y)
def score(self, X, Y):
P = self.predict(X)
return np.mean(P == Y)
def predict(self, X):
N, D = X.shape
K = len(self.gaussians)
P = np.zeros((N, K))
for c, g in iteritems(self.gaussians):
mean, var = g['mean'], g['var']
P[:,c] = mvn.logpdf(X, mean=mean, cov=var) + np.log(self.priors[c])
return np.argmax(P, axis=1)
# get data
Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST()
# try NB by itself
model1 = GaussianNB()
model1.fit(Xtrain, Ytrain)
print("NB train score:", model1.score(Xtrain, Ytrain))
print("NB test score:", model1.score(Xtest, Ytest))
# try NB with PCA first
pca = PCA(n_components=50)
Ztrain = pca.fit_transform(Xtrain)
Ztest = pca.transform(Xtest)
model2 = GaussianNB()
model2.fit(Ztrain, Ytrain)
print("NB+PCA train score:", model2.score(Ztrain, Ytrain))
print("NB+PCA test score:", model2.score(Ztest, Ytest))
| GaussianNB |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 335799,
"end": 335981
} | class ____(CompositeMark):
"""ErrorBar schema wrapper."""
_schema = {"$ref": "#/definitions/ErrorBar"}
def __init__(self, *args):
super().__init__(*args)
| ErrorBar |
python | PrefectHQ__prefect | tests/runtime/test_deployment.py | {
"start": 4097,
"end": 4904
} | class ____:
async def test_name_is_attribute(self):
assert "name" in dir(deployment)
async def test_name_is_none_when_not_set(self, monkeypatch, prefect_client):
assert deployment.name is None
run = await prefect_client.create_flow_run(flow=flow(lambda: None, name="test"))
monkeypatch.setenv(name="PREFECT__FLOW_RUN_ID", value=str(run.id))
assert deployment.name is None
async def test_name_is_loaded_when_run_name_known(
self, deployment_id, monkeypatch, prefect_client
):
flow_run = await prefect_client.create_flow_run_from_deployment(deployment_id)
assert deployment.name is None
monkeypatch.setenv(name="PREFECT__FLOW_RUN_ID", value=str(flow_run.id))
assert deployment.name == "My Deployment"
| TestName |
python | fluentpython__example-code-2e | 05-data-classes/typing_namedtuple/coordinates.py | {
"start": 224,
"end": 483
} | class ____(NamedTuple):
lat: float
lon: float
def __str__(self):
ns = 'N' if self.lat >= 0 else 'S'
we = 'E' if self.lon >= 0 else 'W'
return f'{abs(self.lat):.1f}°{ns}, {abs(self.lon):.1f}°{we}'
# end::COORDINATE[]
| Coordinate |
python | aio-libs__aiohttp | aiohttp/web_runner.py | {
"start": 1908,
"end": 3351
} | class ____(BaseSite):
__slots__ = ("_host", "_port", "_reuse_address", "_reuse_port")
def __init__(
self,
runner: "BaseRunner[Any]",
host: str | None = None,
port: int | None = None,
*,
ssl_context: SSLContext | None = None,
backlog: int = 128,
reuse_address: bool | None = None,
reuse_port: bool | None = None,
) -> None:
super().__init__(
runner,
ssl_context=ssl_context,
backlog=backlog,
)
self._host = host
if port is None:
port = 8443 if self._ssl_context else 8080
self._port = port
self._reuse_address = reuse_address
self._reuse_port = reuse_port
@property
def name(self) -> str:
scheme = "https" if self._ssl_context else "http"
host = "0.0.0.0" if not self._host else self._host
return str(URL.build(scheme=scheme, host=host, port=self._port))
async def start(self) -> None:
await super().start()
loop = asyncio.get_event_loop()
server = self._runner.server
assert server is not None
self._server = await loop.create_server(
server,
self._host,
self._port,
ssl=self._ssl_context,
backlog=self._backlog,
reuse_address=self._reuse_address,
reuse_port=self._reuse_port,
)
| TCPSite |
python | spack__spack | lib/spack/spack/llnl/util/lang.py | {
"start": 33831,
"end": 36737
} | class ____(Mapping[KT, VT]):
"""Mapping that iterates over key according to an integer priority. If the priority is
the same for two keys, insertion order is what matters.
The priority is set when the key/value pair is added. If not set, the highest current priority
is used.
"""
_data: Dict[KT, VT]
_priorities: List[Tuple[int, KT]]
def __init__(self) -> None:
self._data = {}
# Tuple of (priority, key)
self._priorities = []
def __getitem__(self, key: KT) -> VT:
return self._data[key]
def __len__(self) -> int:
return len(self._data)
def __iter__(self):
yield from (key for _, key in self._priorities)
def __reversed__(self):
yield from (key for _, key in reversed(self._priorities))
def reversed_keys(self):
"""Iterates over keys from the highest priority, to the lowest."""
return reversed(self)
def reversed_values(self):
"""Iterates over values from the highest priority, to the lowest."""
yield from (self._data[key] for _, key in reversed(self._priorities))
def priority_values(self, priority: int):
"""Iterate over values of a given priority."""
if not any(p == priority for p, _ in self._priorities):
raise KeyError(f"No such priority in PriorityOrderedMapping: {priority}")
yield from (self._data[k] for p, k in self._priorities if p == priority)
def _highest_priority(self) -> int:
if not self._priorities:
return 0
result, _ = self._priorities[-1]
return result
def add(self, key: KT, *, value: VT, priority: Optional[int] = None) -> None:
"""Adds a key/value pair to the mapping, with a specific priority.
If the priority is None, then it is assumed to be the highest priority value currently
in the container.
Raises:
ValueError: when the same priority is already in the mapping
"""
if priority is None:
priority = self._highest_priority()
if key in self._data:
self.remove(key)
self._priorities.append((priority, key))
# We rely on sort being stable
self._priorities.sort(key=lambda x: x[0])
self._data[key] = value
assert len(self._data) == len(self._priorities)
def remove(self, key: KT) -> VT:
"""Removes a key from the mapping.
Returns:
The value associated with the key being removed
Raises:
KeyError: if the key is not in the mapping
"""
if key not in self._data:
raise KeyError(f"cannot find {key}")
popped_item = self._data.pop(key)
self._priorities = [(p, k) for p, k in self._priorities if k != key]
assert len(self._data) == len(self._priorities)
return popped_item
| PriorityOrderedMapping |
python | explosion__spaCy | spacy/lang/ht/lemmatizer.py | {
"start": 128,
"end": 1591
} | class ____(Lemmatizer):
"""
Minimal Haitian Creole lemmatizer.
Returns a word's base form based on rules and lookup,
or defaults to the original form.
"""
def is_base_form(self, token: Token) -> bool:
morph = token.morph.to_dict()
upos = token.pos_.lower()
# Consider unmarked forms to be base
if upos in {"noun", "verb", "adj", "adv"}:
if not morph:
return True
if upos == "noun" and morph.get("Number") == "Sing":
return True
if upos == "verb" and morph.get("VerbForm") == "Inf":
return True
if upos == "adj" and morph.get("Degree") == "Pos":
return True
return False
def rule_lemmatize(self, token: Token) -> List[str]:
string = token.text.lower()
pos = token.pos_.lower()
cache_key = (token.orth, token.pos)
if cache_key in self.cache:
return self.cache[cache_key]
forms = []
# fallback rule: just return lowercased form
forms.append(string)
self.cache[cache_key] = forms
return forms
@classmethod
def get_lookups_config(cls, mode: str) -> Tuple[List[str], List[str]]:
if mode == "rule":
required = ["lemma_lookup", "lemma_rules", "lemma_exc", "lemma_index"]
return (required, [])
return super().get_lookups_config(mode)
| HaitianCreoleLemmatizer |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_datacatalog.py | {
"start": 11487,
"end": 13361
} | class ____:
@mock.patch(
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogHook",
**{"return_value.create_tag.return_value": TEST_TAG},
)
def test_assert_valid_hook_call(self, mock_hook) -> None:
with pytest.warns(AirflowProviderDeprecationWarning):
task = CloudDataCatalogCreateTagOperator(
task_id="task_id",
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
tag=TEST_TAG,
template_id=TEST_TAG_TEMPLATE_ID,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_ti = mock.MagicMock()
mock_context = {"ti": mock_ti}
if not AIRFLOW_V_3_0_PLUS:
mock_context["task"] = task # type: ignore[assignment]
result = task.execute(context=mock_context) # type: ignore[arg-type]
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.create_tag.assert_called_once_with(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
tag=TEST_TAG,
template_id=TEST_TAG_TEMPLATE_ID,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_ti.xcom_push.assert_any_call(
key="tag_id",
value=TEST_TAG_ID,
)
assert result == TEST_TAG_DICT
| TestCloudDataCatalogCreateTagOperator |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_tool_search_tool_result_error.py | {
"start": 234,
"end": 484
} | class ____(BaseModel):
error_code: Literal["invalid_tool_input", "unavailable", "too_many_requests", "execution_time_exceeded"]
error_message: Optional[str] = None
type: Literal["tool_search_tool_result_error"]
| BetaToolSearchToolResultError |
python | walkccc__LeetCode | solutions/2105. Watering Plants II/2105.py | {
"start": 0,
"end": 526
} | class ____:
def minimumRefill(
self,
plants: list[int],
capacityA: int,
capacityB: int,
) -> int:
ans = 0
i = 0
j = len(plants) - 1
canA = capacityA
canB = capacityB
while i < j:
ans += (canA < plants[i]) + (canB < plants[j])
if canA < plants[i]:
canA = capacityA
if canB < plants[j]:
canB = capacityB
canA -= plants[i]
canB -= plants[j]
i += 1
j -= 1
return ans + (i == j and max(canA, canB) < plants[i])
| Solution |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_change_between.py | {
"start": 742,
"end": 2661
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
# Please see https://docs.greatexpectations.io/en/latest/reference/core_concepts/metrics.html#metrics
# for information on how to choose an id string for your Metric.
condition_metric_name = "column_values.change_between"
condition_value_keys = (
"from_value",
"to_value",
)
# This method defines the business logic for evaluating your metric when using a PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, from_value, to_value, **kwargs):
# throw an error if one of the values is not numeric
if not pd.to_numeric(column, errors="coerce").notnull().all():
raise TypeError("Column values must be numeric !") # noqa: TRY003
# calculate the difference of the current row with the previous.
# If previous is NaN fills with the initial value "from_value" to consider it true
difference = (column - column.shift()).fillna(from_value)
def is_change_rate_compliant(value: int):
return True if from_value <= abs(value) <= to_value else False
return difference.map(lambda x: is_change_rate_compliant(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# return column.in_([3])
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# return column.isin([3])
# This class defines the Expectation itself
# The main business logic for calculation lives here.
| ColumnValuesToChangeBetween |
python | django__django | tests/test_utils/test_simpletestcase.py | {
"start": 148,
"end": 596
} | class ____(SimpleTestCase):
def raising_test(self):
self._pre_setup.assert_called_once_with()
raise Exception("debug() bubbles up exceptions before cleanup.")
def simple_test(self):
self._pre_setup.assert_called_once_with()
@unittest.skip("Skip condition.")
def skipped_test(self):
pass
@mock.patch.object(ErrorTestCase, "_post_teardown")
@mock.patch.object(ErrorTestCase, "_pre_setup")
| ErrorTestCase |
python | langchain-ai__langchain | libs/core/langchain_core/agents.py | {
"start": 3294,
"end": 4455
} | class ____(AgentAction):
"""Representation of an action to be executed by an agent.
This is similar to `AgentAction`, but includes a message log consisting of
chat messages.
This is useful when working with `ChatModels`, and is used to reconstruct
conversation history from the agent's perspective.
"""
message_log: Sequence[BaseMessage]
"""Similar to log, this can be used to pass along extra information about what exact
messages were predicted by the LLM before parsing out the `(tool, tool_input)`.
This is again useful if `(tool, tool_input)` cannot be used to fully recreate the
LLM prediction, and you need that LLM prediction (for future agent iteration).
Compared to `log`, this is useful when the underlying LLM is a
chat model (and therefore returns messages rather than a string).
"""
# Ignoring type because we're overriding the type from AgentAction.
# And this is the correct thing to do in this case.
# The type literal is used for serialization purposes.
type: Literal["AgentActionMessageLog"] = "AgentActionMessageLog" # type: ignore[assignment]
| AgentActionMessageLog |
python | django__django | django/contrib/gis/gdal/field.py | {
"start": 427,
"end": 4129
} | class ____(GDALBase):
"""
Wrap an OGR Field. Needs to be instantiated from a Feature object.
"""
def __init__(self, feat, index):
"""
Initialize on the feature object and the integer index of
the field within the feature.
"""
# Setting the feature pointer and index.
self._feat = feat
self._index = index
# Getting the pointer for this field.
fld_ptr = capi.get_feat_field_defn(feat.ptr, index)
if not fld_ptr:
raise GDALException("Cannot create OGR Field, invalid pointer given.")
self.ptr = fld_ptr
# Setting the class depending upon the OGR Field Type (OFT)
self.__class__ = OGRFieldTypes[self.type]
def __str__(self):
"Return the string representation of the Field."
return str(self.value).strip()
# #### Field Methods ####
def as_double(self):
"Retrieve the Field's value as a double (float)."
return (
capi.get_field_as_double(self._feat.ptr, self._index)
if self.is_set
else None
)
def as_int(self, is_64=False):
"Retrieve the Field's value as an integer."
if is_64:
return (
capi.get_field_as_integer64(self._feat.ptr, self._index)
if self.is_set
else None
)
else:
return (
capi.get_field_as_integer(self._feat.ptr, self._index)
if self.is_set
else None
)
def as_string(self):
"Retrieve the Field's value as a string."
if not self.is_set:
return None
string = capi.get_field_as_string(self._feat.ptr, self._index)
return force_str(string, encoding=self._feat.encoding, strings_only=True)
def as_datetime(self):
"Retrieve the Field's value as a tuple of date & time components."
if not self.is_set:
return None
yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
status = capi.get_field_as_datetime(
self._feat.ptr,
self._index,
byref(yy),
byref(mm),
byref(dd),
byref(hh),
byref(mn),
byref(ss),
byref(tz),
)
if status:
return (yy, mm, dd, hh, mn, ss, tz)
else:
raise GDALException(
"Unable to retrieve date & time information from the field."
)
# #### Field Properties ####
@property
def is_set(self):
"Return True if the value of this field isn't null, False otherwise."
return capi.is_field_set(self._feat.ptr, self._index)
@property
def name(self):
"Return the name of this Field."
name = capi.get_field_name(self.ptr)
return force_str(name, encoding=self._feat.encoding, strings_only=True)
@property
def precision(self):
"Return the precision of this Field."
return capi.get_field_precision(self.ptr)
@property
def type(self):
"Return the OGR type of this Field."
return capi.get_field_type(self.ptr)
@property
def type_name(self):
"Return the OGR field type name for this Field."
return capi.get_field_type_name(self.type)
@property
def value(self):
"Return the value of this Field."
# Default is to get the field as a string.
return self.as_string()
@property
def width(self):
"Return the width of this Field."
return capi.get_field_width(self.ptr)
# ### The Field sub-classes for each OGR Field type. ###
| Field |
python | sympy__sympy | sympy/functions/special/polynomials.py | {
"start": 23541,
"end": 24850
} | class ____(DefinedFunction):
r"""
``chebyshevu_root(n, k)`` returns the $k$th root (indexed from zero) of the
$n$th Chebyshev polynomial of the second kind; that is, if $0 \le k < n$,
``chebyshevu(n, chebyshevu_root(n, k)) == 0``.
Examples
========
>>> from sympy import chebyshevu, chebyshevu_root
>>> chebyshevu_root(3, 2)
-sqrt(2)/2
>>> chebyshevu(3, chebyshevu_root(3, 2))
0
See Also
========
chebyshevt, chebyshevt_root, chebyshevu,
legendre, assoc_legendre,
hermite, hermite_prob,
laguerre, assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.hermite_prob_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
"""
@classmethod
def eval(cls, n, k):
if not ((0 <= k) and (k < n)):
raise ValueError("must have 0 <= k < n, "
"got k = %s and n = %s" % (k, n))
return cos(S.Pi*(k + 1)/(n + 1))
#----------------------------------------------------------------------------
# Legendre polynomials and Associated Legendre polynomials
#
| chebyshevu_root |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 43611,
"end": 43843
} | class ____(BaseModel, extra="forbid"):
"""
Ordered sequence of GeoPoints representing the line
"""
points: List["GeoPoint"] = Field(..., description="Ordered sequence of GeoPoints representing the line")
| GeoLineString |
python | python-poetry__poetry | src/poetry/repositories/parsers/html_page_parser.py | {
"start": 73,
"end": 596
} | class ____(HTMLParser):
def __init__(self) -> None:
super().__init__()
self.base_url: str | None = None
self.anchors: list[dict[str, str | None]] = []
def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
if tag == "base" and self.base_url is None:
base_url = dict(attrs).get("href")
if base_url is not None:
self.base_url = base_url
elif tag == "a":
self.anchors.append(dict(attrs))
| HTMLPageParser |
python | ray-project__ray | rllib/algorithms/impala/impala_learner.py | {
"start": 16346,
"end": 21534
} | class ____(threading.Thread):
def __init__(
self,
*,
update_method,
in_queue: Union[deque, CircularBuffer],
# TODO (sven): Figure out a way to use a results queue instaad of the "reduce
# metrics each 20 updates" logic right now.
# out_queue: deque,
learner,
):
super().__init__(name="_LearnerThread")
self.daemon = True
self.learner = learner
self.stopped = False
self._update_method = update_method
self._in_queue: Union[deque, CircularBuffer] = in_queue
# TODO (sven): Figure out a way to use a results queue instaad of the "reduce
# metrics each 20 updates" logic right now.
# self._out_queue: deque = out_queue
# Ray metrics
self._metrics_learner_impala_thread_step = Histogram(
name="rllib_learner_impala_learner_thread_step_time",
description="Time taken in seconds for learner thread _step.",
boundaries=DEFAULT_HISTOGRAM_BOUNDARIES_SHORT_EVENTS,
tag_keys=("rllib",),
)
self._metrics_learner_impala_thread_step.set_default_tags(
{"rllib": "IMPALA/LearnerThread"}
)
self._metrics_learner_impala_thread_step_update = Histogram(
name="rllib_learner_impala_learner_thread_step_update_time",
description="Time taken in seconds for learner thread _step update.",
boundaries=DEFAULT_HISTOGRAM_BOUNDARIES_SHORT_EVENTS,
tag_keys=("rllib",),
)
self._metrics_learner_impala_thread_step_update.set_default_tags(
{"rllib": "IMPALA/LearnerThread"}
)
def run(self) -> None:
while not self.stopped:
with TimerAndPrometheusLogger(self._metrics_learner_impala_thread_step):
self.step()
def step(self):
global _CURRENT_GLOBAL_TIMESTEPS
# Get a new batch from the GPU-data (deque.pop -> newest item first).
with self.learner.metrics.log_time(
(ALL_MODULES, LEARNER_THREAD_IN_QUEUE_WAIT_TIMER)
):
# Get a new batch from the GPU-data (learner queue OR circular buffer).
if isinstance(self._in_queue, CircularBuffer):
ma_batch_on_gpu = self._in_queue.sample()
else:
# Queue is empty: Sleep a tiny bit to avoid CPU-thrashing.
while not self._in_queue:
time.sleep(0.0001)
# Consume from the left (oldest batches first).
# If we consumed from the right, we would run into the danger of
# learning from newer batches (left side) most times, BUT sometimes
# grabbing older batches (right area of deque).
ma_batch_on_gpu = self._in_queue.popleft()
# Add this check here in case thread has been stopped while we were waiting for
# a batch from the queue/buffer.
if self.stopped:
return
# Call the update method on the batch.
with self.learner.metrics.log_time((ALL_MODULES, LEARNER_THREAD_UPDATE_TIMER)):
# TODO (sven): For multi-agent AND SGD iter > 1, we need to make sure
# this thread has the information about the min minibatches necessary
# (due to different agents taking different steps in the env, e.g.
# MA-CartPole).
with TimerAndPrometheusLogger(
self._metrics_learner_impala_thread_step_update
):
self._update_method(
self=self.learner,
training_data=TrainingData(batch=ma_batch_on_gpu),
timesteps=_CURRENT_GLOBAL_TIMESTEPS,
_no_metrics_reduce=True,
)
# TODO (sven): Figure out a way to use a results queue instaad of the "reduce
# metrics each 20 updates" logic right now.
# self._out_queue.append(results)
with self.learner._num_updates_lock:
self.learner._num_updates += 1
@staticmethod
def enqueue(learner_queue: deque, batch, metrics):
# Right-append to learner queue (a deque). If full, drops the leftmost
# (oldest) item in the deque.
# Note that we consume from the left (oldest first), which is why the queue size
# should probably always be small'ish (<< 10), otherwise we run into the danger
# of training with very old samples.
# If we consumed from the right, we would run into the danger of learning
# from newer batches (left side) most times, BUT sometimes grabbing a
# really old batches (right area of deque).
if len(learner_queue) == learner_queue.maxlen:
metrics.log_value(
(ALL_MODULES, LEARNER_THREAD_ENV_STEPS_DROPPED),
learner_queue.popleft().env_steps(),
reduce="sum",
)
learner_queue.append(batch)
# Log current queue size.
metrics.log_value(
(ALL_MODULES, QUEUE_SIZE_LEARNER_THREAD_QUEUE),
len(learner_queue),
)
| _LearnerThread |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_numeric.py | {
"start": 104590,
"end": 106536
} | class ____(TestCase):
flag_names = [
"C",
"C_CONTIGUOUS",
"CONTIGUOUS",
"F",
"F_CONTIGUOUS",
"FORTRAN",
"A",
"ALIGNED",
"W",
"WRITEABLE",
"O",
"OWNDATA",
]
def generate_all_false(self, dtype):
arr = np.zeros((2, 2), [("junk", "i1"), ("a", dtype)])
arr.setflags(write=False)
a = arr["a"]
assert_(not a.flags["C"])
assert_(not a.flags["F"])
assert_(not a.flags["O"])
assert_(not a.flags["W"])
assert_(not a.flags["A"])
return a
def set_and_check_flag(self, flag, dtype, arr):
if dtype is None:
dtype = arr.dtype
b = np.require(arr, dtype, [flag])
assert_(b.flags[flag])
assert_(b.dtype == dtype)
# a further call to np.require ought to return the same array
# unless OWNDATA is specified.
c = np.require(b, None, [flag])
if flag[0] != "O":
assert_(c is b)
else:
assert_(c.flags[flag])
def test_require_each(self):
id = ["f8", "i4"]
fd = [None, "f8", "c16"]
for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names):
a = self.generate_all_false(idtype)
self.set_and_check_flag(flag, fdtype, a)
def test_unknown_requirement(self):
a = self.generate_all_false("f8")
assert_raises(KeyError, np.require, a, None, "Q")
def test_non_array_input(self):
a = np.require([1, 2, 3, 4], "i4", ["C", "A", "O"])
assert_(a.flags["O"])
assert_(a.flags["C"])
assert_(a.flags["A"])
assert_(a.dtype == "i4")
assert_equal(a, [1, 2, 3, 4])
def test_C_and_F_simul(self):
a = self.generate_all_false("f8")
assert_raises(ValueError, np.require, a, None, ["C", "F"])
@xpassIfTorchDynamo_np # (reason="TODO")
| TestRequire |
python | pypa__warehouse | warehouse/forms.py | {
"start": 3333,
"end": 3644
} | class ____(BaseForm):
__params__ = ["locale_id"]
locale_id = StringField(validators=[InputRequired(message="Missing locale ID")])
def validate_locale_id(self, field):
if field.data not in KNOWN_LOCALES.keys():
raise ValidationError(f"Unknown locale ID: {field.data}")
| SetLocaleForm |
python | huggingface__transformers | src/transformers/models/sam/modeling_sam.py | {
"start": 25648,
"end": 30454
} | class ____(nn.Module):
def __init__(self, config: SamConfig):
super().__init__()
self.shared_embedding = SamPositionalEmbedding(config.vision_config)
config = config.prompt_encoder_config
self.mask_embed = SamMaskEmbedding(config)
self.no_mask_embed = nn.Embedding(1, config.hidden_size)
self.image_embedding_size = (config.image_embedding_size, config.image_embedding_size)
self.input_image_size = config.image_size
self.point_embed = nn.ModuleList(
[nn.Embedding(1, config.hidden_size) for i in range(config.num_point_embeddings)]
)
self.hidden_size = config.hidden_size
self.not_a_point_embed = nn.Embedding(1, config.hidden_size)
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
target_point_shape = (points.shape[0], points.shape[1], 1, points.shape[-1])
target_labels_shape = (points.shape[0], points.shape[1], 1)
padding_point = torch.zeros(target_point_shape, device=points.device)
padding_label = -torch.ones(target_labels_shape, device=labels.device)
points = torch.cat([points, padding_point], dim=2)
labels = torch.cat([labels, padding_label], dim=2)
input_shape = (self.input_image_size, self.input_image_size)
point_embedding = self.shared_embedding(points, input_shape)
# torch.where and expanding the labels tensor is required by the ONNX export
point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding)
# This is required for the ONNX export. The dtype, device need to be explicitly
# specified as otherwise torch.onnx.export interprets as double
point_embedding = torch.where(labels[..., None] != -10, point_embedding, torch.zeros_like(point_embedding))
point_embedding = torch.where(
(labels == 0)[:, :, :, None],
point_embedding + self.point_embed[0].weight[None, None, :, :],
point_embedding,
)
point_embedding = torch.where(
(labels == 1)[:, :, :, None],
point_embedding + self.point_embed[1].weight[None, None, :, :],
point_embedding,
)
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5 # Shift to center of pixel
batch_size, nb_boxes = boxes.shape[:2]
coords = boxes.reshape(batch_size, nb_boxes, 2, 2)
input_shape = (self.input_image_size, self.input_image_size)
corner_embedding = self.shared_embedding(coords, input_shape)
corner_embedding[:, :, 0, :] += self.point_embed[2].weight
corner_embedding[:, :, 1, :] += self.point_embed[3].weight
return corner_embedding
def forward(
self,
input_points: Optional[tuple[torch.Tensor, torch.Tensor]],
input_labels: Optional[torch.Tensor],
input_boxes: Optional[torch.Tensor],
input_masks: Optional[torch.Tensor],
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Embeds different types of prompts, returning both sparse and dense embeddings.
Args:
points (`torch.Tensor`, *optional*):
point coordinates and labels to embed.
boxes (`torch.Tensor`, *optional*):
boxes to embed
masks (`torch.Tensor`, *optional*):
masks to embed
"""
sparse_embeddings = None
batch_size = 1
if input_points is not None:
batch_size = input_points.shape[0]
if input_labels is None:
raise ValueError("If points are provided, labels must also be provided.")
point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None))
sparse_embeddings = point_embeddings
if input_boxes is not None:
batch_size = input_boxes.shape[0]
box_embeddings = self._embed_boxes(input_boxes)
if sparse_embeddings is None:
sparse_embeddings = box_embeddings
else:
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2)
if input_masks is not None:
dense_embeddings = self.mask_embed(input_masks)
else:
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1]
)
return sparse_embeddings, dense_embeddings
| SamPromptEncoder |
python | cython__cython | Cython/Compiler/Builtin.py | {
"start": 2634,
"end": 3353
} | class ____:
def __init__(self, py_name, cname=None, field_type=None, field_type_name=None):
self.py_name = py_name
self.cname = cname or py_name
self.field_type_name = field_type_name # can't do the lookup before the type is declared!
self.field_type = field_type
def declare_in_type(self, self_type):
if self.field_type_name is not None:
# lazy type lookup
field_type = builtin_scope.lookup(self.field_type_name).type
else:
field_type = self.field_type or PyrexTypes.py_object_type
entry = self_type.scope.declare(self.py_name, self.cname, field_type, None, 'private')
entry.is_variable = True
| BuiltinAttribute |
python | nedbat__coveragepy | coverage/lcovreport.py | {
"start": 5872,
"end": 7874
} | class ____:
"""A reporter for writing LCOV coverage reports."""
report_type = "LCOV report"
def __init__(self, coverage: Coverage) -> None:
self.coverage = coverage
self.config = coverage.config
self.total = Numbers(self.coverage.config.precision)
def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
"""Renders the full lcov report.
`morfs` is a list of modules or filenames
outfile is the file object to write the file into.
"""
self.coverage.get_data()
outfile = outfile or sys.stdout
# ensure file records are sorted by the _relative_ filename, not the full path
to_report = [
(fr.relative_filename(), fr, analysis)
for fr, analysis in get_analysis_to_report(self.coverage, morfs)
]
to_report.sort()
for fname, fr, analysis in to_report:
self.total += analysis.numbers
self.lcov_file(fname, fr, analysis, outfile)
return self.total.n_statements and self.total.pc_covered
def lcov_file(
self,
rel_fname: str,
fr: FileReporter,
analysis: Analysis,
outfile: IO[str],
) -> None:
"""Produces the lcov data for a single file.
This currently supports both line and branch coverage,
however function coverage is not supported.
"""
if analysis.numbers.n_statements == 0:
if self.config.skip_empty:
return
outfile.write(f"SF:{rel_fname}\n")
lines = sorted(analysis.statements)
if self.config.lcov_line_checksums:
source_lines = fr.source().splitlines()
else:
source_lines = []
lcov_lines(analysis, lines, source_lines, outfile)
lcov_functions(fr, analysis, outfile)
if analysis.has_arcs:
lcov_arcs(fr, analysis, lines, outfile)
outfile.write("end_of_record\n")
| LcovReporter |
python | facebookresearch__faiss | tests/test_index_binary.py | {
"start": 2315,
"end": 4653
} | class ____(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
d = 32
nt = 0
nb = 1500
nq = 500
(_, self.xb, self.xq) = make_binary_dataset(d, nt, nb, nq)
def test_flat(self):
d = self.xq.shape[1] * 8
nq = self.xq.shape[0]
index = faiss.IndexBinaryFlat(d)
index.add(self.xb)
D, I = index.search(self.xq, 3)
I2 = index.assign(x=self.xq, k=3, labels=None)
assert np.all(I == I2)
for i in range(nq):
for j, dj in zip(I[i], D[i]):
ref_dis = binary_dis(self.xq[i], self.xb[j])
assert dj == ref_dis
# test reconstruction
assert np.all(index.reconstruct(12) == self.xb[12])
def test_empty_flat(self):
d = self.xq.shape[1] * 8
index = faiss.IndexBinaryFlat(d)
for use_heap in [True, False]:
index.use_heap = use_heap
Dflat, Iflat = index.search(self.xq, 10)
assert(np.all(Iflat == -1))
assert(np.all(Dflat == 2147483647)) # NOTE(hoss): int32_t max
def test_range_search(self):
d = self.xq.shape[1] * 8
index = faiss.IndexBinaryFlat(d)
index.add(self.xb)
D, I = index.search(self.xq, 10)
thresh = int(np.median(D[:, -1]))
lims, D2, I2 = index.range_search(self.xq, thresh)
nt1 = nt2 = 0
for i in range(len(self.xq)):
range_res = I2[lims[i]:lims[i + 1]]
if thresh > D[i, -1]:
self.assertTrue(set(I[i]) <= set(range_res))
nt1 += 1
elif thresh < D[i, -1]:
self.assertTrue(set(range_res) <= set(I[i]))
nt2 += 1
# in case of equality we have a problem with ties
# nb tests is actually low...
self.assertTrue(nt1 > 19 and nt2 > 19)
def test_reconstruct(self):
index = faiss.IndexBinaryFlat(64)
input_vector = np.random.randint(0, 255, size=(10, index.code_size)).astype("uint8")
index.add(input_vector)
reconstructed_vector = index.reconstruct_n(0, 4)
assert reconstructed_vector.shape == (4, index.code_size)
assert np.all(input_vector[:4] == reconstructed_vector)
| TestBinaryFlat |
python | Textualize__textual | src/textual/demo/widgets.py | {
"start": 14998,
"end": 17127
} | class ____(containers.VerticalGroup):
"""Demonstrates sparklines."""
DEFAULT_CLASSES = "column"
LOGS_MD = """\
## Sparklines
A low-res summary of time-series data.
For detailed graphs, see [textual-plotext](https://github.com/Textualize/textual-plotext).
"""
DEFAULT_CSS = """
Sparklines {
Sparkline {
width: 1fr;
margin: 1;
&#first > .sparkline--min-color { color: $success; }
&#first > .sparkline--max-color { color: $warning; }
&#second > .sparkline--min-color { color: $warning; }
&#second > .sparkline--max-color { color: $error; }
&#third > .sparkline--min-color { color: $primary; }
&#third > .sparkline--max-color { color: $accent; }
}
VerticalScroll {
height: auto;
border: heavy transparent;
&:focus { border: heavy $border; }
}
}
"""
count = var(0)
data: reactive[list[float]] = reactive(list)
def compose(self) -> ComposeResult:
yield Markdown(self.LOGS_MD)
with containers.VerticalScroll(
id="container", can_focus=True, can_maximize=True
):
yield Sparkline([], summary_function=max, id="first").data_bind(
Sparklines.data,
)
yield Sparkline([], summary_function=max, id="second").data_bind(
Sparklines.data,
)
yield Sparkline([], summary_function=max, id="third").data_bind(
Sparklines.data,
)
def on_mount(self) -> None:
self.set_interval(0.1, self.update_sparks)
def update_sparks(self) -> None:
"""Update the sparks data."""
if self.is_scrolling:
return
if (
not self.app.screen.can_view_partial(self)
and not self.query_one(Sparkline).is_in_maximized_view
):
return
self.count += 1
offset = self.count * 40
self.data = [abs(sin(x / 3.14)) for x in range(offset, offset + 360 * 6, 20)]
| Sparklines |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 23815,
"end": 23923
} | class ____(sqltypes.NCHAR):
def get_dbapi_type(self, dbapi):
return dbapi.FIXED_NCHAR
| _OracleNChar |
python | google__pytype | pytype/tests/test_variable_annotations.py | {
"start": 1175,
"end": 11138
} | class ____(test_base.BaseTest):
"""Tests for PEP526 variable annotations."""
def test_infer_types(self):
ty = self.Infer("""
from typing import List
lst: List[int] = []
x: int = 1
y = 2
class A:
a: int = 1
b = 2
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
lst: List[int]
x: int
y: int
class A:
a: int
b: int
""",
)
def test_illegal_annotations(self):
errors = self.CheckWithErrors("""
from typing import List, TypeVar, NoReturn
T = TypeVar('T')
a: "abc" = "1" # name-error[e1]
b: 123 = "2" # invalid-annotation[e2]
c: List[int] = []
d: List[T] = [] # invalid-annotation[e3]
e: int if __random__ else str = 123 # invalid-annotation[e4]
""")
self.assertErrorRegexes(
errors,
{
"e1": r"Name \'abc\' is not defined",
"e2": r"Not a type",
"e3": r"'T' not in scope",
"e4": r"Must be constant",
},
)
def test_never(self):
errors = self.CheckWithErrors("""
from typing import NoReturn
x: NoReturn = 0 # annotation-type-mismatch[e]
""")
self.assertErrorSequences(
errors, {"e": ["Annotation: Never", "Assignment: int"]}
)
def test_uninitialized_class_annotation(self):
ty = self.Infer("""
class Foo:
bar: int
def baz(self):
return self.bar
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo:
bar: int
def baz(self) -> int: ...
""",
)
def test_uninitialized_module_annotation(self):
ty = self.Infer("""
foo: int
bar = foo
""")
self.assertTypesMatchPytd(
ty,
"""
foo: int
bar: int
""",
)
def test_overwrite_annotations_dict(self):
errors = self.CheckWithErrors("""
__annotations__ = None
foo: int # unsupported-operands[e]
""")
self.assertErrorRegexes(errors, {"e": r"None.*__setitem__"})
def test_shadow_none(self):
ty = self.Infer("""
v: int = None
""")
self.assertTypesMatchPytd(
ty,
"""
v: int
""",
)
def test_overwrite_annotation(self):
ty, errors = self.InferWithErrors("""
x: int
x = "" # annotation-type-mismatch[e]
""")
self.assertTypesMatchPytd(ty, "x: int")
self.assertErrorRegexes(errors, {"e": r"Annotation: int.*Assignment: str"})
def test_overwrite_annotation_in_class(self):
ty, errors = self.InferWithErrors("""
class Foo:
x: int
x = "" # annotation-type-mismatch[e]
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo:
x: int
""",
)
self.assertErrorRegexes(errors, {"e": r"Annotation: int.*Assignment: str"})
def test_class_variable_forward_reference(self):
ty = self.Infer("""
class A:
a: 'A' = ...
x = 42
""")
self.assertTypesMatchPytd(
ty,
"""
class A:
a: A
x: int
""",
)
def test_callable_forward_reference(self):
# Callable[['A']...] creates an instance of A during output generation,
# which previously caused a crash when iterating over existing instances.
ty = self.Infer("""
from typing import Callable
class A:
def __init__(self, fn: Callable[['A'], bool]):
self.fn = fn
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable
class A:
fn: Callable[[A], bool]
def __init__(self, fn: Callable[[A], bool]) -> None: ...
""",
)
def test_multiple_forward_reference(self):
ty = self.Infer("""
from typing import Dict
class A:
x: Dict['A', 'B']
class B:
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict
class A:
x: Dict[A, B]
class B: ...
""",
)
def test_non_annotations_dict(self):
# Regression test to make sure `x` isn't confused with `__annotations__`.
self.Check("""
class K(dict):
pass
x = K()
y: int = 9
x['z'] = 5
""")
def test_function_local_annotation(self):
ty = self.Infer("""
def f():
x: int = None
return x
""")
self.assertTypesMatchPytd(ty, "def f() -> int: ...")
@test_base.skip("b/167613685")
def test_function_local_annotation_no_assignment(self):
ty = self.Infer("""
def f():
x: int
return x
""")
self.assertTypesMatchPytd(ty, "def f() -> int: ...")
def test_multi_statement_line(self):
ty = self.Infer("""
def f():
if __random__: v: int = None
else: v = __any_object__
return v
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
def f() -> Any: ...
""",
)
def test_multi_line_assignment(self):
ty = self.Infer("""
v: int = (
None)
""")
self.assertTypesMatchPytd(ty, "v: int")
def test_complex_assignment(self):
# Tests that when an assignment contains multiple STORE_* opcodes on
# different lines, we associate the annotation with the right one.
ty = self.Infer("""
from typing import Dict
def f():
column_map: Dict[str, Dict[str, bool]] = {
column: {
'visible': True
} for column in __any_object__.intersection(
__any_object__)
}
return column_map
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict
def f() -> Dict[str, Dict[str, bool]]: ...
""",
)
def test_none_or_ellipsis_assignment(self):
self.Check("""
v1: int = None
v2: str = ...
""")
def test_any(self):
self.Check("""
from typing import Any
def f():
x: Any = None
print(x.upper())
x = None
print(x.upper())
""")
def test_uninitialized_variable_container_check(self):
self.CheckWithErrors("""
from typing import List
x: List[str]
x.append(0) # container-type-mismatch
""")
def test_uninitialized_attribute_container_check(self):
self.CheckWithErrors("""
from typing import List
class Foo:
x: List[str]
def __init__(self):
self.x.append(0) # container-type-mismatch
""")
def test_any_container(self):
ty = self.Infer("""
from typing import Any, Dict
def f():
x: Dict[str, Any] = {}
x['a'] = 'b'
for v in x.values():
print(v.whatever)
return x
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Dict
def f() -> Dict[str, Any]: ...
""",
)
def test_function_parameter(self):
self.Check("""
from typing import TypeVar
T = TypeVar('T')
def f(x: T, y: T):
z: T = x
return z
assert_type(f(0, 1), int)
""")
def test_illegal_parameter(self):
errors = self.CheckWithErrors("""
from typing import TypeVar
T = TypeVar('T')
S = TypeVar('S')
def f(x: T, y: T):
z: S = x # invalid-annotation[e]
return z
""")
self.assertErrorRegexes(errors, {"e": r"'S' not in scope for method 'f'"})
def test_callable_parameters(self):
errors = self.CheckWithErrors("""
from typing import Callable, TypeVar
T = TypeVar('T')
f: Callable[[T, T], T]
assert_type(f(0, 1), int)
f(0, '1') # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"Expected.*int.*Actual.*str"})
def test_nested_callable_parameters(self):
self.Check("""
from typing import Callable, List, TypeVar
T = TypeVar('T')
fs: List[Callable[[T], T]]
assert_type(fs[0]('hello world'), str)
""")
def test_callable_parameters_in_method(self):
self.Check("""
from typing import Callable, TypeVar
T = TypeVar('T')
def f():
g: Callable[[T], T] = None
assert_type(g(0), int)
""")
def test_class_and_callable_parameters(self):
errors = self.CheckWithErrors("""
from typing import Callable, Generic, TypeVar
T1 = TypeVar('T1')
T2 = TypeVar('T2')
class Foo(Generic[T1]):
x: Callable[[T1, T2], T2]
def f(self):
x: Callable[[T1, T2], T2] = None
return x
foo = Foo[int]()
assert_type(foo.x(0, 'hello world'), str)
assert_type(foo.f()(0, 4.2), float)
foo.x(None, 'hello world') # wrong-arg-types[e1]
foo.f()('oops', 4.2) # wrong-arg-types[e2]
""")
self.assertErrorRegexes(
errors,
{
"e1": r"Expected.*int.*Actual.*None",
"e2": r"Expected.*int.*Actual.*str",
},
)
def test_invalid_callable_parameter(self):
# Do not allow TypeVars that appear only once in a Callable signature.
self.CheckWithErrors("""
from typing import Callable, TypeVar
T = TypeVar('T')
f: Callable[[T], int] # invalid-annotation
def g(x: T, y: T):
f2: Callable[[T], int] # ok, since T is from the signature of g
""")
def test_typevar_annot_and_list_comprehension(self):
# Regression test for https://github.com/google/pytype/issues/1083.
self.Check("""
from collections import defaultdict
from typing import Generic, TypeVar
T = TypeVar('T')
class Min(Generic[T]):
def __init__(self, items: list[T]):
self.min = 2
self.items = items
def __call__(self) -> list[T]:
counts: defaultdict[T, int] = defaultdict(int)
return [b for b in self.items if counts[b] >= self.min]
""")
if __name__ == "__main__":
test_base.main()
| VariableAnnotationsFeatureTest |
python | python-openxml__python-docx | src/docx/enum/text.py | {
"start": 153,
"end": 1712
} | class ____(BaseXmlEnum):
"""Alias: **WD_ALIGN_PARAGRAPH**
Specifies paragraph justification type.
Example::
from docx.enum.text import WD_ALIGN_PARAGRAPH
paragraph = document.add_paragraph()
paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
"""
LEFT = (0, "left", "Left-aligned")
"""Left-aligned"""
CENTER = (1, "center", "Center-aligned.")
"""Center-aligned."""
RIGHT = (2, "right", "Right-aligned.")
"""Right-aligned."""
JUSTIFY = (3, "both", "Fully justified.")
"""Fully justified."""
DISTRIBUTE = (
4,
"distribute",
"Paragraph characters are distributed to fill entire width of paragraph.",
)
"""Paragraph characters are distributed to fill entire width of paragraph."""
JUSTIFY_MED = (
5,
"mediumKashida",
"Justified with a medium character compression ratio.",
)
"""Justified with a medium character compression ratio."""
JUSTIFY_HI = (
7,
"highKashida",
"Justified with a high character compression ratio.",
)
"""Justified with a high character compression ratio."""
JUSTIFY_LOW = (8, "lowKashida", "Justified with a low character compression ratio.")
"""Justified with a low character compression ratio."""
THAI_JUSTIFY = (
9,
"thaiDistribute",
"Justified according to Thai formatting layout.",
)
"""Justified according to Thai formatting layout."""
WD_ALIGN_PARAGRAPH = WD_PARAGRAPH_ALIGNMENT
| WD_PARAGRAPH_ALIGNMENT |
python | pydata__xarray | xarray/coding/variables.py | {
"start": 25573,
"end": 26295
} | class ____(VariableCoder):
"""Encode Enum into variable dtype metadata."""
def encode(self, variable: Variable, name: T_Name = None) -> Variable:
if (
"dtype" in variable.encoding
and np.dtype(variable.encoding["dtype"]).metadata
and "enum" in variable.encoding["dtype"].metadata
):
dims, data, attrs, encoding = unpack_for_encoding(variable)
data = data.astype(dtype=variable.encoding.pop("dtype"))
return Variable(dims, data, attrs, encoding, fastpath=True)
else:
return variable
def decode(self, variable: Variable, name: T_Name = None) -> Variable:
raise NotImplementedError()
| NativeEnumCoder |
python | pytorch__pytorch | torch/nn/attention/bias.py | {
"start": 857,
"end": 2274
} | class ____(IntEnum):
r"""
Enum for causal variants used in attention mechanisms.
Defines two types of causal biases:
``UPPER_LEFT``: Represents upper-left triangular bias for standard causal attention.
The equivalent pytorch code for constructing this bias is:
.. code-block:: python
torch.tril(torch.ones(size, dtype=torch.bool))
For instance, with ``shape=(3,4)``, the materialized bias tensor will be:
.. code-block:: text
[[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0]]
``LOWER_RIGHT``: Represents lower-right triangular bias, the include values are aligned to the lower
right corner of the matrix.
The equivalent pytorch code for constructing this bias is:
.. code-block:: python
diagonal_offset = size[1] - size[0]
torch.tril(
torch.ones(size, dtype=torch.bool),
diagonal=diagonal_offset,
)
For instance, with ``shape=(3,4)``, the materialized bias tensor will be:
.. code-block:: text
[[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 1, 1]]
Note that these variants are equivalent to each other when the sequence lengths of the query and key/value
tensors are equal since the triangular matrix is square.
.. warning:: This enum is a prototype and subject to change.
"""
UPPER_LEFT = auto()
LOWER_RIGHT = auto()
| CausalVariant |
python | OmkarPathak__pygorithm | tests/test_searching.py | {
"start": 2590,
"end": 2759
} | class ____(TestSearchingAlgorithm):
def test_interpolation_search(self):
self.assertEqual(interpolation_search.search(self.array, 7), 7)
| TestInterpolationSearch |
python | spack__spack | lib/spack/spack/vendor/macholib/MachOStandalone.py | {
"start": 374,
"end": 997
} | class ____(MachOGraph):
def __init__(self, delegate, *args, **kwargs):
super(FilteredMachOGraph, self).__init__(*args, **kwargs)
self.delegate = delegate
def createNode(self, cls, name):
cls = self.delegate.getClass(name, cls)
res = super(FilteredMachOGraph, self).createNode(cls, name)
return self.delegate.update_node(res)
def locate(self, filename, loader=None):
newname = super(FilteredMachOGraph, self).locate(filename, loader)
if newname is None:
return None
return self.delegate.locate(newname, loader=loader)
| FilteredMachOGraph |
python | getsentry__sentry | src/sentry/seer/endpoints/seer_rpc.py | {
"start": 10687,
"end": 37474
} | class ____(TypedDict):
org_ids: list[int]
org_slugs: list[str]
def get_sentry_organization_ids(
*, external_id: str, provider: str = "integrations:github", **kwargs
) -> SentryOrganizaionIdsAndSlugs:
"""
Get the Sentry organization ID for a given Repository.
Args:
external_id: The id of the repo in the provider's system
provider: The provider of the repository (e.g. "integrations:github")
"""
# It's possible that multiple orgs will be returned for a given repo.
repositories = Repository.objects.filter(
provider=provider,
external_id=external_id,
status=ObjectStatus.ACTIVE,
)
repo_ids = repositories.values_list("id", flat=True)
# Filter to only repositories that have code mappings.
repo_ids_with_config = (
RepositoryProjectPathConfig.objects.filter(repository_id__in=repo_ids)
.values_list("repository_id", flat=True)
.distinct()
)
organization_ids = repositories.filter(id__in=repo_ids_with_config).values_list(
"organization_id", flat=True
)
organizations = Organization.objects.filter(id__in=organization_ids)
# We then filter out all orgs that didn't give us consent to use AI features.
orgs_with_consent = [org for org in organizations if _can_use_prevent_ai_features(org)]
return {
"org_ids": [organization.id for organization in orgs_with_consent],
"org_slugs": [organization.slug for organization in orgs_with_consent],
}
def get_organization_autofix_consent(*, org_id: int) -> dict:
org: Organization = Organization.objects.get(id=org_id)
seer_org_acknowledgement = get_seer_org_acknowledgement(org)
github_extension_enabled = org_id in options.get("github-extension.enabled-orgs")
return {
"consent": seer_org_acknowledgement or github_extension_enabled,
}
# Used by the seer GH app to check for permissions before posting to an org
def get_organization_seer_consent_by_org_name(
*, org_name: str, provider: str = "github"
) -> dict[str, bool | str | None]:
org_integrations = integration_service.get_organization_integrations(
providers=[provider], name=org_name
)
# The URL where an org admin can enable Prevent-AI features
# Only returned if the org is not already consented
consent_url = None
for org_integration in org_integrations:
try:
org = Organization.objects.get(id=org_integration.organization_id)
if _can_use_prevent_ai_features(org):
return {"consent": True}
# If this is the last org we will return this URL as the consent URL
consent_url = org.absolute_url("/settings/organization/")
except Organization.DoesNotExist:
continue
return {"consent": False, "consent_url": consent_url}
def get_attributes_and_values(
*,
org_id: int,
project_ids: list[int],
stats_period: str,
max_values: int = 100,
max_attributes: int = 1000,
sampled: bool = True,
attributes_ignored: list[str] | None = None,
) -> dict:
"""
Fetches all string attributes and the corresponding values with counts for a given period.
"""
period = parse_stats_period(stats_period)
if period is None:
period = datetime.timedelta(days=7)
end = datetime.datetime.now()
start = end - period
start_time_proto = ProtobufTimestamp()
start_time_proto.FromDatetime(start)
end_time_proto = ProtobufTimestamp()
end_time_proto.FromDatetime(end)
sampling_mode = (
DownsampledStorageConfig.MODE_NORMAL
if sampled
else DownsampledStorageConfig.MODE_HIGHEST_ACCURACY
)
meta = RequestMeta(
organization_id=org_id,
cogs_category="events_analytics_platform",
referrer=Referrer.SEER_RPC.value,
project_ids=project_ids,
start_timestamp=start_time_proto,
end_timestamp=end_time_proto,
trace_item_type=TraceItemType.TRACE_ITEM_TYPE_SPAN,
downsampled_storage_config=DownsampledStorageConfig(mode=sampling_mode),
)
if attributes_ignored:
filter = TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(
name="attr_key",
type=AttributeKey.TYPE_STRING,
),
op=ComparisonFilter.OP_NOT_IN,
value=AttributeValue(
val_str_array=StrArray(
values=attributes_ignored,
),
),
),
)
else:
filter = TraceItemFilter()
stats_type = StatsType(
attribute_distributions=AttributeDistributionsRequest(
max_buckets=max_values,
max_attributes=max_attributes,
)
)
rpc_request = TraceItemStatsRequest(
filter=filter,
meta=meta,
stats_types=[stats_type],
)
rpc_response = snuba_rpc.trace_item_stats_rpc(rpc_request)
resolver = SearchResolver(
params=SnubaParams(
start=start,
end=end,
),
config=SearchResolverConfig(),
definitions=SPAN_DEFINITIONS,
)
attributes_and_values: dict[str, list[dict[str, Any]]] = {}
for result in rpc_response.results:
for attribute in result.attribute_distributions.attributes:
try:
resolved_attribute, _ = resolver.resolve_attribute(attribute.attribute_name)
attribute_name = resolved_attribute.public_alias
except InvalidSearchQuery:
attribute_name = attribute.attribute_name
if attribute.buckets:
if attribute_name not in attributes_and_values:
attributes_and_values[attribute_name] = []
attributes_and_values[attribute_name].extend(
[
{
"value": value.label,
"count": value.value,
}
for value in attribute.buckets
]
)
return {"attributes_and_values": attributes_and_values}
def get_attributes_for_span(
*,
org_id: int,
project_id: int,
trace_id: str,
span_id: str,
) -> dict[str, Any]:
"""
Fetch all attributes for a given span.
"""
start_datetime = datetime.datetime.fromtimestamp(0, tz=datetime.UTC)
end_datetime = datetime.datetime.now(datetime.UTC) + datetime.timedelta(days=7)
start_timestamp_proto = ProtobufTimestamp()
start_timestamp_proto.FromDatetime(start_datetime)
end_timestamp_proto = ProtobufTimestamp()
end_timestamp_proto.FromDatetime(end_datetime)
trace_item_type = TraceItemType.TRACE_ITEM_TYPE_SPAN
request_meta = RequestMeta(
organization_id=org_id,
cogs_category="events_analytics_platform",
referrer=Referrer.SEER_RPC.value,
project_ids=[project_id],
start_timestamp=start_timestamp_proto,
end_timestamp=end_timestamp_proto,
trace_item_type=trace_item_type,
request_id=str(uuid.uuid4()),
)
request = TraceItemDetailsRequest(
item_id=span_id,
trace_id=trace_id,
meta=request_meta,
)
response = snuba_rpc.trace_item_details_rpc(request)
response_dict = MessageToDict(response)
attributes = convert_rpc_attribute_to_json(
response_dict.get("attributes", []),
SupportedTraceItemType.SPANS,
use_sentry_conventions=False,
include_internal=False,
)
return {
"attributes": attributes,
}
def _parse_spans_response(
response, columns: list[ColumnDict], resolver: SearchResolver
) -> list[dict[str, Any]]:
"""
Parse protobuf response from TraceItemTable into a readable format.
The protobuf response has a structure like:
column_values {
attribute_name: "sentry.transaction" # This is the internal name
results { val_str: "foo" }
results { val_str: "bar" }
}
This function converts it to:
[
{"transaction": "foo"}, # Using the user-facing column name
{"transaction": "bar"}
]
"""
if not hasattr(response, "column_values") or not response.column_values:
return []
column_data = {}
num_rows = 0
for column_values in response.column_values:
internal_column_name = column_values.attribute_name
values: list[str | float | None] = []
for result in column_values.results:
if hasattr(result, "is_null") and result.is_null:
values.append(None)
elif result.HasField("val_str"):
values.append(result.val_str)
elif result.HasField("val_double"):
values.append(result.val_double)
else:
values.append(None)
column_data[internal_column_name] = values
num_rows = max(num_rows, len(values))
internal_to_user_name: dict[str, str] = {}
for column in columns:
user_column_name = column["name"]
try:
resolved_column, _ = resolver.resolve_attribute(user_column_name)
internal_to_user_name[resolved_column.internal_name] = user_column_name
except Exception:
internal_to_user_name[user_column_name] = user_column_name
user_to_internal_name = {
user_name: internal_name for internal_name, user_name in internal_to_user_name.items()
}
ordered_column_data = []
for column in columns:
user_column_name = column["name"]
internal_column_name = user_to_internal_name.get(user_column_name)
if internal_column_name and internal_column_name in column_data:
ordered_column_data.append(column_data[internal_column_name])
else:
ordered_column_data.append([None] * num_rows)
spans = []
if ordered_column_data:
from itertools import zip_longest
for row_values in zip_longest(*ordered_column_data, fillvalue=None):
span = {}
for column, value in zip(columns, row_values):
span[column["name"]] = value
spans.append(span)
return spans
def get_spans(
*,
org_id: int,
project_ids: list[int],
query: str = "",
sort: list[SortDict] | None = None,
stats_period: str = "7d",
columns: list[ColumnDict],
limit: int = 10,
) -> dict[str, Any]:
"""
Get spans using the TraceItemTable endpoint.
Args:
org_id: Organization ID
project_ids: List of project IDs to query
query: Search query string (optional) - will be converted to a TraceItemFilter
sort: Field to sort by (default: first column provided)
stats_period: Time period to query (default: 7d)
columns: List of columns with their type
limit: Maximum number of results to return
Returns:
Dictionary containing the spans data
"""
if not columns:
raise ValidationError("At least one column must be provided")
period = parse_stats_period(stats_period)
if period is None:
period = datetime.timedelta(days=7)
end = datetime.datetime.now()
start = end - period
start_time_proto = ProtobufTimestamp()
start_time_proto.FromDatetime(start)
end_time_proto = ProtobufTimestamp()
end_time_proto.FromDatetime(end)
resolver = SearchResolver(
params=SnubaParams(
start=start,
end=end,
),
config=SearchResolverConfig(),
definitions=SPAN_DEFINITIONS,
)
request_columns = []
for column in columns:
column_name = column["name"]
column_type = column["type"]
try:
resolved_column, _ = resolver.resolve_attribute(column_name)
internal_name = resolved_column.internal_name
except (InvalidSearchQuery, Exception):
internal_name = column_name
request_columns.append(
Column(
key=AttributeKey(
name=internal_name,
type=(
AttributeKey.Type.TYPE_STRING
if column_type == "TYPE_STRING"
else AttributeKey.Type.TYPE_DOUBLE
),
)
)
)
order_by_list = []
if sort:
# Process all sort criteria in the order they are provided
for sort_item in sort:
sort_column_name = sort_item["name"]
resolved_column, _ = resolver.resolve_attribute(sort_column_name)
sort_column_name = resolved_column.internal_name
sort_column_type = (
AttributeKey.Type.TYPE_STRING
if sort_item["type"] == "TYPE_STRING"
else AttributeKey.Type.TYPE_DOUBLE
)
order_by_list.append(
TraceItemTableRequest.OrderBy(
column=Column(
key=AttributeKey(
name=sort_column_name,
type=sort_column_type,
)
),
descending=sort_item["descending"],
)
)
else: # Default to first column if no sort is provided
column_name = columns[0]["name"]
resolved_column, _ = resolver.resolve_attribute(column_name)
sort_column_name = resolved_column.internal_name
sort_column_type = (
AttributeKey.Type.TYPE_STRING
if columns[0]["type"] == "TYPE_STRING"
else AttributeKey.Type.TYPE_DOUBLE
)
order_by_list = [
TraceItemTableRequest.OrderBy(
column=Column(
key=AttributeKey(
name=sort_column_name,
type=sort_column_type,
)
),
descending=True, # Default descending behavior
)
]
query_filter = None
if query and query.strip():
query_filter, _, _ = resolver.resolve_query(query.strip())
meta = RequestMeta(
organization_id=org_id,
project_ids=project_ids,
cogs_category="events_analytics_platform",
referrer=Referrer.SEER_RPC.value,
start_timestamp=start_time_proto,
end_timestamp=end_time_proto,
trace_item_type=TraceItemType.TRACE_ITEM_TYPE_SPAN,
)
rpc_request = TraceItemTableRequest(
meta=meta,
columns=request_columns,
order_by=order_by_list,
filter=query_filter,
limit=min(max(limit, 1), 100), # Force the upper limit to 100 to avoid abuse
)
responses = table_rpc([rpc_request])
if not responses:
return {"data": [], "meta": {}}
response = responses[0]
parsed_data = _parse_spans_response(response, columns, resolver)
return {
"data": parsed_data,
"meta": {
"columns": columns,
"total_rows": len(parsed_data),
},
}
def get_github_enterprise_integration_config(
*, organization_id: int, integration_id: int
) -> dict[str, Any]:
if not settings.SEER_GHE_ENCRYPT_KEY:
logger.error("Cannot encrypt access token without SEER_GHE_ENCRYPT_KEY")
return {"success": False}
integration = integration_service.get_integration(
integration_id=integration_id,
provider=IntegrationProviderSlug.GITHUB_ENTERPRISE.value,
organization_id=organization_id,
status=ObjectStatus.ACTIVE,
)
if integration is None:
logger.error("Integration %s does not exist", integration_id)
return {"success": False}
installation = integration.get_installation(organization_id=organization_id)
assert isinstance(installation, GitHubEnterpriseIntegration)
integration = integration_service.refresh_github_access_token(
integration_id=integration.id,
organization_id=organization_id,
)
assert integration is not None, "Integration should have existed given previous checks"
access_token = integration.metadata["access_token"]
permissions = integration.metadata["permissions"]
if not access_token:
logger.error("No access token found for integration %s", integration.id)
return {"success": False}
try:
fernet = Fernet(settings.SEER_GHE_ENCRYPT_KEY.encode("utf-8"))
encrypted_access_token = fernet.encrypt(access_token.encode("utf-8")).decode("utf-8")
except Exception:
logger.exception("Failed to encrypt access token")
return {"success": False}
return {
"success": True,
"base_url": f"https://{installation.model.metadata['domain_name'].split('/')[0]}/api/v3",
"verify_ssl": installation.model.metadata["installation"]["verify_ssl"],
"encrypted_access_token": encrypted_access_token,
"permissions": permissions,
}
def send_seer_webhook(*, event_name: str, organization_id: int, payload: dict) -> dict:
"""
Send a seer webhook event for an organization.
Args:
event_name: The sub-name of seer event (e.g., "root_cause_started")
organization_id: The ID of the organization to send the webhook for
payload: The webhook payload data
Returns:
dict: Status of the webhook sending operation
"""
# Validate event_name by constructing the full event type and checking if it's valid
from sentry.sentry_apps.metrics import SentryAppEventType
event_type = f"seer.{event_name}"
try:
SentryAppEventType(event_type)
except ValueError:
logger.exception(
"seer.webhook_invalid_event_type",
extra={"event_type": event_type},
)
return {"success": False, "error": f"Invalid event type: {event_type}"}
# Handle organization lookup safely
try:
organization = Organization.objects.get(
id=organization_id, status=OrganizationStatus.ACTIVE
)
except Organization.DoesNotExist:
logger.exception(
"seer.webhook_organization_not_found_or_not_active",
extra={"organization_id": organization_id},
)
return {"success": False, "error": "Organization not found or not active"}
if not features.has("organizations:seer-webhooks", organization):
return {"success": False, "error": "Seer webhooks are not enabled for this organization"}
broadcast_webhooks_for_organization.delay(
resource_name="seer",
event_name=event_name,
organization_id=organization_id,
payload=payload,
)
return {"success": True}
def trigger_coding_agent_launch(
*,
organization_id: int,
integration_id: int,
run_id: int,
trigger_source: str = "solution",
) -> dict:
"""
Trigger a coding agent launch for an autofix run.
Args:
organization_id: The organization ID
integration_id: The coding agent integration ID
run_id: The autofix run ID
trigger_source: Either "root_cause" or "solution" (default: "solution")
Returns:
dict: {"success": bool}
"""
try:
launch_coding_agents_for_run(
organization_id=organization_id,
integration_id=integration_id,
run_id=run_id,
trigger_source=AutofixTriggerSource(trigger_source),
)
return {"success": True}
except (NotFound, PermissionDenied, ValidationError, APIException):
logger.exception(
"coding_agent.rpc_launch_error",
extra={
"organization_id": organization_id,
"integration_id": integration_id,
"run_id": run_id,
},
)
return {"success": False}
def check_repository_integrations_status(*, repository_integrations: list[dict[str, Any]]) -> dict:
"""
Check whether repository integrations exist and are active.
Args:
repository_integrations: List of dicts, each containing:
- organization_id: Organization ID (required)
- external_id: External repository ID (required)
- provider: Provider identifier (required, e.g., "github", "github_enterprise")
Supports both with and without "integrations:" prefix
Returns:
dict: {
"integration_ids": list of integration IDs (as integers) from the database,
or None if repository doesn't exist/isn't active/doesn't have an integration id
}
e.g., {"integration_ids": [123, None, 456]}
None indicates repository not found, inactive, or has unsupported SCM provider.
The integration_ids are returned so Seer can store them for future reference.
Note:
- Repositories are matched by (organization_id, provider, external_id) which has a unique constraint
- integration_id is NOT required in the request and NOT used in matching
- integration_id from the database is returned as an integer so Seer can store it for future reference
"""
if not repository_integrations:
return {"integration_ids": []}
logger.info(
"seer_rpc.check_repository_integrations_status.called",
extra={
"repository_integrations_count": len(repository_integrations),
"repository_integrations_sample": repository_integrations[:10],
},
)
q_objects = Q()
for item in repository_integrations:
# Match only by organization_id, provider, and external_id
q_objects |= Q(
organization_id=item["organization_id"],
provider=f"integrations:{item['provider']}",
external_id=item["external_id"],
) | Q(
organization_id=item["organization_id"],
provider=item["provider"],
external_id=item["external_id"],
)
existing_repos = Repository.objects.filter(
q_objects, status=ObjectStatus.ACTIVE, provider__in=SEER_SUPPORTED_SCM_PROVIDERS
).values_list("organization_id", "provider", "integration_id", "external_id")
existing_map: dict[tuple, int | None] = {}
for org_id, provider, integration_id, external_id in existing_repos:
key = (org_id, provider, external_id)
# If multiple repos match (shouldn't happen), keep the first one
if key not in existing_map:
existing_map[key] = integration_id
integration_ids = []
for item in repository_integrations:
repo_tuple_with_prefix = (
item["organization_id"],
f"integrations:{item['provider']}",
item["external_id"],
)
repo_tuple_without_prefix = (
item["organization_id"],
item["provider"],
item["external_id"],
)
found_integration_id = existing_map.get(repo_tuple_with_prefix) or existing_map.get(
repo_tuple_without_prefix
)
integration_ids.append(found_integration_id)
logger.info(
"seer_rpc.check_repository_integrations_status.completed",
extra={"integration_ids": integration_ids},
)
return {"integration_ids": integration_ids}
seer_method_registry: dict[str, Callable] = { # return type must be serialized
# Common to Seer features
"get_organization_seer_consent_by_org_name": get_organization_seer_consent_by_org_name,
"get_github_enterprise_integration_config": get_github_enterprise_integration_config,
"get_organization_project_ids": get_organization_project_ids,
"check_repository_integrations_status": check_repository_integrations_status,
#
# Autofix
"get_organization_slug": get_organization_slug,
"get_organization_autofix_consent": get_organization_autofix_consent,
"get_error_event_details": get_error_event_details,
"get_profile_details": get_profile_details,
"send_seer_webhook": send_seer_webhook,
"get_attributes_for_span": get_attributes_for_span,
"trigger_coding_agent_launch": trigger_coding_agent_launch,
#
# Bug prediction
"get_sentry_organization_ids": get_sentry_organization_ids,
"get_issues_by_function_name": by_function_name.fetch_issues,
"get_issues_related_to_exception_type": by_error_type.fetch_issues,
"get_issues_by_raw_query": by_text_query.fetch_issues,
"get_latest_issue_event": utils.get_latest_issue_event,
#
# Assisted query
"get_attribute_names": get_attribute_names,
"get_attribute_values_with_substring": get_attribute_values_with_substring,
"get_attributes_and_values": get_attributes_and_values,
"get_spans": get_spans,
"get_issue_filter_keys": get_issue_filter_keys,
"get_filter_key_values": get_filter_key_values,
"execute_issues_query": execute_issues_query,
"get_issues_stats": get_issues_stats,
"get_event_filter_keys": get_event_filter_keys,
"get_event_filter_key_values": get_event_filter_key_values,
#
# Explorer
"get_transactions_for_project": rpc_get_transactions_for_project,
"get_trace_for_transaction": rpc_get_trace_for_transaction,
"get_profiles_for_trace": rpc_get_profiles_for_trace,
"get_issues_for_transaction": rpc_get_issues_for_transaction,
"get_trace_waterfall": rpc_get_trace_waterfall,
"get_issue_and_event_details": get_issue_and_event_details,
"get_profile_flamegraph": rpc_get_profile_flamegraph,
"execute_table_query": execute_table_query,
"execute_timeseries_query": execute_timeseries_query,
"get_trace_item_attributes": get_trace_item_attributes,
"get_repository_definition": get_repository_definition,
"call_custom_tool": call_custom_tool,
"get_log_attributes_for_trace": get_log_attributes_for_trace,
"get_metric_attributes_for_trace": get_metric_attributes_for_trace,
#
# Replays
"get_replay_summary_logs": rpc_get_replay_summary_logs,
"get_replay_metadata": get_replay_metadata,
}
def generate_request_signature(url_path: str, body: bytes) -> str:
"""
Generate a signature for the request body
with the first shared secret. If there are other
shared secrets in the list they are only to be used
for verfication during key rotation.
"""
if not settings.SEER_RPC_SHARED_SECRET:
raise RpcAuthenticationSetupException("Cannot sign RPC requests without RPC_SHARED_SECRET")
signature_input = body
secret = settings.SEER_RPC_SHARED_SECRET[0]
signature = hmac.new(secret.encode("utf-8"), signature_input, hashlib.sha256).hexdigest()
return f"rpc0:{signature}"
| SentryOrganizaionIdsAndSlugs |
python | encode__django-rest-framework | tests/generic_relations/migrations/0001_initial.py | {
"start": 43,
"end": 1182
} | class ____(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Bookmark',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.URLField()),
],
),
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.SlugField()),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=models.CASCADE, to='contenttypes.ContentType')),
],
),
]
| Migration |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 9252,
"end": 9326
} | class ____(DataType):
"""Base class for nested data types."""
| NestedType |
python | marshmallow-code__marshmallow | tests/base.py | {
"start": 6175,
"end": 6257
} | class ____(UserSchema):
age = fields.Float(as_string=True)
| UserFloatStringSchema |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/type_api.py | {
"start": 41428,
"end": 42134
} | class ____:
"""classes which subclass this can act as "mixin" classes for
TypeEngine."""
__slots__ = ()
if TYPE_CHECKING:
@util.memoized_property
def _static_cache_key(
self,
) -> Union[CacheConst, Tuple[Any, ...]]: ...
@overload
def adapt(self, cls: Type[_TE], **kw: Any) -> _TE: ...
@overload
def adapt(
self, cls: Type[TypeEngineMixin], **kw: Any
) -> TypeEngine[Any]: ...
def adapt(
self, cls: Type[Union[TypeEngine[Any], TypeEngineMixin]], **kw: Any
) -> TypeEngine[Any]: ...
def dialect_impl(self, dialect: Dialect) -> TypeEngine[Any]: ...
| TypeEngineMixin |
python | astropy__astropy | astropy/table/serialize.py | {
"start": 4425,
"end": 15979
} | class ____(dict):
"""Subclass of dict used to serialize mixin columns.
It is used in the representation to contain the name and possible
other info for a mixin column or attribute (either primary data or an
array-like attribute) that is serialized as a column in the table.
"""
info = SerializedColumnInfo()
@property
def shape(self):
"""Minimal shape implementation to allow use as a mixin column.
Returns the shape of the first item that has a shape at all,
or ``()`` if none of the values has a shape attribute.
"""
return next(
(value.shape for value in self.values() if hasattr(value, "shape")), ()
)
def __repr__(self):
"""Representation of SerializedColumn
Examples
--------
>>> from astropy.table.serialize import SerializedColumn
>>> SerializedColumn({"a": 1})
SerializedColumn({'a': 1})
"""
return f"{self.__class__.__name__}({super().__repr__()})"
def _represent_mixin_as_column(col, name, new_cols, mixin_cols, exclude_classes=()):
"""Carry out processing needed to serialize ``col`` in an output table
consisting purely of plain ``Column`` or ``MaskedColumn`` columns. This
relies on the object determine if any transformation is required and may
depend on the ``serialize_method`` and ``serialize_context`` context
variables. For instance a ``MaskedColumn`` may be stored directly to
FITS, but can also be serialized as separate data and mask columns.
This function builds up a list of plain columns in the ``new_cols`` arg (which
is passed as a persistent list). This includes both plain columns from the
original table and plain columns that represent data from serialized columns
(e.g. ``jd1`` and ``jd2`` arrays from a ``Time`` column).
For serialized columns the ``mixin_cols`` dict is updated with required
attributes and information to subsequently reconstruct the table.
Table mixin columns are always serialized and get represented by one
or more data columns. In earlier versions of the code *only* mixin
columns were serialized, hence the use within this code of "mixin"
to imply serialization. Starting with version 3.1, the non-mixin
``MaskedColumn`` can also be serialized.
"""
obj_attrs = col.info._represent_as_dict()
# If serialization is not required (see function docstring above)
# or explicitly specified as excluded, then treat as a normal column.
if not obj_attrs or col.__class__ in exclude_classes:
new_cols.append(col)
return
# Subtlety here is handling mixin info attributes. The basic list of such
# attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'.
# - name: handled directly [DON'T store]
# - unit: DON'T store if this is a parent attribute
# - dtype: captured in plain Column if relevant [DON'T store]
# - format: possibly irrelevant but settable post-object creation [DO store]
# - description: DO store
# - meta: DO store
info = {}
for attr, nontrivial in (
("unit", lambda x: x is not None and x != ""),
("format", lambda x: x is not None),
("description", lambda x: x is not None),
("meta", lambda x: x),
):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = col_attr
# Find column attributes that have the same length as the column itself.
# These will be stored in the table as new columns (aka "data attributes").
# Examples include SkyCoord.ra (what is typically considered the data and is
# always an array) and Skycoord.obs_time (which can be a scalar or an
# array).
data_attrs = [
key
for key, value in obj_attrs.items()
if getattr(value, "shape", ())[:1] == col.shape[:1]
]
for data_attr in data_attrs:
data = obj_attrs[data_attr]
# New column name combines the old name and attribute
# (e.g. skycoord.ra, skycoord.dec).unless it is the primary data
# attribute for the column (e.g. value for Quantity or data for
# MaskedColumn). For primary data, we attempt to store any info on
# the format, etc., on the column, but not for ancillary data (e.g.,
# no sense to use a float format for a mask).
is_primary = data_attr == col.info._represent_as_dict_primary_data
if is_primary:
new_name = name
new_info = info
else:
new_name = name + "." + data_attr
new_info = {}
if not has_info_class(data, MixinInfo):
col_cls = (
MaskedColumn
if (
hasattr(data, "mask")
and np.any(data.mask != np.zeros((), data.mask.dtype))
)
else Column
)
data = col_cls(data, name=new_name, **new_info)
if is_primary:
# Don't store info in the __serialized_columns__ dict for this column
# since this is redundant with info stored on the new column.
info = {}
# Recurse. If this is anything that needs further serialization (i.e.,
# a Mixin column, a structured Column, a MaskedColumn for which mask is
# stored, etc.), it will define obj_attrs[new_name]. Otherwise, it will
# just add to new_cols and all we have to do is to link to the new name.
_represent_mixin_as_column(data, new_name, new_cols, obj_attrs)
obj_attrs[data_attr] = SerializedColumn(
obj_attrs.pop(new_name, {"name": new_name})
)
# Strip out from info any attributes defined by the parent,
# and store whatever remains.
for attr in col.info.attrs_from_parent:
if attr in info:
del info[attr]
if info:
obj_attrs["__info__"] = info
# Store the fully qualified class name
if not isinstance(col, SerializedColumn):
obj_attrs.setdefault("__class__", col.__module__ + "." + col.__class__.__name__)
mixin_cols[name] = obj_attrs
def represent_mixins_as_columns(tbl, exclude_classes=()):
"""Represent input Table ``tbl`` using only `~astropy.table.Column`
or `~astropy.table.MaskedColumn` objects.
This function represents any mixin columns like `~astropy.time.Time` in
``tbl`` to one or more plain ``~astropy.table.Column`` objects and returns
a new Table. A single mixin column may be split into multiple column
components as needed for fully representing the column. This includes the
possibility of recursive splitting, as shown in the example below. The
new column names are formed as ``<column_name>.<component>``, e.g.
``sc.ra`` for a `~astropy.coordinates.SkyCoord` column named ``sc``.
In addition to splitting columns, this function updates the table ``meta``
dictionary to include a dict named ``__serialized_columns__`` which provides
additional information needed to construct the original mixin columns from
the split columns.
This function is used by astropy I/O when writing tables to ECSV, FITS,
HDF5 formats.
Note that if the table does not include any mixin columns then the original
table is returned with no update to ``meta``.
Parameters
----------
tbl : `~astropy.table.Table` or subclass
Table to represent mixins as Columns
exclude_classes : tuple of class
Exclude any mixin columns which are instannces of any classes in the tuple
Returns
-------
tbl : `~astropy.table.Table`
New Table with updated columns, or else the original input ``tbl``
Examples
--------
>>> from astropy.table import Table, represent_mixins_as_columns
>>> from astropy.time import Time
>>> from astropy.coordinates import SkyCoord
>>> x = [100.0, 200.0]
>>> obstime = Time([1999.0, 2000.0], format='jyear')
>>> sc = SkyCoord([1, 2], [3, 4], unit='deg', obstime=obstime)
>>> tbl = Table([sc, x], names=['sc', 'x'])
>>> represent_mixins_as_columns(tbl)
<Table length=2>
sc.ra sc.dec sc.obstime.jd1 sc.obstime.jd2 x
deg deg
float64 float64 float64 float64 float64
------- ------- -------------- -------------- -------
1.0 3.0 2451180.0 -0.25 100.0
2.0 4.0 2451545.0 0.0 200.0
"""
# Dict of metadata for serializing each column, keyed by column name.
# Gets filled in place by _represent_mixin_as_column().
mixin_cols = {}
# List of columns for the output table. For plain Column objects
# this will just be the original column object.
new_cols = []
# Go through table columns and represent each column as one or more
# plain Column objects (in new_cols) + metadata (in mixin_cols).
for col in tbl.itercols():
_represent_mixin_as_column(
col, col.info.name, new_cols, mixin_cols, exclude_classes=exclude_classes
)
# If no metadata was created then just return the original table.
if mixin_cols:
meta = deepcopy(tbl.meta)
meta["__serialized_columns__"] = mixin_cols
out = Table(new_cols, meta=meta, copy=False)
else:
out = tbl
for col in out.itercols():
if not isinstance(col, Column) and col.__class__ not in exclude_classes:
# This catches columns for which info has not been set up right and
# therefore were not converted. See the corresponding test in
# test_mixin.py for an example.
raise TypeError(
"failed to represent column "
f"{col.info.name!r} ({col.__class__.__name__}) as one "
"or more Column subclasses. This looks like a mixin class "
"that does not have the correct _represent_as_dict() method "
"in the class `info` attribute."
)
return out
def _construct_mixin_from_obj_attrs_and_info(obj_attrs, info):
# If this is a supported class then import the class and run
# the _construct_from_col method. Prevent accidentally running
# untrusted code by only importing known astropy classes.
cls_full_name = obj_attrs.pop("__class__", None)
if cls_full_name is None:
# We're dealing with a SerializedColumn holding columns, stored in
# obj_attrs. For this case, info holds the name (and nothing else).
mixin = SerializedColumn(obj_attrs)
mixin.info.name = info["name"]
return mixin
# We translate locally created skyoffset frames and treat all
# built-in frames as known.
if cls_full_name.startswith("abc.SkyOffset"):
cls_full_name = "astropy.coordinates.SkyOffsetFrame"
elif (
cls_full_name not in __construct_mixin_classes
and not cls_full_name.startswith("astropy.coordinates.builtin_frames")
):
raise ValueError(f"unsupported class for construct {cls_full_name}")
mod_name, _, cls_name = cls_full_name.rpartition(".")
module = import_module(mod_name)
cls = getattr(module, cls_name)
for attr, value in info.items():
if attr in cls.info.attrs_from_parent:
obj_attrs[attr] = value
mixin = cls.info._construct_from_dict(obj_attrs)
for attr, value in info.items():
if attr not in obj_attrs:
setattr(mixin.info, attr, value)
return mixin
| SerializedColumn |
python | huggingface__transformers | tests/models/chinese_clip/test_modeling_chinese_clip.py | {
"start": 8100,
"end": 11198
} | class ____:
def __init__(
self,
parent,
batch_size=12,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
return ChineseCLIPVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values):
model = ChineseCLIPVisionModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = (self.image_size, self.image_size)
patch_size = (self.patch_size, self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| ChineseCLIPVisionModelTester |
python | django__django | tests/test_runner/tests.py | {
"start": 28955,
"end": 32580
} | class ____(unittest.TestCase):
def setUp(self):
self.runner_instance = DiscoverRunner(verbosity=0)
def test_setup_aliased_databases(self):
tested_connections = db.ConnectionHandler(
{
"default": {
"ENGINE": "django.db.backends.dummy",
"NAME": "dbname",
},
"other": {
"ENGINE": "django.db.backends.dummy",
"NAME": "dbname",
},
}
)
with mock.patch(
"django.db.backends.dummy.base.DatabaseWrapper.creation_class"
) as mocked_db_creation:
with mock.patch("django.test.utils.connections", new=tested_connections):
old_config = self.runner_instance.setup_databases()
self.runner_instance.teardown_databases(old_config)
mocked_db_creation.return_value.destroy_test_db.assert_called_once_with(
"dbname", 0, False
)
def test_setup_test_database_aliases(self):
"""
The default database must be the first because data migrations
use the default alias by default.
"""
tested_connections = db.ConnectionHandler(
{
"other": {
"ENGINE": "django.db.backends.dummy",
"NAME": "dbname",
},
"default": {
"ENGINE": "django.db.backends.dummy",
"NAME": "dbname",
},
}
)
with mock.patch("django.test.utils.connections", new=tested_connections):
test_databases, _ = get_unique_databases_and_mirrors()
self.assertEqual(
test_databases,
{
("", "", "django.db.backends.dummy", "test_dbname"): (
"dbname",
["default", "other"],
),
},
)
def test_destroy_test_db_restores_db_name(self):
tested_connections = db.ConnectionHandler(
{
"default": {
"ENGINE": settings.DATABASES[db.DEFAULT_DB_ALIAS]["ENGINE"],
"NAME": "xxx_test_database",
},
}
)
# Using the real current name as old_name to not mess with the test
# suite.
old_name = settings.DATABASES[db.DEFAULT_DB_ALIAS]["NAME"]
with mock.patch("django.db.connections", new=tested_connections):
tested_connections["default"].creation.destroy_test_db(
old_name, verbosity=0, keepdb=True
)
self.assertEqual(
tested_connections["default"].settings_dict["NAME"], old_name
)
def test_serialization(self):
tested_connections = db.ConnectionHandler(
{
"default": {
"ENGINE": "django.db.backends.dummy",
},
}
)
with mock.patch(
"django.db.backends.dummy.base.DatabaseWrapper.creation_class"
) as mocked_db_creation:
with mock.patch("django.test.utils.connections", new=tested_connections):
self.runner_instance.setup_databases()
mocked_db_creation.return_value.create_test_db.assert_called_once_with(
verbosity=0, autoclobber=False, keepdb=False
)
mocked_db_creation.return_value.serialize_db_to_string.assert_called_once_with()
@skipUnlessDBFeature("supports_sequence_reset")
| SetupDatabasesTests |
python | scrapy__scrapy | tests/test_utils_python.py | {
"start": 1904,
"end": 2585
} | class ____:
def test_converting_an_utf8_encoded_string_to_unicode(self):
assert to_unicode(b"lel\xc3\xb1e") == "lel\xf1e"
def test_converting_a_latin_1_encoded_string_to_unicode(self):
assert to_unicode(b"lel\xf1e", "latin-1") == "lel\xf1e"
def test_converting_a_unicode_to_unicode_should_return_the_same_object(self):
assert to_unicode("\xf1e\xf1e\xf1e") == "\xf1e\xf1e\xf1e"
def test_converting_a_strange_object_should_raise_type_error(self):
with pytest.raises(TypeError):
to_unicode(423)
def test_errors_argument(self):
assert to_unicode(b"a\xedb", "utf-8", errors="replace") == "a\ufffdb"
| TestToUnicode |
python | django-import-export__django-import-export | tests/core/migrations/0014_bookwithchapternumbers.py | {
"start": 458,
"end": 1520
} | class ____(migrations.Migration):
dependencies = [
("core", "0013_alter_author_birthday"),
]
operations = []
pg_only_operations = [
migrations.CreateModel(
name="BookWithChapterNumbers",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=100, verbose_name="Book name")),
("chapter_numbers", chapter_numbers_field),
],
),
]
def apply(self, project_state, schema_editor, collect_sql=False):
if can_use_postgres_fields and schema_editor.connection.vendor.startswith(
"postgres"
):
self.operations = self.operations + self.pg_only_operations
return super().apply(project_state, schema_editor, collect_sql)
| Migration |
python | tensorflow__tensorflow | tensorflow/python/saved_model/load_v1_in_v2_test.py | {
"start": 2634,
"end": 36711
} | class ____(test.TestCase, parameterized.TestCase):
def _v1_single_metagraph_saved_model(self, use_resource):
export_graph = ops.Graph()
with export_graph.as_default():
start = array_ops.placeholder(
shape=None, dtype=dtypes.float32, name="start"
)
if use_resource:
distractor = ref_variable.RefVariable(-1.0, name="distractor")
v = resource_variable_ops.ResourceVariable(3.0, name="v")
else:
# "distractor" gets saved in the checkpoint and so used in the restore
# function, but not in the pruned function for the signature. This tests
# node naming: it needs to be consistent (and ideally always the same as
# the node in the original GraphDef) for the resource manager to find
# the right variable.
distractor = ref_variable.RefVariable(-1.0, name="distractor")
v = ref_variable.RefVariable(3.0, name="v")
local_variable = variable_v1.VariableV1(
1.0,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
trainable=False,
use_resource=True,
)
output = array_ops.identity(start * v * local_variable, name="output")
with session_lib.Session() as session:
session.run(
[v.initializer, distractor.initializer, local_variable.initializer]
)
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(
session,
path,
inputs={"start": start},
outputs={"output": output},
legacy_init_op=local_variable.initializer,
)
return path
@test_util.run_in_graph_and_eager_modes
def test_pretty_printed_signature(self):
imported = load.load(
self._v1_single_metagraph_saved_model(use_resource=True)
)
self.evaluate(variables.global_variables_initializer())
self.evaluate(variables.local_variables_initializer())
concrete_fn = imported.signatures["serving_default"]
summary = (
"(start: TensorSpec(shape=<unknown>, dtype=tf.float32,"
" name='start')) -> Dict[['output', TensorSpec(shape=<unknown>,"
" dtype=tf.float32, name=None)]]"
)
details = (
r"Input Parameters:\n"
r" start \(POSITIONAL_OR_KEYWORD\): TensorSpec\(shape=<unknown>,"
r" dtype=tf\.float32, name='start'\)\n"
r"Output Type:\n"
r" Dict\[\['output', TensorSpec\(shape=<unknown>,"
r" dtype=tf\.float32, name=None\)\]\]\n"
r"Captures:\n"
r" \d+: TensorSpec\(shape=\(\), dtype=tf\.resource, name=None\)\n"
r" \d+: TensorSpec\(shape=\(\), dtype=tf\.resource, name=None\)"
)
self.assertEqual(
concrete_fn.pretty_printed_signature(verbose=False), summary
)
self.assertRegex(
concrete_fn.pretty_printed_signature(verbose=True), details
)
self.assertRegex(repr(concrete_fn), r"<ConcreteFunction .* at .*")
self.assertRegex(str(concrete_fn), r"ConcreteFunction " + details)
@test_util.run_in_graph_and_eager_modes
def test_resource_variable_import(self):
imported = load.load(
self._v1_single_metagraph_saved_model(use_resource=True)
)
self.evaluate(variables.global_variables_initializer())
self.evaluate(variables.local_variables_initializer())
fn = imported.signatures["serving_default"]
self.assertEqual(
{"output": 6.0}, self.evaluate(fn(constant_op.constant(2.0)))
)
self.assertAllEqual([3.0, 1.0], self.evaluate(imported.variables))
self.evaluate(imported.variables[0].assign(4.0))
self.assertEqual(
{"output": 8.0}, self.evaluate(fn(start=constant_op.constant(2.0)))
)
self.evaluate(imported.variables[1].assign(2.0))
self.assertEqual(
{"output": 24.0}, self.evaluate(fn(start=constant_op.constant(3.0)))
)
self.assertTrue(imported.variables[0].trainable)
self.assertFalse(imported.variables[1].trainable)
with backprop.GradientTape() as tape:
output = fn(start=constant_op.constant(4.0))
self.assertEqual(imported.variables[:1], list(tape.watched_variables()))
self.assertEqual(
8.0, self.evaluate(tape.gradient(output, imported.variables[0]))
)
@test_util.run_in_graph_and_eager_modes
def test_ref_variable_import(self):
saved = self._v1_single_metagraph_saved_model(use_resource=False)
imported = load.load(saved)
fn = imported.signatures["serving_default"]
self.evaluate(lookup_ops.tables_initializer())
self.evaluate(ops.get_collection("saved_model_initializers"))
self.assertEqual(
6.0, self.evaluate(fn(start=constant_op.constant(2.0))["output"])
)
def _v1_output_shape_saved_model(self):
export_graph = ops.Graph()
with export_graph.as_default():
start = array_ops.placeholder(
shape=[None], dtype=dtypes.float32, name="start"
)
output = array_ops.identity(start, name="output")
output.set_shape([1]) # Ok to use [1] because shape is only informational
with session_lib.Session() as session:
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
builder = builder_impl.SavedModelBuilder(path)
builder.add_meta_graph_and_variables(
session,
tags=[tag_constants.SERVING],
signature_def_map={
"serving_default": signature_def_utils.build_signature_def(
{"start": utils_impl.build_tensor_info(start)},
{"output": utils_impl.build_tensor_info(output)},
)
},
)
builder.save()
return path
def test_restore_output_shapes(self):
saved = self._v1_output_shape_saved_model()
imported = load.load(saved)
fn = imported.signatures["serving_default"]
self.assertEqual(tensor_shape.TensorShape([1]), fn.outputs[0].shape)
def _v1_multi_metagraph_saved_model(self):
export_graph = ops.Graph()
with export_graph.as_default():
start = array_ops.placeholder(
shape=[None], dtype=dtypes.float32, name="start"
)
v = resource_variable_ops.ResourceVariable(21.0)
first_output = array_ops.identity(start * v, name="first_output")
second_output = array_ops.identity(v, name="second_output")
with session_lib.Session() as session:
session.run(v.initializer)
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
builder = builder_impl.SavedModelBuilder(path)
builder.add_meta_graph_and_variables(
session,
tags=["first"],
signature_def_map={
"first_key": signature_def_utils.build_signature_def(
{"first_start": utils_impl.build_tensor_info(start)},
{
"first_output": utils_impl.build_tensor_info(
first_output
)
},
)
},
)
builder.add_meta_graph(
tags=["second"],
signature_def_map={
"second_key": signature_def_utils.build_signature_def(
{"second_start": utils_impl.build_tensor_info(start)},
{
"second_output": utils_impl.build_tensor_info(
second_output
)
},
)
},
)
builder.save()
return path
def test_multi_meta_graph_loading(self):
with self.assertRaisesRegex(ValueError, "2 MetaGraphs"):
load.load(self._v1_multi_metagraph_saved_model())
first_imported = load.load(
self._v1_multi_metagraph_saved_model(), tags=["first"]
)
self.assertEqual(
{"first_output": 42.0},
self.evaluate(
first_imported.signatures["first_key"](
first_start=constant_op.constant(2.0)
)
),
)
second_imported = load.load(
self._v1_multi_metagraph_saved_model(), tags=set(["second"])
)
with self.assertRaisesRegex(TypeError, "second_start"):
second_imported.signatures["second_key"](x=constant_op.constant(2.0))
with self.assertRaisesRegex(TypeError, "second_start"):
second_imported.signatures["second_key"](
second_start=constant_op.constant(2.0), x=constant_op.constant(2.0)
)
self.assertEqual(
{"second_output": 21.0},
self.evaluate(
second_imported.signatures["second_key"](
second_start=constant_op.constant(2.0)
)
),
)
def _v1_asset_saved_model(self, clear_shared_name):
export_graph = ops.Graph()
vocab_path = os.path.join(self.get_temp_dir(), "vocab.txt")
with open(vocab_path, "w") as f:
f.write("alpha\nbeta\ngamma\n")
with export_graph.as_default():
initializer = lookup_ops.TextFileInitializer(
vocab_path,
key_dtype=dtypes.string,
key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
value_dtype=dtypes.int64,
value_index=lookup_ops.TextFileIndex.LINE_NUMBER,
)
table = lookup_ops.HashTable(initializer, default_value=-1)
start = array_ops.placeholder(shape=None, dtype=dtypes.string, name="in")
output = table.lookup(start, name="out")
if clear_shared_name:
export_graph.get_operation_by_name("hash_table")._clear_attr(
"shared_name"
)
with session_lib.Session() as session:
session.run([table.initializer])
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(
session,
path,
inputs={"start": start},
outputs={"output": output},
legacy_init_op=table.initializer,
)
file_io.delete_file(vocab_path)
return path
@test_util.run_in_graph_and_eager_modes
def test_asset_loading(self):
first_path = self._v1_asset_saved_model(clear_shared_name=False)
imported = load.load(first_path)
self.evaluate(lookup_ops.tables_initializer())
fn = imported.signatures["serving_default"]
self.assertAllClose(
{"output": [2, 0]}, fn(start=constant_op.constant(["gamma", "alpha"]))
)
second_path = os.path.join(
self.get_temp_dir(), "saved_model", str(ops.uid())
)
save.save(imported, second_path, signatures=imported.signatures)
shutil.rmtree(first_path)
del ops.get_collection_ref(ops.GraphKeys.TABLE_INITIALIZERS)[:]
second_import = load.load(second_path)
self.evaluate(lookup_ops.tables_initializer())
fn = second_import.signatures["serving_default"]
self.assertAllClose(
{"output": [2, 0]}, fn(start=constant_op.constant(["gamma", "alpha"]))
)
third_path = os.path.join(
self.get_temp_dir(), "saved_model", str(ops.uid())
)
save.save(second_import, third_path, signatures=second_import.signatures)
shutil.rmtree(second_path)
del ops.get_collection_ref(ops.GraphKeys.TABLE_INITIALIZERS)[:]
third_import = load.load(third_path)
self.evaluate(lookup_ops.tables_initializer())
fn = third_import.signatures["serving_default"]
self.assertAllClose(
{"output": [2, 0]}, fn(start=constant_op.constant(["gamma", "alpha"]))
)
@test_util.run_in_graph_and_eager_modes
def test_node_name_sharing(self):
fourth_path = self._v1_asset_saved_model(clear_shared_name=True)
fourth_import = load.load(fourth_path)
self.evaluate(lookup_ops.tables_initializer())
fn = fourth_import.signatures["serving_default"]
self.assertAllClose(
{"output": [2, 0]}, fn(start=constant_op.constant(["gamma", "alpha"]))
)
def _v1_cond_saved_model(self):
export_graph = ops.Graph()
with export_graph.as_default():
branch_selector = array_ops.placeholder(
name="branch_selector", shape=[], dtype=dtypes.bool
)
output = cond.cond(
branch_selector,
lambda: array_ops.ones([]),
lambda: array_ops.zeros([]),
)
with session_lib.Session() as session:
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(
session,
path,
inputs={"branch_selector": branch_selector},
outputs={"output": output},
)
return path
def test_cond(self):
first_path = self._v1_cond_saved_model()
imported = load.load(first_path)
function = imported.signatures["serving_default"]
self.assertAllClose({"output": 1.0}, function(constant_op.constant(True)))
self.assertAllClose({"output": 0.0}, function(constant_op.constant(False)))
def _v1_while_saved_model(self):
export_graph = ops.Graph()
with export_graph.as_default():
loop_iterations = array_ops.placeholder(
name="loop_iterations", shape=[], dtype=dtypes.int32
)
_, output = while_loop.while_loop(
lambda index, accum: index <= loop_iterations,
lambda index, accum: (index + 1, accum + index),
[constant_op.constant(0),
constant_op.constant(0)],
)
with session_lib.Session() as session:
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(
session,
path,
inputs={"loop_iterations": loop_iterations},
outputs={"output": output},
)
return path
def test_while(self):
first_path = self._v1_while_saved_model()
imported = load.load(first_path)
function = imported.signatures["serving_default"]
self.assertAllClose({"output": 10}, function(constant_op.constant(4)))
self.assertAllClose({"output": 15}, function(constant_op.constant(5)))
def _v1_nested_while_saved_model(self):
export_graph = ops.Graph()
with export_graph.as_default():
def _inner_while(loop_iterations):
_, output = while_loop.while_loop(
lambda index, accum: index <= loop_iterations,
lambda index, accum: (index + 1, accum + index),
[constant_op.constant(0),
constant_op.constant(0)],
)
return output
loop_iterations = array_ops.placeholder(
name="loop_iterations", shape=[], dtype=dtypes.int32
)
_, output = while_loop.while_loop(
lambda index, accum: index <= loop_iterations,
lambda index, accum: (index + 1, accum + _inner_while(index)),
[constant_op.constant(0),
constant_op.constant(0)],
)
with session_lib.Session() as session:
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(
session,
path,
inputs={"loop_iterations": loop_iterations},
outputs={"output": output},
)
return path
def test_nested_while(self):
first_path = self._v1_nested_while_saved_model()
imported = load.load(first_path)
function = imported.signatures["serving_default"]
self.assertAllClose({"output": 20}, function(constant_op.constant(4)))
self.assertAllClose({"output": 35}, function(constant_op.constant(5)))
def _no_signatures_model(self):
export_graph = ops.Graph()
with export_graph.as_default():
inp = array_ops.placeholder(name="x", shape=[], dtype=dtypes.float32)
array_ops.identity(inp + 1.0, name="out")
with session_lib.Session() as session:
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
b = builder_impl.SavedModelBuilder(path)
b.add_meta_graph_and_variables(
session,
tags=[tag_constants.SERVING],
signature_def_map={},
assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),
)
b.save()
return path
def test_no_signature(self):
path = self._no_signatures_model()
imported = load.load(path)
self.assertEqual([], list(imported.signatures.keys()))
def _signature_with_no_inputs(self):
export_graph = ops.Graph()
with export_graph.as_default():
array_ops.placeholder(name="x", shape=[], dtype=dtypes.float32)
output = random_ops.random_normal([2])
with session_lib.Session() as session:
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
b = builder_impl.SavedModelBuilder(path)
b.add_meta_graph_and_variables(
session,
tags=[tag_constants.SERVING],
signature_def_map={
"key": signature_def_utils.build_signature_def(
{}, dict(value=utils_impl.build_tensor_info(output))
)
},
)
b.save()
return path
def test_signature_with_no_inputs(self):
path = self._signature_with_no_inputs()
imported = load.load(path)
self.assertEqual([2], imported.signatures["key"]()["value"].shape)
def test_version_info(self):
path = self._signature_with_no_inputs()
imported = load.load(path)
self.assertEqual(versions.__version__, imported.tensorflow_version)
self.assertEqual(versions.__git_version__, imported.tensorflow_git_version)
def _unfed_placeholder_signature(self):
export_graph = ops.Graph()
with export_graph.as_default():
x = array_ops.placeholder(name="x", shape=[], dtype=dtypes.float32)
output = x * random_ops.random_normal([2])
with session_lib.Session() as session:
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
b = builder_impl.SavedModelBuilder(path)
b.add_meta_graph_and_variables(
session,
tags=[tag_constants.SERVING],
signature_def_map={
"key": signature_def_utils.build_signature_def(
{}, dict(value=utils_impl.build_tensor_info(output))
)
},
)
b.save()
return path
def test_unfed_placeholder_exception(self):
path = self._unfed_placeholder_signature()
with self.assertRaisesRegex(
lift_to_graph.UnliftableError,
"signature needs an input for each placeholder.*\n\nUnable to lift",
):
load.load(path)
def test_custom_pruning(self):
path = self._no_signatures_model()
root = load.load(path)
fn = root.prune("x:0", "out:0")
self.assertEqual(2.0, self.evaluate(fn(x=array_ops.ones([]))))
root.graph.as_graph_element("x:0")
def _no_trainable_variable_attribute(self, trainable):
"""A SavedModel where the VariableDef has no 'trainable' (it's false)."""
class _MissingFieldsVariable(resource_variable_ops.ResourceVariable):
def to_proto(self, export_scope=None):
full_proto = super(_MissingFieldsVariable, self).to_proto(export_scope)
return variable_pb2.VariableDef(
variable_name=full_proto.variable_name,
initial_value_name=full_proto.initial_value_name,
initializer_name=full_proto.snapshot_name,
save_slice_info_def=full_proto.save_slice_info_def,
is_resource=full_proto.is_resource,
)
export_graph = ops.Graph()
with export_graph.as_default():
v = _MissingFieldsVariable(3.0, trainable=trainable)
with session_lib.Session() as session:
session.run([v.initializer])
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
b = builder_impl.SavedModelBuilder(path)
b.add_meta_graph_and_variables(
session, tags=[tag_constants.SERVING], signature_def_map={}
)
b.save()
return path
def test_trainable_not_set_in_proto(self):
"""If a VariableDef has no 'trainable', we fall back to collections."""
real_tf_version = versions.__version__
# Pretend to be exported from an older version of TensorFlow, so trainable
# will follow collections instead of checking VariableDefs.
versions.__version__ = "1.7.0"
path = self._no_trainable_variable_attribute(trainable=True)
root = load.load(path)
self.assertTrue(root.variables[0].trainable)
path = self._no_trainable_variable_attribute(trainable=False)
root = load.load(path)
self.assertFalse(root.variables[0].trainable)
versions.__version__ = real_tf_version
def _export_variable(self, **kwargs_for_variable):
"""A 1.x SavedModel with a single variable."""
export_graph = ops.Graph()
with export_graph.as_default():
v = resource_variable_ops.ResourceVariable(3.0, **kwargs_for_variable)
with session_lib.Session() as session:
session.run([v.initializer])
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
b = builder_impl.SavedModelBuilder(path)
b.add_meta_graph_and_variables(
session, tags=[tag_constants.SERVING], signature_def_map={}
)
b.save()
return path
def test_trainable_in_proto(self):
"""If a VariableDef has a trainable property, we do not use collections."""
path = self._export_variable(
trainable=True, collections=[ops.GraphKeys.GLOBAL_VARIABLES]
)
root = load.load(path)
self.assertTrue(root.variables[0].trainable)
path = self._export_variable(
trainable=False,
collections=[
ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.TRAINABLE_VARIABLES,
],
)
root = load.load(path)
self.assertFalse(root.variables[0].trainable)
def _model_with_sparse_output(self):
"""Generate a graph with a SparseTensor output and serialize in V1 format"""
export_graph = ops.Graph()
with export_graph.as_default():
in_placeholder = array_ops.placeholder(dtype=dtypes.int64, shape=[1])
out_sparse_tensor = (
sparse_tensor.SparseTensor(
indices=[[0]], values=in_placeholder, dense_shape=[1]
)
* 2
)
with session_lib.Session() as session:
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(
session,
path,
inputs={"start": in_placeholder},
outputs={"output": out_sparse_tensor},
)
return path
def test_load_sparse_outputs(self):
path = self._model_with_sparse_output()
imported = load.load(path)
imported_fn = imported.signatures["serving_default"]
forty_two = constant_op.constant([42], dtype=dtypes.int64)
self.assertEqual([84], imported_fn(forty_two)["output"].values.numpy())
def _model_with_sparse_input(self):
"""Generate a graph with a SparseTensor input and serialize in V1 format."""
export_graph = ops.Graph()
with export_graph.as_default():
in_sparse_placeholder = array_ops.sparse_placeholder(
dtype=dtypes.int64, shape=[2, 2]
)
out_sparse_tensor = (
sparse_tensor.SparseTensor(
indices=in_sparse_placeholder.indices,
values=in_sparse_placeholder.values,
dense_shape=in_sparse_placeholder.dense_shape,
)
* 2
)
with session_lib.Session() as session:
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(
session,
path,
inputs={"start": in_sparse_placeholder},
outputs={"output": out_sparse_tensor},
)
return path
def test_load_sparse_inputs(self):
path = self._model_with_sparse_input()
imported = load.load(path)
imported_fn = imported.signatures["serving_default"]
indices = constant_op.constant([[0, 0], [0, 1], [1, 1]], dtype=dtypes.int64)
values = constant_op.constant([42, 43, 44], dtype=dtypes.int64)
dense_shape = constant_op.constant([2, 2], dtype=dtypes.int64)
result = imported_fn(
start_indices=indices,
start_values=values,
start_dense_shape=dense_shape,
)
self.assertAllEqual([84, 86, 88], result["output"].values.numpy())
def _model_with_ragged_input(self):
"""Generate a graph with a RaggedTensor input and serialize in V1 format."""
export_graph = ops.Graph()
with export_graph.as_default():
x = ragged_factory_ops.placeholder(dtypes.float32, 1, [])
y = x * 2
with session_lib.Session() as sess:
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(sess, path, inputs={"x": x}, outputs={"y": y})
return path
def test_load_ragged_inputs(self):
path = self._model_with_ragged_input()
imported = load.load(path)
imported_fn = imported.signatures["serving_default"]
x = ragged_factory_ops.constant([[10.0, 20.0], [30.0]])
result = imported_fn(x_component_0=x.values, x_component_1=x.row_splits)
self.assertAllEqual(result["y"], [[20.0, 40.0], [60.0]])
def _model_with_defun(self):
"""Generate a graph with a Defun and serialize in V1 format."""
export_graph = ops.Graph()
with export_graph.as_default():
@framework_function.Defun(dtypes.int64)
def z(x):
return x + 1
@framework_function.Defun(dtypes.int64)
def g(x):
return z(x) + 1
@framework_function.Defun(dtypes.int64)
def f(x):
return g(x) + 1
in_placeholder = array_ops.placeholder(dtype=dtypes.int64, shape=[1])
out = f(in_placeholder)
with session_lib.Session() as session:
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(
session,
path,
inputs={"start": in_placeholder},
outputs={"output": out},
)
return path
def test_load_defun(self):
path = self._model_with_defun()
imported = load.load(path)
imported_fn = imported.signatures["serving_default"]
forty_two = constant_op.constant([42], dtype=dtypes.int64)
self.assertEqual([45], imported_fn(forty_two)["output"].numpy())
def test_load_and_restore_partitioned_variables(self):
export_graph = ops.Graph()
with export_graph.as_default():
partitioned_var = variable_scope.get_variable(
"a",
shape=[6],
initializer=init_ops.constant_initializer(13),
partitioner=partitioned_variables.fixed_size_partitioner(2),
use_resource=True,
)
x = array_ops.placeholder(shape=[], dtype=dtypes.float32)
y = x * partitioned_var
with session_lib.Session() as session:
session.run(variables.global_variables_initializer())
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(
session, path, inputs={"x": x}, outputs={"y": y}
)
# Create a name-based checkpoint with different values.
session.run(partitioned_var.assign([[5, 4, 3], [2, 1, 0]]))
ckpt_path = os.path.join(self.get_temp_dir(), "restore_ckpt")
saver.Saver().save(session, ckpt_path)
imported = load.load(path)
self.assertAllClose(
self.evaluate(imported.variables), [[13, 13, 13], [13, 13, 13]]
)
self.evaluate(imported.restore(ckpt_path))
self.assertAllClose(
self.evaluate(imported.variables), [[5, 4, 3], [2, 1, 0]]
)
self.assertAllClose(
self.evaluate(
imported.signatures["serving_default"](constant_op.constant(2.0))
),
{"y": [10, 8, 6, 4, 2, 0]},
)
def test_structured_input_signature(self):
path = self._v1_single_metagraph_saved_model(False)
imported = load.load(path)
args, kwargs = imported.signatures[
"serving_default"
].structured_input_signature
self.assertEqual(args, ())
self.assertAllEqual(
kwargs, {"start": tensor_spec.TensorSpec(shape=None, name="start")}
)
def _model_with_multiple_inputs(self, input_names, compute_fn, var_value):
export_graph = ops.Graph()
with export_graph.as_default():
inputs = tuple(
array_ops.placeholder(shape=(), dtype=dtypes.float32, name=name)
for name in input_names
)
v = resource_variable_ops.ResourceVariable(var_value)
output = array_ops.identity(compute_fn(inputs, v), name="output")
with session_lib.Session() as session:
session.run(v.initializer)
path = os.path.join(
self.get_temp_dir(), "tf1_saved_model", str(ops.uid())
)
builder = builder_impl.SavedModelBuilder(path)
feeds = {
name: utils_impl.build_tensor_info(input)
for name, input in zip(input_names, inputs)
}
builder.add_meta_graph_and_variables(
session,
tags=[tag_constants.SERVING],
signature_def_map={
"serving_default": signature_def_utils.build_signature_def(
feeds,
{"output": utils_impl.build_tensor_info(output)},
)
},
)
builder.save()
return path
@parameterized.named_parameters(
(f"_{input_names_idx}_{reverse}", input_names, reverse) # pylint: disable=g-complex-comprehension
for reverse in (False, True)
for input_names_idx, input_names in enumerate((
("input1", "input2"),
("input1", "input./-"),
))
)
def test_multiple_inputs(self, input_names, reverse):
if reverse:
input_names = tuple(reversed(input_names))
def compute_fn(ls, a):
result = a
for x in ls:
result = (result + x) * a
return result
var_value = 21.0
path = self._model_with_multiple_inputs(
input_names, compute_fn=compute_fn, var_value=21.0
)
imported = load.load(path)
sorted_with_idx = sorted(
zip(range(len(input_names)), input_names), key=lambda x: x[1]
)
fn = imported.signatures["serving_default"]
for i, (_, input_name) in enumerate(sorted_with_idx):
self.assertEqual(fn.inputs[i].name, f"{input_name}:0")
inputs = tuple(i + 2.0 for i in range(len(input_names)))
expected_output = compute_fn(inputs, var_value)
# Call `fn`` with keyword arguments
self.assertEqual(
self.evaluate(
fn(**{
name: constant_op.constant(v)
for name, v in zip(input_names, inputs)
})["output"]
),
expected_output,
)
# Call `fn`` with positional arguments
self.assertEqual(
self.evaluate(fn(*(inputs[i] for i, _ in sorted_with_idx))["output"]),
expected_output,
)
# Test saving the model again in TF2
path2 = os.path.join(self.get_temp_dir(), "tf2_saved_model", str(ops.uid()))
save.save(imported, path2, imported.signatures)
imported2 = load.load(path2)
fn = imported2.signatures["serving_default"]
# Call `fn`` with keyword arguments
self.assertEqual(
self.evaluate(
fn(**{
name: constant_op.constant(v)
for name, v in zip(input_names, inputs)
})["output"]
),
expected_output,
)
# `fn` can no longer be called with positional arguments, because
# during TF2 saving, those positional-keyword-hybrid arguments are
# converted to keyword-only arguments.
def _v1_multi_input_saved_model(self):
export_graph = ops.Graph()
with export_graph.as_default():
input1 = array_ops.placeholder(
shape=[None], dtype=dtypes.float32, name="input1"
)
input2 = array_ops.placeholder(
shape=[None], dtype=dtypes.float32, name="input2"
)
v = resource_variable_ops.ResourceVariable(21.0)
output = array_ops.identity(input1 * v + input2, name="output")
with session_lib.Session() as session:
session.run(v.initializer)
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
builder = builder_impl.SavedModelBuilder(path)
builder.add_meta_graph_and_variables(
session,
tags=[tag_constants.SERVING],
signature_def_map={
"serving_default": signature_def_utils.build_signature_def(
{
"input1": utils_impl.build_tensor_info(input1),
"input2": utils_impl.build_tensor_info(input2),
},
{"output": utils_impl.build_tensor_info(output)},
)
},
)
builder.save()
return path
def test_v1_input_ordered(self):
path = self._v1_multi_input_saved_model()
imported = load.load(path)
self.assertEqual(
imported.signatures["serving_default"].inputs[0].name, "input1:0"
)
self.assertEqual(
imported.signatures["serving_default"].inputs[1].name, "input2:0"
)
def test_resave_signature(self):
# Tests that signatures saved using TF1 can be resaved with TF2.
# See b/211666001 for context.
export_graph = ops.Graph()
with export_graph.as_default():
a = array_ops.placeholder(
shape=[None, 1], dtype=dtypes.float32, name="input_2"
)
b = array_ops.placeholder(
shape=[None, 2], dtype=dtypes.float32, name="input_1"
)
c = array_ops.identity(a)
with session_lib.Session() as session:
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(
session, path, inputs={"a": a, "b": b}, outputs={"c": c}
)
imported = load.load(path)
path2 = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
save.save(imported, path2, imported.signatures)
imported2 = load.load(path2)
self.assertEqual(
imported2.signatures["serving_default"](
a=constant_op.constant([5.0]), b=constant_op.constant([1.0, 3.0])
)["c"].numpy(),
5.0,
)
if __name__ == "__main__":
test.main()
| LoadTest |
python | optuna__optuna | optuna/pruners/_nop.py | {
"start": 206,
"end": 1626
} | class ____(BasePruner):
"""Pruner which never prunes trials.
Example:
.. testcode::
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
import optuna
X, y = load_iris(return_X_y=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y)
classes = np.unique(y)
def objective(trial):
alpha = trial.suggest_float("alpha", 0.0, 1.0)
clf = SGDClassifier(alpha=alpha)
n_train_iter = 100
for step in range(n_train_iter):
clf.partial_fit(X_train, y_train, classes=classes)
intermediate_value = clf.score(X_valid, y_valid)
trial.report(intermediate_value, step)
if trial.should_prune():
assert False, "should_prune() should always return False with this pruner."
raise optuna.TrialPruned()
return clf.score(X_valid, y_valid)
study = optuna.create_study(direction="maximize", pruner=optuna.pruners.NopPruner())
study.optimize(objective, n_trials=20)
"""
def prune(self, study: Study, trial: FrozenTrial) -> bool:
return False
| NopPruner |
python | huggingface__transformers | tests/models/mistral3/test_modeling_mistral3.py | {
"start": 7979,
"end": 18224
} | class ____(unittest.TestCase):
@require_read_token
def setUp(self):
cleanup(torch_device, gc_collect=True)
self.model_checkpoint = "mistralai/Mistral-Small-3.1-24B-Instruct-2503"
self.model = Mistral3ForConditionalGeneration.from_pretrained(self.model_checkpoint, dtype=torch.bfloat16)
accelerate.cpu_offload(self.model, execution_device=torch_device)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@require_read_token
def test_mistral3_integration_generate_text_only(self):
processor = AutoProcessor.from_pretrained(self.model_checkpoint)
processor.chat_template = processor.chat_template.replace('strftime_now("%Y-%m-%d")', '"2025-06-20"')
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "Write a haiku"},
],
}
]
inputs = processor.apply_chat_template(
messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
).to(torch_device, dtype=torch.bfloat16)
with torch.no_grad():
generate_ids = self.model.generate(**inputs, max_new_tokens=200, do_sample=False)
decoded_output = processor.decode(
generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
)
expected_outputs = Expectations(
{
("xpu", 3): "Sure, here is a haiku for you:\n\nWhispers of the breeze,\nCherry blossoms softly fall,\nSpring's gentle embrace.",
("cuda", 8): "Sure, here is a haiku for you:\n\nWhispers of the breeze,\nCherry blossoms softly fall,\nSpring's gentle embrace.",
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(decoded_output, expected_output)
@require_read_token
@require_deterministic_for_xpu
def test_mistral3_integration_generate(self):
processor = AutoProcessor.from_pretrained(self.model_checkpoint)
processor.chat_template = processor.chat_template.replace('strftime_now("%Y-%m-%d")', '"2025-06-20"')
messages = [
{
"role": "user",
"content": [
{"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
{"type": "text", "text": "Describe this image"},
],
}
]
inputs = processor.apply_chat_template(
messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
).to(torch_device, dtype=torch.bfloat16)
with torch.no_grad():
generate_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False)
decoded_output = processor.decode(
generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
)
expected_outputs = Expectations(
{
("xpu", 3): "The image features two tabby cats lying on a pink surface, which appears to be a cushion or",
("cuda", 8): 'The image features two cats lying on a pink surface, which appears to be a couch or a bed',
("rocm", (9, 4)): "The image features two cats lying on a pink surface, which appears to be a couch or a bed",
("rocm", (9, 5)): "The image features two tabby cats lying on a pink surface, which appears to be a cushion or"
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(decoded_output, expected_output)
@require_read_token
@require_deterministic_for_xpu
def test_mistral3_integration_batched_generate(self):
processor = AutoProcessor.from_pretrained(self.model_checkpoint)
processor.chat_template = processor.chat_template.replace('strftime_now("%Y-%m-%d")', '"2025-06-20"')
messages = [
[
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/ydshieh/mistral3-test-data/resolve/main/view.jpg",
},
{"type": "text", "text": "Write a haiku for this image"},
],
},
],
[
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
},
{"type": "text", "text": "Describe this image"},
],
},
],
]
inputs = processor.apply_chat_template(
messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
).to(torch_device, dtype=torch.bfloat16)
output = self.model.generate(**inputs, do_sample=False, max_new_tokens=25)
gen_tokens = output[:, inputs["input_ids"].shape[1] :]
# Check first output
decoded_output = processor.decode(gen_tokens[0], skip_special_tokens=True)
expected_outputs = Expectations(
{
("xpu", 3): "Calm lake's mirror gleams,\nWhispering pines stand in silence,\nPath to peace begins.",
("cuda", 8): "Wooden path to calm,\nReflections whisper secrets,\nNature's peace unfolds.",
("rocm", (9, 5)): "Calm waters reflect\nWooden path to distant shore\nSilence in the scene"
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
# Check second output
decoded_output = processor.decode(gen_tokens[1], skip_special_tokens=True)
expected_outputs = Expectations(
{
("xpu", 3): "The image depicts a vibrant urban scene in what appears to be Chinatown. The focal point is a traditional Chinese archway",
("cuda", 8): 'The image depicts a street scene in what appears to be a Chinatown district. The focal point is a traditional Chinese arch',
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
@require_read_token
@require_deterministic_for_xpu
def test_mistral3_integration_batched_generate_multi_image(self):
processor = AutoProcessor.from_pretrained(self.model_checkpoint)
processor.chat_template = processor.chat_template.replace('strftime_now("%Y-%m-%d")', '"2025-06-20"')
# Prepare inputs
messages = [
[
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/ydshieh/mistral3-test-data/resolve/main/view.jpg",
},
{"type": "text", "text": "Write a haiku for this image"},
],
},
],
[
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/ydshieh/mistral3-test-data/resolve/main/Statue-of-Liberty-Island-New-York-Bay.jpg",
},
{
"type": "image",
"url": "https://huggingface.co/ydshieh/mistral3-test-data/resolve/main/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg",
},
{
"type": "text",
"text": "These images depict two different landmarks. Can you identify them?",
},
],
},
],
]
inputs = processor.apply_chat_template(
messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
).to(torch_device, dtype=torch.bfloat16)
output = self.model.generate(**inputs, do_sample=False, max_new_tokens=25)
gen_tokens = output[:, inputs["input_ids"].shape[1] :]
# Check first output
decoded_output = processor.decode(gen_tokens[0], skip_special_tokens=True)
expected_outputs = Expectations(
{
("cuda", 8): 'Calm waters reflect\nWooden path to distant shore\nSilence in the scene',
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
# Check second output
decoded_output = processor.decode(gen_tokens[1], skip_special_tokens=True)
expected_outputs = Expectations(
{
("xpu", 3): "Certainly! The images depict two iconic landmarks:\n\n1. The first image shows the Statue of Liberty in New York City.",
("cuda", 8): 'Certainly! The images depict two famous landmarks in the United States:\n\n1. The first image shows the Statue of Liberty,',
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
| Mistral3IntegrationTest |
python | huggingface__transformers | src/transformers/models/m2m_100/modeling_m2m_100.py | {
"start": 29225,
"end": 40429
} | class ____(M2M100PreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`M2M100DecoderLayer`]
Args:
config: M2M100Config
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: M2M100Config):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.embed_tokens = M2M100ScaledWordEmbedding(
config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale
)
self.embed_positions = M2M100SinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
self.padding_idx,
)
self.layers = nn.ModuleList([M2M100DecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..."
)
use_cache = False
# initialize `past_key_values`
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
batch_size, seq_length = inputs_embeds.size()[:-1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(
past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
)
if attention_mask is None and not is_torchdynamo_compiling():
# required mask seq length can be calculated via length of past cache
mask_seq_length = past_key_values_length + seq_length
attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
self_attn_cache = (
past_key_values.self_attention_cache
if isinstance(past_key_values, EncoderDecoderCache)
else past_key_values
)
attention_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=self_attn_cache,
)
encoder_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
)
# embed positions
positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length)
positions = positions.to(inputs_embeds.device)
hidden_states = inputs_embeds + positions
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions else None
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
dropout_probability = torch.rand([])
skip_the_layer = self.training and dropout_probability < self.layerdrop
if not skip_the_layer or synced_gpus:
# under fsdp or deepspeed zero3 all gpus must run in sync
layer_outputs = decoder_layer(
hidden_states,
attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
if skip_the_layer:
continue
if output_attentions:
all_self_attns += (layer_outputs[1],)
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
@auto_docstring
| M2M100Decoder |
python | mlflow__mlflow | examples/pyfunc/infer_model_code_paths.py | {
"start": 78,
"end": 666
} | class ____(mlflow.pyfunc.PythonModel):
"""Custom pyfunc class used to create customized mlflow models"""
def predict(self, context, model_input, params: dict[str, Any] | None = None):
prediction = [x % 3 for x in model_input]
return iris_classes(prediction)
with mlflow.start_run(run_name="test_custom_model_with_inferred_code_paths"):
# log a custom model
model_info = mlflow.pyfunc.log_model(
name="artifacts",
infer_code_paths=True,
python_model=CustomPredict(),
)
print(f"Model URI: {model_info.model_uri}")
| CustomPredict |
python | google__pytype | pytype/vm_utils.py | {
"start": 4196,
"end": 4817
} | class ____(_NameErrorDetails):
"""Name error details for a name defined in an outer class."""
def __init__(self, attr, prefix, class_name):
super().__init__()
self._attr = attr
self._prefix = prefix
self._class_name = class_name
def to_error_message(self):
full_attr_name = f"{self._class_name}.{self._attr}"
if self._prefix:
full_class_name = f"{self._prefix}.{self._class_name}"
else:
full_class_name = self._class_name
return (
f"Use {full_attr_name!r} to reference {self._attr!r} from class "
f"{full_class_name!r}"
)
| _NameInOuterClassErrorDetails |
python | ray-project__ray | rllib/core/rl_module/default_model_config.py | {
"start": 216,
"end": 11231
} | class ____:
"""Dataclass to configure all default RLlib RLModules.
Users should NOT use this class for configuring their own custom RLModules, but
use a custom `model_config` dict with arbitrary (str) keys passed into the
`RLModuleSpec` used to define the custom RLModule.
For example:
.. testcode::
import gymnasium as gym
import numpy as np
from ray.rllib.core.rl_module.rl_module import RLModuleSpec
from ray.rllib.examples.rl_modules.classes.tiny_atari_cnn_rlm import (
TinyAtariCNN
)
my_rl_module = RLModuleSpec(
module_class=TinyAtariCNN,
observation_space=gym.spaces.Box(-1.0, 1.0, (64, 64, 4), np.float32),
action_space=gym.spaces.Discrete(7),
# DreamerV3-style stack working on a 64x64, color or 4x-grayscale-stacked,
# normalized image.
model_config={
"conv_filters": [[16, 4, 2], [32, 4, 2], [64, 4, 2], [128, 4, 2]],
},
).build()
Only RLlib's default RLModules (defined by the various algorithms) should use
this dataclass. Pass an instance of it into your algorithm config like so:
.. testcode::
from ray.rllib.algorithms.ppo import PPOConfig
from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
config = (
PPOConfig()
.rl_module(
model_config=DefaultModelConfig(fcnet_hiddens=[32, 32]),
)
)
"""
# ====================================================
# MLP stacks
# ====================================================
# __sphinx_doc_default_model_config_fcnet_begin__
#: List containing the sizes (number of nodes) of a fully connected (MLP) stack.
#: Note that in an encoder-based default architecture with a policy head (and
#: possible value head), this setting only affects the encoder component. To set the
#: policy (and value) head sizes, use `post_fcnet_hiddens`, instead. For example,
#: if you set `fcnet_hiddens=[32, 32]` and `post_fcnet_hiddens=[64]`, you would get
#: an RLModule with a [32, 32] encoder, a [64, act-dim] policy head, and a [64, 1]
#: value head (if applicable).
fcnet_hiddens: List[int] = field(default_factory=lambda: [256, 256])
#: Activation function descriptor for the stack configured by `fcnet_hiddens`.
#: Supported values are: 'tanh', 'relu', 'swish' (or 'silu', which is the same),
#: and 'linear' (or None).
fcnet_activation: str = "tanh"
#: Initializer function or class descriptor for the weight/kernel matrices in the
#: stack configured by `fcnet_hiddens`. Supported values are the initializer names
#: (str), classes or functions listed by the frameworks (`torch`). See
#: https://pytorch.org/docs/stable/nn.init.html for `torch`. If `None` (default),
#: the default initializer defined by `torch` is used.
fcnet_kernel_initializer: Optional[Union[str, Callable]] = None
#: Kwargs passed into the initializer function defined through
#: `fcnet_kernel_initializer`.
fcnet_kernel_initializer_kwargs: Optional[dict] = None
#: Initializer function or class descriptor for the bias vectors in the stack
#: configured by `fcnet_hiddens`. Supported values are the initializer names (str),
#: classes or functions listed by the frameworks (`torch`). See
#: https://pytorch.org/docs/stable/nn.init.html for `torch`. If `None` (default),
#: the default initializer defined by `torch` is used.
fcnet_bias_initializer: Optional[Union[str, Callable]] = None
#: Kwargs passed into the initializer function defined through
#: `fcnet_bias_initializer`.
fcnet_bias_initializer_kwargs: Optional[dict] = None
# __sphinx_doc_default_model_config_fcnet_end__
# ====================================================
# Conv2D stacks
# ====================================================
# __sphinx_doc_default_model_config_conv_begin__
#: List of lists of format [num_out_channels, kernel, stride] defining a Conv2D
#: stack if the input space is 2D. Each item in the outer list represents one Conv2D
#: layer. `kernel` and `stride` may be single ints (width and height have same
#: value) or 2-tuples (int, int) specifying width and height dimensions separately.
#: If None (default) and the input space is 2D, RLlib tries to find a default filter
#: setup given the exact input dimensions.
conv_filters: Optional[ConvFilterSpec] = None
#: Activation function descriptor for the stack configured by `conv_filters`.
#: Supported values are: 'tanh', 'relu', 'swish' (or 'silu', which is the same), and
#: 'linear' (or None).
conv_activation: str = "relu"
#: Initializer function or class descriptor for the weight/kernel matrices in the
#: stack configured by `conv_filters`. Supported values are the initializer names
#: (str), classes or functions listed by the frameworks (`torch`). See
#: https://pytorch.org/docs/stable/nn.init.html for `torch`. If `None` (default),
#: the default initializer defined by `torch` is used.
conv_kernel_initializer: Optional[Union[str, Callable]] = None
#: Kwargs passed into the initializer function defined through
#: `conv_kernel_initializer`.
conv_kernel_initializer_kwargs: Optional[dict] = None
#: Initializer function or class descriptor for the bias vectors in the stack
#: configured by `conv_filters`. Supported values are the initializer names (str),
#: classes or functions listed by the frameworks (`torch`). See
#: https://pytorch.org/docs/stable/nn.init.html for `torch`. If `None` (default),
#: the default initializer defined by `torch` is used.
conv_bias_initializer: Optional[Union[str, Callable]] = None
#: Kwargs passed into the initializer function defined through
#: `conv_bias_initializer`.
conv_bias_initializer_kwargs: Optional[dict] = None
# __sphinx_doc_default_model_config_conv_end__
# ====================================================
# Head configs (e.g. policy- or value function heads)
# ====================================================
#: List containing the sizes (number of nodes) of a fully connected (MLP) head (ex.
#: policy-, value-, or Q-head). Note that in order to configure the encoder
#: architecture, use `fcnet_hiddens`, instead.
head_fcnet_hiddens: List[int] = field(default_factory=lambda: [])
#: Activation function descriptor for the stack configured by `head_fcnet_hiddens`.
#: Supported values are: 'tanh', 'relu', 'swish' (or 'silu', which is the same),
#: and 'linear' (or None).
head_fcnet_activation: str = "relu"
#: Initializer function or class descriptor for the weight/kernel matrices in the
#: stack configured by `head_fcnet_hiddens`. Supported values are the initializer
#: names (str), classes or functions listed by the frameworks (`torch`). See
#: https://pytorch.org/docs/stable/nn.init.html for `torch`. If `None` (default),
#: the default initializer defined by `torch` is used.
head_fcnet_kernel_initializer: Optional[Union[str, Callable]] = None
#: Kwargs passed into the initializer function defined through
#: `head_fcnet_kernel_initializer`.
head_fcnet_kernel_initializer_kwargs: Optional[dict] = None
#: Initializer function or class descriptor for the bias vectors in the stack
#: configured by `head_fcnet_hiddens`. Supported values are the initializer names
#: (str), classes or functions listed by the frameworks (`torch`). See
#: https://pytorch.org/docs/stable/nn.init.html for `torch`. If `None` (default),
#: the default initializer defined by `torch` is used.
head_fcnet_bias_initializer: Optional[Union[str, Callable]] = None
#: Kwargs passed into the initializer function defined through
#: `head_fcnet_bias_initializer`.
head_fcnet_bias_initializer_kwargs: Optional[dict] = None
# ====================================================
# Continuous action settings
# ====================================================
#: If True, for DiagGaussian action distributions (or any other continuous control
#: distribution), make the second half of the policy's outputs a "free" bias
#: parameter, rather than state-/NN-dependent nodes. In this case, the number of
#: nodes of the policy head have the same dimension as the action space as no slots
#: for log(stddev) are required (only for the mean values).
free_log_std: bool = False
#: Whether to clip the log(stddev) when using a DiagGaussian action distribution
#: (or any other continuous control distribution). This can stabilize training and
#: avoid very small or large log(stddev) values leading to numerical instabilities
#: turning outputs to `nan`. The default is to clamp the log(stddev) in between
#: -20 and 20. Set to float("inf") for no clamping.
log_std_clip_param: float = 20.0
#: Whether encoder layers (defined by `fcnet_hiddens` or `conv_filters`) should be
#: shared between policy- and value function.
vf_share_layers: bool = True
# ====================================================
# LSTM settings
# ====================================================
#: Whether to wrap the encoder component (defined by `fcnet_hiddens` or
#: `conv_filters`) with an LSTM.
use_lstm: bool = False
#: The maximum seq len for building the train batch for an LSTM model.
#: Defaults to 20.
max_seq_len: int = 20
#: The size of the LSTM cell.
lstm_cell_size: int = 256
lstm_use_prev_action: bool = False
lstm_use_prev_reward: bool = False
#: Initializer function or class descriptor for the weight/kernel matrices in the
#: LSTM layer. Supported values are the initializer names (str), classes or
#: functions listed by the frameworks (`torch`). See
#: https://pytorch.org/docs/stable/nn.init.html for `torch`. If `None` (default),
#: the default initializer defined by `torch` is used.
lstm_kernel_initializer: Optional[Union[str, Callable]] = None
#: Kwargs passed into the initializer function defined through
#: `lstm_kernel_initializer`.
lstm_kernel_initializer_kwargs: Optional[dict] = None
#: Initializer function or class descriptor for the bias vectors in the stack
#: configured by the LSTM layer. Supported values are the initializer names (str),
#: classes or functions listed by the frameworks (`torch`). See
#: https://pytorch.org/docs/stable/nn.init.html for `torch`. If `None` (default),
#: the default initializer defined by `torch` is used.
lstm_bias_initializer: Optional[Union[str, Callable]] = None
#: Kwargs passed into the initializer function defined through
#: `lstm_bias_initializer`.
lstm_bias_initializer_kwargs: Optional[dict] = None
| DefaultModelConfig |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/values.py | {
"start": 12037,
"end": 13872
} | class ____(PerWorkerDatasetFromDatasetFunction):
"""Represents worker-distributed datasets created from a dataset."""
def __init__(self, dataset, coordinator):
"""Makes an iterable from datasets created by the given dataset.
It creates a dataset_fn which deserializes a dataset from a graph under the
hood.
Args:
dataset: A tf.data.Dataset, a DistributedDataset or a
DistributedDatasetsFromFunction
coordinator: a `ClusterCoordinator` object, used to create dataset
resources.
"""
if isinstance(dataset, input_lib.DistributedDataset):
original_dataset = dataset._original_dataset
serialized = serialize_dataset_to_graph(original_dataset)
def dataset_fn():
deserialized = deserialize_dataset_from_graph(
serialized, original_dataset.element_spec)
dataset.build(dataset_to_replace=deserialized)
return dataset
elif isinstance(dataset, input_lib.DistributedDatasetsFromFunction):
def dataset_fn():
dataset.build()
return dataset
elif isinstance(dataset, dataset_ops.Dataset):
serialized = serialize_dataset_to_graph(dataset)
def dataset_fn():
return deserialize_dataset_from_graph(serialized, dataset.element_spec)
else:
raise ValueError("Unexpected dataset type!")
super(PerWorkerDatasetFromDataset, self).__init__(dataset_fn, coordinator)
def get_per_worker_dataset(dataset_or_dataset_fn, coordinator):
"""Returns a per-worker dataset from a dataset or a dataset function."""
if callable(dataset_or_dataset_fn):
return PerWorkerDatasetFromDatasetFunction(dataset_or_dataset_fn,
coordinator)
else:
return PerWorkerDatasetFromDataset(dataset_or_dataset_fn, coordinator)
| PerWorkerDatasetFromDataset |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-dashscope/llama_index/readers/dashscope/domain/lease_domains.py | {
"start": 6809,
"end": 7074
} | class ____(Enum):
HTTP = "HTTP"
@classmethod
def from_value(cls, value):
for member in cls:
if member.value == value:
return member
raise ValueError(f"No enum member found for value '{value}'")
| FileDownloadType |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.