language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/dynamic_sampling/tasks/test_tasks.py | {
"start": 3932,
"end": 13298
} | class ____(TasksTestCase):
@property
def now(self):
return MOCK_DATETIME
@staticmethod
def add_sample_rate_per_project(org_id: int, project_id: int, sample_rate: float):
redis_client = get_redis_client_for_ds()
redis_client.hset(
name=generate_boost_low_volume_projects_cache_key(org_id),
key=str(project_id),
value=sample_rate,
)
@staticmethod
def sampling_tier_side_effect(*args, **kwargs):
volume = args[1]
if volume == 20:
return 100_000, 0.25
# We want to also hardcode the error case, to test how the system reacts to errors.
elif volume == 0:
return None
return volume, 1.0
@staticmethod
def forecasted_volume_side_effect(*args, **kwargs):
return kwargs["volume"]
@patch("sentry.quotas.backend.get_blended_sample_rate")
def test_boost_low_volume_projects_with_no_dynamic_sampling(self, get_blended_sample_rate):
get_blended_sample_rate.return_value = 0.25
test_org = self.create_old_organization(name="sample-org")
self.create_project_and_add_metrics("a", 9, test_org)
self.create_project_and_add_metrics("b", 7, test_org)
self.create_project_and_add_metrics("c", 3, test_org)
self.create_project_and_add_metrics("d", 1, test_org)
with self.tasks():
sliding_window_org()
boost_low_volume_projects()
@with_feature("organizations:dynamic-sampling")
@patch("sentry.quotas.backend.get_blended_sample_rate")
def test_boost_low_volume_projects_simple(
self,
get_blended_sample_rate,
):
get_blended_sample_rate.return_value = 0.25
# Create a org
test_org = self.create_old_organization(name="sample-org")
# Create 4 projects
proj_a = self.create_project_and_add_metrics("a", 9, test_org)
proj_b = self.create_project_and_add_metrics("b", 7, test_org)
proj_c = self.create_project_and_add_metrics("c", 3, test_org)
proj_d = self.create_project_and_add_metrics("d", 1, test_org)
with self.tasks():
sliding_window_org()
boost_low_volume_projects()
# we expect only uniform rule
# also we test here that `generate_rules` can handle trough redis long floats
assert generate_rules(proj_a)[0]["samplingValue"] == {
"type": "sampleRate",
"value": pytest.approx(0.14814814814814817),
}
assert generate_rules(proj_b)[0]["samplingValue"] == {
"type": "sampleRate",
"value": pytest.approx(0.1904761904761905),
}
assert generate_rules(proj_c)[0]["samplingValue"] == {
"type": "sampleRate",
"value": pytest.approx(0.4444444444444444),
}
assert generate_rules(proj_d)[0]["samplingValue"] == {"type": "sampleRate", "value": 1.0}
@with_feature("organizations:dynamic-sampling")
@patch("sentry.quotas.backend.get_blended_sample_rate")
def test_boost_low_volume_projects_simple_with_empty_project(
self,
get_blended_sample_rate,
):
get_blended_sample_rate.return_value = 0.25
test_org = self.create_old_organization(name="sample-org")
proj_a = self.create_project_and_add_metrics("a", 9, test_org)
proj_b = self.create_project_and_add_metrics("b", 7, test_org)
proj_c = self.create_project_and_add_metrics("c", 3, test_org)
proj_d = self.create_project_and_add_metrics("d", 1, test_org)
proj_e = self.create_project_without_metrics("e", test_org)
with self.tasks():
sliding_window_org()
boost_low_volume_projects()
# we expect only uniform rule
# also we test here that `generate_rules` can handle trough redis long floats
assert generate_rules(proj_a)[0]["samplingValue"] == {
"type": "sampleRate",
"value": pytest.approx(0.14814814814814817),
}
assert generate_rules(proj_b)[0]["samplingValue"] == {
"type": "sampleRate",
"value": pytest.approx(0.1904761904761905),
}
assert generate_rules(proj_c)[0]["samplingValue"] == {
"type": "sampleRate",
"value": pytest.approx(0.4444444444444444),
}
assert generate_rules(proj_d)[0]["samplingValue"] == {"type": "sampleRate", "value": 1.0}
assert generate_rules(proj_e)[0]["samplingValue"] == {"type": "sampleRate", "value": 1.0}
@with_feature("organizations:dynamic-sampling")
@patch("sentry.quotas.backend.get_blended_sample_rate")
@patch("sentry.quotas.backend.get_transaction_sampling_tier_for_volume")
@patch("sentry.dynamic_sampling.tasks.common.extrapolate_monthly_volume")
def test_boost_low_volume_projects_simple_with_sliding_window_org_from_cache(
self,
extrapolate_monthly_volume,
get_transaction_sampling_tier_for_volume,
get_blended_sample_rate,
):
extrapolate_monthly_volume.side_effect = self.forecasted_volume_side_effect
get_transaction_sampling_tier_for_volume.side_effect = self.sampling_tier_side_effect
get_blended_sample_rate.return_value = 0.8
test_org = self.create_old_organization(name="sample-org")
proj_a = self.create_project_and_add_metrics("a", 9, test_org)
proj_b = self.create_project_and_add_metrics("b", 7, test_org)
proj_c = self.create_project_and_add_metrics("c", 3, test_org)
proj_d = self.create_project_and_add_metrics("d", 1, test_org)
with self.tasks():
sliding_window_org()
boost_low_volume_projects()
# we expect only uniform rule
# also we test here that `generate_rules` can handle trough redis long floats
assert generate_rules(proj_a)[0]["samplingValue"] == {
"type": "sampleRate",
"value": pytest.approx(0.14814814814814817),
}
assert generate_rules(proj_b)[0]["samplingValue"] == {
"type": "sampleRate",
"value": pytest.approx(0.1904761904761905),
}
assert generate_rules(proj_c)[0]["samplingValue"] == {
"type": "sampleRate",
"value": pytest.approx(0.4444444444444444),
}
assert generate_rules(proj_d)[0]["samplingValue"] == {"type": "sampleRate", "value": 1.0}
@with_feature("organizations:dynamic-sampling")
@patch(
"sentry.dynamic_sampling.tasks.boost_low_volume_projects.schedule_invalidate_project_config"
)
@patch("sentry.quotas.backend.get_blended_sample_rate")
@patch("sentry.quotas.backend.get_transaction_sampling_tier_for_volume")
@patch("sentry.dynamic_sampling.tasks.common.extrapolate_monthly_volume")
def test_config_invalidation_when_sample_rates_change(
self,
extrapolate_monthly_volume,
get_transaction_sampling_tier_for_volume,
get_blended_sample_rate,
schedule_invalidate_project_config,
):
extrapolate_monthly_volume.side_effect = self.forecasted_volume_side_effect
get_transaction_sampling_tier_for_volume.side_effect = self.sampling_tier_side_effect
get_blended_sample_rate.return_value = 0.8
test_org = self.create_old_organization(name="sample-org")
proj_a = self.create_project_and_add_metrics("a", 9, test_org)
proj_b = self.create_project_and_add_metrics("b", 7, test_org)
self.add_sample_rate_per_project(org_id=test_org.id, project_id=proj_a.id, sample_rate=0.1)
self.add_sample_rate_per_project(org_id=test_org.id, project_id=proj_b.id, sample_rate=0.2)
with self.tasks():
sliding_window_org()
boost_low_volume_projects()
assert schedule_invalidate_project_config.call_count == 2
@with_feature("organizations:dynamic-sampling")
@patch(
"sentry.dynamic_sampling.tasks.boost_low_volume_projects.schedule_invalidate_project_config"
)
@patch("sentry.quotas.backend.get_blended_sample_rate")
@patch("sentry.quotas.backend.get_transaction_sampling_tier_for_volume")
@patch("sentry.dynamic_sampling.tasks.common.extrapolate_monthly_volume")
def test_config_invalidation_when_sample_rates_do_not_change(
self,
extrapolate_monthly_volume,
get_transaction_sampling_tier_for_volume,
get_blended_sample_rate,
schedule_invalidate_project_config,
):
extrapolate_monthly_volume.side_effect = self.forecasted_volume_side_effect
get_transaction_sampling_tier_for_volume.side_effect = self.sampling_tier_side_effect
get_blended_sample_rate.return_value = 1.0
test_org = self.create_old_organization(name="sample-org")
proj_a = self.create_project_and_add_metrics("a", 9, test_org)
proj_b = self.create_project_and_add_metrics("b", 7, test_org)
self.add_sample_rate_per_project(org_id=test_org.id, project_id=proj_a.id, sample_rate=1.0)
self.add_sample_rate_per_project(org_id=test_org.id, project_id=proj_b.id, sample_rate=1.0)
with self.tasks():
boost_low_volume_projects()
schedule_invalidate_project_config.assert_not_called()
@freeze_time(MOCK_DATETIME)
| TestBoostLowVolumeProjectsTasks |
python | ethereum__web3.py | web3/middleware/buffered_gas_estimate.py | {
"start": 494,
"end": 1652
} | class ____(Web3Middleware):
"""
Includes a gas estimate for all transactions that do not already have a gas value.
"""
def request_processor(self, method: "RPCEndpoint", params: Any) -> Any:
if method == "eth_sendTransaction":
transaction = params[0]
if "gas" not in transaction:
transaction = assoc(
transaction,
"gas",
hex(get_buffered_gas_estimate(cast("Web3", self._w3), transaction)),
)
params = (transaction,)
return method, params
# -- async -- #
async def async_request_processor(self, method: "RPCEndpoint", params: Any) -> Any:
if method == "eth_sendTransaction":
transaction = params[0]
if "gas" not in transaction:
gas_estimate = await async_get_buffered_gas_estimate(
cast("AsyncWeb3[Any]", self._w3), transaction
)
transaction = assoc(transaction, "gas", hex(gas_estimate))
params = (transaction,)
return method, params
| BufferedGasEstimateMiddleware |
python | huggingface__transformers | src/transformers/utils/dummy_tokenizers_objects.py | {
"start": 129,
"end": 304
} | class ____(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
| PreTrainedTokenizerFast |
python | spyder-ide__spyder | spyder/plugins/pythonpath/container.py | {
"start": 822,
"end": 8796
} | class ____(PluginMainContainer):
sig_pythonpath_changed = Signal(object, bool)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# ---- PluginMainContainer API
# -------------------------------------------------------------------------
def setup(self):
# Migrate to new config options if necessary
if not self.get_conf("config_options_migrated", False):
self._migrate_to_config_options()
# This attribute is only used to detect changes and after initializing
# here should only be set in update_active_project_path.
self._project_path = OrderedDict()
# These attributes are only used to detect changes and after
# initializing here should only be set in _save_paths.
self._user_paths = OrderedDict(self.get_conf('user_paths'))
self._system_paths = self.get_conf('system_paths')
self._prioritize = self.get_conf('prioritize')
self._spyder_pythonpath = self.get_conf('spyder_pythonpath')
# Path manager dialog
self.path_manager_dialog = PathManager(parent=self, sync=True)
self.path_manager_dialog.sig_path_changed.connect(
self._save_paths
)
self.path_manager_dialog.redirect_stdio.connect(
self.sig_redirect_stdio_requested)
# Path manager action
self.path_manager_action = self.create_action(
PythonpathActions.Manager,
_("PYTHONPATH manager"),
icon=self.create_icon('pythonpath'),
triggered=self.show_path_manager
)
def update_actions(self):
pass
# ---- Public API
# -------------------------------------------------------------------------
def update_active_project_path(self, path):
"""
Update active project path.
_project_path is set in this method and nowhere else.
"""
# _project_path should be reset whenever it is updated.
self._project_path = OrderedDict()
if path is None:
logger.debug("Update Spyder PYTHONPATH because project was closed")
else:
logger.debug(f"Add project paths to Spyder PYTHONPATH: {path}")
self._project_path.update({path: True})
self._save_paths()
def show_path_manager(self):
"""
Show path manager dialog.
"""
# Do not update paths if widget is already open,
# see spyder-ide/spyder#20808.
if not self.path_manager_dialog.isVisible():
self.path_manager_dialog.update_paths(
project_path=self._project_path,
user_paths=self._user_paths,
system_paths=self._system_paths,
prioritize=self._prioritize
)
# Show and give it focus
self.path_manager_dialog.show()
self.path_manager_dialog.activateWindow()
self.path_manager_dialog.raise_()
self.path_manager_dialog.setFocus()
def get_spyder_pythonpath(self):
"""Return active Spyder PYTHONPATH as a list of paths."""
# Desired behavior is project_path | user_paths | system_paths, but
# Python 3.8 does not support | operator for OrderedDict.
all_paths = OrderedDict(reversed(self._system_paths.items()))
all_paths.update(reversed(self._user_paths.items()))
all_paths.update(reversed(self._project_path.items()))
all_paths = OrderedDict(reversed(all_paths.items()))
return [p for p, v in all_paths.items() if v]
# ---- Private API
# -------------------------------------------------------------------------
def _save_paths(self, user_paths=None, system_paths=None, prioritize=None):
"""
Save user and system path dictionaries and prioritize to config.
Parameters
----------
user_paths: OrderedDict
Paths set by the user.
system_paths: OrderedDict
Paths set in the PYTHONPATH environment variable.
prioritize: bool
Whether paths should be prepended (True) or appended (False) to
sys.path.
Notes
-----
- Each dictionary key is a path and the value is the active state.
- sig_pythonpath_changed is emitted from this method, and nowhere else,
on condition that _spyder_pythonpath changed.
"""
assert isinstance(user_paths, (type(None), OrderedDict))
assert isinstance(system_paths, (type(None), OrderedDict))
assert isinstance(prioritize, (type(None), bool))
emit = False
# Don't set options unless necessary
if user_paths is not None and user_paths != self._user_paths:
logger.debug(f"Saving user paths: {user_paths}")
self.set_conf('user_paths', dict(user_paths))
self._user_paths = user_paths
if system_paths is not None and system_paths != self._system_paths:
logger.debug(f"Saving system paths: {system_paths}")
self.set_conf('system_paths', dict(system_paths))
self._system_paths = system_paths
if prioritize is not None and prioritize != self._prioritize:
logger.debug(f"Saving prioritize: {prioritize}")
self.set_conf('prioritize', prioritize)
self._prioritize = prioritize
emit = True
spyder_pythonpath = self.get_spyder_pythonpath()
if spyder_pythonpath != self._spyder_pythonpath:
logger.debug(f"Saving Spyder pythonpath: {spyder_pythonpath}")
self.set_conf('spyder_pythonpath', spyder_pythonpath)
self._spyder_pythonpath = spyder_pythonpath
emit = True
# Only emit signal if spyder_pythonpath or prioritize changed
if emit:
self.sig_pythonpath_changed.emit(
self._spyder_pythonpath, self._prioritize
)
def _migrate_to_config_options(self):
"""
Migrate paths saved in the `path` and `not_active_path` files located
in our config directory to our config system.
# TODO: Remove for Spyder 7
"""
path_file = get_conf_path('path')
not_active_path_file = get_conf_path('not_active_path')
config_path = self.get_conf('path', ())
config_not_active_path = self.get_conf('not_active_path', ())
system_path = self.get_conf('system_path', ())
path = []
not_active_path = []
# Get path from file
if osp.isfile(path_file):
with open(path_file, 'r', encoding='utf-8') as f:
path = f.read().splitlines()
try:
os.remove(path_file)
except OSError:
pass
# Get inactive paths from file
if osp.isfile(not_active_path_file):
with open(not_active_path_file, 'r', encoding='utf-8') as f:
not_active_path = f.read().splitlines()
try:
os.remove(not_active_path_file)
except OSError:
pass
# Get path from config; supersedes paths from file
if config_path:
path = config_path
# Get inactive path from config; supersedes paths from file
if config_not_active_path is not None:
not_active_path = config_not_active_path
# Get system path
system_paths = {}
if system_path:
system_paths = {p: p not in not_active_path for p in system_path}
# path config has all user and system paths; only want user paths
user_paths = {
p: p not in not_active_path for p in path if p not in system_path
}
# Update the configuration
self.set_conf('user_paths', user_paths)
self.set_conf('system_paths', system_paths)
# Do not migrate again
self.set_conf("config_options_migrated", True)
| PythonpathContainer |
python | instagram__MonkeyType | tests/util.py | {
"start": 358,
"end": 1487
} | class ____:
@staticmethod
def a_static_method(foo: Any) -> Optional[FrameType]:
return inspect.currentframe()
@classmethod
def a_class_method(cls, foo: Any) -> Optional[FrameType]:
return inspect.currentframe()
def an_instance_method(self, foo: Any, bar: Any) -> Optional[FrameType]:
return inspect.currentframe()
def has_complex_signature(
self,
a: Any,
b: Any,
/,
c: Any,
d: Any = 0,
*e: Any,
f: Any,
g: Any = 0,
**h: Any,
) -> Optional[FrameType]:
return inspect.currentframe()
@property
def a_property(self) -> Optional[FrameType]:
return inspect.currentframe()
@property
def a_settable_property(self) -> Optional[FrameType]:
return inspect.currentframe()
@a_settable_property.setter
def a_settable_property(self, unused) -> Optional[FrameType]:
return inspect.currentframe()
if cached_property:
@cached_property
def a_cached_property(self) -> Optional[FrameType]:
return inspect.currentframe()
| Dummy |
python | openai__openai-python | src/openai/types/chat/chat_completion_store_message.py | {
"start": 590,
"end": 916
} | class ____(ChatCompletionMessage):
id: str
"""The identifier of the chat message."""
content_parts: Optional[List[ChatCompletionStoreMessageContentPart]] = None
"""
If a content parts array was provided, this is an array of `text` and
`image_url` parts. Otherwise, null.
"""
| ChatCompletionStoreMessage |
python | pypa__warehouse | warehouse/packaging/interfaces.py | {
"start": 1545,
"end": 1885
} | class ____(Interface):
def create_service(context, request):
"""
Create the service, given the context and request for which it is being
created for, passing a name for settings.
"""
def remove_by_prefix(prefix):
"""
Remove all files matching the given prefix.
"""
| IDocsStorage |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride2.py | {
"start": 2084,
"end": 2261
} | class ____(Base2[P, R]):
def method1(self, *args: P.args, **kwargs: P.kwargs) -> R: ...
def method2(self, *args: Any, **kwargs: Any) -> R: ...
T = TypeVar("T")
| Derived2 |
python | huggingface__transformers | src/transformers/models/distilbert/modeling_distilbert.py | {
"start": 33986,
"end": 39984
} | class ____(DistilBertPreTrainedModel):
def __init__(self, config: PreTrainedConfig):
super().__init__(config)
self.distilbert = DistilBertModel(config)
self.pre_classifier = nn.Linear(config.dim, config.dim)
self.classifier = nn.Linear(config.dim, 1)
self.dropout = nn.Dropout(config.seq_classif_dropout)
# Initialize weights and apply final processing
self.post_init()
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings
"""
return self.distilbert.get_position_embeddings()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`)
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.distilbert.resize_position_embeddings(new_num_position_embeddings)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[MultipleChoiceModelOutput, tuple[torch.Tensor, ...]]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
Examples:
```python
>>> from transformers import AutoTokenizer, DistilBertForMultipleChoice
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
>>> model = DistilBertForMultipleChoice.from_pretrained("distilbert-base-cased")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
>>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors="pt", padding=True)
>>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> loss = outputs.loss
>>> logits = outputs.logits
```"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.distilbert(
input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
position_ids=position_ids,
return_dict=True,
**kwargs,
)
hidden_state = outputs[0] # (bs * num_choices, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs * num_choices, dim)
pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim)
pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim)
pooled_output = self.dropout(pooled_output) # (bs * num_choices, dim)
logits = self.classifier(pooled_output) # (bs * num_choices, 1)
reshaped_logits = logits.view(-1, num_choices) # (bs, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
| DistilBertForMultipleChoice |
python | kubernetes-client__python | kubernetes/client/models/v1_secret_key_selector.py | {
"start": 383,
"end": 5690
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'key': 'str',
'name': 'str',
'optional': 'bool'
}
attribute_map = {
'key': 'key',
'name': 'name',
'optional': 'optional'
}
def __init__(self, key=None, name=None, optional=None, local_vars_configuration=None): # noqa: E501
"""V1SecretKeySelector - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._key = None
self._name = None
self._optional = None
self.discriminator = None
self.key = key
if name is not None:
self.name = name
if optional is not None:
self.optional = optional
@property
def key(self):
"""Gets the key of this V1SecretKeySelector. # noqa: E501
The key of the secret to select from. Must be a valid secret key. # noqa: E501
:return: The key of this V1SecretKeySelector. # noqa: E501
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this V1SecretKeySelector.
The key of the secret to select from. Must be a valid secret key. # noqa: E501
:param key: The key of this V1SecretKeySelector. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and key is None: # noqa: E501
raise ValueError("Invalid value for `key`, must not be `None`") # noqa: E501
self._key = key
@property
def name(self):
"""Gets the name of this V1SecretKeySelector. # noqa: E501
Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:return: The name of this V1SecretKeySelector. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1SecretKeySelector.
Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:param name: The name of this V1SecretKeySelector. # noqa: E501
:type: str
"""
self._name = name
@property
def optional(self):
"""Gets the optional of this V1SecretKeySelector. # noqa: E501
Specify whether the Secret or its key must be defined # noqa: E501
:return: The optional of this V1SecretKeySelector. # noqa: E501
:rtype: bool
"""
return self._optional
@optional.setter
def optional(self, optional):
"""Sets the optional of this V1SecretKeySelector.
Specify whether the Secret or its key must be defined # noqa: E501
:param optional: The optional of this V1SecretKeySelector. # noqa: E501
:type: bool
"""
self._optional = optional
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1SecretKeySelector):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1SecretKeySelector):
return True
return self.to_dict() != other.to_dict()
| V1SecretKeySelector |
python | scikit-learn__scikit-learn | sklearn/tests/metadata_routing_common.py | {
"start": 16435,
"end": 17045
} | class ____(MetaEstimatorMixin, RegressorMixin, BaseEstimator):
"""A meta-regressor which is only a router."""
def __init__(self, estimator):
self.estimator = estimator
def fit(self, X, y, **fit_params):
params = process_routing(self, "fit", **fit_params)
self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit)
def get_metadata_routing(self):
router = MetadataRouter(owner=self).add(
estimator=self.estimator,
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
return router
| MetaRegressor |
python | pytorch__pytorch | test/test_stateless.py | {
"start": 1015,
"end": 36086
} | class ____(TestCase):
def _run_call_with_mock_module(self, module, functional_call, device='cpu', prefix=''):
x = torch.rand((1, 1)).to(device)
weight = torch.tensor([[1.0]], device=device)
bias = torch.tensor([0.0], device=device)
buffer = torch.tensor([0.0], device=device)
if prefix != '':
parameters = {f'{prefix}.l1.weight': weight,
f'{prefix}.l1.bias': bias,
f'{prefix}.buffer': buffer}
else:
parameters = {'l1.weight': weight,
'l1.bias': bias,
'buffer': buffer}
to_check = module
if prefix != '':
to_check = getattr(module, prefix)
prev_weight = to_check.l1.weight.clone()
prev_buffer = to_check.buffer.clone()
# the parameters represent an identity function contrary to the
# existing params in module. So here we expect the result to be the
# same as the input if the weight swapping went well.
res = functional_call(module, parameters, x)
self.assertEqual(x, res)
# check that the weight remain unmodified
cur_weight = to_check.l1.weight
cur_buffer = to_check.buffer
self.assertEqual(cur_weight, prev_weight)
self.assertEqual(cur_buffer, prev_buffer)
@contextlib.contextmanager
def _ensure_module_unchanged(self, module, message):
orig_parameters, orig_buffers = tuple(module.parameters()), tuple(module.buffers())
orig_tensors = orig_parameters + orig_buffers
orig_tensors_values = tuple(t.clone() for t in orig_tensors)
try:
yield module
finally:
parameters, buffers = tuple(module.parameters()), tuple(module.buffers())
self.assertTrue(
len(parameters) == len(orig_parameters)
and len(buffers) == len(orig_buffers)
and all(
t1 is t2 and torch.allclose(t1, t3)
for t1, t2, t3 in zip(
orig_tensors,
parameters + buffers,
orig_tensors_values,
)
),
message,
)
@parametrize("functional_call", [
subtest(torch.func.functional_call, "torch_func"),
subtest(stateless.functional_call, "stateless")
])
def test_functional_call(self, functional_call):
module = MockModule()
self._run_call_with_mock_module(module, functional_call)
@parametrize("functional_call", [
subtest(torch.func.functional_call, "torch_func"),
subtest(stateless.functional_call, "stateless")
])
def test_functional_call_with_jit(self, functional_call):
module = MockModule()
jit_module = torch.jit.script(module)
with self.assertRaisesRegex(
RuntimeError,
r'used with Jitted modules'
):
self._run_call_with_mock_module(jit_module, functional_call)
x = torch.rand((1, 1))
traced_module = torch.jit.trace(module, x)
with self.assertRaisesRegex(
RuntimeError,
r'used with Jitted modules'
):
self._run_call_with_mock_module(traced_module, functional_call)
@unittest.skipIf(not TEST_MULTIGPU, 'multi-GPU not supported')
@unittest.skip("This doesn't work right now")
@parametrize("functional_call", [
subtest(torch.func.functional_call, "torch_func"),
subtest(stateless.functional_call, "stateless")
])
def test_functional_call_with_data_parallel(self, functional_call):
module = MockModule()
module.cuda()
dp_module = torch.nn.DataParallel(module, [0, 1])
self._run_call_with_mock_module(dp_module, functional_call, device='cuda', prefix='module')
@unittest.skipIf(not TEST_MULTIGPU, 'multi-GPU not supported')
@parametrize("functional_call", [
subtest(torch.func.functional_call, "torch_func"),
subtest(stateless.functional_call, "stateless")
])
def test_functional_call_with_data_parallel_error(self, functional_call):
module = MockModule()
module.cuda()
dp_module = torch.nn.DataParallel(module, [0, 1])
with self.assertRaisesRegex(RuntimeError, r'used with nn.DataParallel module'):
functional_call(
dp_module,
{'module.weight': torch.zeros(5, device='cuda')},
(torch.ones(2, 5, device='cuda'),))
@parametrize("functional_call", [
subtest(torch.func.functional_call, "torch_func"),
subtest(stateless.functional_call, "stateless")
])
def test_functional_call_with_gradient(self, functional_call):
module = MockModule()
x = torch.rand((1, 1))
weight = torch.tensor([[1.0]], requires_grad=True)
bias = torch.tensor([0.0], requires_grad=True)
buffer = torch.tensor([0.0])
parameters = {'l1.weight': weight,
'l1.bias': bias,
'buffer': buffer}
res = functional_call(module, parameters, x)
# Check that a backward step calculates the gradient of the supplied parameters
res.backward()
self.assertIsNotNone(weight.grad)
self.assertIsNotNone(bias.grad)
self.assertIsNone(buffer.grad)
# Gradient was not calculated for the module stated and buffers
self.assertIsNone(module.l1.weight.grad)
self.assertIsNone(module.l1.bias.grad)
self.assertIsNone(module.buffer.grad)
@parametrize("functional_call", [
subtest(torch.func.functional_call, "torch_func"),
subtest(stateless.functional_call, "stateless")
])
def test_functional_batch_norm(self, functional_call):
module = torch.nn.BatchNorm1d(10)
module.train() # Allow stats update
# lets replace the running_mean buffer and check if its correctly updated
x = torch.full((20, 10), 128.0)
rm = torch.zeros(10)
parameters = {'running_mean': rm}
prev_rm = module.running_mean.clone()
functional_call(module, parameters, x)
cur_rm = module.running_mean
self.assertEqual(cur_rm, prev_rm)
self.assertEqual(rm, torch.full((10,), 12.8))
# Now run functional without reparameterization and check that the module has
# been updated
functional_call(module, {}, x)
self.assertEqual(module.running_mean, torch.full((10,), 12.8))
@parametrize("functional_call", [
subtest(torch.func.functional_call, "torch_func"),
subtest(stateless.functional_call, "stateless")
])
def test_circular_references(self, functional_call):
module = MockModule()
# Add a circular reference
module.l1.m = module
x = torch.rand((1, 1))
weight = torch.tensor([[1.0]])
bias = torch.tensor([0.0])
buffer = torch.tensor([0.0])
parameters = {'l1.m.l1.weight': weight,
'l1.bias': bias,
'l1.m.buffer': buffer}
prev_weight = module.l1.weight.clone()
prev_buffer = module.buffer.clone()
res = functional_call(module, parameters, x, tie_weights=False)
self.assertEqual(x, res)
# check that the weights remain unmodified and were correctly accessed
cur_weight = module.l1.weight
cur_buffer = module.buffer
self.assertEqual(cur_weight, prev_weight)
self.assertEqual(cur_buffer, prev_buffer)
    @parametrize("functional_call", [
        subtest(torch.func.functional_call, "torch_func"),
        subtest(stateless.functional_call, "stateless")
    ])
    def test_reparametrized_module_change_parametrization_original(self, functional_call):
        """Substituting the `.original` tensor of a parametrization keeps the
        parametrization itself active, and the module is restored afterwards."""
        module = MockModule()
        torch.nn.utils.parametrizations.spectral_norm(module.l1)
        self.assertTrue('l1.parametrizations.weight.original' in dict(module.named_parameters()))
        orig_sn_weight = module.l1.weight.clone()
        x = torch.rand((1, 1))
        # We substitute the parameter inside the parametrization;
        # the parametrization itself is not overwritten, so it is applied
        # with a different value for the original tensor.
        parameters = {'l1.parametrizations.weight.original': torch.nn.Parameter(torch.tensor([[1.0]])),
                      'l1.bias': torch.tensor([0.0]),
                      'buffer': torch.tensor([0.0])}
        # Spectral norm of a 1x1 [[1.0]] is [[1.0]]; with bias/buffer zeroed
        # the result equals the input.
        res = functional_call(module, parameters, x)
        self.assertEqual(x, res)
        # verify that the spectral normalization is still applied
        self.assertTrue('l1.parametrizations.weight.original' in dict(module.named_parameters()))
        self.assertEqual(orig_sn_weight, module.l1.weight)
    @parametrize("functional_call", [
        subtest(torch.func.functional_call, "torch_func"),
        subtest(stateless.functional_call, "stateless")
    ])
    def test_reparametrize_module_fail_reset_to_original(self, functional_call):
        """A call that fails mid-forward must still restore the parametrized
        module to its original state."""
        module = MockModule()
        torch.nn.utils.parametrizations.spectral_norm(module.l1)
        self.assertTrue('l1.parametrizations.weight.original' in dict(module.named_parameters()))
        orig_sn_weight = module.l1.weight.clone()
        # We substitute the parameter inside the parametrization;
        # the parametrization itself is not overwritten, so it is applied
        # with a different value for the original tensor.
        parameters = {'l1.parametrizations.weight.original': torch.nn.Parameter(torch.tensor([[1.0]])),
                      'l1.bias': torch.tensor([0.0]),
                      'buffer': torch.tensor([0.0])}
        with self.assertRaisesRegex(RuntimeError, "shapes cannot be multiplied"):
            # NOTE(review): wrapped in a @torch._dynamo.disable helper --
            # presumably to keep dynamo from tracing/altering the failing
            # path so the shape error stays observable; confirm.
            @torch._dynamo.disable
            def _error_case():
                x = torch.rand((4, 5))  # to work, it should be of size (1, 1)
                functional_call(module, parameters, x)  # this call will fail because x is the wrong size
            _error_case()
        # verify that the spectral normalization is still applied
        self.assertTrue('l1.parametrizations.weight.original' in dict(module.named_parameters()))
        self.assertEqual(orig_sn_weight, module.l1.weight)
@parametrize("functional_call", [
subtest(torch.func.functional_call, "torch_func"),
subtest(stateless.functional_call, "stateless")
])
def test_reparametrize_some_weights(self, functional_call):
module = MockModule()
weight = torch.tensor([[2.0]])
extra = torch.tensor([1.0])
parameters = {'l1.weight': weight}
x = torch.randn(1, 1)
out = functional_call(module, parameters, x)
self.assertEqual(out, x * weight + module.l1.bias + module.buffer)
parameters = {'l1.weight': weight,
'extra': extra}
x = torch.randn(1, 1)
out = functional_call(module, parameters, x)
self.assertEqual(out, x * weight + module.l1.bias + module.buffer)
    @parametrize("functional_call", [
        subtest(torch.func.functional_call, "torch_func"),
        subtest(stateless.functional_call, "stateless")
    ])
    def test_reparametrize_strict(self, functional_call):
        """strict=True requires the substitution dict to cover exactly the
        module's parameters and buffers; any missing or unexpected key raises
        and leaves the module unchanged."""
        module = MockModule()
        weight = torch.tensor([[2.0]])
        bias = torch.tensor([5.0])
        buffer = torch.tensor([3.0])
        extra = torch.tensor([1.0])
        # All weights no error
        parameters = {'l1.weight': weight,
                      'l1.bias': bias,
                      'buffer': buffer}
        x = torch.randn(1, 1)
        with self._ensure_module_unchanged(
            module,
            'the module should not have been modified by a successful call',
        ):
            out = functional_call(module, parameters, x, strict=True)
            self.assertEqual(out, x * weight + bias + buffer)
        # Some weights: missing keys are reported in the error message.
        parameters = {'l1.weight': weight}
        x = torch.randn(1, 1)
        with self._ensure_module_unchanged(
            module,
            'the module should not have been modified by a failed call',
        ):
            with self.assertRaisesRegex(
                RuntimeError,
                re.escape("Missing key(s): 'buffer', 'l1.bias'."),
            ):
                out = functional_call(module, parameters, x, strict=True)
        # Extra keys: unknown names are rejected.
        parameters = {'l1.weight': weight,
                      'l1.bias': bias,
                      'buffer': buffer,
                      'extra': extra}
        x = torch.randn(1, 1)
        with self._ensure_module_unchanged(
            module,
            'the module should not have been modified by a failed call',
        ):
            with self.assertRaisesRegex(
                RuntimeError,
                re.escape("Unexpected key(s): 'extra'."),
            ):
                out = functional_call(module, parameters, x, strict=True)
        # Some weights with extra keys: both problems are reported together,
        # unexpected keys first.
        parameters = {'l1.weight': weight,
                      'extra': extra}
        x = torch.randn(1, 1)
        with self._ensure_module_unchanged(
            module,
            'the module should not have been modified by a failed call',
        ):
            with self.assertRaisesRegex(
                RuntimeError,
                re.escape("Unexpected key(s): 'extra'.") + r'\s+' + re.escape("Missing key(s): 'buffer', 'l1.bias'."),
            ):
                out = functional_call(module, parameters, x, strict=True)
    @parametrize("functional_call", [
        subtest(torch.func.functional_call, "torch_func"),
        subtest(stateless.functional_call, "stateless")
    ])
    def test_reparametrize_special(self, functional_call):
        """Special substitution values: None removes a member for the call,
        while non-tensor values and non-existent submodules raise, in every
        case leaving the module unchanged."""
        class NonTensor:
            def __repr__(self):
                return f'<{self.__class__.__name__}>'
        module = MockModule()
        weight = torch.tensor([[2.0]])
        bias = torch.tensor([5.0])
        buffer = torch.tensor([3.0])
        non_tensor = NonTensor()
        # Set to None: the member is absent from the computation.
        parameters = {'l1.weight': weight,
                      'l1.bias': None,
                      'buffer': buffer}
        x = torch.randn(1, 1)
        with self._ensure_module_unchanged(
            module,
            'the module should not have been modified by a successful call',
        ):
            out = functional_call(module, parameters, x)
            self.assertEqual(out, x * weight + buffer)
        # Set non-tensor: rejected with a TypeError naming the value.
        parameters = {'l1.weight': non_tensor}
        x = torch.randn(1, 1)
        with self._ensure_module_unchanged(
            module,
            'the module should not have been modified by a failed call',
        ):
            with self.assertRaisesRegex(
                TypeError,
                re.escape("<NonTensor> is not an instance of torch.Tensor"),
            ):
                out = functional_call(module, parameters, x)
        # Set non-tensor attribute: the existing attribute's value appears
        # in the error message.
        parameters = {'l1.weight': weight, 'foo': torch.tensor([1.0])}
        x = torch.randn(1, 1)
        with self._ensure_module_unchanged(
            module,
            'the module should not have been modified by a failed call',
        ):
            with self.assertRaisesRegex(
                TypeError,
                re.escape("attribute `foo`: 0.0 is not an instance of torch.Tensor"),
            ):
                out = functional_call(module, parameters, x)
        # Set non-exist submodule: the dotted prefix must name a real submodule.
        parameters = {'l1.weight': weight,
                      'l2.bias': bias}
        x = torch.randn(1, 1)
        with self._ensure_module_unchanged(
            module,
            'the module should not have been modified by a failed call',
        ):
            with self.assertRaisesRegex(
                AttributeError,
                re.escape("MockModule has no attribute `l2`"),
            ):
                out = functional_call(module, parameters, x)
    @parametrize("functional_call", [
        subtest(torch.func.functional_call, "torch_func"),
        subtest(stateless.functional_call, "stateless")
    ])
    def test_tied_weights_warns(self, functional_call):
        """Set up a module with a tied bias and buffer.

        NOTE(review): this test builds the tied module but performs no call
        and has no assertions -- it looks truncated; confirm intent.
        """
        module = MockModule()
        module.tied_bias = module.l1.bias
        module.tied_buffer = torch.nn.Buffer(module.buffer)
@parametrize("functional_call", [
subtest(torch.func.functional_call, "torch_func"),
subtest(stateless.functional_call, "stateless")
])
def test_reparametrize_tie_weights(self, functional_call):
module = MockTiedModule()
weight = torch.tensor([[2.0]])
bias = torch.tensor([5.0])
buffer = torch.tensor([3.0])
extra = torch.tensor([1.0])
parameters = {'l1.weight': weight,
'l1.bias': bias,
'buffer': buffer}
x = torch.randn(1, 1)
out = functional_call(module, parameters, x, tie_weights=True)
self.assertEqual(out, x * weight + bias + bias + buffer + buffer)
parameters = {'l1.weight': weight,
'l1.bias': bias,
'buffer': buffer,
'extra': extra}
x = torch.randn(1, 1)
out = functional_call(module, parameters, x, tie_weights=True)
self.assertEqual(out, x * weight + bias + bias + buffer + buffer)
@parametrize("functional_call", [
subtest(torch.func.functional_call, "torch_func"),
subtest(stateless.functional_call, "stateless")
])
def test_reparametrize_tie_some_weights(self, functional_call):
module = MockTiedModule()
weight = torch.tensor([[2.0]])
buffer = torch.tensor([3.0])
parameters = {'l1.weight': weight,
'buffer': buffer}
x = torch.randn(1, 1)
out = stateless.functional_call(module, parameters, x, tie_weights=True)
self.assertEqual(out, x * 2. + module.l1.bias + module.tied_bias + buffer + buffer)
    @parametrize("functional_call", [
        subtest(torch.func.functional_call, "torch_func"),
        subtest(stateless._functional_call, "stateless")
    ])
    def test_tied_weights_errors(self, functional_call):
        """Supplying conflicting values for tied names raises; supplying the
        identical tensors (or omitting them) does not warn.

        NOTE(review): this parametrizes over the private
        stateless._functional_call rather than the public wrapper --
        presumably so a deprecation warning from the public API cannot trip
        assertNotWarn below; confirm before "fixing".
        """
        module = MockTiedModule()
        weight = torch.tensor([[1.0]])
        bias = torch.tensor([0.0])
        buffer = torch.tensor([0.0])
        parameters = {'l1.weight': weight,
                      'l1.bias': bias,
                      'buffer': buffer}
        x = torch.randn(1, 1)
        self.assertNotWarn(lambda: functional_call(module, parameters, x, tie_weights=True))
        # if tied values are the same tensors, shouldn't warn
        parameters['tied_bias'] = bias
        parameters['tied_buffer'] = buffer
        self.assertNotWarn(lambda: functional_call(module, parameters, x, tie_weights=True))
        del parameters['tied_bias']
        del parameters['tied_buffer']
        # A different tensor for a tied name conflicts with the primary name.
        with self.assertRaisesRegex(
            ValueError,
            re.escape("functional_call got multiple values for keys ['l1.bias', 'tied_bias']"),
        ):
            parameters['tied_bias'] = torch.tensor([5.0])
            functional_call(module, parameters, x, tie_weights=True)
        del parameters['tied_bias']
        with self.assertRaisesRegex(
            ValueError,
            re.escape("functional_call got multiple values for keys ['buffer', 'tied_buffer']"),
        ):
            parameters['tied_buffer'] = torch.tensor([5.0])
            functional_call(module, parameters, x, tie_weights=True)
    def test_tied_weights_no_error_without_flag(self):
        """With tie_weights=False, conflicting values for tied names neither
        warn nor error.

        NOTE(review): calls the private stateless._functional_call directly --
        presumably so a deprecation warning from the public wrapper cannot
        trip assertNotWarn; confirm before switching to the public API.
        """
        module = MockTiedModule()
        weight = torch.tensor([[1.0]])
        bias = torch.tensor([0.0])
        buffer = torch.tensor([0.0])
        parameters = {'l1.weight': weight,
                      'l1.bias': bias,
                      'buffer': buffer}
        x = torch.randn(1, 1)
        self.assertNotWarn(lambda: stateless._functional_call(module, parameters, x, tie_weights=False))
        parameters['tied_bias'] = torch.tensor([5.0])
        self.assertNotWarn(lambda: stateless._functional_call(module, parameters, x, tie_weights=False))
        del parameters['tied_bias']
        parameters['tied_buffer'] = torch.tensor([5.0])
        self.assertNotWarn(lambda: stateless._functional_call(module, parameters, x, tie_weights=False))
@parametrize("functional_call", [
subtest(torch.func.functional_call, "torch_func"),
subtest(stateless.functional_call, "stateless")
])
def test_reparametrize_tie_weights_strict(self, functional_call):
module = MockTiedModule()
weight = torch.tensor([[2.0]])
bias = torch.tensor([5.0])
buffer = torch.tensor([3.0])
extra = torch.tensor([1.0])
# Tie weights no error
parameters = {'l1.weight': weight,
'l1.bias': bias,
'buffer': buffer}
x = torch.randn(1, 1)
with self._ensure_module_unchanged(
module,
'the module should not have been modified by a successful call',
):
out = functional_call(module, parameters, x, tie_weights=True, strict=True)
self.assertEqual(out, x * weight + bias + bias + buffer + buffer)
# Tie weights without flag
parameters = {'l1.weight': weight,
'l1.bias': bias,
'buffer': buffer}
x = torch.randn(1, 1)
with self._ensure_module_unchanged(
module,
'the module should not have been modified by a failed call',
):
with self.assertRaisesRegex(
RuntimeError,
re.escape("Missing key(s): 'tied_bias', 'tied_buffer'."),
):
out = functional_call(module, parameters, x, tie_weights=False, strict=True)
# Tie some weights
parameters = {'l1.weight': weight,
'buffer': buffer}
x = torch.randn(1, 1)
with self._ensure_module_unchanged(
module,
'the module should not have been modified by a failed call',
):
with self.assertRaisesRegex(
RuntimeError,
re.escape("Missing key(s): 'l1.bias', 'tied_bias'."),
):
out = stateless.functional_call(module, parameters, x, tie_weights=True, strict=True)
# Tie weights with extra keys
parameters = {'l1.weight': weight,
'l1.bias': bias,
'buffer': buffer,
'extra': extra}
x = torch.randn(1, 1)
with self._ensure_module_unchanged(
module,
'the module should not have been modified by a failed call',
):
with self.assertRaisesRegex(
RuntimeError,
re.escape("Unexpected key(s): 'extra'."),
):
out = stateless.functional_call(module, parameters, x, tie_weights=True, strict=True)
# Tie weights with extra keys and without flag
parameters = {'l1.weight': weight,
'l1.bias': bias,
'buffer': buffer,
'extra': extra}
x = torch.randn(1, 1)
with self._ensure_module_unchanged(
module,
'the module should not have been modified by a failed call',
):
with self.assertRaisesRegex(
RuntimeError,
re.escape("Unexpected key(s): 'extra'.") + r'\s+' + re.escape("Missing key(s): 'tied_bias', 'tied_buffer'."),
):
out = stateless.functional_call(module, parameters, x, tie_weights=False, strict=True)
# Tie some weights with extra keys
parameters = {'l1.weight': weight,
'buffer': buffer,
'extra': extra}
x = torch.randn(1, 1)
with self._ensure_module_unchanged(
module,
'the module should not have been modified by a failed call',
):
with self.assertRaisesRegex(
RuntimeError,
re.escape("Unexpected key(s): 'extra'.") + r'\s+' + re.escape("Missing key(s): 'l1.bias', 'tied_bias'."),
):
out = stateless.functional_call(module, parameters, x, tie_weights=True, strict=True)
    @parametrize("functional_call", [
        subtest(torch.func.functional_call, "torch_func"),
        subtest(stateless.functional_call, "stateless")
    ])
    def test_setattr(self, functional_call):
        """Rebinding a substituted buffer inside forward() updates the
        substitution dict with a new tensor; the module and the caller's
        original tensor are untouched."""
        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.foo = torch.nn.Buffer(torch.tensor([0.0]))
            def forward(self, x):
                # Out-of-place add rebinds the attribute to a fresh tensor.
                self.foo = self.foo + 1
                return x + self.foo
        foo = torch.tensor([2.0])
        x = torch.randn(1)
        a = {'foo': foo}
        mod = Foo()
        functional_call(mod, a, x)
        # Module buffer unchanged; the dict holds the rebound value (2.0 + 1),
        # which is a different tensor object than the one passed in.
        self.assertEqual(mod.foo, torch.tensor([0.0]))
        self.assertEqual(a['foo'], torch.tensor([3.0]))
        self.assertEqual(foo, torch.tensor([2.0]))
        self.assertTrue(a['foo'] is not foo)
    @parametrize("functional_call", [
        subtest(torch.func.functional_call, "torch_func"),
        subtest(stateless.functional_call, "stateless")
    ])
    def test_in_place_operator(self, functional_call):
        """An in-place op on a substituted buffer mutates the caller's tensor:
        the dict entry remains the very same object, now modified."""
        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.foo = torch.nn.Buffer(torch.tensor([0.0]))
            def forward(self, x):
                # In-place add mutates the substituted tensor directly.
                self.foo.add_(1)
                return x + self.foo
        foo = torch.tensor([2.0])
        x = torch.randn(1)
        a = {'foo': foo}
        mod = Foo()
        functional_call(mod, a, x)
        # Module buffer unchanged; unlike the rebind case, the caller's tensor
        # was mutated in place and identity is preserved.
        self.assertEqual(mod.foo, torch.tensor([0.0]))
        self.assertEqual(a['foo'], torch.tensor([3.0]))
        self.assertEqual(foo, torch.tensor([3.0]))
        self.assertTrue(a['foo'] is foo)
    @parametrize("functional_call", [
        subtest(torch.func.functional_call, "torch_func"),
        subtest(stateless.functional_call, "stateless")
    ])
    def test_setattr_strict(self, functional_call):
        """A key naming a non-existent attribute: accepted (and cleaned up) in
        non-strict mode, rejected with strict=True, and an AttributeError
        surfaces when forward() needs the attribute but it isn't supplied."""
        class Bar(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                assert not hasattr(self, 'extra')
            def forward(self, x):
                return x + self.extra
        a = {'extra': torch.zeros(())}
        mod = Bar()
        self.assertTrue(not hasattr(mod, 'extra'))
        # Non-strict: the attribute is set for the call and removed after.
        out = functional_call(mod, a, torch.ones(()))
        self.assertEqual(out, torch.ones(()))
        self.assertTrue(not hasattr(mod, 'extra'))
        a = {'extra': torch.zeros(())}
        # Strict: the unknown key is rejected up front.
        with self.assertRaisesRegex(
            RuntimeError,
            re.escape("Unexpected key(s): 'extra'."),
        ):
            out = functional_call(mod, a, torch.ones(()), strict=True)
        self.assertTrue(not hasattr(mod, 'extra'))
        a = {}
        # Without the substitution, forward() fails to find the attribute.
        with self.assertRaisesRegex(
            AttributeError,
            re.escape("'Bar' object has no attribute 'extra'"),
        ):
            out = functional_call(mod, a, torch.ones(()))
        self.assertTrue(not hasattr(mod, 'extra'))
        a = {}
        with self.assertRaisesRegex(
            AttributeError,
            re.escape("'Bar' object has no attribute 'extra'"),
        ):
            out = functional_call(mod, a, torch.ones(()), strict=True)
        self.assertTrue(not hasattr(mod, 'extra'))
@parametrize("functional_call", [
subtest(torch.func.functional_call, "torch_func"),
subtest(stateless.functional_call, "stateless")
])
def test_functional_call_with_kwargs(self, functional_call):
class Foo(torch.nn.Module):
def __init__(self, x):
super().__init__()
self.x = x
def forward(self, inp, *, other_inp):
return inp * self.x + other_inp
a = {'x': torch.zeros(2, 3)}
mod = Foo(torch.randn(2, 3))
inp, other_inp = torch.randn(2, 3), torch.randn(2, 3)
with self.assertRaisesRegex(TypeError, "missing 1 required keyword-only argument: 'other_inp'"):
functional_call(mod, a, inp)
res = functional_call(mod, a, inp, {'other_inp': other_inp})
self.assertEqual(res, other_inp)
res_1 = functional_call(mod, a, (), {'inp': inp, 'other_inp': other_inp})
self.assertEqual(res, res_1)
res_2 = functional_call(mod, a, kwargs={'inp': inp, 'other_inp': other_inp})
self.assertEqual(res, res_2)
def test_functional_call_tuple_dicts(self):
mod = MockModule()
x = torch.rand((1, 1))
parameters = {k: torch.ones_like(v) for k, v in mod.named_parameters()}
buffers = {k: torch.zeros_like(v) for k, v in mod.named_buffers()}
# two dictionaries
res = torch.func.functional_call(mod, (parameters, buffers), x)
self.assertEqual(res, x + 1)
# no dictionaries
res = torch.func.functional_call(mod, (), x)
self.assertEqual(res, mod(x))
# three dictionaries
a = ({'l1.weight': torch.ones(1, 1)}, {'l1.bias': torch.ones(1)}, {'buffer': torch.zeros(1)})
res = torch.func.functional_call(mod, a, x)
self.assertEqual(res, x + 1)
def test_functional_call_multiple_dicts_error(self):
mod = MockModule()
x = torch.rand((1, 1))
parameters = {'l1.weight': torch.zeros((1, 1)), 'l1.bias': torch.zeros((1, 1))}
repeated_parameters = {'l1.weight': torch.ones((1, 1))}
with self.assertRaisesRegex(
ValueError,
re.escape("['l1.weight'] appeared in multiple dictionaries"),
):
torch.func.functional_call(mod, (parameters, repeated_parameters), x)
@parametrize("functional_call", [
subtest(torch.func.functional_call, "torch_func"),
subtest(stateless.functional_call, "stateless")
])
def test_functional_call_member_reference(self, functional_call):
class Module(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.l1 = torch.nn.Linear(1, 1)
self.buffer = torch.nn.Buffer(torch.ones(1))
def forward(self, x):
parameters = tuple(self.parameters())
buffers = tuple(self.buffers())
return self.l1(x) + self.buffer, parameters, buffers
module = Module()
weight = torch.tensor([[2.0]])
bias = torch.tensor([5.0])
buffer = torch.tensor([3.0])
extra = torch.tensor([1.0])
extra_p = torch.nn.Parameter(extra)
# All weights
parameters = {'l1.weight': weight,
'l1.bias': bias,
'buffer': buffer}
x = torch.randn(1, 1)
out, parameters, buffers = functional_call(module, parameters, x)
self.assertEqual(out, x * weight + bias + buffer)
self.assertEqual(parameters, (weight, bias))
self.assertEqual(buffers, (buffer,))
self.assertTrue(all(t1 is t2 for t1, t2 in zip(parameters, (weight, bias))))
self.assertTrue(all(t1 is t2 for t1, t2 in zip(buffers, (buffer,))))
# Some weights
parameters = {'l1.weight': weight}
x = torch.randn(1, 1)
out, parameters, buffers = functional_call(module, parameters, x)
self.assertEqual(out, x * weight + module.l1.bias + module.buffer)
self.assertEqual(parameters, (weight, module.l1.bias))
self.assertEqual(buffers, (module.buffer,))
self.assertTrue(all(t1 is t2 for t1, t2 in zip(parameters, (weight, module.l1.bias))))
self.assertTrue(all(t1 is t2 for t1, t2 in zip(buffers, (module.buffer,))))
# All weights with extra keys
parameters = {'l1.weight': weight,
'l1.bias': bias,
'buffer': buffer,
'l1.extra': extra}
x = torch.randn(1, 1)
out, parameters, buffers = functional_call(module, parameters, x)
self.assertEqual(out, x * weight + bias + buffer)
self.assertEqual(parameters, (weight, bias))
self.assertEqual(buffers, (buffer,))
self.assertTrue(all(t1 is t2 for t1, t2 in zip(parameters, (weight, bias))))
self.assertTrue(all(t1 is t2 for t1, t2 in zip(buffers, (buffer,))))
# All weights with extra keys with parameters
parameters = {'l1.weight': weight,
'l1.bias': bias,
'buffer': buffer,
'l1.extra': extra_p}
x = torch.randn(1, 1)
out, parameters, buffers = functional_call(module, parameters, x)
self.assertEqual(out, x * weight + bias + buffer)
self.assertEqual(parameters, (weight, bias, extra_p))
self.assertEqual(buffers, (buffer,))
self.assertTrue(all(t1 is t2 for t1, t2 in zip(parameters, (weight, bias, extra_p))))
self.assertTrue(all(t1 is t2 for t1, t2 in zip(buffers, (buffer,))))
# Some weights with extra keys
parameters = {'l1.weight': weight,
'l1.extra': extra}
x = torch.randn(1, 1)
out, parameters, buffers = functional_call(module, parameters, x)
self.assertEqual(out, x * weight + module.l1.bias + module.buffer)
self.assertEqual(parameters, (weight, module.l1.bias))
self.assertEqual(buffers, (module.buffer))
self.assertTrue(all(t1 is t2 for t1, t2 in zip(parameters, (weight, module.l1.bias))))
self.assertTrue(all(t1 is t2 for t1, t2 in zip(buffers, (module.buffer,))))
# Some weights with extra keys with parameters
parameters = {'l1.weight': weight,
'l1.extra': extra_p}
x = torch.randn(1, 1)
out, parameters, buffers = functional_call(module, parameters, x)
self.assertEqual(out, x * weight + module.l1.bias + module.buffer)
self.assertEqual(parameters, (weight, module.l1.bias, extra_p))
self.assertEqual(buffers, (module.buffer))
self.assertTrue(all(t1 is t2 for t1, t2 in zip(parameters, (weight, module.l1.bias, extra_p))))
self.assertTrue(all(t1 is t2 for t1, t2 in zip(buffers, (module.buffer,))))
# Set None
parameters = {'l1.weight': weight,
'l1.bias': None}
x = torch.randn(1, 1)
out, parameters, buffers = functional_call(module, parameters, x)
self.assertEqual(out, x * weight + module.buffer)
self.assertEqual(parameters, (weight,))
self.assertEqual(buffers, (module.buffer))
self.assertTrue(all(t1 is t2 for t1, t2 in zip(parameters, (weight,))))
self.assertTrue(all(t1 is t2 for t1, t2 in zip(buffers, (module.buffer,))))
| TestStatelessFunctionalAPI |
python | getsentry__sentry | src/sentry/integrations/source_code_management/status_check.py | {
"start": 73,
"end": 508
} | class ____(str, enum.Enum):
"""
A subset of possible status values for a status check that is compatible across various providers.
For example, Github uses more state/conclusion values that won't map over to everything.
If needed, use those directly.
"""
ACTION_REQUIRED = "action_required"
IN_PROGRESS = "in_progress"
FAILURE = "failure"
NEUTRAL = "neutral"
SUCCESS = "success"
| StatusCheckStatus |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-koda-retriever/tests/koda_mocking.py | {
"start": 725,
"end": 1729
} | class ____(MockLLM):
"""Simple mock LLM that returns a response based on the prompt."""
prompt_responses: dict = PROMPT_RESPONSES
strict: bool = False
default_response: str = "concept seeking query"
@classmethod
def class_name(cls) -> str:
return "KVMockLLM"
def random_prompt(self) -> str:
"""Returns a random prompt from the prompt_responses dictionary."""
return random.choice(list(self.prompt_responses.keys()))
def complete(self, prompt: str, **kwargs) -> CompletionResponse:
"""Returns a response that was matched from the given prompt."""
if self.strict:
if prompt not in self.prompt_responses:
err = f"Prompt '{prompt}' not found in prompt_responses. Please recreate this MockLLM with the expected prompts and responses."
raise ValueError(err)
response = self.prompt_responses.get(prompt, self.default_response)
return CompletionResponse(text=response)
| KVMockLLM |
python | apache__airflow | providers/standard/src/airflow/providers/standard/operators/hitl.py | {
"start": 12445,
"end": 15826
} | class ____(HITLOperator, SkipMixin):
"""Human-in-the-loop Operator that has only 'Approval' and 'Reject' options."""
inherits_from_skipmixin = True
FIXED_ARGS = ["options", "multiple"]
APPROVE = "Approve"
REJECT = "Reject"
def __init__(
self,
*,
ignore_downstream_trigger_rules: bool = False,
fail_on_reject: bool = False,
**kwargs,
) -> None:
"""
Human-in-the-loop Operator for simple approval workflows.
This operator presents the user with two fixed options: "Approve" and "Reject".
Behavior:
- "Approve": Downstream tasks execute as normal.
- "Reject":
- Downstream tasks are skipped according to the `ignore_downstream_trigger_rules` setting.
- If `fail_on_reject=True`, the task fails instead of only skipping downstream tasks.
Warning:
Using `fail_on_reject=True` is generally discouraged. A HITLOperator's role is to collect
human input, and receiving any response—including "Reject"—indicates the task succeeded.
Treating "Reject" as a task failure mixes human decision outcomes with Airflow task
success/failure states.
Only use this option if you explicitly intend for a "Reject" response to fail the task.
Args:
ignore_downstream_trigger_rules: If True, skips all downstream tasks regardless of trigger rules.
fail_on_reject: If True, the task fails when "Reject" is selected. Generally discouraged.
Read the warning carefully before using.
"""
for arg in self.FIXED_ARGS:
if arg in kwargs:
raise ValueError(f"Passing {arg} to ApprovalOperator is not allowed.")
self.ignore_downstream_trigger_rules = ignore_downstream_trigger_rules
self.fail_on_reject = fail_on_reject
super().__init__(
options=[self.APPROVE, self.REJECT],
multiple=False,
**kwargs,
)
def execute_complete(self, context: Context, event: dict[str, Any]) -> Any:
ret = super().execute_complete(context=context, event=event)
chosen_option = ret["chosen_options"][0]
if chosen_option == self.APPROVE:
self.log.info("Approved. Proceeding with downstream tasks...")
return ret
if self.fail_on_reject and chosen_option == self.REJECT:
raise HITLRejectException('Receive "Reject"')
if not self.downstream_task_ids:
self.log.info("No downstream tasks; nothing to do.")
return ret
def get_tasks_to_skip():
if self.ignore_downstream_trigger_rules is True:
tasks = context["task"].get_flat_relatives(upstream=False)
else:
tasks = context["task"].get_direct_relatives(upstream=False)
yield from (t for t in tasks if not t.is_teardown)
tasks_to_skip = get_tasks_to_skip()
# this lets us avoid an intermediate list unless debug logging
if self.log.getEffectiveLevel() <= logging.DEBUG:
self.log.debug("Downstream task IDs %s", tasks_to_skip := list(get_tasks_to_skip()))
self.log.info("Skipping downstream tasks")
self.skip(ti=context["ti"], tasks=tasks_to_skip)
return ret
| ApprovalOperator |
python | pyca__cryptography | tests/hazmat/primitives/test_hkdf_vectors.py | {
"start": 474,
"end": 797
} | class ____:
test_hkdfsha1 = generate_hkdf_test(
load_nist_vectors,
os.path.join("KDF"),
["rfc-5869-HKDF-SHA1.txt"],
hashes.SHA1(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.hmac_supported(hashes.SHA256()),
skip_message="Does not support SHA256.",
)
| TestHKDFSHA1 |
python | dagster-io__dagster | .buildkite/buildkite-shared/buildkite_shared/quarantine.py | {
"start": 177,
"end": 4190
} | class ____:
scope: str
name: str
web_url: str
def __eq__(self, other):
return self.scope == other.scope and self.name == other.name
def __hash__(self):
return hash((self.scope, self.name))
@lru_cache
def get_buildkite_quarantined_objects(
token, org_slug, suite_slug, annotation, suppress_errors=False
) -> set[QuarantinedObject]:
quarantined_objects = set()
try:
headers = {"Authorization": f"Bearer {token}"}
url = f"https://api.buildkite.com/v2/analytics/organizations/{org_slug}/suites/{suite_slug}/tests/{annotation}"
start_time = time.time()
timeout = 10
while url and time.time() - start_time < timeout:
response = requests.get(url, headers=headers)
response.raise_for_status()
for test in response.json():
scope = test.get("scope", "")
name = " ".join(test.get("name", "").split())
web_url = test.get("web_url", "")
quarantined_object = QuarantinedObject(scope, name, web_url)
quarantined_objects.add(quarantined_object)
link_header = response.headers.get("Link", "")
next_url = None
for part in link_header.split(","):
if 'rel="next"' in part:
next_url = part[part.find("<") + 1 : part.find(">")]
break
url = next_url
except Exception as e:
logging.error(e)
if not suppress_errors:
raise e
return quarantined_objects
def filter_and_print_steps_by_quarantined(
all_steps, skip_quarantined_steps, mute_quarantined_steps
):
if (skip_quarantined_steps or mute_quarantined_steps) and not environment.run_all_tests():
filtered_steps, skipped_steps, muted_steps = filter_steps_by_quarantined(
all_steps, skip_quarantined_steps, mute_quarantined_steps
)
if skipped_steps:
for step in skipped_steps:
logging.info(f"Skipped step: {step.get('label') or 'unnamed'}")
if muted_steps:
for step in muted_steps:
logging.info(f"Muted step: {step.get('label') or 'unnamed'}")
return filtered_steps
return all_steps
def filter_steps_by_quarantined(steps, skip_quarantined_steps, mute_quarantined_steps):
if (not skip_quarantined_steps and not mute_quarantined_steps) or environment.run_all_tests():
return steps, [], []
filtered_steps = []
skipped_steps = []
muted_steps = []
for step in steps:
# Handle both individual steps and step groups
if "group" in step:
# For step groups, check if any of the steps in the group are quarantined
group_steps = step["steps"]
filtered_group_steps = []
for group_step in group_steps:
label = group_step.get("label") or ""
if label in skip_quarantined_steps:
skipped_steps.append(group_step)
elif label in mute_quarantined_steps:
group_step["soft_fail"] = True
muted_steps.append(group_step)
filtered_group_steps.append(group_step)
else:
filtered_group_steps.append(group_step)
if filtered_group_steps:
step["steps"] = filtered_group_steps
filtered_steps.append(step)
else:
# For individual steps, check if the step key is in quarantined list
label = step.get("label") or ""
if label in skip_quarantined_steps:
skipped_steps.append(step)
elif label in mute_quarantined_steps:
step["soft_fail"] = True
muted_steps.append(step)
filtered_steps.append(step)
else:
filtered_steps.append(step)
return filtered_steps, skipped_steps, muted_steps
| QuarantinedObject |
python | airbytehq__airbyte | airbyte-ci/connectors/live-tests/src/live_tests/commons/connection_objects_retrieval.py | {
"start": 679,
"end": 15165
} | class ____(Exception):
pass
def parse_config(config: dict | str | None) -> Optional[SecretDict]:
if not config:
return None
if isinstance(config, str):
return SecretDict(json.loads(config))
else:
return SecretDict(config)
def parse_catalog(catalog: dict | str | None) -> Optional[AirbyteCatalog]:
if not catalog:
return None
if isinstance(catalog, str):
return AirbyteCatalog.parse_obj(json.loads(catalog))
else:
return AirbyteCatalog.parse_obj(catalog)
def parse_configured_catalog(
configured_catalog: dict | str | None, selected_streams: set[str] | None = None
) -> Optional[ConfiguredAirbyteCatalog]:
if not configured_catalog:
return None
if isinstance(configured_catalog, str):
configured_catalog = json.loads(configured_catalog)
patched_catalog = hacks.patch_configured_catalog(configured_catalog)
catalog = ConfiguredAirbyteCatalog.parse_obj(patched_catalog)
if selected_streams:
return ConfiguredAirbyteCatalog(streams=[stream for stream in catalog.streams if stream.stream.name in selected_streams])
return catalog
def parse_state(state: dict | str | None) -> Optional[dict]:
if not state:
return None
if isinstance(state, str):
return json.loads(state)
else:
return state
def get_connector_config_from_path(config_path: Path) -> Optional[SecretDict]:
return parse_config(config_path.read_text())
def get_state_from_path(state_path: Path) -> Optional[dict]:
return parse_state(state_path.read_text())
def get_configured_catalog_from_path(path: Path, selected_streams: Optional[set[str]] = None) -> Optional[ConfiguredAirbyteCatalog]:
return parse_configured_catalog(path.read_text(), selected_streams)
COMMAND_TO_REQUIRED_OBJECT_TYPES = {
Command.SPEC: set(),
Command.CHECK: {ConnectionObject.SOURCE_CONFIG},
Command.DISCOVER: {ConnectionObject.SOURCE_CONFIG},
Command.READ: {ConnectionObject.SOURCE_CONFIG, ConnectionObject.CONFIGURED_CATALOG},
Command.READ_WITH_STATE: {
ConnectionObject.SOURCE_CONFIG,
ConnectionObject.CONFIGURED_CATALOG,
ConnectionObject.STATE,
},
}
def get_connection_objects(
requested_objects: set[ConnectionObject],
connection_id: Optional[str],
custom_config_path: Optional[Path],
custom_configured_catalog_path: Optional[Path],
custom_state_path: Optional[Path],
retrieval_reason: Optional[str],
connector_image: Optional[str] = None,
connector_version: Optional[str] = None,
auto_select_connections: bool = False,
selected_streams: Optional[set[str]] = None,
connection_subset: ConnectionSubset = ConnectionSubset.SANDBOXES,
max_connections: Optional[int] = None,
) -> List[ConnectionObjects]:
"""This function retrieves the connection objects values.
It checks that the required objects are available and raises a UsageError if they are not.
If a connection_id is provided, it retrieves the connection objects from the connection.
If custom objects are provided, it overrides the retrieved objects with them.
Args:
requested_objects (Set[ConnectionObject]): The set of requested connection objects.
connection_id (Optional[str]): The connection id to retrieve the connection objects for.
custom_config_path (Optional[Path]): The local path to the custom config to use.
custom_configured_catalog_path (Optional[Path]): The local path to the custom catalog to use.
custom_state_path (Optional[Path]): The local path to the custom state to use.
retrieval_reason (Optional[str]): The reason to access the connection objects.
fail_if_missing_objects (bool, optional): Whether to raise a ValueError if a required object is missing. Defaults to True.
connector_image (Optional[str]): The image name for the connector under test.
connector_version (Optional[str]): The version for the connector under test.
auto_select_connections (bool, optional): Whether to automatically select connections if no connection id is passed. Defaults to False.
selected_streams (Optional[Set[str]]): The set of selected streams to use when auto selecting a connection.
connection_subset (ConnectionSubset): The subset of connections to select from.
max_connections (Optional[int]): The maximum number of connections to retrieve.
Raises:
click.UsageError: If a required object is missing for the command.
click.UsageError: If a retrieval reason is missing when passing a connection id.
Returns:
List[ConnectionObjects]: List of connection objects.
"""
if connection_id and auto_select_connections:
raise ValueError("Cannot set both `connection_id` and `auto_select_connections`.")
if auto_select_connections and not connector_image:
raise ValueError("A connector image must be provided when using auto_select_connections.")
custom_config = get_connector_config_from_path(custom_config_path) if custom_config_path else None
custom_configured_catalog = (
get_configured_catalog_from_path(custom_configured_catalog_path, selected_streams) if custom_configured_catalog_path else None
)
custom_state = get_state_from_path(custom_state_path) if custom_state_path else None
is_ci = os.getenv("CI", False)
if connection_id:
if not retrieval_reason:
raise ValueError("A retrieval reason is required to access the connection objects when passing a connection id.")
connection_objects = _get_connection_objects_from_retrieved_objects(
requested_objects,
retrieval_reason=retrieval_reason,
source_docker_repository=connector_image,
source_docker_image_tag=connector_version,
selected_streams=selected_streams,
connection_id=connection_id,
custom_config=custom_config,
custom_configured_catalog=custom_configured_catalog,
custom_state=custom_state,
connection_subset=connection_subset,
max_connections=max_connections,
)
else:
if auto_select_connections:
connection_objects = _get_connection_objects_from_retrieved_objects(
requested_objects,
retrieval_reason=retrieval_reason,
source_docker_repository=connector_image,
source_docker_image_tag=connector_version,
selected_streams=selected_streams,
custom_config=custom_config,
custom_configured_catalog=custom_configured_catalog,
custom_state=custom_state,
connection_subset=connection_subset,
max_connections=max_connections,
)
else:
# We don't make any requests to the connection-retriever; it is expected that config/catalog/state have been provided if needed for the commands being run.
connection_objects = [
ConnectionObjects(
source_config=custom_config,
destination_config=custom_config,
catalog=None,
configured_catalog=custom_configured_catalog,
state=custom_state,
workspace_id=None,
source_id=None,
destination_id=None,
connection_id=None,
source_docker_image=None,
)
]
if not connection_objects:
raise ValueError("No connection objects could be fetched.")
all_connection_ids = [connection_object.connection_id for connection_object in connection_objects]
assert len(set(all_connection_ids)) == len(all_connection_ids), "Connection IDs must be unique."
return connection_objects
def _find_best_candidates_subset(candidates: List[TestingCandidate]) -> List[Tuple[TestingCandidate, List[str]]]:
"""
This function reduces the list of candidates to the best subset of candidates.
The best subset is the one which maximizes the number of streams tested and minimizes the number of candidates.
"""
candidates_sorted_by_duration = sorted(candidates, key=lambda x: x.last_attempt_duration_in_microseconds)
tested_streams = set()
candidates_and_streams_to_test = []
for candidate in candidates_sorted_by_duration:
candidate_streams_to_test = []
for stream in candidate.streams_with_data:
# The candidate is selected if one of its streams has not been tested yet
if stream not in tested_streams:
candidate_streams_to_test.append(stream)
tested_streams.add(stream)
if candidate_streams_to_test:
candidates_and_streams_to_test.append((candidate, candidate_streams_to_test))
return candidates_and_streams_to_test
def _get_connection_objects_from_retrieved_objects(
requested_objects: Set[ConnectionObject],
retrieval_reason: str,
source_docker_repository: str,
source_docker_image_tag: str,
selected_streams: Optional[Set[str]],
connection_id: Optional[str] = None,
custom_config: Optional[Dict] = None,
custom_configured_catalog: Optional[ConfiguredAirbyteCatalog] = None,
custom_state: Optional[Dict] = None,
connection_subset: ConnectionSubset = ConnectionSubset.SANDBOXES,
max_connections: Optional[int] = None,
):
console.log(
textwrap.dedent(
"""
Retrieving connection objects from the database.
We will build a subset of candidates to test.
This subset should minimize the number of candidates and sync duration while maximizing the number of streams tested.
We patch configured catalogs to only test streams once.
If the max_connections parameter is set, we will only keep the top connections with the most streams to test.
"""
)
)
try:
candidates = retrieve_testing_candidates(
source_docker_repository=source_docker_repository,
source_docker_image_tag=source_docker_image_tag,
with_streams=selected_streams,
connection_subset=connection_subset,
)
except IndexError:
raise InvalidConnectionError(
f"No candidates were found for the provided source docker image ({source_docker_repository}:{source_docker_image_tag})."
)
# If the connection_id is provided, we filter the candidates to only keep the ones with the same connection_id
if connection_id:
candidates = [candidate for candidate in candidates if candidate.connection_id == connection_id]
candidates_and_streams_to_test = _find_best_candidates_subset(candidates)
candidates_and_streams_to_test = sorted(candidates_and_streams_to_test, key=lambda x: len(x[1]), reverse=True)
if max_connections:
candidates_and_streams_to_test = candidates_and_streams_to_test[:max_connections]
number_of_streams_tested = sum([len(streams_to_test) for _, streams_to_test in candidates_and_streams_to_test])
console.log(f"Selected {len(candidates_and_streams_to_test)} candidates to test {number_of_streams_tested} streams.")
all_connection_objects = []
for candidate, streams_to_test in candidates_and_streams_to_test:
retrieved_objects = retrieve_objects(
requested_objects,
retrieval_reason=retrieval_reason,
source_docker_repository=source_docker_repository,
source_docker_image_tag=source_docker_image_tag,
connection_id=candidate.connection_id,
connection_subset=connection_subset,
)
retrieved_objects = retrieved_objects[0]
retrieved_source_config = parse_config(retrieved_objects.source_config)
retrieved_destination_config = parse_config(retrieved_objects.destination_config)
retrieved_catalog = parse_catalog(retrieved_objects.catalog)
retrieved_configured_catalog = parse_configured_catalog(retrieved_objects.configured_catalog, selected_streams)
retrieved_state = parse_state(retrieved_objects.state)
retrieved_source_docker_image = retrieved_objects.source_docker_image
connection_url = build_connection_url(retrieved_objects.workspace_id, retrieved_objects.connection_id)
if retrieved_source_docker_image is None:
raise InvalidConnectionError(
f"No docker image was found for connection ID {retrieved_objects.connection_id}. Please double check that the latest job run used version {source_docker_image_tag}. Connection URL: {connection_url}"
)
elif retrieved_source_docker_image.split(":")[0] != source_docker_repository:
raise InvalidConnectionError(
f"The provided docker image ({source_docker_repository}) does not match the image for connection ID {retrieved_objects.connection_id}. Please double check that this connection is using the correct image. Connection URL: {connection_url}"
)
elif retrieved_source_docker_image.split(":")[1] != source_docker_image_tag:
raise InvalidConnectionError(
f"The provided docker image tag ({source_docker_image_tag}) does not match the image tag for connection ID {retrieved_objects.connection_id}. Please double check that this connection is using the correct image tag and the latest job ran using this version. Connection URL: {connection_url}"
)
all_connection_objects.append(
ConnectionObjects(
source_config=custom_config if custom_config else retrieved_source_config,
destination_config=custom_config if custom_config else retrieved_destination_config,
catalog=retrieved_catalog,
configured_catalog=custom_configured_catalog if custom_configured_catalog else retrieved_configured_catalog,
state=custom_state if custom_state else retrieved_state,
workspace_id=retrieved_objects.workspace_id,
source_id=retrieved_objects.source_id,
destination_id=retrieved_objects.destination_id,
source_docker_image=retrieved_source_docker_image,
connection_id=retrieved_objects.connection_id,
)
)
return all_connection_objects
| InvalidConnectionError |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/kubernetes_engine.py | {
"start": 54298,
"end": 57585
} | class ____(GKEOperatorMixin, GoogleCloudBaseOperator):
"""
Suspend Job by given name.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GKESuspendJobOperator`
:param name: The name of the Job to suspend
:param namespace: The name of the Google Kubernetes Engine namespace.
:param location: The name of the Google Kubernetes Engine zone or region in which the
cluster resides, e.g. 'us-central1-a'
:param cluster_name: The name of the Google Kubernetes Engine cluster.
:param use_internal_ip: Use the internal IP address as the endpoint.
:param use_dns_endpoint: Use the DNS address as the endpoint.
:param project_id: The Google Developers Console project id
:param gcp_conn_id: The Google cloud connection id to use. This allows for
users to specify a service account.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple({"name", "namespace"} | set(GKEOperatorMixin.template_fields))
operator_extra_links = (KubernetesEngineJobLink(),)
def __init__(
self,
name: str,
namespace: str,
location: str,
cluster_name: str,
use_internal_ip: bool = False,
use_dns_endpoint: bool = False,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.name = name
self.namespace = namespace
self.project_id = project_id
self.location = location
self.cluster_name = cluster_name
self.gcp_conn_id = gcp_conn_id
self.use_internal_ip = use_internal_ip
self.use_dns_endpoint = use_dns_endpoint
self.impersonation_chain = impersonation_chain
self.job: V1Job | None = None
def execute(self, context: Context) -> None:
self.job = self.hook.patch_namespaced_job(
job_name=self.name,
namespace=self.namespace,
body={"spec": {"suspend": True}},
)
self.log.info(
"Job %s from cluster %s was suspended.",
self.name,
self.cluster_name,
)
KubernetesEngineJobLink.persist(
context=context,
location=self.location,
cluster_name=self.cluster_name,
namespace=self.job.metadata.namespace,
job_name=self.job.metadata.name,
project_id=self.project_id,
)
return k8s.V1Job.to_dict(self.job)
| GKESuspendJobOperator |
python | pytorch__pytorch | torch/_inductor/utils.py | {
"start": 130063,
"end": 130594
} | class ____:
"""
Metadata for Customized CUDAGraphWrapper.
Currently assumes there is 1 dynamo graph and will extend to
multiple graphs in the future.
"""
# The number of partitions that are cudagraphable.
num_partitions: int
# Index of the current partition.
partition_index: int
PartitionFnType = Callable[..., Any]
CUDAGraphWrapperType = Callable[
[PartitionFnType, CUDAGraphWrapperMetadata], PartitionFnType
]
# only incremented by user call of mark_step_begin
| CUDAGraphWrapperMetadata |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 30555,
"end": 30858
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = (
"CONFUSED",
"EYES",
"HEART",
"HOORAY",
"LAUGH",
"ROCKET",
"THUMBS_DOWN",
"THUMBS_UP",
)
| ReactionContent |
python | conda__conda | conda/exceptions.py | {
"start": 22885,
"end": 24611
} | class ____(CondaError):
def __init__(
self,
packages: Iterable[MatchSpec | PackageRecord | str],
channel_urls: Iterable[str] = (),
):
if channel_urls:
message = dals(
"""
The following packages are not available from current channels:
%(packages_formatted)s
Current channels:
%(channels_formatted)s
To search for alternate channels that may provide the conda package you're
looking for, navigate to
https://anaconda.org
and use the search bar at the top of the page.
"""
)
from .base.context import context
if context.use_only_tar_bz2:
message += dals(
"""
Note: 'use_only_tar_bz2' is enabled. This might be omitting some
packages from the index. Set this option to 'false' and retry.
"""
)
packages_formatted = dashlist(packages)
channels_formatted = dashlist(channel_urls)
else:
message = dals(
"""
The following packages are missing from the target environment:
%(packages_formatted)s
"""
)
packages_formatted = dashlist(packages)
channels_formatted = ""
super().__init__(
message,
packages=packages,
packages_formatted=packages_formatted,
channel_urls=list(channel_urls),
channels_formatted=channels_formatted,
)
| PackagesNotFoundError |
python | pytorch__pytorch | test/jit/test_dtype_analysis.py | {
"start": 4921,
"end": 10132
} | class ____(TestDtypeBase):
def test_unary(self):
# Testing the Unary Implementation that uses metatensors
def relu_inplace(x):
return x.relu_()
def log(x):
return torch.log(x)
functions = [relu_inplace, log]
input_shapes = [
((2, 2),), # Simple Case
((0, 2),), # Size 0 Tensor
((),), # zerodim
]
input_dtypes = [
(float32,), # Simple Case
(int64,), # Test how some unary ops implicitly convert to float
(complex32,), # Show we can handle complex vals as well
]
for fn, in_shapes, in_dtypes in product(functions, input_shapes, input_dtypes):
self.assert_dtype_equal(fn, in_shapes, in_dtypes)
def test_binary_tensors(self):
# Testing using Metatensors
def add(x, y):
return x + y
def div(x, y):
return x / y
functions = [add, div]
input_shapes = [
((1, 1, 2), (1, 2)), # Different Dim, non-zerodim
((), (1, 2)), # One zerodim
((1, 2), ()), # Other zerodim
((2, 0, 3), (1, 3)), # Test a tensor with a dim of 0
((), ()), # both zerodim
]
input_dtypes = [
(float32, float32), # Simple Case
(int32, int64), # Size Promotion (compliated case for 0dim tensors)
(float32, int32), # type Promotion
(int64, float32), # Type promotion with size change
(float64, complex32), # Show we can handle complex vals as well
]
for fn, in_shapes, in_dtypes in product(functions, input_shapes, input_dtypes):
self.assert_dtype_equal(fn, in_shapes, in_dtypes)
def test_binary_scalar(self):
# Test the mixing of scalar and non-scalar args
input_shapes = [
((2, 2), self.SCALAR), # Non-Zerodim vs scalar
((), self.SCALAR), # Zerodim vs scalar
# Scalar vs Scalar is automatically inferred.
]
input_dtypes = [
(float32, float32), # Simple Case
(int32, int64), # Size Promotion (compliated case for 0dim tensors)
(int32, float32), # type Promotion
]
with set_default_dtype(float32):
for in_shapes, in_dtypes in product(input_shapes, input_dtypes):
scalar_type = in_dtypes[1]
if scalar_type == float32:
def add(x, y: float):
return x + y
else:
def add(x, y: int):
return x + y
self.assert_dtype_equal(add, in_shapes, in_dtypes)
def test_custom_rules(self):
# Test some of the ops that are not covered by Metatensors
# Note that unlike the Conv2d module, the function conv2d
# does not take dtype/device arguments.
def conv2d_fn(input, weight, bias):
return torch.nn.functional.conv2d(input, weight, bias)
def adaptive_avg_pool2d_fn(input, output_size: Tuple[int]):
return torch._C._nn.adaptive_avg_pool2d(input, output_size)
for fn, inputs_fn in (
(conv2d_fn, sample_inputs_conv2d),
(adaptive_avg_pool2d_fn, sample_inputs_adaptive_avg_pool2d),
):
for dtype in (torch.int8, torch.float64):
# Gets default version for conv2d
sample_input: SampleInput = list(inputs_fn(None, "cpu", dtype, False))[
-1
]
input_args = [sample_input.input, *sample_input.args]
self.assert_dtype_equal_custom_args(fn, input_args)
def test_conv_no_mixed_args(self):
def conv2d_fn(input, weight, bias):
return torch.nn.functional.conv2d(input, weight, bias)
# Now make sure that conv2d doesn't support mixed args
conv_ins = sample_inputs_conv2d(None, "cpu", torch.float, False)
conv_in = list(conv_ins)[-1]
weight, bias = conv_in.args
weight = weight.type(torch.long)
with self.assertRaises(RuntimeError):
conv2d_fn(conv_in.input, weight, bias)
# Check that we also don't propagate
graph = torch.jit.script(conv2d_fn).graph # Note this is a cached graph
self.prop_dtype_on_graph(graph, [conv_in.input, weight, bias])
actual_dtype = self.node_output_dtype_single(graph)
self.assertEqual(actual_dtype, None)
def test_combined(self):
# Test a case with both custom rules and metatensors
def func(input, weight, bias, y):
conv_out = torch.nn.functional.conv2d(input, weight, bias)
conv_2 = conv_out + y
flattened = torch.flatten(conv_2, start_dim=2)
add_res = flattened + y
return add_res
conv_ins = sample_inputs_conv2d(None, "cpu", torch.int8, False)
conv_in = list(conv_ins)[-1]
y_val = torch.rand((1,), dtype=torch.float32)
input_args = [conv_in.input, *conv_in.args, y_val]
self.assert_dtype_equal_custom_args(func, input_args)
| TestDtypeAnalysis |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/loop14.py | {
"start": 78,
"end": 544
} | class ____:
def confirm_dialog(self) -> "State | bool":
return False
state = State()
reveal_type(state, expected_text="State")
for _ in range(1):
result = state.confirm_dialog()
if isinstance(result, State):
reveal_type(state, expected_text="State")
reveal_type(result, expected_text="State")
state = result
else:
reveal_type(state, expected_text="State")
reveal_type(result, expected_text="bool")
| State |
python | huggingface__transformers | src/transformers/models/time_series_transformer/modeling_time_series_transformer.py | {
"start": 21823,
"end": 26964
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: TimeSeriesTransformerConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = TimeSeriesTransformerAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
is_causal=True,
config=config,
layer_idx=layer_idx,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = TimeSeriesTransformerAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
config=config,
layer_idx=layer_idx,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
@auto_docstring
| TimeSeriesTransformerDecoderLayer |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_wx.py | {
"start": 44431,
"end": 47911
} | class ____(ToolContainerBase, wx.ToolBar):
_icon_extension = '.svg'
def __init__(self, toolmanager, parent=None, style=wx.TB_BOTTOM):
if parent is None:
parent = toolmanager.canvas.GetParent()
ToolContainerBase.__init__(self, toolmanager)
wx.ToolBar.__init__(self, parent, -1, style=style)
self._space = self.AddStretchableSpace()
self._label_text = wx.StaticText(self, style=wx.ALIGN_RIGHT)
self.AddControl(self._label_text)
self._toolitems = {}
self._groups = {} # Mapping of groups to the separator after them.
def _get_tool_pos(self, tool):
"""
Find the position (index) of a wx.ToolBarToolBase in a ToolBar.
``ToolBar.GetToolPos`` is not useful because wx assigns the same Id to
all Separators and StretchableSpaces.
"""
pos, = (pos for pos in range(self.ToolsCount)
if self.GetToolByPos(pos) == tool)
return pos
def add_toolitem(self, name, group, position, image_file, description,
toggle):
# Find or create the separator that follows this group.
if group not in self._groups:
self._groups[group] = self.InsertSeparator(
self._get_tool_pos(self._space))
sep = self._groups[group]
# List all separators.
seps = [t for t in map(self.GetToolByPos, range(self.ToolsCount))
if t.IsSeparator() and not t.IsStretchableSpace()]
# Find where to insert the tool.
if position >= 0:
# Find the start of the group by looking for the separator
# preceding this one; then move forward from it.
start = (0 if sep == seps[0]
else self._get_tool_pos(seps[seps.index(sep) - 1]) + 1)
else:
# Move backwards from this separator.
start = self._get_tool_pos(sep) + 1
idx = start + position
if image_file:
bmp = NavigationToolbar2Wx._icon(image_file)
kind = wx.ITEM_NORMAL if not toggle else wx.ITEM_CHECK
tool = self.InsertTool(idx, -1, name, bmp, wx.NullBitmap, kind,
description or "")
else:
size = (self.GetTextExtent(name)[0] + 10, -1)
if toggle:
control = wx.ToggleButton(self, -1, name, size=size)
else:
control = wx.Button(self, -1, name, size=size)
tool = self.InsertControl(idx, control, label=name)
self.Realize()
def handler(event):
self.trigger_tool(name)
if image_file:
self.Bind(wx.EVT_TOOL, handler, tool)
else:
control.Bind(wx.EVT_LEFT_DOWN, handler)
self._toolitems.setdefault(name, [])
self._toolitems[name].append((tool, handler))
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for tool, handler in self._toolitems[name]:
if not tool.IsControl():
self.ToggleTool(tool.Id, toggled)
else:
tool.GetControl().SetValue(toggled)
self.Refresh()
def remove_toolitem(self, name):
for tool, handler in self._toolitems.pop(name, []):
self.DeleteTool(tool.Id)
def set_message(self, s):
self._label_text.SetLabel(s)
@backend_tools._register_tool_class(_FigureCanvasWxBase)
| ToolbarWx |
python | streamlit__streamlit | lib/streamlit/runtime/media_file_manager.py | {
"start": 1979,
"end": 2489
} | class ____:
"""Metadata that the MediaFileManager needs for each file it manages."""
def __init__(self, kind: MediaFileKind = MediaFileKind.MEDIA) -> None:
self._kind = kind
self._is_marked_for_delete = False
@property
def kind(self) -> MediaFileKind:
return self._kind
@property
def is_marked_for_delete(self) -> bool:
return self._is_marked_for_delete
def mark_for_delete(self) -> None:
self._is_marked_for_delete = True
| MediaFileMetadata |
python | pytorch__pytorch | test/test_xpu.py | {
"start": 26435,
"end": 29848
} | class ____(TestAutocast):
# These operators are not implemented on XPU backend and we can NOT fall back
# them to CPU. So we have to skip them at this moment.
# TODO: remove these operators from skip list when they are implemented on XPU backend.
# lstm_cell: The operator 'aten::_thnn_fused_lstm_cell' is not currently implemented for the XPU device
skip_list = ["gru_cell", "lstm_cell"]
def setUp(self):
super().setUp()
self.autocast_lists = AutocastTestLists(torch.device("xpu"))
def tearDown(self):
del self.autocast_lists
super().tearDown()
def test_autocast_torch_fp16(self):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if op in self.skip_list:
skip_test = True # skip unimplemented op
if len(op_with_args) == 3:
skip_test = True # skip cudnn op
if not skip_test:
self._run_autocast_outofplace(
op, args, torch.float16, device="xpu", amp_dtype=torch.float16
)
def test_autocast_torch_bf16(self):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if op in self.skip_list:
skip_test = True # skip unimplemented op
if len(op_with_args) == 3:
skip_test = True # skip cudnn op
if not skip_test:
self._run_autocast_outofplace(op, args, torch.bfloat16, device="xpu")
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(
op, args, torch.float32, device="xpu", amp_dtype=torch.float16
)
def test_autocast_torch_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
self._run_autocast_outofplace(
op,
args,
torch.float32,
device="xpu",
out_type=out_type,
amp_dtype=torch.float16,
)
def test_autocast_checkpointing(self):
model = torch.nn.Sequential(
torch.nn.Linear(8, 8), torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)
).xpu()
input = torch.rand(
(8, 8), device="xpu", dtype=torch.float16, requires_grad=True
)
for reentrant in (True, False):
with torch.autocast("xpu"):
output = checkpoint_sequential(model, 2, input, use_reentrant=reentrant)
self.assertTrue(output.requires_grad)
self.assertTrue(output.dtype is torch.float16)
output.sum().backward()
def test_xpu_autocast_dtype(self):
dtype = torch.get_autocast_dtype("xpu")
self.assertEqual(dtype, torch.float16)
mat0_fp32 = torch.randn((10, 10), dtype=torch.float32, device="xpu")
mat1_fp32 = torch.randn((10, 10), dtype=torch.float32, device="xpu")
with torch.amp.autocast("xpu"):
result = torch.mm(mat0_fp32, mat1_fp32)
self.assertEqual(result.dtype, torch.float16)
@unittest.skipIf(not TEST_XPU, "XPU not available, skipping tests")
| TestXpuAutocast |
python | ZoranPandovski__al-go-rithms | data_structures/b_tree/Python/sum_root_leaf.py | {
"start": 230,
"end": 1222
} | class ____():
def root_leaf(self,root, given_sum, val, path_list):
if root == None:
return
if root.left == None and root.right == None:
if (val + root.val) == given_sum:
path.append(root.val)
return True
else:
return False
if(self.root_leaf(root.left,given_sum,(val + root.val),path_list)):
path.append(root.val)
return True
if(self.root_leaf(root.right,given_sum,(val + root.val),path_list)):
path.append(root.val)
return True
return False
### Testcases ###
root = None
tree = binary_search_tree.Tree()
root = tree.insert(root, 4)
root = tree.insert(root, 2)
root = tree.insert(root, 1)
root = tree.insert(root, 3)
root = tree.insert(root, 5)
#tree.preorder(root)
obj = SumRootLeaf()
path = []
if(obj.root_leaf(root,9,0,path)):
print(path)
else:
print(None)
| SumRootLeaf |
python | modin-project__modin | modin/experimental/xgboost/utils.py | {
"start": 938,
"end": 2494
} | class ____:
"""
A manager class that controls lifecycle of `xgb.RabitTracker`.
All workers that are used for distributed training will connect to
Rabit Tracker stored in this class.
Parameters
----------
num_workers : int
Number of workers of `self.rabit_tracker`.
host_ip : str
IP address of host that creates `self` object.
"""
# TODO: Specify type of host_ip
def __init__(self, num_workers: int, host_ip):
self._num_workers = num_workers
self.env = {"DMLC_NUM_WORKER": self._num_workers}
self.rabit_tracker = xgb.RabitTracker(
host_ip=host_ip, n_workers=self._num_workers
)
def __enter__(self):
"""
Entry point of manager.
Updates Rabit Tracker environment, starts `self.rabit_tracker`.
Returns
-------
dict
Dict with Rabit Tracker environment.
"""
self.env.update(self.rabit_tracker.worker_envs())
self.rabit_tracker.start(self._num_workers)
return self.env
# TODO: (type, value, traceback) -> *args
def __exit__(self, type, value, traceback):
"""
Exit point of manager.
Finishes `self.rabit_tracker`.
Parameters
----------
type : exception type
Type of exception, captured by manager.
value : Exception
Exception value.
traceback : TracebackType
Traceback of exception.
"""
self.rabit_tracker.join()
| RabitContextManager |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis19.py | {
"start": 315,
"end": 1451
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis19.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [43813504, 45705472]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_x_axis({"label_position": "high"})
chart.set_y_axis({"label_position": "low"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | plotly__plotly.py | plotly/graph_objs/surface/contours/_y.py | {
"start": 233,
"end": 9979
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "surface.contours"
_path_str = "surface.contours.y"
_valid_props = {
"color",
"end",
"highlight",
"highlightcolor",
"highlightwidth",
"project",
"show",
"size",
"start",
"usecolormap",
"width",
}
@property
def color(self):
"""
Sets the color of the contour lines.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def end(self):
"""
Sets the end contour level value. Must be more than
`contours.start`
The 'end' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["end"]
@end.setter
def end(self, val):
self["end"] = val
@property
def highlight(self):
"""
Determines whether or not contour lines about the y dimension
are highlighted on hover.
The 'highlight' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["highlight"]
@highlight.setter
def highlight(self, val):
self["highlight"] = val
@property
def highlightcolor(self):
"""
Sets the color of the highlighted contour lines.
The 'highlightcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["highlightcolor"]
@highlightcolor.setter
def highlightcolor(self, val):
self["highlightcolor"] = val
@property
def highlightwidth(self):
"""
Sets the width of the highlighted contour lines.
The 'highlightwidth' property is a number and may be specified as:
- An int or float in the interval [1, 16]
Returns
-------
int|float
"""
return self["highlightwidth"]
@highlightwidth.setter
def highlightwidth(self, val):
self["highlightwidth"] = val
@property
def project(self):
"""
The 'project' property is an instance of Project
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.contours.y.Project`
- A dict of string/value properties that will be passed
to the Project constructor
Returns
-------
plotly.graph_objs.surface.contours.y.Project
"""
return self["project"]
@project.setter
def project(self, val):
self["project"] = val
@property
def show(self):
"""
Determines whether or not contour lines about the y dimension
are drawn.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
@property
def size(self):
"""
Sets the step between each contour level. Must be positive.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def start(self):
"""
Sets the starting contour level value. Must be less than
`contours.end`
The 'start' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["start"]
@start.setter
def start(self, val):
self["start"] = val
@property
def usecolormap(self):
"""
An alternate to "color". Determines whether or not the contour
lines are colored using the trace "colorscale".
The 'usecolormap' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["usecolormap"]
@usecolormap.setter
def usecolormap(self, val):
self["usecolormap"] = val
@property
def width(self):
"""
Sets the width of the contour lines.
The 'width' property is a number and may be specified as:
- An int or float in the interval [1, 16]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the color of the contour lines.
end
Sets the end contour level value. Must be more than
`contours.start`
highlight
Determines whether or not contour lines about the y
dimension are highlighted on hover.
highlightcolor
Sets the color of the highlighted contour lines.
highlightwidth
Sets the width of the highlighted contour lines.
project
:class:`plotly.graph_objects.surface.contours.y.Project
` instance or dict with compatible properties
show
Determines whether or not contour lines about the y
dimension are drawn.
size
Sets the step between each contour level. Must be
positive.
start
Sets the starting contour level value. Must be less
than `contours.end`
usecolormap
An alternate to "color". Determines whether or not the
contour lines are colored using the trace "colorscale".
width
Sets the width of the contour lines.
"""
def __init__(
self,
arg=None,
color=None,
end=None,
highlight=None,
highlightcolor=None,
highlightwidth=None,
project=None,
show=None,
size=None,
start=None,
usecolormap=None,
width=None,
**kwargs,
):
"""
Construct a new Y object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.surface.contours.Y`
color
Sets the color of the contour lines.
end
Sets the end contour level value. Must be more than
`contours.start`
highlight
Determines whether or not contour lines about the y
dimension are highlighted on hover.
highlightcolor
Sets the color of the highlighted contour lines.
highlightwidth
Sets the width of the highlighted contour lines.
project
:class:`plotly.graph_objects.surface.contours.y.Project
` instance or dict with compatible properties
show
Determines whether or not contour lines about the y
dimension are drawn.
size
Sets the step between each contour level. Must be
positive.
start
Sets the starting contour level value. Must be less
than `contours.end`
usecolormap
An alternate to "color". Determines whether or not the
contour lines are colored using the trace "colorscale".
width
Sets the width of the contour lines.
Returns
-------
Y
"""
super().__init__("y")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.surface.contours.Y
constructor must be a dict or
an instance of :class:`plotly.graph_objs.surface.contours.Y`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("end", arg, end)
self._set_property("highlight", arg, highlight)
self._set_property("highlightcolor", arg, highlightcolor)
self._set_property("highlightwidth", arg, highlightwidth)
self._set_property("project", arg, project)
self._set_property("show", arg, show)
self._set_property("size", arg, size)
self._set_property("start", arg, start)
self._set_property("usecolormap", arg, usecolormap)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Y |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/glue_catalog.py | {
"start": 1061,
"end": 9213
} | class ____(AwsBaseHook):
"""
Interact with AWS Glue Data Catalog.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("glue") <Glue.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
- `AWS Glue Data Catalog \
<https://docs.aws.amazon.com/glue/latest/dg/components-overview.html#data-catalog-intro>`__
"""
def __init__(self, *args, **kwargs):
super().__init__(client_type="glue", *args, **kwargs)
async def async_get_partitions(
self,
client: Any,
database_name: str,
table_name: str,
expression: str = "",
page_size: int | None = None,
max_items: int | None = 1,
) -> set[tuple]:
"""
Asynchronously retrieves the partition values for a table.
:param database_name: The name of the catalog database where the partitions reside.
:param table_name: The name of the partitions' table.
:param expression: An expression filtering the partitions to be returned.
Please see official AWS documentation for further information.
https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartitions
:param page_size: pagination size
:param max_items: maximum items to return
:return: set of partition values where each value is a tuple since
a partition may be composed of multiple columns. For example:
``{('2018-01-01','1'), ('2018-01-01','2')}``
"""
config = {
"PageSize": page_size,
"MaxItems": max_items,
}
paginator = client.get_paginator("get_partitions")
partitions = set()
async for page in paginator.paginate(
DatabaseName=database_name, TableName=table_name, Expression=expression, PaginationConfig=config
):
for partition in page["Partitions"]:
partitions.add(tuple(partition["Values"]))
return partitions
def get_partitions(
self,
database_name: str,
table_name: str,
expression: str = "",
page_size: int | None = None,
max_items: int | None = None,
) -> set[tuple]:
"""
Retrieve the partition values for a table.
.. seealso::
- :external+boto3:py:class:`Glue.Paginator.GetPartitions`
:param database_name: The name of the catalog database where the partitions reside.
:param table_name: The name of the partitions' table.
:param expression: An expression filtering the partitions to be returned.
Please see official AWS documentation for further information.
https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartitions
:param page_size: pagination size
:param max_items: maximum items to return
:return: set of partition values where each value is a tuple since
a partition may be composed of multiple columns. For example:
``{('2018-01-01','1'), ('2018-01-01','2')}``
"""
config = {
"PageSize": page_size,
"MaxItems": max_items,
}
paginator = self.get_conn().get_paginator("get_partitions")
response = paginator.paginate(
DatabaseName=database_name, TableName=table_name, Expression=expression, PaginationConfig=config
)
partitions = set()
for page in response:
for partition in page["Partitions"]:
partitions.add(tuple(partition["Values"]))
return partitions
def check_for_partition(self, database_name: str, table_name: str, expression: str) -> bool:
"""
Check whether a partition exists.
.. code-block:: python
hook = GlueCatalogHook()
t = "static_babynames_partitioned"
hook.check_for_partition("airflow", t, "ds='2015-01-01'")
:param database_name: Name of hive database (schema) @table belongs to
:param table_name: Name of hive table @partition belongs to
:expression: Expression that matches the partitions to check for, e.g.: ``a = 'b' AND c = 'd'``
"""
partitions = self.get_partitions(database_name, table_name, expression, max_items=1)
return bool(partitions)
def get_table(self, database_name: str, table_name: str) -> dict:
"""
Get the information of the table.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.get_table`
.. code-block:: python
hook = GlueCatalogHook()
r = hook.get_table("db", "table_foo")
r["Name"] = "table_foo"
:param database_name: Name of hive database (schema) @table belongs to
:param table_name: Name of hive table
"""
result = self.get_conn().get_table(DatabaseName=database_name, Name=table_name)
return result["Table"]
def get_table_location(self, database_name: str, table_name: str) -> str:
"""
Get the physical location of the table.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.get_table`
:param database_name: Name of hive database (schema) @table belongs to
:param table_name: Name of hive table
"""
table = self.get_table(database_name, table_name)
return table["StorageDescriptor"]["Location"]
def get_partition(self, database_name: str, table_name: str, partition_values: list[str]) -> dict:
"""
Get a Partition.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.get_partition`
.. code-block:: python
hook = GlueCatalogHook()
partition = hook.get_partition("db", "table", ["string"])
partition["Values"]
:param database_name: Database name
:param table_name: Database's Table name
:param partition_values: List of utf-8 strings that define the partition
Please see official AWS documentation for further information.
https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartition
:raises: AirflowException
"""
try:
response = self.get_conn().get_partition(
DatabaseName=database_name, TableName=table_name, PartitionValues=partition_values
)
return response["Partition"]
except ClientError as e:
self.log.error("Client error: %s", e)
raise AirflowException("AWS request failed, check logs for more info")
def create_partition(self, database_name: str, table_name: str, partition_input: dict) -> dict:
"""
Create a new Partition.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.create_partition`
.. code-block:: python
hook = GlueCatalogHook()
partition_input = {"Values": []}
hook.create_partition(database_name="db", table_name="table", partition_input=partition_input)
:param database_name: Database name
:param table_name: Database's Table name
:param partition_input: Definition of how the partition is created
Please see official AWS documentation for further information.
https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-CreatePartition
:raises: AirflowException
"""
try:
return self.get_conn().create_partition(
DatabaseName=database_name, TableName=table_name, PartitionInput=partition_input
)
except ClientError as e:
self.log.error("Client error: %s", e)
raise AirflowException("AWS request failed, check logs for more info")
| GlueCatalogHook |
python | getsentry__sentry | src/sentry/search/events/filter.py | {
"start": 1554,
"end": 30485
} | class ____(TypedDict, total=False):
organization_id: int
project_id: Sequence[int]
environment: Sequence[str] | None
environment_id: list[int] | None
def translate_transaction_status(val: str) -> str:
if val not in SPAN_STATUS_NAME_TO_CODE:
raise InvalidSearchQuery(
f"Invalid value {val} for transaction.status condition. Accepted "
f"values are {oxfordize_list([str(key) for key in SPAN_STATUS_NAME_TO_CODE.keys()])}"
)
return SPAN_STATUS_NAME_TO_CODE[val]
def to_list[T](value: list[T] | T) -> list[T]:
if isinstance(value, list):
return value
return [value]
def _environment_filter_converter(
search_filter: SearchFilter,
name: str,
params: FilterConvertParams | None,
):
# conditions added to env_conditions are OR'd
env_conditions: list[list[object]] = []
value = search_filter.value.value
values = set(value if isinstance(value, (list, tuple)) else [value])
# the "no environment" environment is null in snuba
if "" in values:
values.remove("")
operator = "IS NULL" if search_filter.operator == "=" else "IS NOT NULL"
env_conditions.append(["environment", operator, None])
if len(values) == 1:
operator = "=" if search_filter.operator in EQUALITY_OPERATORS else "!="
env_conditions.append(["environment", operator, values.pop()])
elif values:
operator = "IN" if search_filter.operator in EQUALITY_OPERATORS else "NOT IN"
env_conditions.append(["environment", operator, values])
return env_conditions
def _message_filter_converter(
search_filter: SearchFilter,
name: str,
params: FilterConvertParams | None,
):
value = search_filter.value.value
if search_filter.value.is_wildcard():
# XXX: We don't want the '^$' values at the beginning and end of
# the regex since we want to find the pattern anywhere in the
# message. Strip off here
value = search_filter.value.value[1:-1]
return [["match", ["message", f"'(?i){value}'"]], search_filter.operator, 1]
elif value == "":
operator = "=" if search_filter.operator == "=" else "!="
return [["equals", ["message", f"{value}"]], operator, 1]
else:
# https://clickhouse.yandex/docs/en/query_language/functions/string_search_functions/#position-haystack-needle
# positionCaseInsensitive returns 0 if not found and an index of 1 or more if found
# so we should flip the operator here
operator = "!=" if search_filter.operator in EQUALITY_OPERATORS else "="
if search_filter.is_in_filter:
# XXX: This `toString` usage is unnecessary, but we need it in place to
# trick the legacy Snuba language into not treating `message` as a
# function. Once we switch over to snql it can be removed.
return [
[
"multiSearchFirstPositionCaseInsensitive",
[["toString", ["message"]], ["array", [f"'{v}'" for v in value]]],
],
operator,
0,
]
# make message search case insensitive
return [["positionCaseInsensitive", ["message", f"'{value}'"]], operator, 0]
def _transaction_status_filter_converter(
search_filter: SearchFilter,
name: str,
params: FilterConvertParams | None,
):
# Handle "has" queries
if search_filter.value.raw_value == "":
return [["isNull", [name]], search_filter.operator, 1]
if search_filter.is_in_filter:
internal_value = [
translate_transaction_status(val) for val in search_filter.value.raw_value
]
else:
internal_value = translate_transaction_status(search_filter.value.raw_value)
return [name, search_filter.operator, internal_value]
def _issue_id_filter_converter(
search_filter: SearchFilter,
name: str,
params: FilterConvertParams | None,
):
value = search_filter.value.value
# Handle "has" queries
if (
search_filter.value.raw_value == ""
or search_filter.is_in_filter
and [v for v in value if not v]
):
# The state of having no issues is represented differently on transactions vs
# other events. On the transactions table, it is represented by 0 whereas it is
# represented by NULL everywhere else. We use coalesce here so we can treat this
# consistently
name = ["coalesce", [name, 0]]
if search_filter.is_in_filter:
value = [v if v else 0 for v in value]
else:
value = 0
# Skip isNull check on group_id value as we want to
# allow snuba's prewhere optimizer to find this condition.
return [name, search_filter.operator, value]
def _user_display_filter_converter(
search_filter: SearchFilter,
name: str,
params: FilterConvertParams | None,
):
value = search_filter.value.value
user_display_expr = FIELD_ALIASES[USER_DISPLAY_ALIAS].get_expression(params)
# Handle 'has' condition
if search_filter.value.raw_value == "":
return [["isNull", [user_display_expr]], search_filter.operator, 1]
if search_filter.value.is_wildcard():
return [
["match", [user_display_expr, f"'(?i){value}'"]],
search_filter.operator,
1,
]
return [user_display_expr, search_filter.operator, value]
def _error_unhandled_filter_converter(
search_filter: SearchFilter,
name: str,
params: FilterConvertParams | None,
):
value = search_filter.value.value
# This field is the inversion of error.handled, otherwise the logic is the same.
if search_filter.value.raw_value == "":
output = 0 if search_filter.operator == "!=" else 1
return [["isHandled", []], "=", output]
if value in ("1", 1):
return [["notHandled", []], "=", 1]
if value in ("0", 0):
return [["isHandled", []], "=", 1]
raise InvalidSearchQuery(
"Invalid value for error.unhandled condition. Accepted values are 1, 0"
)
def _error_handled_filter_converter(
search_filter: SearchFilter,
name: str,
params: FilterConvertParams | None,
):
value = search_filter.value.value
# Treat has filter as equivalent to handled
if search_filter.value.raw_value == "":
output = 1 if search_filter.operator == "!=" else 0
return [["isHandled", []], "=", output]
# Null values and 1 are the same, and both indicate a handled error.
if value in ("1", 1):
return [["isHandled", []], "=", 1]
if value in ("0", 0):
return [["notHandled", []], "=", 1]
raise InvalidSearchQuery("Invalid value for error.handled condition. Accepted values are 1, 0")
def _team_key_transaction_filter_converter(
search_filter: SearchFilter,
name: str,
params: FilterConvertParams | None,
):
value = search_filter.value.value
key_transaction_expr = FIELD_ALIASES[TEAM_KEY_TRANSACTION_ALIAS].get_field(params)
if search_filter.value.raw_value == "":
operator = "!=" if search_filter.operator == "!=" else "="
return [key_transaction_expr, operator, 0]
if value in ("1", 1):
return [key_transaction_expr, "=", 1]
if value in ("0", 0):
return [key_transaction_expr, "=", 0]
raise InvalidSearchQuery(
"Invalid value for team_key_transaction condition. Accepted values are 1, 0"
)
def _flip_field_sort(field: str):
return field[1:] if field.startswith("-") else f"-{field}"
def _release_stage_filter_converter(
search_filter: SearchFilter,
name: str,
params: FilterConvertParams | None,
) -> list[Any]:
"""
Parses a release stage search and returns a snuba condition to filter to the
requested releases.
"""
# TODO: Filter by project here as well. It's done elsewhere, but could critically limit versions
# for orgs with thousands of projects, each with their own releases (potentially drowning out ones we care about)
if not params or "organization_id" not in params:
raise ValueError("organization_id is a required param")
organization_id = params["organization_id"]
project_ids = params.get("project_id")
environments = params.get("environment")
qs = (
Release.objects.filter_by_stage(
organization_id,
search_filter.operator,
search_filter.value.value,
project_ids=project_ids,
environments=environments,
)
.values_list("version", flat=True)
.order_by("date_added")[:MAX_SEARCH_RELEASES]
)
versions = list(qs)
final_operator = "IN"
if not versions:
# XXX: Just return a filter that will return no results if we have no versions
versions = [SEMVER_EMPTY_RELEASE]
return ["release", final_operator, versions]
def _semver_filter_converter(
search_filter: SearchFilter,
name: str,
params: FilterConvertParams | None,
) -> list[Any]:
"""
Parses a semver query search and returns a snuba condition to filter to the
requested releases.
Since we only have semver information available in Postgres currently, we query
Postgres and return a list of versions to include/exclude. For most customers this
will work well, however some have extremely large numbers of releases, and we can't
pass them all to Snuba. To try and serve reasonable results, we:
- Attempt to query based on the initial semver query. If this returns
MAX_SEMVER_SEARCH_RELEASES results, we invert the query and see if it returns
fewer results. If so, we use a `NOT IN` snuba condition instead of an `IN`.
- Order the results such that the versions we return are semantically closest to
the passed filter. This means that when searching for `>= 1.0.0`, we'll return
version 1.0.0, 1.0.1, 1.1.0 before 9.x.x.
"""
if not params or "organization_id" not in params:
raise ValueError("organization_id is a required param")
organization_id: int = params["organization_id"]
project_ids: list[int] | None = params.get("project_id")
# We explicitly use `raw_value` here to avoid converting wildcards to shell values
version: str = search_filter.value.raw_value
operator: str = search_filter.operator
# Note that we sort this such that if we end up fetching more than
# MAX_SEMVER_SEARCH_RELEASES, we will return the releases that are closest to
# the passed filter.
order_by = Release.SEMVER_COLS
if operator.startswith("<"):
order_by = list(map(_flip_field_sort, order_by))
qs = (
Release.objects.filter_by_semver(
organization_id,
parse_semver(version, operator),
project_ids=project_ids,
)
.values_list("version", flat=True)
.order_by(*order_by)[:MAX_SEARCH_RELEASES]
)
versions = list(qs)
final_operator = "IN"
if len(versions) == MAX_SEARCH_RELEASES:
# We want to limit how many versions we pass through to Snuba. If we've hit
# the limit, make an extra query and see whether the inverse has fewer ids.
# If so, we can do a NOT IN query with these ids instead. Otherwise, we just
# do our best.
operator = OPERATOR_NEGATION_MAP[operator]
# Note that the `order_by` here is important for index usage. Postgres seems
# to seq scan with this query if the `order_by` isn't included, so we
# include it even though we don't really care about order for this query
qs_flipped = (
Release.objects.filter_by_semver(organization_id, parse_semver(version, operator))
.order_by(*map(_flip_field_sort, order_by))
.values_list("version", flat=True)[:MAX_SEARCH_RELEASES]
)
exclude_versions = list(qs_flipped)
if exclude_versions and len(exclude_versions) < len(versions):
# Do a negative search instead
final_operator = "NOT IN"
versions = exclude_versions
if not versions:
# XXX: Just return a filter that will return no results if we have no versions
versions = [SEMVER_EMPTY_RELEASE]
return ["release", final_operator, versions]
def _semver_package_filter_converter(
search_filter: SearchFilter,
name: str,
params: FilterConvertParams | None,
) -> list[Any]:
"""
Applies a semver package filter to the search. Note that if the query returns more than
`MAX_SEARCH_RELEASES` here we arbitrarily return a subset of the releases.
"""
if not params or "organization_id" not in params:
raise ValueError("organization_id is a required param")
organization_id: int = params["organization_id"]
project_ids: list[int] | None = params.get("project_id")
package: str | list[str] = search_filter.value.raw_value
versions = list(
Release.objects.filter_by_semver(
organization_id,
SemverFilter("exact", [], package),
project_ids=project_ids,
).values_list("version", flat=True)[:MAX_SEARCH_RELEASES]
)
if not versions:
# XXX: Just return a filter that will return no results if we have no versions
versions = [SEMVER_EMPTY_RELEASE]
return ["release", "IN", versions]
def _semver_build_filter_converter(
search_filter: SearchFilter,
name: str,
params: FilterConvertParams | None,
) -> list[Any]:
"""
Applies a semver build filter to the search. Note that if the query returns more than
`MAX_SEARCH_RELEASES` here we arbitrarily return a subset of the releases.
"""
if not params or "organization_id" not in params:
raise ValueError("organization_id is a required param")
organization_id: int = params["organization_id"]
project_ids: list[int] | None = params.get("project_id")
build: str = search_filter.value.raw_value
operator, negated = handle_operator_negation(search_filter.operator)
try:
django_op = OPERATOR_TO_DJANGO[operator]
except KeyError:
raise InvalidSearchQuery("Invalid operation 'IN' for semantic version filter.")
versions = list(
Release.objects.filter_by_semver_build(
organization_id,
django_op,
build,
project_ids=project_ids,
negated=negated,
).values_list("version", flat=True)[:MAX_SEARCH_RELEASES]
)
if not versions:
# XXX: Just return a filter that will return no results if we have no versions
versions = [SEMVER_EMPTY_RELEASE]
return ["release", "IN", versions]
def handle_operator_negation(operator: str) -> tuple[str, bool]:
negated = False
if operator == "!=":
negated = True
operator = "="
return operator, negated
def parse_semver(version, operator) -> SemverFilter:
    """
    Attempts to parse a release version using our semver syntax. version should be in
    format `<package_name>@<version>` or `<version>`, where package_name is a string and
    version is a version string matching semver format (https://semver.org/). We've
    slightly extended this format to allow up to 4 integers. EG
     - sentry@1.2.3.4
     - sentry@1.2.3.4-alpha
     - 1.2.3.4
     - 1.2.3.4-alpha
     - 1.*

    Returns a `SemverFilter`; raises `InvalidSearchQuery` for unsupported
    operators or unparseable version parts.
    """
    (operator, negated) = handle_operator_negation(operator)
    try:
        operator = OPERATOR_TO_DJANGO[operator]
    except KeyError:
        # Bug fix: the message previously hardcoded 'IN' even when the failing
        # operator was e.g. 'NOT IN'; report the operator actually supplied.
        raise InvalidSearchQuery(f"Invalid operation '{operator}' for semantic version filter.")
    # Versions without an explicit package get a fake one so that relay's
    # parser accepts them; it is stripped back out below.
    version = version if "@" in version else f"{SEMVER_FAKE_PACKAGE}@{version}"
    parsed = parse_release_relay(version, json_loads=orjson.loads)
    parsed_version = parsed.get("version_parsed")
    if parsed_version:
        # Convert `pre` to always be a string
        prerelease = parsed_version["pre"] if parsed_version["pre"] else ""
        semver_filter = SemverFilter(
            operator,
            [
                parsed_version["major"],
                parsed_version["minor"],
                parsed_version["patch"],
                parsed_version["revision"],
                # Releases with a prerelease sort before final releases, so the
                # 5th component is 0 for prereleases and 1 otherwise.
                0 if prerelease else 1,
                prerelease,
            ],
            negated=negated,
        )
        if parsed["package"] and parsed["package"] != SEMVER_FAKE_PACKAGE:
            semver_filter.package = parsed["package"]
        return semver_filter
    else:
        # Try to parse as a wildcard match
        package, version = version.split("@", 1)
        version_parts = []
        if version:
            # Collect the leading numeric components up to the first wildcard;
            # everything after the wildcard is ignored.
            for part in version.split(".", 3):
                if part in SEMVER_WILDCARDS:
                    break
                try:
                    # We assume all ints for a wildcard match - not handling prerelease as
                    # part of these
                    version_parts.append(int(part))
                except ValueError:
                    raise InvalidSearchQuery(INVALID_SEMVER_MESSAGE)
        package = package if package and package != SEMVER_FAKE_PACKAGE else None
        return SemverFilter("exact", version_parts, package, negated)
# Search keys that need special handling, mapped to their converter. Each
# converter receives (search_filter, name, params) and returns a legacy Snuba
# condition, or None when the filter produces no condition.
key_conversion_map: dict[
    str, Callable[[SearchFilter, str, FilterConvertParams], Sequence[Any] | None]
] = {
    "environment": _environment_filter_converter,
    "message": _message_filter_converter,
    TRANSACTION_STATUS_ALIAS: _transaction_status_filter_converter,
    "issue.id": _issue_id_filter_converter,
    USER_DISPLAY_ALIAS: _user_display_filter_converter,
    ERROR_UNHANDLED_ALIAS: _error_unhandled_filter_converter,
    "error.handled": _error_handled_filter_converter,
    TEAM_KEY_TRANSACTION_ALIAS: _team_key_transaction_filter_converter,
    RELEASE_STAGE_ALIAS: _release_stage_filter_converter,
    SEMVER_ALIAS: _semver_filter_converter,
    SEMVER_PACKAGE_ALIAS: _semver_package_filter_converter,
    SEMVER_BUILD_ALIAS: _semver_build_filter_converter,
}
def convert_search_filter_to_snuba_query(
    search_filter: AggregateFilter | SearchFilter,
    key: str | None = None,
    params: FilterConvertParams | None = None,
) -> Sequence[Any] | None:
    """Convert a parsed search filter into a legacy Snuba condition.

    Returns ``None`` when the field needs no conversion, otherwise a condition
    in the legacy Snuba list format (``[lhs, operator, rhs]``, or a list of
    OR-ed conditions). ``key`` overrides the filter's own key name; ``params``
    carries request context (project ids, organization id, etc.) for the
    converters that need it. Raises ``InvalidSearchQuery`` on invalid values
    (bad span/event/trace ids, disallowed wildcards).
    """
    name = search_filter.key.name if key is None else key
    value = search_filter.value.value
    params = params or {}
    # We want to use group_id elsewhere so shouldn't be removed from the dataset
    # but if a user has a tag with the same name we want to make sure that works
    if name in {"group_id"}:
        name = f"tags[{name}]"
    if name in NO_CONVERSION_FIELDS:
        return None
    elif name in key_conversion_map:
        # Field has a dedicated converter (environment, message, semver, ...).
        return key_conversion_map[name](search_filter, name, params)
    elif (
        name in ARRAY_FIELDS
        and search_filter.value.is_wildcard()
        and not search_filter.value.is_str_sequence()
    ):
        # Escape and convert meta characters for LIKE expressions.
        raw_value = search_filter.value.raw_value
        assert isinstance(raw_value, str), raw_value
        # TODO: There are rare cases where this chaining don't
        # work. For example, a wildcard like '\**' will incorrectly
        # be replaced with '\%%'.
        like_value = (
            # Slashes have to be double escaped so they are
            # interpreted as a string literal.
            raw_value.replace("\\", "\\\\")
            .replace("%", "\\%")
            .replace("_", "\\_")
            .replace("*", "%")
        )
        operator = "LIKE" if search_filter.operator == "=" else "NOT LIKE"
        return [name, operator, like_value]
    elif name in ARRAY_FIELDS and search_filter.is_in_filter:
        operator = "=" if search_filter.operator == "IN" else "!="
        # XXX: This `arrayConcat` usage is unnecessary, but we need it in place to
        # trick the legacy Snuba language into not treating `name` as a
        # function. Once we switch over to snql it can be removed.
        return [
            ["hasAny", [["arrayConcat", [name]], ["array", [f"'{v}'" for v in value]]]],
            operator,
            1,
        ]
    elif name in ARRAY_FIELDS and search_filter.value.raw_value == "":
        # Empty value on an array field is an existence ("has") check.
        return [["notEmpty", [name]], "=", 1 if search_filter.operator == "!=" else 0]
    else:
        # timestamp{,.to_{hour,day}} need a datetime string
        # last_seen needs an integer
        if isinstance(value, datetime) and name not in {
            "timestamp",
            "timestamp.to_hour",
            "timestamp.to_day",
        }:
            value = int(value.timestamp()) * 1000
        if name in {"trace.span", "trace.parent_span"}:
            if search_filter.value.is_wildcard():
                raise InvalidSearchQuery(WILDCARD_NOT_ALLOWED.format(name))
            if not search_filter.value.is_span_id():
                raise InvalidSearchQuery(INVALID_SPAN_ID.format(name))
        # Validate event ids and trace ids are uuids
        if name in {"id", "trace"}:
            if search_filter.value.is_wildcard():
                raise InvalidSearchQuery(WILDCARD_NOT_ALLOWED.format(name))
            elif not search_filter.value.is_event_id():
                label = "Filter ID" if name == "id" else "Filter Trace ID"
                raise InvalidSearchQuery(INVALID_ID_DETAILS.format(label))
        # most field aliases are handled above but timestamp.to_{hour,day} are
        # handled here
        if name in FIELD_ALIASES:
            name = FIELD_ALIASES[name].get_field(params)
        # Tags are never null, but promoted tags are columns and so can be null.
        # To handle both cases, use `ifNull` to convert to an empty string and
        # compare so we need to check for empty values.
        if search_filter.key.is_tag:
            name = ["ifNull", [name, "''"]]
        # Handle checks for existence
        if search_filter.operator in ("=", "!=") and search_filter.value.value == "":
            if search_filter.key.is_tag:
                return [name, search_filter.operator, value]
            else:
                # If not a tag, we can just check that the column is null.
                return [["isNull", [name]], search_filter.operator, 1]
        is_null_condition = None
        # TODO(wmak): Skip this for all non-nullable keys not just event.type
        if (
            search_filter.operator in ("!=", "NOT IN")
            and not search_filter.key.is_tag
            and name != "event.type"
        ):
            # Handle null columns on inequality comparisons. Any comparison
            # between a value and a null will result to null, so we need to
            # explicitly check for whether the condition is null, and OR it
            # together with the inequality check.
            # We don't need to apply this for tags, since if they don't exist
            # they'll always be an empty string.
            is_null_condition = [["isNull", [name]], "=", 1]
        # If we have a mixture of wildcards and non-wildcards in a [] set, we must
        # group them into their own sets to apply the appropriate operators, and
        # then 'OR' them together.
        strs = search_filter.value.split_wildcards()
        if strs is not None and len(strs[1]) > 0:
            (non_wildcards, wildcards) = strs
            operator = "="
            if search_filter.operator == "NOT IN":
                operator = "!="
            # One recursive conversion per wildcard value; these become OR-ed
            # together by returning them as a list below.
            condition = [
                convert_search_filter_to_snuba_query(
                    SearchFilter(search_filter.key, operator, SearchValue(wc))
                )
                for wc in wildcards
            ]
            if len(non_wildcards) > 0:
                non_wcs = convert_search_filter_to_snuba_query(
                    SearchFilter(
                        search_filter.key, search_filter.operator, SearchValue(non_wildcards)
                    )
                )
                condition.append(non_wcs)
        elif search_filter.value.is_wildcard():
            # mypy complains if you just use the literal; int isn't an Any, somehow?
            match_val: Any = 1
            # Wildcards compile to a case-insensitive regex match.
            condition = [["match", [name, f"'(?i){value}'"]], search_filter.operator, match_val]
        else:
            condition = [name, search_filter.operator, value]
        # We only want to return as a list if we have the check for null
        # present. Returning as a list causes these conditions to be ORed
        # together. Otherwise just return the raw condition, so that it can be
        # used correctly in aggregates.
        if is_null_condition:
            return [is_null_condition, condition]
        else:
            return condition
def format_search_filter(term, params):
    """Convert one search term into Snuba conditions, resolving field aliases.

    Handles the aliases that need database lookups or special value handling
    (project slug -> project id, issue short id -> group id, release
    "latest" expansion); everything else is delegated to
    `convert_search_filter_to_snuba_query`.

    Returns a ``(conditions, projects_to_filter, group_ids)`` tuple:
    ``conditions`` is a list of Snuba conditions, ``projects_to_filter`` the
    project ids matched by an equality project filter (used to avoid duplicate
    project-id conditions), and ``group_ids`` the group ids from an
    ``issue.id`` equality filter (or None).
    """
    projects_to_filter = []  # Used to avoid doing multiple conditions on project ID
    conditions = []
    group_ids = None
    name = term.key.name
    value = term.value.value
    if name in (PROJECT_ALIAS, PROJECT_NAME_ALIAS):
        if term.operator == "=" and value == "":
            raise InvalidSearchQuery("Invalid query for 'has' search: 'project' cannot be empty.")
        slugs = to_list(value)
        # Only projects the request has access to (params["project_id"]) can match.
        projects = {
            p.slug: p.id
            for p in Project.objects.filter(id__in=params.get("project_id", []), slug__in=slugs)
        }
        missing = [slug for slug in slugs if slug not in projects]
        if missing:
            if term.operator in EQUALITY_OPERATORS:
                raise InvalidSearchQuery(
                    f"Invalid query. Project(s) {oxfordize_list(missing)} do not exist or are not actively selected."
                )
        project_ids = list(sorted(projects.values()))
        if project_ids:
            # Create a new search filter with the correct values
            term = SearchFilter(
                SearchKey("project_id"),
                term.operator,
                SearchValue(project_ids if term.is_in_filter else project_ids[0]),
            )
            converted_filter = convert_search_filter_to_snuba_query(term)
            if converted_filter:
                if term.operator in EQUALITY_OPERATORS:
                    projects_to_filter = project_ids
                conditions.append(converted_filter)
    elif name == ISSUE_ID_ALIAS and value != "":
        # A blank term value means that this is a 'has' filter
        if term.operator in EQUALITY_OPERATORS:
            group_ids = to_list(value)
        else:
            converted_filter = convert_search_filter_to_snuba_query(term, params=params)
            if converted_filter:
                conditions.append(converted_filter)
    elif name == ISSUE_ALIAS:
        operator = term.operator
        value = to_list(value)
        # `unknown` is a special value for when there is no issue associated with the event
        group_short_ids = [v for v in value if v and v != "unknown"]
        filter_values = ["" for v in value if not v or v == "unknown"]
        if group_short_ids and params and "organization_id" in params:
            # Resolve issue short ids (e.g. PROJ-123) to numeric group ids.
            try:
                groups = Group.objects.by_qualified_short_id_bulk(
                    params["organization_id"],
                    group_short_ids,
                )
            except Exception:
                raise InvalidSearchQuery(f"Invalid value '{group_short_ids}' for 'issue:' filter")
            else:
                filter_values.extend(sorted(g.id for g in groups))
        # Rewrite the issue: filter as an issue.id: filter over the resolved ids.
        term = SearchFilter(
            SearchKey("issue.id"),
            operator,
            SearchValue(filter_values if term.is_in_filter else filter_values[0]),
        )
        converted_filter = convert_search_filter_to_snuba_query(term)
        conditions.append(converted_filter)
    elif (
        name == RELEASE_ALIAS
        and params
        and (value == "latest" or term.is_in_filter and any(v == "latest" for v in value))
    ):
        # Expand "latest" into concrete release versions for the given
        # projects/environments; other values pass through unchanged.
        value = [
            part
            for v in to_list(value)
            for part in parse_release(
                v,
                params["project_id"],
                params.get("environment_objects"),
                params.get("organization_id"),
            )
        ]
        # Expansion can yield multiple versions, so =/!= become IN/NOT IN.
        operator_conversions = {"=": "IN", "!=": "NOT IN"}
        operator = operator_conversions.get(term.operator, term.operator)
        converted_filter = convert_search_filter_to_snuba_query(
            SearchFilter(
                term.key,
                operator,
                SearchValue(value),
            )
        )
        if converted_filter:
            conditions.append(converted_filter)
    else:
        converted_filter = convert_search_filter_to_snuba_query(term, params=params)
        if converted_filter:
            conditions.append(converted_filter)
    return conditions, projects_to_filter, group_ids
# Not a part of search.events.types to avoid a circular loop
# A single parsed unit of a search query: a filter, a parenthesized group,
# or a free-text string term.
ParsedTerm = Union[
    SearchFilter,
    AggregateFilter,
    ParenExpression,
    str,
]
# A full parsed search query, in the order the terms appeared.
ParsedTerms = Sequence[ParsedTerm]
| FilterConvertParams |
python | PyCQA__pylint | tests/functional/a/arguments_differ.py | {
"start": 343,
"end": 421
} | class ____:
def test(self, arg=None, barg=None):
pass
| ParentDefaults |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/types/dagster_type.py | {
"start": 29341,
"end": 37213
} | class ____(DagsterType):
def __init__(self, python_type: t.Type):
qualified_name = f"{python_type.__module__}.{python_type.__name__}"
self.python_type = python_type
super(TypeHintInferredDagsterType, self).__init__(
key=f"_TypeHintInferred[{qualified_name}]",
description=(
f"DagsterType created from a type hint for the Python type {qualified_name}"
),
type_check_fn=isinstance_type_check_fn(
python_type, python_type.__name__, qualified_name
),
typing_type=python_type,
)
@property
def display_name(self) -> str:
return self.python_type.__name__
def resolve_dagster_type(dagster_type: object) -> DagsterType:
# circular dep
from dagster._core.definitions.result import MaterializeResult, ObserveResult
from dagster._core.types.primitive_mapping import (
is_supported_runtime_python_builtin,
remap_python_builtin_for_runtime,
)
from dagster._core.types.python_dict import (
Dict as DDict,
)
from dagster._core.types.python_dict import (
PythonDict,
)
from dagster._core.types.python_set import DagsterSetApi, PythonSet
from dagster._core.types.python_tuple import DagsterTupleApi, PythonTuple
from dagster._core.types.transform_typing import transform_typing_type
from dagster._utils.typing_api import is_typing_type
check.invariant(
not (isinstance(dagster_type, type) and is_subclass(dagster_type, ConfigType)),
"Cannot resolve a config type to a runtime type",
)
check.invariant(
not (isinstance(dagster_type, type) and is_subclass(dagster_type, DagsterType)),
f"Do not pass runtime type classes. Got {dagster_type}",
)
# First, check to see if we're using Dagster's generic output type to do the type catching.
if is_generic_output_annotation(
dagster_type
) or is_generic_materialize_result_annotation(dagster_type):
type_args = get_args(dagster_type)
# If no inner type was provided, forward Any type.
dagster_type = type_args[0] if len(type_args) == 1 else Any
elif is_dynamic_output_annotation(dagster_type):
dynamic_out_annotation = get_args(dagster_type)[0]
type_args = get_args(dynamic_out_annotation)
dagster_type = type_args[0] if len(type_args) == 1 else Any
elif dagster_type == MaterializeResult:
dagster_type = Any
elif dagster_type == ObserveResult:
# ObserveResult does not include a value
dagster_type = Nothing
# Then, check to see if it is part of python's typing library
if is_typing_type(dagster_type):
dagster_type = transform_typing_type(dagster_type)
if isinstance(dagster_type, DagsterType):
return dagster_type
# Test for unhashable objects -- this is if, for instance, someone has passed us an instance of
# a dict where they meant to pass dict or Dict, etc.
try:
hash(dagster_type)
except TypeError as e:
raise DagsterInvalidDefinitionError(
DAGSTER_INVALID_TYPE_ERROR_MESSAGE.format(
additional_msg=(
", which isn't hashable. Did you pass an instance of a type instead of "
"the type?"
),
dagster_type=str(dagster_type),
)
) from e
if BuiltinEnum.contains(dagster_type):
return DagsterType.from_builtin_enum(dagster_type)
if is_supported_runtime_python_builtin(dagster_type):
return remap_python_builtin_for_runtime(dagster_type)
if dagster_type is None:
return Any
if dagster_type is DDict:
return PythonDict
if isinstance(dagster_type, DagsterTupleApi):
return PythonTuple
if isinstance(dagster_type, DagsterSetApi):
return PythonSet
if isinstance(dagster_type, DagsterListApi):
return List(Any)
if isinstance(dagster_type, type):
return resolve_python_type_to_dagster_type(dagster_type)
raise DagsterInvalidDefinitionError(
DAGSTER_INVALID_TYPE_ERROR_MESSAGE.format(
dagster_type=str(dagster_type), additional_msg="."
)
)
def is_dynamic_output_annotation(dagster_type: object) -> bool:
check.invariant(
not (isinstance(dagster_type, type) and is_subclass(dagster_type, ConfigType)),
"Cannot resolve a config type to a runtime type",
)
check.invariant(
not (isinstance(dagster_type, type) and is_subclass(dagster_type, ConfigType)),
f"Do not pass runtime type classes. Got {dagster_type}",
)
if dagster_type == DynamicOutput or get_origin(dagster_type) == DynamicOutput:
raise DagsterInvariantViolationError(
"Op annotated with return type DynamicOutput. DynamicOutputs can only be returned in"
" the context of a List. If only one output is needed, use the Output API."
)
if get_origin(dagster_type) == list and len(get_args(dagster_type)) == 1: # noqa: E721
list_inner_type = get_args(dagster_type)[0]
return (
list_inner_type == DynamicOutput
or get_origin(list_inner_type) == DynamicOutput
)
return False
def is_generic_output_annotation(dagster_type: object) -> bool:
return dagster_type == Output or get_origin(dagster_type) == Output
def is_generic_materialize_result_annotation(dagster_type: object) -> bool:
return (
dagster_type == MaterializeResult
or get_origin(dagster_type) == MaterializeResult
)
def resolve_python_type_to_dagster_type(python_type: t.Type) -> DagsterType:
"""Resolves a Python type to a Dagster type."""
check.inst_param(python_type, "python_type", type)
if python_type in _PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY:
return _PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY[python_type]
else:
dagster_type = TypeHintInferredDagsterType(python_type)
_PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY[python_type] = dagster_type
return dagster_type
ALL_RUNTIME_BUILTINS = list(_RUNTIME_MAP.values())
def construct_dagster_type_dictionary(
node_defs: Sequence["NodeDefinition"],
) -> Mapping[str, DagsterType]:
from dagster._core.definitions.graph_definition import GraphDefinition
type_dict_by_name = {t.unique_name: t for t in ALL_RUNTIME_BUILTINS}
type_dict_by_key = {t.key: t for t in ALL_RUNTIME_BUILTINS}
def process_node_def(node_def: "NodeDefinition"):
input_output_types = list(node_def.all_input_output_types())
for dagster_type in input_output_types:
# We don't do uniqueness check on key because with classes
# like Array, Noneable, etc, those are ephemeral objects
# and it is perfectly fine to have many of them.
type_dict_by_key[dagster_type.key] = dagster_type
if not dagster_type.has_unique_name:
continue
if dagster_type.unique_name not in type_dict_by_name:
type_dict_by_name[dagster_type.unique_name] = dagster_type
continue
if type_dict_by_name[dagster_type.unique_name] is not dagster_type:
raise DagsterInvalidDefinitionError(
(
f'You have created two dagster types with the same name "{dagster_type.display_name}". '
"Dagster types have must have unique names."
)
)
if isinstance(node_def, GraphDefinition):
for child_node_def in node_def.node_defs:
process_node_def(child_node_def)
for node_def in node_defs:
process_node_def(node_def)
return type_dict_by_key
| TypeHintInferredDagsterType |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/cloud/acme.py | {
"start": 1662,
"end": 2142
} | class ____(CloudEnvironment):
"""ACME environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
ansible_vars = dict(
acme_host=self._get_cloud_config('acme_host'),
)
return CloudEnvironmentConfig(
ansible_vars=ansible_vars,
)
| ACMEEnvironment |
python | kamyu104__LeetCode-Solutions | Python/count-substrings-without-repeating-character.py | {
"start": 563,
"end": 1027
} | class ____(object):
def numberOfSpecialSubstrings(self, s):
"""
:type s: str
:rtype: int
"""
result = left = 0
lookup = [False]*26
for right in xrange(len(s)):
while lookup[ord(s[right])-ord('a')]:
lookup[ord(s[left])-ord('a')] = False
left += 1
lookup[ord(s[right])-ord('a')] = True
result += (right-left+1)
return result
| Solution2 |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_composer.py | {
"start": 14454,
"end": 15757
} | class ____:
@mock.patch(COMPOSER_STRING.format("CloudComposerHook"))
def test_execute(self, mock_hook) -> None:
op = CloudComposerTriggerDAGRunOperator(
task_id=TASK_ID,
project_id=TEST_GCP_PROJECT,
region=TEST_GCP_REGION,
environment_id=TEST_ENVIRONMENT_ID,
composer_dag_id=TEST_COMPOSER_DAG_ID,
composer_dag_conf=TEST_COMPOSER_DAG_CONF,
gcp_conn_id=TEST_GCP_CONN_ID,
timeout=TEST_TIMEOUT,
)
op.execute(mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.get_environment.assert_called_once_with(
project_id=TEST_GCP_PROJECT,
region=TEST_GCP_REGION,
environment_id=TEST_ENVIRONMENT_ID,
timeout=TEST_TIMEOUT,
)
mock_hook.return_value.trigger_dag_run.assert_called_once_with(
composer_airflow_uri=mock_hook.return_value.get_environment.return_value.config.airflow_uri,
composer_dag_id=TEST_COMPOSER_DAG_ID,
composer_dag_conf=TEST_COMPOSER_DAG_CONF,
timeout=TEST_TIMEOUT,
)
| TestCloudComposerTriggerDAGRunOperator |
python | readthedocs__readthedocs.org | readthedocs/doc_builder/exceptions.py | {
"start": 2571,
"end": 3363
} | class ____(BuildUserError):
GENERIC_WITH_PARSE_EXCEPTION = "build:user:mkdocs:yaml-parse"
INVALID_DOCS_DIR_CONFIG = "build:user:mkdocs:invalid-dir-config"
INVALID_DOCS_DIR_PATH = "build:user:mkdocs:invalid-dir-path"
INVALID_EXTRA_CONFIG = "build:user:mkdocs:invalid-extra-config"
EMPTY_CONFIG = "build:user:mkdocs:empty-config"
NOT_FOUND = "build:user:mkdocs:config-not-found"
CONFIG_NOT_DICT = "build:user:mkdocs:invalid-yaml"
SYNTAX_ERROR = "build:user:mkdocs:syntax-error"
# NOTE: there is no need to have three different error classes for this.
# We can merge all of them in one, always raise the same exception with different messages.
#
# TODO: improve messages for symlink errors with a more detailed error and include the `filepath`.
| MkDocsYAMLParseError |
python | pypa__warehouse | warehouse/mock/billing.py | {
"start": 707,
"end": 2899
} | class ____:
def __init__(self, organization, request):
billing_service = request.find_service(IBillingService, context=None)
if not request.organization_access or not isinstance(
billing_service, MockStripeBillingService
):
raise HTTPNotFound
self.organization = organization
self.request = request
@view_config(
route_name="mock.billing.checkout-session",
renderer="warehouse:templates/mock/billing/checkout-session.html",
)
def mock_checkout_session(self):
return {"organization": self.organization}
@view_config(
route_name="mock.billing.portal-session",
renderer="warehouse:templates/mock/billing/portal-session.html",
)
def mock_portal_session(self):
return {"organization": self.organization}
@view_config(route_name="mock.billing.trigger-checkout-session-completed")
def mock_trigger_checkout_session_completed(self):
mock_event = {
"type": "checkout.session.completed",
"data": {
"object": {
"id": (
"mockcs_"
+ "".join(random.choices(digits + ascii_letters, k=58))
),
"customer": (
self.organization.customer
and self.organization.customer.customer_id
),
"customer_email": (
self.organization.customer
and self.organization.customer.billing_email
),
"status": "complete",
"subscription": (
"mocksub_"
+ "".join(random.choices(digits + ascii_letters, k=24))
),
"metadata": {
"billing_service": "pypi",
"domain": "localhost",
},
},
},
}
handle_billing_webhook_event(self.request, mock_event)
return HTTPSeeOther(self.request.route_path("manage.organizations"))
| MockBillingViews |
python | pytorch__pytorch | test/inductor/test_ordered_set.py | {
"start": 58289,
"end": 61270
} | class ____(TestCase):
def test_constructor(self):
for cons in (OrderedSet, frozenset):
for s in ("123", "", range(1000), ("do", 1.2), range(2000, 2200, 5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(
sorted(cons(g(s)), key=repr), sorted(g(s), key=repr)
)
self.assertRaises(TypeError, cons, X(s))
self.assertRaises(TypeError, cons, N(s))
self.assertRaises(ZeroDivisionError, cons, E(s))
def test_inline_methods(self):
s = OrderedSet("november")
for data in (
"123",
"",
range(1000),
("do", 1.2),
range(2000, 2200, 5),
"december",
):
for meth in (
s.union,
s.intersection,
s.difference,
s.symmetric_difference,
s.isdisjoint,
):
for g in (G, I, Ig, L, R):
# Only iterables supported, not sequences
if g is G:
continue
expected = meth(data)
actual = meth(g(data))
if isinstance(expected, bool):
self.assertEqual(actual, expected)
else:
self.assertEqual(
sorted(actual, key=repr), sorted(expected, key=repr)
)
self.assertRaises(TypeError, meth, X(s))
self.assertRaises(TypeError, meth, N(s))
self.assertRaises(ZeroDivisionError, meth, E(s))
def test_inplace_methods(self):
for data in (
"123",
"",
range(1000),
("do", 1.2),
range(2000, 2200, 5),
"december",
):
for methname in (
"update",
"intersection_update",
"difference_update",
"symmetric_difference_update",
):
for g in (G, I, Ig, S, L, R):
# Only Iterables supported, not Sequence
if g is G:
continue
s = OrderedSet("january")
t = s.copy()
getattr(s, methname)(list(g(data)))
getattr(t, methname)(g(data))
self.assertEqual(sorted(s, key=repr), sorted(t, key=repr))
self.assertRaises(
TypeError, getattr(OrderedSet("january"), methname), X(data)
)
self.assertRaises(
TypeError, getattr(OrderedSet("january"), methname), N(data)
)
self.assertRaises(
ZeroDivisionError, getattr(OrderedSet("january"), methname), E(data)
)
| TestVariousIteratorArgs |
python | wandb__wandb | wandb/sdk/data_types/table.py | {
"start": 48860,
"end": 50064
} | class ____(_dtypes.Type):
name = "table"
legacy_names = ["wandb.Table"]
types = [Table]
def __init__(self, column_types=None):
if column_types is None:
column_types = _dtypes.UnknownType()
if isinstance(column_types, dict):
column_types = _dtypes.TypedDictType(column_types)
elif not (
isinstance(column_types, _dtypes.TypedDictType)
or isinstance(column_types, _dtypes.UnknownType)
):
raise TypeError("column_types must be a dict or TypedDictType")
self.params.update({"column_types": column_types})
def assign_type(self, wb_type=None):
if isinstance(wb_type, _TableType):
column_types = self.params["column_types"].assign_type(
wb_type.params["column_types"]
)
if not isinstance(column_types, _dtypes.InvalidType):
return _TableType(column_types)
return _dtypes.InvalidType()
@classmethod
def from_obj(cls, py_obj):
if not isinstance(py_obj, Table):
raise TypeError("py_obj must be a wandb.Table")
else:
return cls(py_obj._column_types)
| _TableType |
python | doocs__leetcode | solution/1000-1099/1059.All Paths from Source Lead to Destination/Solution.py | {
"start": 0,
"end": 571
} | class ____:
def leadsToDestination(
self, n: int, edges: List[List[int]], source: int, destination: int
) -> bool:
@cache
def dfs(i):
if i == destination:
return not g[i]
if i in vis or not g[i]:
return False
vis.add(i)
for j in g[i]:
if not dfs(j):
return False
return True
g = defaultdict(list)
for a, b in edges:
g[a].append(b)
vis = set()
return dfs(source)
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed3.py | {
"start": 939,
"end": 1007
} | class ____(TypedDict, extra_items=int | str):
a: int
| ParentClosed3 |
python | dask__distributed | distributed/protocol/serialize.py | {
"start": 26763,
"end": 30161
} | class ____:
def __init__(self, serializer):
self.serializer = serializer
def serialize(self, est):
header = {
"serializer": self.serializer,
"type-serialized": pickle.dumps(type(est)),
"simple": {},
"complex": {},
}
frames = []
if isinstance(est, dict):
d = est
else:
d = est.__dict__
for k, v in d.items():
if _is_msgpack_serializable(v):
header["simple"][k] = v
else:
if isinstance(v, dict):
h, f = self.serialize(v)
h = {"nested-dict": h}
else:
h, f = serialize(v, serializers=(self.serializer, "pickle"))
header["complex"][k] = {
"header": h,
"start": len(frames),
"stop": len(frames) + len(f),
}
frames += f
return header, frames
def deserialize(self, header, frames):
cls = pickle.loads(header["type-serialized"])
if issubclass(cls, dict):
dd = obj = {}
else:
obj = object.__new__(cls)
dd = obj.__dict__
dd.update(header["simple"])
for k, d in header["complex"].items():
h = d["header"]
f = frames[d["start"] : d["stop"]]
nested_dict = h.get("nested-dict")
if nested_dict:
v = self.deserialize(nested_dict, f)
else:
v = deserialize(h, f)
dd[k] = v
return obj
dask_object_with_dict_serializer = ObjectDictSerializer("dask")
dask_deserialize.register(dict)(dask_object_with_dict_serializer.deserialize)
def register_generic(
cls,
serializer_name="dask",
serialize_func=dask_serialize,
deserialize_func=dask_deserialize,
):
"""Register (de)serialize to traverse through __dict__
Normally when registering new classes for Dask's custom serialization you
need to manage headers and frames, which can be tedious. If all you want
to do is traverse through your object and apply serialize to all of your
object's attributes then this function may provide an easier path.
This registers a class for the custom Dask serialization family. It
serializes it by traversing through its __dict__ of attributes and applying
``serialize`` and ``deserialize`` recursively. It collects a set of frames
and keeps small attributes in the header. Deserialization reverses this
process.
This is a good idea if the following hold:
1. Most of the bytes of your object are composed of data types that Dask's
custom serializtion already handles well, like Numpy arrays.
2. Your object doesn't require any special constructor logic, other than
object.__new__(cls)
Examples
--------
>>> import sklearn.base
>>> from distributed.protocol import register_generic
>>> register_generic(sklearn.base.BaseEstimator)
See Also
--------
dask_serialize
dask_deserialize
"""
object_with_dict_serializer = ObjectDictSerializer(serializer_name)
serialize_func.register(cls)(object_with_dict_serializer.serialize)
deserialize_func.register(cls)(object_with_dict_serializer.deserialize)
| ObjectDictSerializer |
python | django__django | tests/generic_inline_admin/admin.py | {
"start": 443,
"end": 713
} | class ____(GenericTabularInline):
model = Media
can_delete = False
site.register(Episode, EpisodeAdmin)
site.register(Contact, inlines=[PhoneNumberInline])
site.register(Category)
site.register(EpisodePermanent, inlines=[MediaPermanentInline])
| MediaPermanentInline |
python | wandb__wandb | wandb/vendor/pygments/lexers/basic.py | {
"start": 4739,
"end": 8034
} | class ____(RegexLexer):
"""
For `BlitzBasic <http://blitzbasic.com>`_ source code.
.. versionadded:: 2.0
"""
name = 'BlitzBasic'
aliases = ['blitzbasic', 'b3d', 'bplus']
filenames = ['*.bb', '*.decls']
mimetypes = ['text/x-bb']
bb_sktypes = r'@{1,2}|[#$%]'
bb_name = r'[a-z]\w*'
bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \
(bb_name, bb_sktypes, bb_name)
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Text
(r'[ \t]+', Text),
# Comments
(r";.*?\n", Comment.Single),
# Data types
('"', String.Double, 'string'),
# Numbers
(r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
(r'\.[0-9]+(?!\.)', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\$[0-9a-f]+', Number.Hex),
(r'\%[10]+', Number.Bin),
# Other
(words(('Shl', 'Shr', 'Sar', 'Mod', 'Or', 'And', 'Not',
'Abs', 'Sgn', 'Handle', 'Int', 'Float', 'Str',
'First', 'Last', 'Before', 'After'),
prefix=r'\b', suffix=r'\b'),
Operator),
(r'([+\-*/~=<>^])', Operator),
(r'[(),:\[\]\\]', Punctuation),
(r'\.([ \t]*)(%s)' % bb_name, Name.Label),
# Identifiers
(r'\b(New)\b([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Text, Name.Label)),
(r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name),
bygroups(Operator, Text, Punctuation, Text, Name.Class)),
(r'\b%s\b([ \t]*)(\()' % bb_var,
bygroups(Name.Function, Text, Keyword.Type, Text, Punctuation,
Text, Name.Class, Text, Punctuation)),
(r'\b(Function)\b([ \t]+)%s' % bb_var,
bygroups(Keyword.Reserved, Text, Name.Function, Text, Keyword.Type,
Text, Punctuation, Text, Name.Class)),
(r'\b(Type)([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Text, Name.Class)),
# Keywords
(r'\b(Pi|True|False|Null)\b', Keyword.Constant),
(r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration),
(words((
'End', 'Return', 'Exit', 'Chr', 'Len', 'Asc', 'New', 'Delete', 'Insert',
'Include', 'Function', 'Type', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
'For', 'To', 'Next', 'Step', 'Each', 'While', 'Wend',
'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default',
'Goto', 'Gosub', 'Data', 'Read', 'Restore'), prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
# Final resolve (for variable names and such)
# (r'(%s)' % (bb_name), Name.Variable),
(bb_var, bygroups(Name.Variable, Text, Keyword.Type,
Text, Punctuation, Text, Name.Class)),
],
'string': [
(r'""', String.Double),
(r'"C?', String.Double, '#pop'),
(r'[^"]+', String.Double),
],
}
| BlitzBasicLexer |
python | Textualize__textual | docs/examples/guide/compound/byte02.py | {
"start": 279,
"end": 1287
} | class ____(Widget):
"""A Switch with a numeric label above it."""
DEFAULT_CSS = """
BitSwitch {
layout: vertical;
width: auto;
height: auto;
}
BitSwitch > Label {
text-align: center;
width: 100%;
}
"""
class BitChanged(Message):
"""Sent when the 'bit' changes."""
def __init__(self, bit: int, value: bool) -> None:
super().__init__()
self.bit = bit
self.value = value
value = reactive(0) # (1)!
def __init__(self, bit: int) -> None:
self.bit = bit
super().__init__()
def compose(self) -> ComposeResult:
yield Label(str(self.bit))
yield Switch()
def on_switch_changed(self, event: Switch.Changed) -> None: # (2)!
"""When the switch changes, notify the parent via a message."""
event.stop() # (3)!
self.value = event.value # (4)!
self.post_message(self.BitChanged(self.bit, event.value))
| BitSwitch |
python | pypa__pip | src/pip/_vendor/urllib3/exceptions.py | {
"start": 5783,
"end": 6324
} | class ____(HTTPError, httplib_IncompleteRead):
"""
Response length doesn't match expected Content-Length
Subclass of :class:`http.client.IncompleteRead` to allow int value
for ``partial`` to avoid creating large objects on streamed reads.
"""
def __init__(self, partial, expected):
super(IncompleteRead, self).__init__(partial, expected)
def __repr__(self):
return "IncompleteRead(%i bytes read, %i more expected)" % (
self.partial,
self.expected,
)
| IncompleteRead |
python | optuna__optuna | optuna/storages/_callbacks.py | {
"start": 245,
"end": 4521
} | class ____:
"""Retry a failed trial up to a maximum number of times.
When a trial fails, this callback can be used with a class in :mod:`optuna.storages` to
recreate the trial in ``TrialState.WAITING`` to queue up the trial to be run again.
The failed trial can be identified by the
:func:`~optuna.storages.RetryFailedTrialCallback.retried_trial_number` function.
Even if repetitive failure occurs (a retried trial fails again),
this method returns the number of the original trial.
To get a full list including the numbers of the retried trials as well as their original trial,
call the :func:`~optuna.storages.RetryFailedTrialCallback.retry_history` function.
This callback is helpful in environments where trials may fail due to external conditions,
such as being preempted by other processes.
Usage:
.. testcode::
import optuna
from optuna.storages import RetryFailedTrialCallback
storage = optuna.storages.RDBStorage(
url="sqlite:///:memory:",
heartbeat_interval=60,
grace_period=120,
failed_trial_callback=RetryFailedTrialCallback(max_retry=3),
)
study = optuna.create_study(
storage=storage,
)
.. seealso::
See :class:`~optuna.storages.RDBStorage`.
Args:
max_retry:
The max number of times a trial can be retried. Must be set to :obj:`None` or an
integer. If set to the default value of :obj:`None` will retry indefinitely.
If set to an integer, will only retry that many times.
inherit_intermediate_values:
Option to inherit `trial.intermediate_values` reported by
:func:`optuna.trial.Trial.report` from the failed trial. Default is :obj:`False`.
"""
def __init__(
self, max_retry: int | None = None, inherit_intermediate_values: bool = False
) -> None:
self._max_retry = max_retry
self._inherit_intermediate_values = inherit_intermediate_values
def __call__(self, study: "optuna.study.Study", trial: FrozenTrial) -> None:
system_attrs: dict[str, Any] = {
"failed_trial": trial.number,
"retry_history": [],
**trial.system_attrs,
}
system_attrs["retry_history"].append(trial.number)
if self._max_retry is not None:
if self._max_retry < len(system_attrs["retry_history"]):
return
study.add_trial(
optuna.create_trial(
state=optuna.trial.TrialState.WAITING,
params=trial.params,
distributions=trial.distributions,
user_attrs=trial.user_attrs,
system_attrs=system_attrs,
intermediate_values=(
trial.intermediate_values if self._inherit_intermediate_values else None
),
)
)
@staticmethod
@experimental_func("2.8.0")
def retried_trial_number(trial: FrozenTrial) -> int | None:
"""Return the number of the original trial being retried.
Args:
trial:
The trial object.
Returns:
The number of the first failed trial. If not retry of a previous trial,
returns :obj:`None`.
"""
return trial.system_attrs.get("failed_trial", None)
@staticmethod
@experimental_func("3.0.0")
def retry_history(trial: FrozenTrial) -> list[int]:
"""Return the list of retried trial numbers with respect to the specified trial.
Args:
trial:
The trial object.
Returns:
A list of trial numbers in ascending order of the series of retried trials.
The first item of the list indicates the original trial which is identical
to the :func:`~optuna.storages.RetryFailedTrialCallback.retried_trial_number`,
and the last item is the one right before the specified trial in the retry series.
If the specified trial is not a retry of any trial, returns an empty list.
"""
return trial.system_attrs.get("retry_history", [])
| RetryFailedTrialCallback |
python | tornadoweb__tornado | tornado/queues.py | {
"start": 2328,
"end": 10859
} | class ____(Generic[_T]):
"""Coordinate producer and consumer coroutines.
If maxsize is 0 (the default) the queue size is unbounded.
.. testcode::
import asyncio
from tornado.ioloop import IOLoop
from tornado.queues import Queue
q = Queue(maxsize=2)
async def consumer():
async for item in q:
try:
print('Doing work on %s' % item)
await asyncio.sleep(0.01)
finally:
q.task_done()
async def producer():
for item in range(5):
await q.put(item)
print('Put %s' % item)
async def main():
# Start consumer without waiting (since it never finishes).
IOLoop.current().spawn_callback(consumer)
await producer() # Wait for producer to put all tasks.
await q.join() # Wait for consumer to finish all tasks.
print('Done')
asyncio.run(main())
.. testoutput::
Put 0
Put 1
Doing work on 0
Put 2
Doing work on 1
Put 3
Doing work on 2
Put 4
Doing work on 3
Doing work on 4
Done
In versions of Python without native coroutines (before 3.5),
``consumer()`` could be written as::
@gen.coroutine
def consumer():
while True:
item = yield q.get()
try:
print('Doing work on %s' % item)
yield gen.sleep(0.01)
finally:
q.task_done()
.. versionchanged:: 4.3
Added ``async for`` support in Python 3.5.
"""
# Exact type depends on subclass. Could be another generic
# parameter and use protocols to be more precise here.
_queue = None # type: Any
def __init__(self, maxsize: int = 0) -> None:
if maxsize is None:
raise TypeError("maxsize can't be None")
if maxsize < 0:
raise ValueError("maxsize can't be negative")
self._maxsize = maxsize
self._init()
self._getters = collections.deque([]) # type: Deque[Future[_T]]
self._putters = collections.deque([]) # type: Deque[Tuple[_T, Future[None]]]
self._unfinished_tasks = 0
self._finished = Event()
self._finished.set()
@property
def maxsize(self) -> int:
"""Number of items allowed in the queue."""
return self._maxsize
def qsize(self) -> int:
"""Number of items in the queue."""
return len(self._queue)
def empty(self) -> bool:
return not self._queue
def full(self) -> bool:
if self.maxsize == 0:
return False
else:
return self.qsize() >= self.maxsize
def put(
self, item: _T, timeout: Optional[Union[float, datetime.timedelta]] = None
) -> "Future[None]":
"""Put an item into the queue, perhaps waiting until there is room.
Returns a Future, which raises `tornado.util.TimeoutError` after a
timeout.
``timeout`` may be a number denoting a time (on the same
scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time.
"""
future = Future() # type: Future[None]
try:
self.put_nowait(item)
except QueueFull:
self._putters.append((item, future))
_set_timeout(future, timeout)
else:
future.set_result(None)
return future
def put_nowait(self, item: _T) -> None:
"""Put an item into the queue without blocking.
If no free slot is immediately available, raise `QueueFull`.
"""
self._consume_expired()
if self._getters:
assert self.empty(), "queue non-empty, why are getters waiting?"
getter = self._getters.popleft()
self.__put_internal(item)
future_set_result_unless_cancelled(getter, self._get())
elif self.full():
raise QueueFull
else:
self.__put_internal(item)
def get(
self, timeout: Optional[Union[float, datetime.timedelta]] = None
) -> Awaitable[_T]:
"""Remove and return an item from the queue.
Returns an awaitable which resolves once an item is available, or raises
`tornado.util.TimeoutError` after a timeout.
``timeout`` may be a number denoting a time (on the same
scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time.
.. note::
The ``timeout`` argument of this method differs from that
of the standard library's `queue.Queue.get`. That method
interprets numeric values as relative timeouts; this one
interprets them as absolute deadlines and requires
``timedelta`` objects for relative timeouts (consistent
with other timeouts in Tornado).
"""
future = Future() # type: Future[_T]
try:
future.set_result(self.get_nowait())
except QueueEmpty:
self._getters.append(future)
_set_timeout(future, timeout)
return future
def get_nowait(self) -> _T:
"""Remove and return an item from the queue without blocking.
Return an item if one is immediately available, else raise
`QueueEmpty`.
"""
self._consume_expired()
if self._putters:
assert self.full(), "queue not full, why are putters waiting?"
item, putter = self._putters.popleft()
self.__put_internal(item)
future_set_result_unless_cancelled(putter, None)
return self._get()
elif self.qsize():
return self._get()
else:
raise QueueEmpty
def task_done(self) -> None:
"""Indicate that a formerly enqueued task is complete.
Used by queue consumers. For each `.get` used to fetch a task, a
subsequent call to `.task_done` tells the queue that the processing
on the task is complete.
If a `.join` is blocking, it resumes when all items have been
processed; that is, when every `.put` is matched by a `.task_done`.
Raises `ValueError` if called more times than `.put`.
"""
if self._unfinished_tasks <= 0:
raise ValueError("task_done() called too many times")
self._unfinished_tasks -= 1
if self._unfinished_tasks == 0:
self._finished.set()
def join(
self, timeout: Optional[Union[float, datetime.timedelta]] = None
) -> Awaitable[None]:
"""Block until all items in the queue are processed.
Returns an awaitable, which raises `tornado.util.TimeoutError` after a
timeout.
"""
return self._finished.wait(timeout)
def __aiter__(self) -> _QueueIterator[_T]:
return _QueueIterator(self)
# These three are overridable in subclasses.
def _init(self) -> None:
self._queue = collections.deque()
def _get(self) -> _T:
return self._queue.popleft()
def _put(self, item: _T) -> None:
self._queue.append(item)
# End of the overridable methods.
def __put_internal(self, item: _T) -> None:
self._unfinished_tasks += 1
self._finished.clear()
self._put(item)
def _consume_expired(self) -> None:
# Remove timed-out waiters.
while self._putters and self._putters[0][1].done():
self._putters.popleft()
while self._getters and self._getters[0].done():
self._getters.popleft()
def __repr__(self) -> str:
return f"<{type(self).__name__} at {hex(id(self))} {self._format()}>"
def __str__(self) -> str:
return f"<{type(self).__name__} {self._format()}>"
def _format(self) -> str:
result = f"maxsize={self.maxsize!r}"
if getattr(self, "_queue", None):
result += " queue=%r" % self._queue
if self._getters:
result += " getters[%s]" % len(self._getters)
if self._putters:
result += " putters[%s]" % len(self._putters)
if self._unfinished_tasks:
result += " tasks=%s" % self._unfinished_tasks
return result
| Queue |
python | doocs__leetcode | solution/3300-3399/3396.Minimum Number of Operations to Make Elements in Array Distinct/Solution.py | {
"start": 0,
"end": 245
} | class ____:
def minimumOperations(self, nums: List[int]) -> int:
s = set()
for i in range(len(nums) - 1, -1, -1):
if nums[i] in s:
return i // 3 + 1
s.add(nums[i])
return 0
| Solution |
python | openai__gym | gym/wrappers/human_rendering.py | {
"start": 152,
"end": 5051
} | class ____(gym.Wrapper):
"""Performs human rendering for an environment that only supports "rgb_array"rendering.
This wrapper is particularly useful when you have implemented an environment that can produce
RGB images but haven't implemented any code to render the images to the screen.
If you want to use this wrapper with your environments, remember to specify ``"render_fps"``
in the metadata of your environment.
The ``render_mode`` of the wrapped environment must be either ``'rgb_array'`` or ``'rgb_array_list'``.
Example:
>>> env = gym.make("LunarLander-v2", render_mode="rgb_array")
>>> wrapped = HumanRendering(env)
>>> wrapped.reset() # This will start rendering to the screen
The wrapper can also be applied directly when the environment is instantiated, simply by passing
``render_mode="human"`` to ``make``. The wrapper will only be applied if the environment does not
implement human-rendering natively (i.e. ``render_mode`` does not contain ``"human"``).
Example:
>>> env = gym.make("NoNativeRendering-v2", render_mode="human") # NoNativeRendering-v0 doesn't implement human-rendering natively
>>> env.reset() # This will start rendering to the screen
Warning: If the base environment uses ``render_mode="rgb_array_list"``, its (i.e. the *base environment's*) render method
will always return an empty list:
>>> env = gym.make("LunarLander-v2", render_mode="rgb_array_list")
>>> wrapped = HumanRendering(env)
>>> wrapped.reset()
>>> env.render()
[] # env.render() will always return an empty list!
"""
def __init__(self, env):
"""Initialize a :class:`HumanRendering` instance.
Args:
env: The environment that is being wrapped
"""
super().__init__(env)
assert env.render_mode in [
"rgb_array",
"rgb_array_list",
], f"Expected env.render_mode to be one of 'rgb_array' or 'rgb_array_list' but got '{env.render_mode}'"
assert (
"render_fps" in env.metadata
), "The base environment must specify 'render_fps' to be used with the HumanRendering wrapper"
self.screen_size = None
self.window = None
self.clock = None
@property
def render_mode(self):
"""Always returns ``'human'``."""
return "human"
def step(self, *args, **kwargs):
"""Perform a step in the base environment and render a frame to the screen."""
result = self.env.step(*args, **kwargs)
self._render_frame()
return result
def reset(self, *args, **kwargs):
"""Reset the base environment and render a frame to the screen."""
result = self.env.reset(*args, **kwargs)
self._render_frame()
return result
def render(self):
"""This method doesn't do much, actual rendering is performed in :meth:`step` and :meth:`reset`."""
return None
def _render_frame(self):
"""Fetch the last frame from the base environment and render it to the screen."""
try:
import pygame
except ImportError:
raise DependencyNotInstalled(
"pygame is not installed, run `pip install gym[box2d]`"
)
if self.env.render_mode == "rgb_array_list":
last_rgb_array = self.env.render()
assert isinstance(last_rgb_array, list)
last_rgb_array = last_rgb_array[-1]
elif self.env.render_mode == "rgb_array":
last_rgb_array = self.env.render()
else:
raise Exception(
f"Wrapped environment must have mode 'rgb_array' or 'rgb_array_list', actual render mode: {self.env.render_mode}"
)
assert isinstance(last_rgb_array, np.ndarray)
rgb_array = np.transpose(last_rgb_array, axes=(1, 0, 2))
if self.screen_size is None:
self.screen_size = rgb_array.shape[:2]
assert (
self.screen_size == rgb_array.shape[:2]
), f"The shape of the rgb array has changed from {self.screen_size} to {rgb_array.shape[:2]}"
if self.window is None:
pygame.init()
pygame.display.init()
self.window = pygame.display.set_mode(self.screen_size)
if self.clock is None:
self.clock = pygame.time.Clock()
surf = pygame.surfarray.make_surface(rgb_array)
self.window.blit(surf, (0, 0))
pygame.event.pump()
self.clock.tick(self.metadata["render_fps"])
pygame.display.flip()
def close(self):
"""Close the rendering window."""
super().close()
if self.window is not None:
import pygame
pygame.display.quit()
pygame.quit()
| HumanRendering |
python | python-jsonschema__jsonschema | jsonschema/tests/test_validators.py | {
"start": 68184,
"end": 72912
} | class ____(TestCase):
def test_draft_3(self):
schema = {"$schema": "http://json-schema.org/draft-03/schema"}
self.assertIs(
validators.validator_for(schema),
validators.Draft3Validator,
)
schema = {"$schema": "http://json-schema.org/draft-03/schema#"}
self.assertIs(
validators.validator_for(schema),
validators.Draft3Validator,
)
def test_draft_4(self):
schema = {"$schema": "http://json-schema.org/draft-04/schema"}
self.assertIs(
validators.validator_for(schema),
validators.Draft4Validator,
)
schema = {"$schema": "http://json-schema.org/draft-04/schema#"}
self.assertIs(
validators.validator_for(schema),
validators.Draft4Validator,
)
def test_draft_6(self):
schema = {"$schema": "http://json-schema.org/draft-06/schema"}
self.assertIs(
validators.validator_for(schema),
validators.Draft6Validator,
)
schema = {"$schema": "http://json-schema.org/draft-06/schema#"}
self.assertIs(
validators.validator_for(schema),
validators.Draft6Validator,
)
def test_draft_7(self):
schema = {"$schema": "http://json-schema.org/draft-07/schema"}
self.assertIs(
validators.validator_for(schema),
validators.Draft7Validator,
)
schema = {"$schema": "http://json-schema.org/draft-07/schema#"}
self.assertIs(
validators.validator_for(schema),
validators.Draft7Validator,
)
def test_draft_201909(self):
schema = {"$schema": "https://json-schema.org/draft/2019-09/schema"}
self.assertIs(
validators.validator_for(schema),
validators.Draft201909Validator,
)
schema = {"$schema": "https://json-schema.org/draft/2019-09/schema#"}
self.assertIs(
validators.validator_for(schema),
validators.Draft201909Validator,
)
def test_draft_202012(self):
schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
self.assertIs(
validators.validator_for(schema),
validators.Draft202012Validator,
)
schema = {"$schema": "https://json-schema.org/draft/2020-12/schema#"}
self.assertIs(
validators.validator_for(schema),
validators.Draft202012Validator,
)
def test_True(self):
self.assertIs(
validators.validator_for(True),
validators._LATEST_VERSION,
)
def test_False(self):
self.assertIs(
validators.validator_for(False),
validators._LATEST_VERSION,
)
def test_custom_validator(self):
Validator = validators.create(
meta_schema={"id": "meta schema id"},
version="12",
id_of=lambda s: s.get("id", ""),
)
schema = {"$schema": "meta schema id"}
self.assertIs(
validators.validator_for(schema),
Validator,
)
def test_custom_validator_draft6(self):
Validator = validators.create(
meta_schema={"$id": "meta schema $id"},
version="13",
)
schema = {"$schema": "meta schema $id"}
self.assertIs(
validators.validator_for(schema),
Validator,
)
def test_validator_for_jsonschema_default(self):
self.assertIs(validators.validator_for({}), validators._LATEST_VERSION)
def test_validator_for_custom_default(self):
self.assertIs(validators.validator_for({}, default=None), None)
def test_warns_if_meta_schema_specified_was_not_found(self):
with self.assertWarns(DeprecationWarning) as cm:
validators.validator_for(schema={"$schema": "unknownSchema"})
self.assertEqual(cm.filename, __file__)
self.assertEqual(
str(cm.warning),
"The metaschema specified by $schema was not found. "
"Using the latest draft to validate, but this will raise "
"an error in the future.",
)
def test_does_not_warn_if_meta_schema_is_unspecified(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
validators.validator_for(schema={}, default={})
self.assertFalse(w)
def test_validator_for_custom_default_with_schema(self):
schema, default = {"$schema": "mailto:foo@example.com"}, object()
self.assertIs(validators.validator_for(schema, default), default)
| TestValidatorFor |
python | pandas-dev__pandas | asv_bench/benchmarks/join_merge.py | {
"start": 1938,
"end": 3359
} | class ____:
params = (
[
"datetime64[ns]",
"int64",
"Int64",
"int64[pyarrow]",
"string[python]",
"string[pyarrow]",
],
["monotonic", "non_monotonic", "has_na"],
[0, 1],
[True, False],
)
param_names = ["dtype", "structure", "axis", "sort"]
def setup(self, dtype, structure, axis, sort):
N = 10_000
if dtype == "datetime64[ns]":
vals = date_range("1970-01-01", periods=N)
elif dtype in ("int64", "Int64", "int64[pyarrow]"):
vals = np.arange(N, dtype=np.int64)
elif dtype in ("string[python]", "string[pyarrow]"):
vals = Index([f"i-{i}" for i in range(N)], dtype=object)
else:
raise NotImplementedError
idx = Index(vals, dtype=dtype)
if structure == "monotonic":
idx = idx.sort_values()
elif structure == "non_monotonic":
idx = idx[::-1]
elif structure == "has_na":
if not idx._can_hold_na:
raise NotImplementedError
idx = Index([None], dtype=dtype).append(idx)
else:
raise NotImplementedError
self.series = [Series(i, idx[:-i]) for i in range(1, 6)]
def time_concat_series(self, dtype, structure, axis, sort):
concat(self.series, axis=axis, sort=sort)
| ConcatIndexDtype |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/integrations/sigma/customize-sigma-asset-defs.py | {
"start": 466,
"end": 1102
} | class ____(DagsterSigmaTranslator):
def get_asset_spec(self, data: SigmaWorkbookTranslatorData) -> dg.AssetSpec: # pyright: ignore[reportIncompatibleMethodOverride]
# We create the default asset spec using super()
default_spec = super().get_asset_spec(data)
# we customize the team owner tag for all Sigma assets
return default_spec.replace_attributes(owners=["team:my_team"])
sigma_specs = load_sigma_asset_specs(
sigma_organization, dagster_sigma_translator=MyCustomSigmaTranslator()
)
defs = dg.Definitions(assets=[*sigma_specs], resources={"sigma": sigma_organization})
| MyCustomSigmaTranslator |
python | marshmallow-code__marshmallow | examples/package_json_example.py | {
"start": 670,
"end": 1598
} | class ____(Schema):
name = fields.Str(required=True)
version = Version(required=True)
description = fields.Str(required=True)
main = fields.Str(required=False)
homepage = fields.URL(required=False)
scripts = fields.Dict(keys=fields.Str(), values=fields.Str())
license = fields.Str(required=True)
dependencies = fields.Dict(keys=fields.Str(), values=fields.Str(), required=False)
dev_dependencies = fields.Dict(
keys=fields.Str(),
values=fields.Str(),
required=False,
data_key="devDependencies",
)
class Meta:
# Include unknown fields in the deserialized output
unknown = INCLUDE
if __name__ == "__main__":
pkg = json.load(sys.stdin)
try:
pprint(PackageSchema().load(pkg))
except ValidationError as error:
print("ERROR: package.json is invalid")
pprint(error.messages)
sys.exit(1)
| PackageSchema |
python | doocs__leetcode | solution/0200-0299/0205.Isomorphic Strings/Solution2.py | {
"start": 0,
"end": 300
} | class ____:
def isIsomorphic(self, s: str, t: str) -> bool:
d1, d2 = [0] * 256, [0] * 256
for i, (a, b) in enumerate(zip(s, t), 1):
a, b = ord(a), ord(b)
if d1[a] != d2[b]:
return False
d1[a] = d2[b] = i
return True
| Solution |
python | python-attrs__attrs | tests/test_dunders.py | {
"start": 836,
"end": 950
} | class ____:
a = attr.ib(eq=str.lower, order=False)
b = attr.ib(eq=True)
@attr.s(order=True)
| EqCallableCSlots |
python | django__django | django/contrib/postgres/fields/jsonb.py | {
"start": 86,
"end": 406
} | class ____(BuiltinJSONField):
system_check_removed_details = {
"msg": (
"django.contrib.postgres.fields.JSONField is removed except for "
"support in historical migrations."
),
"hint": "Use django.db.models.JSONField instead.",
"id": "fields.E904",
}
| JSONField |
python | pennersr__django-allauth | allauth/socialaccount/providers/nextcloud/views.py | {
"start": 314,
"end": 1899
} | class ____(OAuth2Adapter):
provider_id = "nextcloud"
def _build_server_url(self, path):
settings = app_settings.PROVIDERS.get(self.provider_id, {})
server = settings.get("SERVER", "https://nextcloud.example.org")
# Prefer app based setting.
app = get_adapter().get_app(context.request, provider=self.provider_id)
server = app.settings.get("server", server)
ret = f"{server}{path}"
return ret
@property
def access_token_url(self):
return self._build_server_url("/apps/oauth2/api/v1/token")
@property
def authorize_url(self):
return self._build_server_url("/apps/oauth2/authorize")
@property
def profile_url(self):
return self._build_server_url("/ocs/v1.php/cloud/users/")
def complete_login(self, request, app, token: SocialToken, **kwargs):
extra_data = self.get_user_info(token, kwargs["response"]["user_id"])
return self.get_provider().sociallogin_from_response(request, extra_data)
def get_user_info(self, token: SocialToken, user_id):
headers = {"Authorization": "Bearer {0}".format(token.token)}
resp = (
get_adapter()
.get_requests_session()
.get(self.profile_url + user_id, params={"format": "json"}, headers=headers)
)
resp.raise_for_status()
data = resp.json()["ocs"]["data"]
return data
oauth2_login = OAuth2LoginView.adapter_view(NextCloudOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(NextCloudOAuth2Adapter)
| NextCloudOAuth2Adapter |
python | rushter__MLAlgorithms | mla/svm/kernerls.py | {
"start": 214,
"end": 429
} | class ____(object):
def __init__(self, degree=2):
self.degree = degree
def __call__(self, x, y):
return np.dot(x, y.T) ** self.degree
def __repr__(self):
return "Poly kernel"
| Poly |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_indexing.py | {
"start": 18868,
"end": 21683
} | class ____(TestCase):
def assign(self, a, ind, val):
a[ind] = val
return a
def test_prepending_ones(self):
a = np.zeros((3, 2))
a[...] = np.ones((1, 3, 2))
# Fancy with subspace with and without transpose
a[[0, 1, 2], :] = np.ones((1, 3, 2))
a[:, [0, 1]] = np.ones((1, 3, 2))
# Fancy without subspace (with broadcasting)
a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))
def test_prepend_not_one(self):
assign = self.assign
s_ = np.s_
a = np.zeros(5)
# Too large and not only ones.
try:
assign(a, s_[...], np.ones((2, 1)))
except Exception as e:
self.assertTrue(isinstance(e, (ValueError, RuntimeError)))
assert_raises(
(ValueError, RuntimeError), assign, a, s_[[1, 2, 3],], np.ones((2, 1))
)
assert_raises(
(ValueError, RuntimeError), assign, a, s_[[[1], [2]],], np.ones((2, 2, 1))
)
def test_simple_broadcasting_errors(self):
assign = self.assign
s_ = np.s_
a = np.zeros((5, 1))
try:
assign(a, s_[...], np.zeros((5, 2)))
except Exception as e:
self.assertTrue(isinstance(e, (ValueError, RuntimeError)))
try:
assign(a, s_[...], np.zeros((5, 0)))
except Exception as e:
self.assertTrue(isinstance(e, (ValueError, RuntimeError)))
assert_raises(
(ValueError, RuntimeError), assign, a, s_[:, [0]], np.zeros((5, 2))
)
assert_raises(
(ValueError, RuntimeError), assign, a, s_[:, [0]], np.zeros((5, 0))
)
assert_raises(
(ValueError, RuntimeError), assign, a, s_[[0], :], np.zeros((2, 1))
)
@parametrize(
"index", [(..., [1, 2], slice(None)), ([0, 1], ..., 0), (..., [1, 2], [1, 2])]
)
def test_broadcast_error_reports_correct_shape(self, index):
values = np.zeros((100, 100)) # will never broadcast below
arr = np.zeros((3, 4, 5, 6, 7))
with pytest.raises((ValueError, RuntimeError)) as e:
arr[index] = values
shape = arr[index].shape
r_inner_shape = "".join(f"{side}, ?" for side in shape[:-1]) + str(shape[-1])
assert re.search(rf"[\(\[]{r_inner_shape}[\]\)]$", str(e.value))
def test_index_is_larger(self):
# Simple case of fancy index broadcasting of the index.
a = np.zeros((5, 5))
a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]
assert_((a[:3, :3] == [2, 3, 4]).all())
def test_broadcast_subspace(self):
a = np.zeros((100, 100))
v = np.arange(100)[:, None]
b = np.arange(100)[::-1]
a[b] = v
assert_((a[::-1] == v).all())
| TestBroadcastedAssignments |
python | pypa__twine | twine/exceptions.py | {
"start": 5298,
"end": 5511
} | class ____(TwineException):
"""Repository configuration tries to use PyPI with an incorrect URL.
For example, https://pypi.org instead of https://upload.pypi.org/legacy.
"""
pass
| InvalidPyPIUploadURL |
python | pypa__setuptools | setuptools/_vendor/wheel/vendored/packaging/_tokenizer.py | {
"start": 239,
"end": 2130
} | class ____(Exception):
"""The provided source text could not be parsed correctly."""
def __init__(
self,
message: str,
*,
source: str,
span: Tuple[int, int],
) -> None:
self.span = span
self.message = message
self.source = source
super().__init__()
def __str__(self) -> str:
marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^"
return "\n ".join([self.message, self.source, marker])
DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
"LEFT_PARENTHESIS": r"\(",
"RIGHT_PARENTHESIS": r"\)",
"LEFT_BRACKET": r"\[",
"RIGHT_BRACKET": r"\]",
"SEMICOLON": r";",
"COMMA": r",",
"QUOTED_STRING": re.compile(
r"""
(
('[^']*')
|
("[^"]*")
)
""",
re.VERBOSE,
),
"OP": r"(===|==|~=|!=|<=|>=|<|>)",
"BOOLOP": r"\b(or|and)\b",
"IN": r"\bin\b",
"NOT": r"\bnot\b",
"VARIABLE": re.compile(
r"""
\b(
python_version
|python_full_version
|os[._]name
|sys[._]platform
|platform_(release|system)
|platform[._](version|machine|python_implementation)
|python_implementation
|implementation_(name|version)
|extra
)\b
""",
re.VERBOSE,
),
"SPECIFIER": re.compile(
Specifier._operator_regex_str + Specifier._version_regex_str,
re.VERBOSE | re.IGNORECASE,
),
"AT": r"\@",
"URL": r"[^ \t]+",
"IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
"VERSION_PREFIX_TRAIL": r"\.\*",
"VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
"WS": r"[ \t]+",
"END": r"$",
}
| ParserSyntaxError |
python | langchain-ai__langchain | libs/core/tests/unit_tests/language_models/chat_models/test_cache.py | {
"start": 663,
"end": 10541
} | class ____(BaseCache):
"""In-memory cache used for testing purposes."""
def __init__(self) -> None:
"""Initialize with empty cache."""
self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
"""Look up based on `prompt` and `llm_string`."""
return self._cache.get((prompt, llm_string), None)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on `prompt` and `llm_string`."""
self._cache[prompt, llm_string] = return_val
@override
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
self._cache = {}
def test_local_cache_sync() -> None:
"""Test that the local cache is being populated but not the global one."""
global_cache = InMemoryCache()
local_cache = InMemoryCache()
try:
set_llm_cache(global_cache)
chat_model = FakeListChatModel(
cache=local_cache, responses=["hello", "goodbye"]
)
assert chat_model.invoke("How are you?").content == "hello"
# If the cache works we should get the same response since
# the prompt is the same
assert chat_model.invoke("How are you?").content == "hello"
# The global cache should be empty
assert global_cache._cache == {}
# The local cache should be populated
assert len(local_cache._cache) == 1
llm_result = list(local_cache._cache.values())
chat_generation = llm_result[0][0]
assert isinstance(chat_generation, ChatGeneration)
assert chat_generation.message.content == "hello"
# Verify that another prompt will trigger the call to the model
assert chat_model.invoke("meow?").content == "goodbye"
# The global cache should be empty
assert global_cache._cache == {}
# The local cache should be populated
assert len(local_cache._cache) == 2
finally:
set_llm_cache(None)
async def test_local_cache_async() -> None:
# Use MockCache as the cache
global_cache = InMemoryCache()
local_cache = InMemoryCache()
try:
set_llm_cache(global_cache)
chat_model = FakeListChatModel(
cache=local_cache, responses=["hello", "goodbye"]
)
assert (await chat_model.ainvoke("How are you?")).content == "hello"
# If the cache works we should get the same response since
# the prompt is the same
assert (await chat_model.ainvoke("How are you?")).content == "hello"
# The global cache should be empty
assert global_cache._cache == {}
# The local cache should be populated
assert len(local_cache._cache) == 1
llm_result = list(local_cache._cache.values())
chat_generation = llm_result[0][0]
assert isinstance(chat_generation, ChatGeneration)
assert chat_generation.message.content == "hello"
# Verify that another prompt will trigger the call to the model
assert chat_model.invoke("meow?").content == "goodbye"
# The global cache should be empty
assert global_cache._cache == {}
# The local cache should be populated
assert len(local_cache._cache) == 2
finally:
set_llm_cache(None)
def test_global_cache_sync() -> None:
"""Test that the global cache gets populated when cache = True."""
global_cache = InMemoryCache()
try:
set_llm_cache(global_cache)
chat_model = FakeListChatModel(
cache=True, responses=["hello", "goodbye", "meow", "woof"]
)
assert (chat_model.invoke("How are you?")).content == "hello"
# If the cache works we should get the same response since
# the prompt is the same
assert (chat_model.invoke("How are you?")).content == "hello"
# The global cache should be populated
assert len(global_cache._cache) == 1
llm_result = list(global_cache._cache.values())
chat_generation = llm_result[0][0]
assert isinstance(chat_generation, ChatGeneration)
assert chat_generation.message.content == "hello"
# Verify that another prompt will trigger the call to the model
assert chat_model.invoke("nice").content == "goodbye"
# The local cache should be populated
assert len(global_cache._cache) == 2
finally:
set_llm_cache(None)
async def test_global_cache_async() -> None:
"""Test that the global cache gets populated when cache = True."""
global_cache = InMemoryCache()
try:
set_llm_cache(global_cache)
chat_model = FakeListChatModel(
cache=True, responses=["hello", "goodbye", "meow", "woof"]
)
assert (await chat_model.ainvoke("How are you?")).content == "hello"
# If the cache works we should get the same response since
# the prompt is the same
assert (await chat_model.ainvoke("How are you?")).content == "hello"
# The global cache should be populated
assert len(global_cache._cache) == 1
llm_result = list(global_cache._cache.values())
chat_generation = llm_result[0][0]
assert isinstance(chat_generation, ChatGeneration)
assert chat_generation.message.content == "hello"
# Verify that another prompt will trigger the call to the model
assert chat_model.invoke("nice").content == "goodbye"
# The local cache should be populated
assert len(global_cache._cache) == 2
finally:
set_llm_cache(None)
def test_no_cache_sync() -> None:
    """A model created with ``cache=False`` must never touch the global cache."""
    cache = InMemoryCache()
    try:
        set_llm_cache(cache)
        model = FakeListChatModel(
            cache=False, responses=["hello", "goodbye"]
        )  # Set cache=False
        assert model.invoke("How are you?").content == "hello"
        # Caching is disabled, so repeating the prompt advances to the next
        # canned response instead of replaying the first one.
        assert model.invoke("How are you?").content == "goodbye"
        # Nothing was written to the global cache.
        assert len(cache._cache) == 0
    finally:
        set_llm_cache(None)
async def test_no_cache_async() -> None:
    """Async variant: ``cache=False`` must bypass the global cache entirely."""
    cache = InMemoryCache()
    try:
        set_llm_cache(cache)
        model = FakeListChatModel(
            cache=False, responses=["hello", "goodbye"]
        )  # Set cache=False
        assert (await model.ainvoke("How are you?")).content == "hello"
        # With caching off, the repeated prompt is not replayed from the
        # cache — the model advances to its second canned response.
        assert (await model.ainvoke("How are you?")).content == "goodbye"
        # The global cache stayed empty throughout.
        assert len(cache._cache) == 0
    finally:
        set_llm_cache(None)
async def test_global_cache_abatch() -> None:
    """Batched async calls should read from and write to the global cache."""
    cache = InMemoryCache()
    try:
        set_llm_cache(cache)
        model = FakeListChatModel(
            cache=True, responses=["hello", "goodbye", "meow", "woof"]
        )
        first_batch = await model.abatch(["first prompt", "second prompt"])
        assert first_batch[0].content == "hello"
        assert first_batch[1].content == "goodbye"
        # A repeated prompt is answered from the cache with its original
        # response rather than consuming a new one.
        repeat_batch = await model.abatch(["first prompt", "first prompt"])
        assert repeat_batch[0].content == "hello"
        assert repeat_batch[1].content == "hello"
        # Swap in a fresh, empty cache: duplicate prompts within a single
        # batch should still only consume one response from the model.
        cache = InMemoryCache()
        set_llm_cache(cache)
        assert cache._cache == {}
        fresh_batch = await model.abatch(["prompt", "prompt"])
        assert fresh_batch[0].content == "meow"
        assert fresh_batch[1].content == "meow"
    finally:
        set_llm_cache(None)
def test_global_cache_batch() -> None:
    """Batched sync calls should interact with the global cache.

    Sync ``batch`` dispatches prompts to worker threads, so response
    ordering — and the duplicate-prompt race below — differs from the
    async variant.
    """
    cache = InMemoryCache()
    try:
        set_llm_cache(cache)
        model = FakeListChatModel(
            cache=True, responses=["hello", "goodbye", "meow", "woof"]
        )
        results = model.batch(["first prompt", "second prompt"])
        # Thread scheduling means either prompt may be answered first.
        assert {results[0].content, results[1].content} == {"hello", "goodbye"}
        # Identical prompts must resolve to the same (cached) answer,
        # whichever of the first two responses that happens to be.
        results = model.batch(["first prompt", "first prompt"])
        assert results[0].content == results[1].content
        assert {results[0].content, results[1].content}.issubset({"hello", "goodbye"})
        # RACE CONDITION -- note behavior is different from async.
        # Reset the cache and pin down the currently observed outcome of the
        # duplicate-prompt race; investigate further if this ever changes.
        cache = InMemoryCache()
        set_llm_cache(cache)
        assert cache._cache == {}
        results = model.batch(
            [
                "prompt",
                "prompt",
            ]
        )
        assert {results[0].content, results[1].content} == {"meow"}
    finally:
        set_llm_cache(None)
@pytest.mark.xfail(reason="Abstraction does not support caching for streaming yet.")
def test_global_cache_stream() -> None:
    """Test streaming."""
    cache = InMemoryCache()
    try:
        set_llm_cache(cache)
        canned = [
            AIMessage(content="hello world"),
            AIMessage(content="goodbye world"),
        ]
        model = GenericFakeChatModel(messages=iter(canned), cache=True)
        chunks = list(model.stream("some input"))
        assert len(chunks) == 3
        # Streamed output should land in the cache once streaming caching is
        # supported; today it does not, hence the xfail marker above.
        assert cache._cache != {}
    finally:
        set_llm_cache(None)
| InMemoryCache |
python | astropy__astropy | astropy/visualization/wcsaxes/transforms.py | {
"start": 645,
"end": 1415
} | class ____(Transform, metaclass=abc.ABCMeta):
"""
Abstract base class for non-affine curved transforms.
"""
input_dims = 2
output_dims = 2
is_separable = False
def transform_path(self, path):
"""
Transform a Matplotlib Path.
Parameters
----------
path : :class:`~matplotlib.path.Path`
The path to transform
Returns
-------
path : :class:`~matplotlib.path.Path`
The resulting path
"""
return Path(self.transform(path.vertices), path.codes)
transform_path_non_affine = transform_path
def transform(self, input):
raise NotImplementedError("")
def inverted(self):
raise NotImplementedError("")
| CurvedTransform |
python | coleifer__peewee | peewee.py | {
"start": 180014,
"end": 180260
} | class ____(Field):
def __init__(self, adapt=None, *args, **kwargs):
super(BareField, self).__init__(*args, **kwargs)
if adapt is not None:
self.adapt = adapt
def ddl_datatype(self, ctx):
return
| BareField |
python | wandb__wandb | wandb/automations/_filters/operators.py | {
"start": 4818,
"end": 5001
} | class ____(BaseOp):
val: Scalar = Field(alias="$lt")
@override
def __invert__(self) -> Gte:
"""Implements `~Lt(a) -> Gte(a)`."""
return Gte(val=self.val)
| Lt |
python | django__django | tests/model_formsets/models.py | {
"start": 2913,
"end": 2994
} | class ____(Place):
serves_pizza = models.BooleanField(default=False)
| Restaurant |
python | django-guardian__django-guardian | example_project/articles/models.py | {
"start": 1445,
"end": 1822
} | class ____(GroupObjectPermissionAbstract):
id = models.BigAutoField(editable=False, unique=True, primary_key=True)
class Meta(GroupObjectPermissionAbstract.Meta):
abstract = False
indexes = [
*GroupObjectPermissionAbstract.Meta.indexes,
models.Index(fields=["content_type", "object_pk", "group"]),
]
| BigGroupObjectPermission |
python | wandb__wandb | wandb/sdk/wandb_init.py | {
"start": 3279,
"end": 3880
} | class ____:
base_no_artifacts: dict[str, Any]
"""The run config passed to `init()` minus any artifact-valued keys."""
sweep_no_artifacts: dict[str, Any]
"""The config loaded as part of a sweep minus any artifact-valued keys."""
launch_no_artifacts: dict[str, Any]
"""The config loaded as part of Launch minus any artifact-valued keys."""
artifacts: dict[str, Any]
"""Artifact keys removed from config dictionaries.
Due to implementation details of how a Run is constructed,
artifacts must be inserted into its config after initialization.
"""
| _ConfigParts |
python | tiangolo__fastapi | docs_src/separate_openapi_schemas/tutorial001_py39.py | {
"start": 90,
"end": 483
} | class ____(BaseModel):
name: str
description: Optional[str] = None
app = FastAPI()
@app.post("/items/")
def create_item(item: Item):
return item
@app.get("/items/")
def read_items() -> list[Item]:
return [
Item(
name="Portal Gun",
description="Device to travel through the multi-rick-verse",
),
Item(name="Plumbus"),
]
| Item |
python | django__django | tests/queries/models.py | {
"start": 1932,
"end": 2199
} | class ____(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo, models.CASCADE)
class Meta:
ordering = ["name"]
def __str__(self):
return self.name
| Author |
python | pyinstaller__pyinstaller | tests/unit/test_modulegraph/test_pep420_nspkg.py | {
"start": 341,
"end": 2258
} | class ____ (unittest.TestCase):
def importModule(self, name):
test_dir1 = os.path.join(gSrcDir, 'path1')
test_dir2 = os.path.join(gSrcDir, 'path2')
if '.' in name:
script = textwrap.dedent("""\
import site
site.addsitedir(%r)
site.addsitedir(%r)
try:
import %s
except ImportError:
import %s
print (%s.__name__)
""") %(test_dir1, test_dir2, name, name.rsplit('.', 1)[0], name)
else:
script = textwrap.dedent("""\
import site
site.addsitedir(%r)
site.addsitedir(%r)
import %s
print (%s.__name__)
""") %(test_dir1, test_dir2, name, name)
p = subprocess.Popen([sys.executable, '-c', script],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'testpkg-relimport'),
encoding='utf8',
)
data = p.communicate()[0]
data = data.strip()
if data.endswith(' refs]'):
data = data.rsplit('\n', 1)[0].strip()
sts = p.wait()
if sts != 0:
print (data)
self.fail("import of %r failed"%(name,))
return data
def testToplevel(self):
m = self.importModule('package.sub1')
self.assertEqual(m, 'package.sub1')
m = self.importModule('package.sub2')
self.assertEqual(m, 'package.sub2')
def testSub(self):
m = self.importModule('package.subpackage.sub')
self.assertEqual(m, 'package.subpackage.sub')
m = self.importModule('package.nspkg.mod')
self.assertEqual(m, 'package.nspkg.mod')
| TestPythonBehaviour |
python | facebookresearch__faiss | tests/test_index_composite.py | {
"start": 635,
"end": 2010
} | class ____(unittest.TestCase):
def do_test(self, ntotal, removed):
d = 20
xt, xb, _ = get_dataset_2(d, ntotal, ntotal, 0)
index = faiss.index_factory(20, 'IDMap2,PQ5x4fs')
index.train(xt)
index.add_with_ids(xb, np.arange(ntotal).astype("int64"))
before = index.reconstruct_n(0, ntotal)
index.remove_ids(np.array(removed))
for i in range(ntotal):
if i in removed:
# should throw RuntimeError as this vector should be removed
try:
after = index.reconstruct(i)
assert False
except RuntimeError:
pass
else:
after = index.reconstruct(i)
np.testing.assert_array_equal(before[i], after)
assert index.ntotal == ntotal - len(removed)
def test_remove_last_vector(self):
self.do_test(993, [992])
# test remove element from every address 0 -> 31
# [0, 32 + 1, 2 * 32 + 2, ....]
# [0, 33 , 66 , 99, 132, .....]
def test_remove_every_address(self):
removed = (33 * np.arange(32)).tolist()
self.do_test(1100, removed)
# test remove range of vectors and leave ntotal divisible by 32
def test_leave_complete_block(self):
self.do_test(1000, np.arange(8).tolist())
| TestRemoveFastScan |
python | tiangolo__fastapi | docs_src/header_param_models/tutorial001_py310.py | {
"start": 86,
"end": 352
} | class ____(BaseModel):
host: str
save_data: bool
if_modified_since: str | None = None
traceparent: str | None = None
x_tag: list[str] = []
@app.get("/items/")
async def read_items(headers: CommonHeaders = Header()):
return headers
| CommonHeaders |
python | ansible__ansible | lib/ansible/modules/group.py | {
"start": 19608,
"end": 21422
} | class ____(Group):
"""
BusyBox group manipulation class for systems that have addgroup and delgroup.
It overrides the following methods:
- group_add()
- group_del()
- group_mod()
"""
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('addgroup', True)]
if self.gid is not None:
cmd.extend(['-g', str(self.gid)])
if self.system:
cmd.append('-S')
if self.gid_min is not None:
cmd.append('-K')
cmd.append('GID_MIN=' + str(self.gid_min))
if self.gid_max is not None:
cmd.append('-K')
cmd.append('GID_MAX=' + str(self.gid_max))
cmd.append(self.name)
return self.execute_command(cmd)
def group_del(self):
cmd = [self.module.get_bin_path('delgroup', True), self.name]
return self.execute_command(cmd)
def group_mod(self, **kwargs):
# Since there is no groupmod command, modify /etc/group directly
info = self.group_info()
if self.gid is not None and self.gid != info[2]:
with open('/etc/group', 'rb') as f:
b_groups = f.read()
b_name = to_bytes(self.name)
b_current_group_string = b'%s:x:%d:' % (b_name, info[2])
b_new_group_string = b'%s:x:%d:' % (b_name, self.gid)
if b':%d:' % self.gid in b_groups:
self.module.fail_json(msg="gid '{gid}' in use".format(gid=self.gid))
if self.module.check_mode:
return 0, '', ''
b_new_groups = b_groups.replace(b_current_group_string, b_new_group_string)
with open('/etc/group', 'wb') as f:
f.write(b_new_groups)
return 0, '', ''
return None, '', ''
| BusyBoxGroup |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/splice_h/package.py | {
"start": 217,
"end": 1197
} | class ____(Package):
"""Simple package with one optional dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/splice-h-1.0.tar.gz"
version("1.0.2")
version("1.0.1")
version("1.0.0")
variant("foo", default=False, description="nope")
variant("bar", default=False, description="nope")
variant("baz", default=False, description="nope")
variant("compat", default=True, description="nope")
depends_on("splice-z")
depends_on("splice-z+foo", when="+foo")
provides("something")
provides("somethingelse")
provides("virtual-abi")
can_splice("splice-h@1.0.0 +compat", when="@1.0.1 +compat")
can_splice("splice-h@1.0.0:1.0.1 +compat", when="@1.0.2 +compat")
def install(self, spec, prefix):
with open(prefix.join("splice-h"), "w", encoding="utf-8") as f:
f.write("splice-h: {0}".format(prefix))
f.write("splice-z: {0}".format(spec["splice-z"].prefix))
| SpliceH |
python | scikit-learn__scikit-learn | sklearn/externals/array_api_compat/common/_aliases.py | {
"start": 4233,
"end": 4309
} | class ____(NamedTuple):
values: Array
counts: Array
| UniqueCountsResult |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1406585,
"end": 1406862
} | class ____(sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData, RepositoryAuditEntryData):
"""Audit log entry for a repo.config.disable_contributors_only event."""
__schema__ = github_schema
__field_names__ = ()
| RepoConfigDisableContributorsOnlyAuditEntry |
python | kamyu104__LeetCode-Solutions | Python/longest-palindrome-by-concatenating-two-letter-words.py | {
"start": 50,
"end": 514
} | class ____(object):
def longestPalindrome(self, words):
"""
:type words: List[str]
:rtype: int
"""
cnt = collections.Counter(words)
result = remain = 0
for x, c in cnt.iteritems():
if x == x[::-1]:
result += c//2
remain |= c%2
elif x < x[::-1] and x[::-1] in cnt:
result += min(c, cnt[x[::-1]])
return result*4+remain*2
| Solution |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_in_set.py | {
"start": 531,
"end": 2891
} | class ____(ColumnPairMapMetricProvider):
condition_metric_name = "column_pair_values.in_set"
condition_value_keys = ("value_pairs_set",)
condition_domain_keys = (
"batch_id",
"table",
"column_A",
"column_B",
"ignore_row_if",
)
# noinspection PyPep8Naming
@column_pair_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column_A, column_B, **kwargs):
value_pairs_set = kwargs.get("value_pairs_set")
if value_pairs_set is None:
# vacuously true
return np.ones(len(column_A), dtype=np.bool_)
temp_df = pd.DataFrame({"A": column_A, "B": column_B})
value_pairs_set = {(x, y) for x, y in value_pairs_set}
results = []
for i, t in temp_df.iterrows():
if pd.isnull(t["A"]):
a = None
else:
a = t["A"]
if pd.isnull(t["B"]):
b = None
else:
b = t["B"]
results.append((a, b) in value_pairs_set)
return pd.Series(results)
@column_pair_condition_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(cls, column_A, column_B, **kwargs):
value_pairs_set = kwargs.get("value_pairs_set")
if value_pairs_set is None:
# vacuously true
return sa.case((column_A == column_B, True), else_=True)
value_pairs_set = [(x, y) for x, y in value_pairs_set]
# or_ implementation was required due to mssql issues with in_
conditions = [sa.or_(sa.and_(column_A == x, column_B == y)) for x, y in value_pairs_set]
row_wise_cond = sa.or_(*conditions)
return row_wise_cond
# noinspection PyPep8Naming
@column_pair_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column_A, column_B, **kwargs):
value_pairs_set = kwargs.get("value_pairs_set")
if value_pairs_set is None:
# vacuously true
return column_A == column_B
value_pairs_set = [(x, y) for x, y in value_pairs_set]
conditions = [
(column_A.eqNullSafe(F.lit(x)) & column_B.eqNullSafe(F.lit(y)))
for x, y in value_pairs_set
]
row_wise_cond = reduce(lambda a, b: a | b, conditions)
return row_wise_cond
| ColumnPairValuesInSet |
python | huggingface__transformers | src/transformers/models/video_llama_3/modular_video_llama_3.py | {
"start": 10420,
"end": 10470
} | class ____(SiglipMLP):
pass
| VideoLlama3VisionMLP |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/parent_foo_bar_fee/package.py | {
"start": 217,
"end": 760
} | class ____(Package):
"""This package has a variant "bar", which is True by default, and depends on another
package which has the same variant defaulting to False.
"""
homepage = "http://www.example.com"
url = "http://www.example.com/parent-foo-bar-fee-1.0.tar.gz"
version("1.0", md5="abcdefg01234567890123abcdefghfed")
variant("foo", default=True, description="")
variant("bar", default=True, description="")
variant("fee", default=False, description="")
depends_on("dependency-foo-bar")
| ParentFooBarFee |
python | sympy__sympy | sympy/physics/vector/point.py | {
"start": 167,
"end": 20569
} | class ____:
"""This object represents a point in a dynamic system.
It stores the: position, velocity, and acceleration of a point.
The position is a vector defined as the vector distance from a parent
point to this point.
Parameters
==========
name : string
The display name of the Point
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> N = ReferenceFrame('N')
>>> O = Point('O')
>>> P = Point('P')
>>> u1, u2, u3 = dynamicsymbols('u1 u2 u3')
>>> O.set_vel(N, u1 * N.x + u2 * N.y + u3 * N.z)
>>> O.acc(N)
u1'*N.x + u2'*N.y + u3'*N.z
``symbols()`` can be used to create multiple Points in a single step, for
example:
>>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> from sympy import symbols
>>> N = ReferenceFrame('N')
>>> u1, u2 = dynamicsymbols('u1 u2')
>>> A, B = symbols('A B', cls=Point)
>>> type(A)
<class 'sympy.physics.vector.point.Point'>
>>> A.set_vel(N, u1 * N.x + u2 * N.y)
>>> B.set_vel(N, u2 * N.x + u1 * N.y)
>>> A.acc(N) - B.acc(N)
(u1' - u2')*N.x + (-u1' + u2')*N.y
"""
def __init__(self, name):
"""Initialization of a Point object. """
self.name = name
self._pos_dict = {}
self._vel_dict = {}
self._acc_dict = {}
self._pdlist = [self._pos_dict, self._vel_dict, self._acc_dict]
def __str__(self):
return self.name
__repr__ = __str__
def _check_point(self, other):
if not isinstance(other, Point):
raise TypeError('A Point must be supplied')
def _pdict_list(self, other, num):
"""Returns a list of points that gives the shortest path with respect
to position, velocity, or acceleration from this point to the provided
point.
Parameters
==========
other : Point
A point that may be related to this point by position, velocity, or
acceleration.
num : integer
0 for searching the position tree, 1 for searching the velocity
tree, and 2 for searching the acceleration tree.
Returns
=======
list of Points
A sequence of points from self to other.
Notes
=====
It is not clear if num = 1 or num = 2 actually works because the keys
to ``_vel_dict`` and ``_acc_dict`` are :class:`ReferenceFrame` objects
which do not have the ``_pdlist`` attribute.
"""
outlist = [[self]]
oldlist = [[]]
while outlist != oldlist:
oldlist = outlist.copy()
for v in outlist:
templist = v[-1]._pdlist[num].keys()
for v2 in templist:
if not v.__contains__(v2):
littletemplist = v + [v2]
if not outlist.__contains__(littletemplist):
outlist.append(littletemplist)
for v in oldlist:
if v[-1] != other:
outlist.remove(v)
outlist.sort(key=len)
if len(outlist) != 0:
return outlist[0]
raise ValueError('No Connecting Path found between ' + other.name +
' and ' + self.name)
def a1pt_theory(self, otherpoint, outframe, interframe):
"""Sets the acceleration of this point with the 1-point theory.
The 1-point theory for point acceleration looks like this:
^N a^P = ^B a^P + ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B
x r^OP) + 2 ^N omega^B x ^B v^P
where O is a point fixed in B, P is a point moving in B, and B is
rotating in frame N.
Parameters
==========
otherpoint : Point
The first point of the 1-point theory (O)
outframe : ReferenceFrame
The frame we want this point's acceleration defined in (N)
fixedframe : ReferenceFrame
The intermediate frame in this calculation (B)
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> from sympy.physics.vector import dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> q = dynamicsymbols('q')
>>> q2 = dynamicsymbols('q2')
>>> qd = dynamicsymbols('q', 1)
>>> q2d = dynamicsymbols('q2', 1)
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
>>> B.set_ang_vel(N, 5 * B.y)
>>> O = Point('O')
>>> P = O.locatenew('P', q * B.x + q2 * B.y)
>>> P.set_vel(B, qd * B.x + q2d * B.y)
>>> O.set_vel(N, 0)
>>> P.a1pt_theory(O, N, B)
(-25*q + q'')*B.x + q2''*B.y - 10*q'*B.z
"""
_check_frame(outframe)
_check_frame(interframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v = self.vel(interframe)
a1 = otherpoint.acc(outframe)
a2 = self.acc(interframe)
omega = interframe.ang_vel_in(outframe)
alpha = interframe.ang_acc_in(outframe)
self.set_acc(outframe, a2 + 2 * (omega.cross(v)) + a1 +
(alpha.cross(dist)) + (omega.cross(omega.cross(dist))))
return self.acc(outframe)
def a2pt_theory(self, otherpoint, outframe, fixedframe):
"""Sets the acceleration of this point with the 2-point theory.
The 2-point theory for point acceleration looks like this:
^N a^P = ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B x r^OP)
where O and P are both points fixed in frame B, which is rotating in
frame N.
Parameters
==========
otherpoint : Point
The first point of the 2-point theory (O)
outframe : ReferenceFrame
The frame we want this point's acceleration defined in (N)
fixedframe : ReferenceFrame
The frame in which both points are fixed (B)
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> q = dynamicsymbols('q')
>>> qd = dynamicsymbols('q', 1)
>>> N = ReferenceFrame('N')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> O = Point('O')
>>> P = O.locatenew('P', 10 * B.x)
>>> O.set_vel(N, 5 * N.x)
>>> P.a2pt_theory(O, N, B)
- 10*q'**2*B.x + 10*q''*B.y
"""
_check_frame(outframe)
_check_frame(fixedframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
a = otherpoint.acc(outframe)
omega = fixedframe.ang_vel_in(outframe)
alpha = fixedframe.ang_acc_in(outframe)
self.set_acc(outframe, a + (alpha.cross(dist)) +
(omega.cross(omega.cross(dist))))
return self.acc(outframe)
def acc(self, frame):
"""The acceleration Vector of this Point in a ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which the returned acceleration vector will be defined
in.
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_acc(N, 10 * N.x)
>>> p1.acc(N)
10*N.x
"""
_check_frame(frame)
if not (frame in self._acc_dict):
if self.vel(frame) != 0:
return (self._vel_dict[frame]).dt(frame)
else:
return Vector(0)
return self._acc_dict[frame]
def locatenew(self, name, value):
"""Creates a new point with a position defined from this point.
Parameters
==========
name : str
The name for the new point
value : Vector
The position of the new point relative to this point
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Point
>>> N = ReferenceFrame('N')
>>> P1 = Point('P1')
>>> P2 = P1.locatenew('P2', 10 * N.x)
"""
if not isinstance(name, str):
raise TypeError('Must supply a valid name')
if value == 0:
value = Vector(0)
value = _check_vector(value)
p = Point(name)
p.set_pos(self, value)
self.set_pos(p, -value)
return p
def pos_from(self, otherpoint):
"""Returns a Vector distance between this Point and the other Point.
Parameters
==========
otherpoint : Point
The otherpoint we are locating this one relative to
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p2 = Point('p2')
>>> p1.set_pos(p2, 10 * N.x)
>>> p1.pos_from(p2)
10*N.x
"""
outvec = Vector(0)
plist = self._pdict_list(otherpoint, 0)
for i in range(len(plist) - 1):
outvec += plist[i]._pos_dict[plist[i + 1]]
return outvec
def set_acc(self, frame, value):
"""Used to set the acceleration of this Point in a ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which this point's acceleration is defined
value : Vector
The vector value of this point's acceleration in the frame
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_acc(N, 10 * N.x)
>>> p1.acc(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(frame)
self._acc_dict.update({frame: value})
def set_pos(self, otherpoint, value):
"""Used to set the position of this point w.r.t. another point.
Parameters
==========
otherpoint : Point
The other point which this point's location is defined relative to
value : Vector
The vector which defines the location of this point
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p2 = Point('p2')
>>> p1.set_pos(p2, 10 * N.x)
>>> p1.pos_from(p2)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
self._check_point(otherpoint)
self._pos_dict.update({otherpoint: value})
otherpoint._pos_dict.update({self: -value})
def set_vel(self, frame, value):
"""Sets the velocity Vector of this Point in a ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which this point's velocity is defined
value : Vector
The vector value of this point's velocity in the frame
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_vel(N, 10 * N.x)
>>> p1.vel(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(frame)
self._vel_dict.update({frame: value})
def v1pt_theory(self, otherpoint, outframe, interframe):
"""Sets the velocity of this point with the 1-point theory.
The 1-point theory for point velocity looks like this:
^N v^P = ^B v^P + ^N v^O + ^N omega^B x r^OP
where O is a point fixed in B, P is a point moving in B, and B is
rotating in frame N.
Parameters
==========
otherpoint : Point
The first point of the 1-point theory (O)
outframe : ReferenceFrame
The frame we want this point's velocity defined in (N)
interframe : ReferenceFrame
The intermediate frame in this calculation (B)
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> from sympy.physics.vector import dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> q = dynamicsymbols('q')
>>> q2 = dynamicsymbols('q2')
>>> qd = dynamicsymbols('q', 1)
>>> q2d = dynamicsymbols('q2', 1)
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
>>> B.set_ang_vel(N, 5 * B.y)
>>> O = Point('O')
>>> P = O.locatenew('P', q * B.x + q2 * B.y)
>>> P.set_vel(B, qd * B.x + q2d * B.y)
>>> O.set_vel(N, 0)
>>> P.v1pt_theory(O, N, B)
q'*B.x + q2'*B.y - 5*q*B.z
"""
_check_frame(outframe)
_check_frame(interframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v1 = self.vel(interframe)
v2 = otherpoint.vel(outframe)
omega = interframe.ang_vel_in(outframe)
self.set_vel(outframe, v1 + v2 + (omega.cross(dist)))
return self.vel(outframe)
def v2pt_theory(self, otherpoint, outframe, fixedframe):
"""Sets the velocity of this point with the 2-point theory.
The 2-point theory for point velocity looks like this:
^N v^P = ^N v^O + ^N omega^B x r^OP
where O and P are both points fixed in frame B, which is rotating in
frame N.
Parameters
==========
otherpoint : Point
The first point of the 2-point theory (O)
outframe : ReferenceFrame
The frame we want this point's velocity defined in (N)
fixedframe : ReferenceFrame
The frame in which both points are fixed (B)
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> q = dynamicsymbols('q')
>>> qd = dynamicsymbols('q', 1)
>>> N = ReferenceFrame('N')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> O = Point('O')
>>> P = O.locatenew('P', 10 * B.x)
>>> O.set_vel(N, 5 * N.x)
>>> P.v2pt_theory(O, N, B)
5*N.x + 10*q'*B.y
"""
_check_frame(outframe)
_check_frame(fixedframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v = otherpoint.vel(outframe)
omega = fixedframe.ang_vel_in(outframe)
self.set_vel(outframe, v + (omega.cross(dist)))
return self.vel(outframe)
def vel(self, frame):
"""The velocity Vector of this Point in the ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which the returned velocity vector will be defined in
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_vel(N, 10 * N.x)
>>> p1.vel(N)
10*N.x
Velocities will be automatically calculated if possible, otherwise a
``ValueError`` will be returned. If it is possible to calculate
multiple different velocities from the relative points, the points
defined most directly relative to this point will be used. In the case
of inconsistent relative positions of points, incorrect velocities may
be returned. It is up to the user to define prior relative positions
and velocities of points in a self-consistent way.
>>> p = Point('p')
>>> q = dynamicsymbols('q')
>>> p.set_vel(N, 10 * N.x)
>>> p2 = Point('p2')
>>> p2.set_pos(p, q*N.x)
>>> p2.vel(N)
(Derivative(q(t), t) + 10)*N.x
"""
_check_frame(frame)
if not (frame in self._vel_dict):
valid_neighbor_found = False
is_cyclic = False
visited = []
queue = [self]
candidate_neighbor = []
while queue: # BFS to find nearest point
node = queue.pop(0)
if node not in visited:
visited.append(node)
for neighbor, neighbor_pos in node._pos_dict.items():
if neighbor in visited:
continue
try:
# Checks if pos vector is valid
neighbor_pos.express(frame)
except ValueError:
continue
if neighbor in queue:
is_cyclic = True
try:
# Checks if point has its vel defined in req frame
neighbor_velocity = neighbor._vel_dict[frame]
except KeyError:
queue.append(neighbor)
continue
candidate_neighbor.append(neighbor)
if not valid_neighbor_found:
self.set_vel(frame, self.pos_from(neighbor).dt(frame) + neighbor_velocity)
valid_neighbor_found = True
if is_cyclic:
warn(filldedent("""
Kinematic loops are defined among the positions of points. This
is likely not desired and may cause errors in your calculations.
"""))
if len(candidate_neighbor) > 1:
warn(filldedent(f"""
Velocity of {self.name} automatically calculated based on point
{candidate_neighbor[0].name} but it is also possible from
points(s): {str(candidate_neighbor[1:])}. Velocities from these
points are not necessarily the same. This may cause errors in
your calculations."""))
if valid_neighbor_found:
return self._vel_dict[frame]
else:
raise ValueError(filldedent(f"""
Velocity of point {self.name} has not been defined in
ReferenceFrame {frame.name}."""))
return self._vel_dict[frame]
def partial_velocity(self, frame, *gen_speeds):
"""Returns the partial velocities of the linear velocity vector of this
point in the given frame with respect to one or more provided
generalized speeds.
Parameters
==========
frame : ReferenceFrame
The frame with which the velocity is defined in.
gen_speeds : functions of time
The generalized speeds.
Returns
=======
partial_velocities : tuple of Vector
The partial velocity vectors corresponding to the provided
generalized speeds.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Point
>>> from sympy.physics.vector import dynamicsymbols
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> p = Point('p')
>>> u1, u2 = dynamicsymbols('u1, u2')
>>> p.set_vel(N, u1 * N.x + u2 * A.y)
>>> p.partial_velocity(N, u1)
N.x
>>> p.partial_velocity(N, u1, u2)
(N.x, A.y)
"""
from sympy.physics.vector.functions import partial_velocity
vel = self.vel(frame)
partials = partial_velocity([vel], gen_speeds, frame)[0]
if len(partials) == 1:
return partials[0]
else:
return tuple(partials)
| Point |
python | django-extensions__django-extensions | django_extensions/management/commands/verify_named_urls.py | {
"start": 563,
"end": 5175
} | class ____(BaseCommand):
args = ""
help = "Verify named URLs in templates"
ignores = set(
[
"*.swp",
"*~",
]
)
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--ignore-app",
action="append",
dest="ignore_apps",
default=["admin"],
help="Ignore these apps",
)
parser.add_argument(
"--urlconf",
"-c",
dest="urlconf",
default="ROOT_URLCONF",
help="Set the settings URL conf variable to use",
)
def ignore_filename(self, filename):
filename = os.path.basename(filename)
for ignore_pattern in self.ignores:
if fnmatch.fnmatch(filename, ignore_pattern):
return True
return False
@signalcommand
def handle(self, *args, **options):
style = no_style() if options["no_color"] else color_style()
self.names = defaultdict(list)
self.views = {}
self.collect_templates(options)
self.collect_views(options)
for name in sorted(self.names):
n = len(self.names[name])
color = style.MODULE
try:
v = self.views[name]
print(
style.INFO(
f"Name: {name} ({n} occurrences, handled in {v[0]}, {v[1]})"
)
)
except KeyError:
print(style.URL_NAME(f"Name: {name} ({n} occurrences, UNKNOWN VIEW)"))
color = style.URL_NAME
for item in self.names[name]:
print(color(f"* {item[0]}:{item[1]}"))
def collect_templates(self, options):
template_dirs = set(get_template_setting("DIRS", []))
for app in apps.get_app_configs():
if app.name.split(".")[-1] in options["ignore_apps"]:
continue
app_template_dir = os.path.join(app.path, "templates")
if os.path.isdir(app_template_dir):
template_dirs.add(app_template_dir)
settings.TEMPLATES[0]["DIRS"] = list(template_dirs)
self.template_parse_errors = 0
self.names_re = re.compile(r"\{%\s*url\s*['\"]([\w\-]+)['\"]")
for template_dir in template_dirs:
for root, dirs, filenames in os.walk(template_dir):
for filename in filenames:
if self.ignore_filename(filename):
continue
filepath = os.path.join(root, filename)
self.process_template(filepath)
if self.template_parse_errors > 0:
self.stdout.write(
f"{self.template_parse_errors} template parse errors found"
)
def collect_views(self, options):
urlconf = options["urlconf"]
if not hasattr(settings, urlconf):
raise CommandError(
"Settings module {} does not have the attribute {}.".format(
settings, urlconf
)
)
try:
urlconf = __import__(getattr(settings, urlconf), {}, {}, [""])
except Exception as e:
raise CommandError(
"Error occurred while trying to load %s: %s"
% (getattr(settings, urlconf), str(e))
)
view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
for func, regex, view in view_functions:
if view is not None:
if isinstance(func, functools.partial):
func = func.func
if hasattr(func, "view_class"):
func = func.view_class
if hasattr(func, "__name__"):
func_name = func.__name__
elif hasattr(func, "__class__"):
func_name = "%s()" % func.__class__.__name__
else:
func_name = re.sub(r" at 0x[0-9a-f]+", "", repr(func))
self.views[view] = (func_name, regex)
def process_template(self, filepath):
try:
get_template(filepath)
except Exception:
self.template_parse_errors += 1
self.stdout.write(f"Error parsing template {filepath}")
with open(filepath, "r") as file:
lineno = 1
for line in file:
for match in self.names_re.findall(line):
self.names[match].append((filepath, lineno))
lineno += 1
| Command |
python | pandas-dev__pandas | asv_bench/benchmarks/rolling.py | {
"start": 2964,
"end": 4299
} | class ____:
params = (
["DataFrame", "Series"],
["int", "float"],
[("rolling", {"window": 10}), ("expanding", {})],
[np.sum, lambda x: np.sum(x) + 5],
[True, False],
[None, 100],
)
param_names = [
"constructor",
"dtype",
"window_kwargs",
"function",
"parallel",
"cols",
]
def setup(self, constructor, dtype, window_kwargs, function, parallel, cols):
N = 10**3
window, kwargs = window_kwargs
shape = (N, cols) if cols is not None and constructor != "Series" else N
arr = (100 * np.random.random(shape)).astype(dtype)
data = getattr(pd, constructor)(arr)
# Warm the cache
with warnings.catch_warnings(record=True):
# Catch parallel=True not being applicable e.g. 1D data
self.window = getattr(data, window)(**kwargs)
self.window.apply(
function, raw=True, engine="numba", engine_kwargs={"parallel": parallel}
)
def test_method(self, constructor, dtype, window_kwargs, function, parallel, cols):
with warnings.catch_warnings(record=True):
self.window.apply(
function, raw=True, engine="numba", engine_kwargs={"parallel": parallel}
)
| NumbaEngineApply |
python | getsentry__sentry | tests/sentry/dashboards/endpoints/test_organization_dashboard_details.py | {
"start": 39409,
"end": 140792
} | class ____(OrganizationDashboardDetailsTestCase):
def setUp(self) -> None:
super().setUp()
self.project = self.create_project()
self.create_user_member_role()
self.widget_3 = DashboardWidget.objects.create(
dashboard=self.dashboard,
title="Widget 3",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
)
self.widget_4 = DashboardWidget.objects.create(
dashboard=self.dashboard,
title="Widget 4",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
)
self.widget_ids = [self.widget_1.id, self.widget_2.id, self.widget_3.id, self.widget_4.id]
def get_widget_queries(self, widget):
return DashboardWidgetQuery.objects.filter(widget=widget).order_by("order")
def assert_no_changes(self):
self.assert_dashboard_and_widgets(self.widget_ids)
def assert_dashboard_and_widgets(self, widget_ids):
assert Dashboard.objects.filter(
organization=self.organization, id=self.dashboard.id
).exists()
widgets = self.get_widgets(self.dashboard)
assert len(widgets) == len(list(widget_ids))
for widget, id in zip(widgets, widget_ids):
assert widget.id == id
def test_dashboard_does_not_exist(self) -> None:
response = self.do_request("put", self.url(1234567890))
assert response.status_code == 404
assert response.data == {"detail": "The requested resource does not exist"}
def test_feature_required(self) -> None:
with self.feature({"organizations:dashboards-edit": False}):
response = self.do_request(
"put", self.url(self.dashboard.id), data={"title": "Dashboard Hello"}
)
assert response.status_code == 404, response.data
def test_change_dashboard_title(self) -> None:
response = self.do_request(
"put", self.url(self.dashboard.id), data={"title": "Dashboard Hello"}
)
assert response.status_code == 200, response.data
assert Dashboard.objects.filter(
title="Dashboard Hello", organization=self.organization, id=self.dashboard.id
).exists()
def test_rename_dashboard_title_taken(self) -> None:
Dashboard.objects.create(
title="Dashboard 2", created_by_id=self.user.id, organization=self.organization
)
response = self.do_request(
"put", self.url(self.dashboard.id), data={"title": "Dashboard 2"}
)
assert response.status_code == 409, response.data
assert list(response.data) == ["Dashboard with that title already exists."]
def test_allow_put_when_no_project_access(self) -> None:
# disable Open Membership
self.organization.flags.allow_joinleave = False
self.organization.save()
# assign a project to a dashboard
self.dashboard.projects.set([self.project])
# user has no access to the above project
user_no_team = self.create_user(is_superuser=False)
self.create_member(
user=user_no_team, organization=self.organization, role="member", teams=[]
)
self.login_as(user_no_team)
response = self.do_request(
"put", self.url(self.dashboard.id), data={"title": "Dashboard Hello"}
)
assert response.status_code == 200, response.data
def test_disallow_put_when_no_project_access_and_no_edit_perms(self) -> None:
# set dashboard edit perms to be editable only by creator
self.dashboard.permissions = DashboardPermissions.objects.create(
is_editable_by_everyone=False, dashboard=self.dashboard
)
# disable Open Membership
self.organization.flags.allow_joinleave = False
self.organization.save()
# assign a project to a dashboard
self.dashboard.projects.set([self.project])
# user has no access to the above project
user_no_team = self.create_user(is_superuser=False)
self.create_member(
user=user_no_team, organization=self.organization, role="member", teams=[]
)
self.login_as(user_no_team)
response = self.do_request(
"put", self.url(self.dashboard.id), data={"title": "Dashboard Hello"}
)
assert response.status_code == 403, response.data
assert response.data == {"detail": "You do not have permission to perform this action."}
def test_disallow_put_when_has_project_access_and_no_edit_perms(self) -> None:
# set dashboard edit perms to be editable only by creator
self.dashboard.permissions = DashboardPermissions.objects.create(
is_editable_by_everyone=False, dashboard=self.dashboard
)
# disable Open Membership
self.organization.flags.allow_joinleave = False
self.organization.save()
# assign a project to a dashboard
self.dashboard.projects.set([self.project])
# user has access to the above project
user = self.create_user(id=3456)
team = self.create_team(organization=self.organization)
self.create_member(user=user, organization=self.organization, teams=[team])
self.project.add_team(team)
self.login_as(user)
response = self.do_request(
"put", self.url(self.dashboard.id), data={"title": "Dashboard Hello"}
)
assert response.status_code == 403, response.data
assert response.data == {"detail": "You do not have permission to perform this action."}
def test_allow_put_as_superuser_but_no_edit_perms(self) -> None:
self.create_user(id=12333)
dashboard = Dashboard.objects.create(
id=67,
title="Dashboard With Dataset Source",
created_by_id=12333,
organization=self.organization,
)
DashboardPermissions.objects.create(is_editable_by_everyone=False, dashboard=dashboard)
# Create and login as superuser
superuser = self.create_user(is_superuser=True)
self.login_as(user=superuser, superuser=True)
response = self.do_request("put", self.url(dashboard.id), data={"title": "New Dashboard 9"})
assert response.status_code == 200, response.content
assert response.data["title"] == "New Dashboard 9"
def test_add_widget(self) -> None:
data: dict[str, Any] = {
"title": "First dashboard",
"widgets": [
{"id": str(self.widget_1.id)},
{"id": str(self.widget_2.id)},
{"id": str(self.widget_3.id)},
{"id": str(self.widget_4.id)},
{
"title": "Errors per project",
"displayType": "table",
"interval": "5m",
"queries": [
{
"name": "Errors",
"fields": ["count()", "project"],
"columns": ["project"],
"aggregates": ["count()"],
"conditions": "event.type:error",
}
],
"datasetSource": "user",
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200, response.data
widgets = self.get_widgets(self.dashboard.id)
assert len(widgets) == 5
last = list(widgets).pop()
self.assert_serialized_widget(data["widgets"][4], last)
queries = last.dashboardwidgetquery_set.all()
assert len(queries) == 1
self.assert_serialized_widget_query(data["widgets"][4]["queries"][0], queries[0])
def test_add_widget_with_field_aliases(self) -> None:
data: dict[str, Any] = {
"title": "First dashboard",
"widgets": [
{
"title": "Errors per project",
"displayType": "table",
"interval": "5m",
"queries": [
{
"name": "Errors",
"fields": [],
"aggregates": ["count()"],
"columns": ["project"],
"fieldAliases": ["Errors quantity"],
"conditions": "event.type:error",
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200, response.data
widgets = self.get_widgets(self.dashboard.id)
assert len(widgets) == 1
for expected_widget, actual_widget in zip(data["widgets"], widgets):
self.assert_serialized_widget(expected_widget, actual_widget)
queries = actual_widget.dashboardwidgetquery_set.all()
for expected_query, actual_query in zip(expected_widget["queries"], queries):
self.assert_serialized_widget_query(expected_query, actual_query)
def test_add_widget_with_selected_aggregate(self) -> None:
data: dict[str, Any] = {
"title": "First dashboard",
"widgets": [
{
"title": "EPM Big Number",
"displayType": "big_number",
"queries": [
{
"name": "",
"fields": ["epm()"],
"columns": [],
"aggregates": ["epm()", "count()"],
"conditions": "",
"orderby": "",
"selectedAggregate": 1,
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200, response.data
widgets = self.get_widgets(self.dashboard.id)
assert len(widgets) == 1
self.assert_serialized_widget(data["widgets"][0], widgets[0])
queries = widgets[0].dashboardwidgetquery_set.all()
assert len(queries) == 1
self.assert_serialized_widget_query(data["widgets"][0]["queries"][0], queries[0])
def test_add_big_number_widget_with_equation(self) -> None:
data: dict[str, Any] = {
"title": "First dashboard",
"widgets": [
{
"title": "EPM Big Number",
"displayType": "big_number",
"queries": [
{
"name": "",
"fields": ["equation|count()"],
"columns": [],
"aggregates": ["count()", "equation|count()*2"],
"conditions": "",
"orderby": "",
"selectedAggregate": 1,
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200, response.data
widgets = self.get_widgets(self.dashboard.id)
assert len(widgets) == 1
self.assert_serialized_widget(data["widgets"][0], widgets[0])
queries = widgets[0].dashboardwidgetquery_set.all()
assert len(queries) == 1
self.assert_serialized_widget_query(data["widgets"][0]["queries"][0], queries[0])
def test_add_widget_with_aggregates_and_columns(self) -> None:
data: dict[str, Any] = {
"title": "First dashboard",
"widgets": [
{
"id": str(self.widget_1.id),
"columns": [],
"aggregates": [],
},
{
"id": str(self.widget_2.id),
"columns": [],
"aggregates": [],
},
{
"id": str(self.widget_3.id),
"columns": [],
"aggregates": [],
},
{
"id": str(self.widget_4.id),
"columns": [],
"aggregates": [],
},
{
"title": "Errors per project",
"displayType": "table",
"interval": "5m",
"queries": [
{
"name": "Errors",
"fields": [],
"aggregates": ["count()"],
"columns": ["project"],
"conditions": "event.type:error",
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200, response.data
widgets = self.get_widgets(self.dashboard.id)
assert len(widgets) == 5
last = list(widgets).pop()
self.assert_serialized_widget(data["widgets"][4], last)
queries = last.dashboardwidgetquery_set.all()
assert len(queries) == 1
self.assert_serialized_widget_query(data["widgets"][4]["queries"][0], queries[0])
def test_add_widget_missing_title(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{"id": str(self.widget_1.id)},
{
"displayType": "line",
"interval": "5m",
"queries": [
{
"name": "",
"fields": ["count()"],
"columns": [],
"aggregates": ["count()"],
"conditions": "",
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 400, response.data
assert b"Title is required during creation" in response.content
def test_add_widget_with_limit(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{
"title": "Custom Widget",
"displayType": "line",
"interval": "5m",
"limit": None,
"queries": [
{
"name": "",
"fields": ["count()"],
"columns": [],
"aggregates": ["count()"],
"conditions": "",
}
],
},
{
"title": "Duration Distribution",
"displayType": "bar",
"interval": "5m",
"limit": 10,
"queries": [
{
"name": "",
"fields": [
"p50(transaction.duration)",
"p75(transaction.duration)",
"p95(transaction.duration)",
],
"columns": [],
"aggregates": [
"p50(transaction.duration)",
"p75(transaction.duration)",
"p95(transaction.duration)",
],
"conditions": "event.type:transaction",
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200, response.data
widgets = self.get_widgets(self.dashboard.id)
assert len(widgets) == 2
self.assert_serialized_widget(data["widgets"][0], widgets[0])
self.assert_serialized_widget(data["widgets"][1], widgets[1])
def test_add_widget_with_invalid_limit_above_maximum(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{
"title": "Duration Distribution",
"displayType": "bar",
"interval": "5m",
"limit": 11,
"queries": [
{
"name": "",
"fields": [
"p50(transaction.duration)",
"p75(transaction.duration)",
"p95(transaction.duration)",
],
"columns": [],
"aggregates": [
"p50(transaction.duration)",
"p75(transaction.duration)",
"p95(transaction.duration)",
],
"conditions": "event.type:transaction",
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 400, response.data
assert b"Ensure this value is less than or equal to 10" in response.content
def test_add_widget_with_invalid_limit_below_minimum(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{
"title": "Duration Distribution",
"displayType": "bar",
"interval": "5m",
"limit": 0,
"queries": [
{
"name": "",
"fields": [
"p50(transaction.duration)",
"p75(transaction.duration)",
"p95(transaction.duration)",
],
"columns": [],
"aggregates": [
"p50(transaction.duration)",
"p75(transaction.duration)",
"p95(transaction.duration)",
],
"conditions": "event.type:transaction",
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 400, response.data
assert b"Ensure this value is greater than or equal to 1" in response.content
def test_add_widget_display_type(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{"id": str(self.widget_1.id)},
{
"title": "Errors",
"interval": "5m",
"queries": [
{
"name": "",
"fields": ["count()"],
"columns": [],
"aggregates": ["count()"],
"conditions": "",
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 400, response.data
assert b"displayType is required during creation" in response.content
def test_add_widget_invalid_query(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{"id": str(self.widget_1.id)},
{
"title": "Invalid fields",
"displayType": "line",
"interval": "5m",
"queries": [
{
"name": "Errors",
"fields": ["p95(transaction.duration)"],
"columns": [],
"aggregates": ["p95(transaction.duration)"],
"conditions": "foo: bar:",
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 400, response.data
assert b"Invalid conditions" in response.content
def test_add_widget_unknown_aggregation(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{"id": str(self.widget_1.id)},
{
"title": "Invalid fields",
"displayType": "line",
"interval": "5m",
"queries": [
{
"name": "Errors",
"fields": ["wrong()"],
"columns": [],
"aggregates": ["wrong()"],
"conditions": "",
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 400, response.data
assert b"Invalid fields" in response.content
def test_add_widget_invalid_aggregate_parameter(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{"id": str(self.widget_1.id)},
{
"title": "Invalid fields",
"displayType": "line",
"queries": [
{
"name": "Errors",
"fields": ["p95(user)"],
"columns": [],
"aggregates": ["p95(user)"],
"conditions": "",
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 400, response.data
assert b"Invalid fields" in response.content
def test_add_widget_invalid_interval(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{"id": str(self.widget_1.id)},
{
"title": "Invalid interval",
"displayType": "line",
"interval": "1q",
"queries": [
{
"name": "Durations",
"fields": ["p95(transaction.duration)"],
"columns": [],
"aggregates": ["p95(transaction.duration)"],
"conditions": "",
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 400, response.data
assert b"Invalid interval" in response.content
def test_add_widget_e2e_test_with_translation(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{
"title": "transaction widget to translate",
"displayType": "line",
"interval": "5m",
"widgetType": "transaction-like",
"limit": 3,
"queries": [
{
"fields": [
"title",
"total.count",
"count()",
"count_web_vitals(measurements.lcp,good)",
],
"columns": ["title", "total.count"],
"aggregates": ["count()", "count_web_vitals(measurements.lcp,good)"],
"conditions": "title:foo",
"orderby": "-count_web_vitals(measurements.lcp,good)",
"order": 0,
}
],
}
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200, response.data
assert DashboardWidget.objects.filter(title="transaction widget to translate").exists()
widget = DashboardWidget.objects.filter(title="transaction widget to translate").first()
assert widget is not None
assert widget.widget_type == DashboardWidgetTypes.TRANSACTION_LIKE
assert widget.dataset_source == DatasetSourcesTypes.USER.value
assert widget.display_type == DashboardWidgetDisplayTypes.LINE_CHART
assert widget.interval == "5m"
widget_queries = DashboardWidgetQuery.objects.filter(widget=widget)
assert widget_queries.count() == 1
widget_query = widget_queries.first()
assert widget_query is not None
assert widget_query.fields == [
"title",
"total.count",
"count()",
"count_web_vitals(measurements.lcp,good)",
]
assert widget_query.aggregates == ["count()", "count_web_vitals(measurements.lcp,good)"]
assert widget_query.columns == ["title", "total.count"]
assert widget_query.conditions == "title:foo"
assert widget_query.orderby == "-count_web_vitals(measurements.lcp,good)"
translated_widget = translate_dashboard_widget(widget)
assert translated_widget.widget_type == DashboardWidgetTypes.SPANS
assert (
translated_widget.dataset_source
== DashboardWidgetDatasetSourcesTypes.SPAN_MIGRATION_VERSION_5.value
)
assert translated_widget.display_type == DashboardWidgetDisplayTypes.LINE_CHART
assert translated_widget.interval == "5m"
translated_widget_queries = DashboardWidgetQuery.objects.filter(widget=translated_widget)
assert translated_widget_queries.count() == 1
translated_widget_query = translated_widget_queries.first()
assert translated_widget_query is not None
assert translated_widget_query.fields == ["transaction", "count(span.duration)"]
assert translated_widget_query.aggregates == ["count(span.duration)"]
assert translated_widget_query.columns == ["transaction"]
assert translated_widget_query.conditions == "(transaction:foo) AND is_transaction:1"
assert translated_widget_query.orderby == ""
def test_update_widget_title(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{"id": str(self.widget_1.id), "title": "New title"},
{"id": str(self.widget_2.id)},
{"id": str(self.widget_3.id)},
{"id": str(self.widget_4.id)},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200
widgets = self.get_widgets(self.dashboard.id)
self.assert_serialized_widget(data["widgets"][0], widgets[0])
def test_update_widget_add_query(self) -> None:
data: dict[str, Any] = {
"title": "First dashboard",
"widgets": [
{
"id": str(self.widget_1.id),
"title": "New title",
"queries": [
{
"id": str(self.widget_1_data_1.id),
"columns": [],
"aggregates": [],
},
{
"name": "transactions",
"fields": ["count()"],
"columns": [],
"aggregates": ["count()"],
"conditions": "event.type:transaction",
},
],
"datasetSource": "user",
},
{"id": str(self.widget_2.id)},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200, response.data
# two widgets should be removed
widgets = self.get_widgets(self.dashboard.id)
assert len(widgets) == 2
self.assert_serialized_widget(data["widgets"][0], widgets[0])
queries = self.get_widget_queries(widgets[0])
assert len(queries) == 2
assert data["widgets"][0]["queries"][0]["id"] == str(queries[0].id)
self.assert_serialized_widget_query(data["widgets"][0]["queries"][1], queries[1])
def test_update_widget_remove_and_update_query(self) -> None:
data: dict[str, Any] = {
"title": "First dashboard",
"widgets": [
{
"id": str(self.widget_1.id),
"title": "New title",
"queries": [
{
"id": str(self.widget_1_data_1.id),
"name": "transactions",
"fields": ["count()"],
"columns": [],
"aggregates": ["count()"],
"conditions": "event.type:transaction",
},
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200, response.data
# only one widget should remain
widgets = self.get_widgets(self.dashboard.id)
assert len(widgets) == 1
self.assert_serialized_widget(data["widgets"][0], widgets[0])
queries = self.get_widget_queries(widgets[0])
assert len(queries) == 1
self.assert_serialized_widget_query(data["widgets"][0]["queries"][0], queries[0])
def test_update_widget_reorder_queries(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{
"id": str(self.widget_1.id),
"title": "New title",
"queries": [
{
"id": str(self.widget_1_data_2.id),
"columns": [],
"aggregates": [],
},
{
"id": str(self.widget_1_data_1.id),
"columns": [],
"aggregates": [],
},
],
},
{"id": str(self.widget_2.id)},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200, response.data
# two widgets should be removed
widgets = self.get_widgets(self.dashboard.id)
assert len(widgets) == 2
queries = self.get_widget_queries(widgets[0])
assert len(queries) == 2
assert queries[0].id == self.widget_1_data_2.id
assert queries[1].id == self.widget_1_data_1.id
def test_update_widget_use_other_query(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{
"id": str(self.widget_1.id),
"title": "New title",
"queries": [
{
"id": str(self.widget_2_data_1.id),
"columns": [],
"aggregates": [],
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 400, response.data
assert "You cannot use a query not owned by this widget" in response.data
def test_update_widget_invalid_orderby(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{
"id": str(self.widget_1.id),
"queries": [
{
"fields": ["title", "count()"],
"columns": ["title"],
"aggregates": ["count()"],
"conditions": "",
"orderby": "message",
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 400, response.data
assert b"Cannot sort by a field" in response.content
def test_remove_widget_and_add_new(self) -> None:
# Remove a widget from the middle of the set and put a new widget there
data = {
"title": "First dashboard",
"widgets": [
{"id": str(self.widget_1.id)},
{"id": str(self.widget_2.id)},
{
"title": "Errors over time",
"displayType": "line",
"queries": [
{
"name": "Errors",
"fields": ["count()"],
"columns": [],
"aggregates": ["count()"],
"conditions": "event.type:error",
}
],
},
{"id": str(self.widget_4.id)},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200, response.data
widgets = self.get_widgets(self.dashboard.id)
assert len(widgets) == 4
# Check ordering
assert self.widget_1.id == widgets[0].id
assert self.widget_2.id == widgets[1].id
assert self.widget_4.id == widgets[2].id
# The new widget was added to the end, this is because the order is based on the id
self.assert_serialized_widget(data["widgets"][2], widgets[3])
def test_update_widget_invalid_aggregate_parameter(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{
"id": str(self.widget_1.id),
"title": "Invalid fields",
"displayType": "line",
"queries": [
{
"name": "Errors",
"fields": ["p95(user)"],
"columns": [],
"aggregates": ["p95(user)"],
"conditions": "",
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 400, response.data
assert b"Invalid fields" in response.content
def test_update_widget_invalid_fields(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{
"id": str(self.widget_1.id),
"title": "Invalid fields",
"displayType": "line",
"queries": [
{
"name": "Errors",
"fields": ["p95()"],
"columns": [],
"aggregates": ["p95()"],
"conditions": "foo: bar:",
}
],
},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 400, response.data
assert b"Invalid conditions" in response.content
def test_update_migrated_spans_widget_reset_changed_reason(self) -> None:
new_dashboard = Dashboard.objects.create(
title="New dashboard",
organization=self.organization,
created_by_id=self.user.id,
)
spans_widget = DashboardWidget.objects.create(
dashboard=new_dashboard,
title="Spans widget",
widget_type=DashboardWidgetTypes.SPANS,
dataset_source=DashboardWidgetDatasetSourcesTypes.SPAN_MIGRATION_VERSION_1.value,
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
changed_reason=[
{
"orderby": [
{"orderby": "total.count", "reason": "fields were dropped: total.count"}
],
"equations": [],
"columns": ["total.count"],
}
],
)
data = {
"title": "New dashboard",
"widgets": [
{
"id": str(spans_widget.id),
"title": "updated spans widget",
"widgetType": "spans",
"datasetSource": "user",
"displayType": "line",
"changedReason": spans_widget.changed_reason,
"queries": [
{
"name": "Errors",
"fields": ["count(span.duration)"],
"columns": [],
"aggregates": ["count(span.duration)"],
"conditions": "",
}
],
}
],
}
response = self.do_request("put", self.url(new_dashboard.id), data=data)
assert response.status_code == 200, response.data
assert response.data["widgets"][0]["changedReason"] is None
spans_widget.refresh_from_db()
assert spans_widget.changed_reason is None
assert spans_widget.dataset_source == DashboardWidgetDatasetSourcesTypes.USER.value
def test_remove_widgets(self) -> None:
data = {
"title": "First dashboard",
"widgets": [
{"id": str(self.widget_1.id), "title": "New title"},
{"id": str(self.widget_2.id), "title": "Other title"},
],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200
widgets = self.get_widgets(self.dashboard.id)
assert len(widgets) == 2
self.assert_serialized_widget(data["widgets"][0], widgets[0])
self.assert_serialized_widget(data["widgets"][1], widgets[1])
def test_reorder_widgets_has_no_effect(self) -> None:
response = self.do_request(
"put",
self.url(self.dashboard.id),
data={
"widgets": [
{"id": self.widget_3.id},
{"id": self.widget_2.id},
{"id": self.widget_1.id},
{"id": self.widget_4.id},
]
},
)
assert response.status_code == 200, response.data
# Reordering has no effect since the order is based on the id
self.assert_dashboard_and_widgets(
[self.widget_1.id, self.widget_2.id, self.widget_3.id, self.widget_4.id]
)
def test_update_widget_layouts(self) -> None:
layouts = {
self.widget_1.id: {"x": 0, "y": 0, "w": 2, "h": 5, "minH": 2},
self.widget_2.id: {"x": 2, "y": 0, "w": 1, "h": 1, "minH": 2},
self.widget_3.id: {"x": 3, "y": 0, "w": 2, "h": 2, "minH": 2},
self.widget_4.id: {"x": 0, "y": 5, "w": 2, "h": 5, "minH": 2},
}
response = self.do_request(
"put",
self.url(self.dashboard.id),
data={
"widgets": [
{"id": widget.id, "layout": layouts[widget.id]}
for widget in [self.widget_1, self.widget_2, self.widget_3, self.widget_4]
]
},
)
assert response.status_code == 200, response.data
widgets = response.data["widgets"]
for widget in widgets:
assert widget["layout"] == layouts[int(widget["id"])]
def test_update_layout_with_invalid_data_fails(self) -> None:
response = self.do_request(
"put",
self.url(self.dashboard.id),
data={
"widgets": [
{
"id": self.widget_1.id,
"layout": {
"x": "this type is unexpected",
"y": 0,
"w": 2,
"h": 5,
"minH": 2,
},
}
]
},
)
assert response.status_code == 400, response.data
    def test_update_without_specifying_layout_does_not_change_saved_layout(self) -> None:
        """Updating widgets without a "layout" key leaves any previously saved layouts intact."""
        # widget_1/widget_2 are expected to already have these layouts; widget_3/widget_4 have none.
        expected_layouts = {
            self.widget_1.id: {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2},
            self.widget_2.id: {"x": 1, "y": 0, "w": 1, "h": 1, "minH": 2},
            self.widget_3.id: None,
            self.widget_4.id: None,
        }
        response = self.do_request(
            "put",
            self.url(self.dashboard.id),
            data={
                "widgets": [
                    {"id": widget.id}  # Not specifying layout for any widget
                    for widget in [self.widget_1, self.widget_2, self.widget_3, self.widget_4]
                ]
            },
        )
        assert response.status_code == 200, response.data
        widgets = response.data["widgets"]
        for widget in widgets:
            assert widget["layout"] == expected_layouts[int(widget["id"])]
    def test_ignores_certain_keys_in_layout(self) -> None:
        """Extra client-only layout keys ("i", "static", "moved") are dropped before saving."""
        expected_layouts = {
            self.widget_1.id: {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2},
            self.widget_2.id: {"x": 1, "y": 0, "w": 1, "h": 1, "minH": 2},
        }
        response = self.do_request(
            "put",
            self.url(self.dashboard.id),
            data={
                "widgets": [
                    {
                        "id": widget.id,
                        "layout": {
                            **expected_layouts[widget.id],
                            # Keys below should be stripped by the API and not round-trip.
                            "i": "this-should-be-ignored",
                            "static": "don't want this",
                            "moved": False,
                        },
                    }
                    for widget in [self.widget_1, self.widget_2]
                ]
            },
        )
        assert response.status_code == 200, response.data
        widgets = response.data["widgets"]
        for widget in widgets:
            assert widget["layout"] == expected_layouts[int(widget["id"])]
    def test_update_prebuilt_dashboard(self) -> None:
        """Updating a prebuilt dashboard materializes a real dashboard and tombstones the slug."""
        data = {
            "title": "First dashboard",
            "widgets": [
                {
                    "title": "New title",
                    "displayType": "line",
                    "queries": [
                        {
                            "name": "transactions",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "event.type:transaction",
                        },
                    ],
                },
            ],
        }
        slug = "default-overview"
        response = self.do_request("put", self.url(slug), data=data)
        assert response.status_code == 200, response.data
        # A concrete dashboard row is created; its id replaces the prebuilt slug.
        dashboard_id = response.data["id"]
        assert dashboard_id != slug
        # Ensure widget and query were saved
        widgets = self.get_widgets(dashboard_id)
        assert len(widgets) == 1
        self.assert_serialized_widget(data["widgets"][0], widgets[0])
        queries = self.get_widget_queries(widgets[0])
        assert len(queries) == 1
        assert DashboardTombstone.objects.filter(slug=slug).exists()
def test_update_unknown_prebuilt(self) -> None:
data = {
"title": "First dashboard",
}
slug = "nope-not-real"
response = self.client.put(self.url(slug), data=data)
assert response.status_code == 404
def test_partial_reordering_deletes_widgets(self) -> None:
response = self.do_request(
"put",
self.url(self.dashboard.id),
data={
"title": "Changed the title",
"widgets": [{"id": self.widget_3.id}, {"id": self.widget_4.id}],
},
)
assert response.status_code == 200
self.assert_dashboard_and_widgets([self.widget_3.id, self.widget_4.id])
deleted_widget_ids = [self.widget_1.id, self.widget_2.id]
assert not DashboardWidget.objects.filter(id__in=deleted_widget_ids).exists()
assert not DashboardWidgetQuery.objects.filter(widget_id__in=deleted_widget_ids).exists()
    def test_widget_does_not_belong_to_dashboard(self) -> None:
        """Submitting a widget owned by a different dashboard is rejected with a 400."""
        widget = DashboardWidget.objects.create(
            dashboard=Dashboard.objects.create(
                organization=self.organization, title="Dashboard 2", created_by_id=self.user.id
            ),
            title="Widget 200",
            display_type=DashboardWidgetDisplayTypes.LINE_CHART,
            widget_type=DashboardWidgetTypes.DISCOVER,
        )
        response = self.do_request(
            "put",
            self.url(self.dashboard.id),
            data={"widgets": [{"id": self.widget_4.id}, {"id": widget.id}]},
        )
        assert response.status_code == 400
        assert response.data == ["You cannot update widgets that are not part of this dashboard."]
        self.assert_no_changes()
def test_widget_does_not_exist(self) -> None:
response = self.do_request(
"put",
self.url(self.dashboard.id),
data={"widgets": [{"id": self.widget_4.id}, {"id": 1234567890}]},
)
assert response.status_code == 400
assert response.data == ["You cannot update widgets that are not part of this dashboard."]
self.assert_no_changes()
    def test_add_issue_widget_valid_query(self) -> None:
        """An issue widget with a parseable issue-search condition saves successfully."""
        data = {
            "title": "First dashboard",
            "widgets": [
                {"id": str(self.widget_1.id)},
                {
                    "title": "Issues",
                    "displayType": "table",
                    "widgetType": "issue",
                    "interval": "5m",
                    "queries": [
                        {
                            "name": "",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "is:unresolved",
                        }
                    ],
                },
            ],
        }
        response = self.do_request("put", self.url(self.dashboard.id), data=data)
        assert response.status_code == 200, response.data
    def test_add_issue_widget_invalid_query(self) -> None:
        """An issue widget with a malformed search condition fails with a parse error."""
        data = {
            "title": "First dashboard",
            "widgets": [
                {"id": str(self.widget_1.id)},
                {
                    "title": "Issues",
                    "displayType": "table",
                    "widgetType": "issue",
                    "interval": "5m",
                    "queries": [
                        {
                            "name": "",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            # Deliberately unbalanced parentheses to trigger a parse failure.
                            "conditions": "is:())",
                        }
                    ],
                },
            ],
        }
        response = self.do_request("put", self.url(self.dashboard.id), data=data)
        assert response.status_code == 400, response.data
        assert b"Parse error" in response.content
    def test_add_discover_widget_invalid_issue_query(self) -> None:
        """An issue-only condition ("is:unresolved") on a transaction widget is rejected."""
        data = {
            "title": "First dashboard",
            "widgets": [
                {"id": str(self.widget_1.id)},
                {
                    "title": "Issues",
                    "displayType": "table",
                    "widgetType": "transaction-like",
                    "interval": "5m",
                    "queries": [
                        {
                            "name": "",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            # Valid issue-search syntax, but not valid for this dataset.
                            "conditions": "is:unresolved",
                        }
                    ],
                },
            ],
        }
        response = self.do_request("put", self.url(self.dashboard.id), data=data)
        assert response.status_code == 400, response.data
        assert b"Invalid conditions" in response.content
    def test_add_multiple_discover_and_issue_widget(self) -> None:
        """A mix of issue, transaction-like, and error-events widgets saves in one PUT."""
        data = {
            "title": "First dashboard",
            "widgets": [
                {"id": str(self.widget_1.id)},
                {
                    "title": "Unresolved Issues",
                    "displayType": "table",
                    "widgetType": "issue",
                    "interval": "5m",
                    "queries": [
                        {
                            "name": "",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "is:unresolved",
                        }
                    ],
                },
                {
                    "title": "Resolved Issues",
                    "displayType": "table",
                    "widgetType": "issue",
                    "interval": "5m",
                    "queries": [
                        {
                            "name": "",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "is:resolved",
                        }
                    ],
                },
                {
                    "title": "Transactions",
                    "displayType": "table",
                    "widgetType": "transaction-like",
                    "interval": "5m",
                    "queries": [
                        {
                            "name": "",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                },
                {
                    "title": "Errors",
                    "displayType": "table",
                    "widgetType": "error-events",
                    "interval": "5m",
                    "queries": [
                        {
                            "name": "",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "event.type:error",
                        }
                    ],
                },
            ],
        }
        response = self.do_request("put", self.url(self.dashboard.id), data=data)
        assert response.status_code == 200, response.data
    def test_add_discover_widget_using_total_count(self) -> None:
        """A transaction-like table widget using the "total.count" column saves successfully."""
        data = {
            "title": "First dashboard",
            "widgets": [
                {"id": str(self.widget_1.id)},
                {
                    "title": "Issues",
                    "displayType": "table",
                    "widgetType": "transaction-like",
                    "interval": "5m",
                    "queries": [
                        {
                            "name": "",
                            "fields": ["count()", "total.count"],
                            "columns": ["total.count"],
                            "aggregates": ["count()"],
                            "conditions": "",
                        }
                    ],
                },
            ],
        }
        response = self.do_request("put", self.url(self.dashboard.id), data=data)
        assert response.status_code == 200, response.data
    def test_add_discover_widget_returns_validation_error(self) -> None:
        """With the deprecation flag on, the "discover" widget type is rejected with a 400."""
        data = {
            "title": "First dashboard",
            "widgets": [
                {"id": str(self.widget_1.id)},
                {
                    "title": "Issues",
                    "displayType": "table",
                    "widgetType": "discover",
                    "interval": "5m",
                    "queries": [
                        {
                            "name": "",
                            "fields": ["count()", "total.count"],
                            "columns": ["total.count"],
                            "aggregates": ["count()"],
                            "conditions": "",
                        }
                    ],
                },
            ],
        }
        with self.feature({"organizations:deprecate-discover-widget-type": True}):
            response = self.do_request("put", self.url(self.dashboard.id), data=data)
            assert response.status_code == 400, response.data
            assert (
                "Attribute value `discover` is deprecated. Please use `error-events` or `transaction-like`"
                in response.content.decode()
            )
def test_update_dashboard_with_filters(self) -> None:
project1 = self.create_project(name="foo", organization=self.organization)
project2 = self.create_project(name="bar", organization=self.organization)
data = {
"title": "First dashboard",
"projects": [project1.id, project2.id],
"environment": ["alpha"],
"period": "7d",
"filters": {"release": ["v1"]},
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200, response.data
assert sorted(response.data["projects"]) == [project1.id, project2.id]
assert response.data["environment"] == ["alpha"]
assert response.data["period"] == "7d"
assert response.data["filters"]["release"] == ["v1"]
def test_update_dashboard_with_invalid_project_filter(self) -> None:
other_project = self.create_project(name="other", organization=self.create_organization())
data = {
"title": "First dashboard",
"projects": [other_project.id],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 403, response.data
def test_update_dashboard_with_all_projects(self) -> None:
data = {
"title": "First dashboard",
"projects": [-1],
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200, response.data
assert response.data["projects"] == [-1]
def test_update_dashboard_with_my_projects_after_setting_all_projects(self) -> None:
dashboard = Dashboard.objects.create(
title="Dashboard With Filters",
created_by_id=self.user.id,
organization=self.organization,
filters={"all_projects": True},
)
data = {
"title": "First dashboard",
"projects": [],
}
response = self.do_request("put", self.url(dashboard.id), data=data)
assert response.status_code == 200, response.data
assert response.data["projects"] == []
    def test_update_dashboard_with_more_widgets_than_max(self) -> None:
        """Submitting more than Dashboard.MAX_WIDGETS widgets is rejected with a 400."""
        data = {
            "title": "Too many widgets",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "title": f"Widget {i}",
                    "limit": 5,
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": ["transaction"],
                            "aggregates": ["count()"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                    "layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2},
                }
                # One more widget than the maximum allowed.
                for i in range(Dashboard.MAX_WIDGETS + 1)
            ],
        }
        response = self.do_request("put", self.url(self.dashboard.id), data=data)
        assert response.status_code == 400, response.data
        assert (
            f"Number of widgets must be less than {Dashboard.MAX_WIDGETS}"
            in response.content.decode()
        )
    def test_update_dashboard_with_widget_filter_requiring_environment(self) -> None:
        """A "release.stage" condition saves when an environment query param is supplied."""
        mock_project = self.create_project()
        self.create_environment(project=mock_project, name="mock_env")
        data = {
            "title": "Dashboard",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "title": "Widget",
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "release.stage:adopted",
                        }
                    ],
                }
            ],
        }
        response = self.do_request(
            "put", f"{self.url(self.dashboard.id)}?environment=mock_env", data=data
        )
        assert response.status_code == 200, response.data
def test_update_dashboard_permissions_with_put(self) -> None:
mock_project = self.create_project()
self.create_environment(project=mock_project, name="mock_env")
data = {
"title": "Dashboard",
"permissions": {"isEditableByEveryone": "False"},
}
user = User(id=self.dashboard.created_by_id) # type: ignore[misc]
self.login_as(user=user)
response = self.do_request(
"put", f"{self.url(self.dashboard.id)}?environment=mock_env", data=data
)
assert response.status_code == 200, response.data
assert response.data["permissions"]["isEditableByEveryone"] is False
def test_update_dashboard_permissions_to_false(self) -> None:
mock_project = self.create_project()
self.create_environment(project=mock_project, name="mock_env")
data = {
"title": "Dashboard",
"permissions": {"isEditableByEveryone": "false"},
}
user = User(id=self.dashboard.created_by_id) # type: ignore[misc]
self.login_as(user=user)
response = self.do_request(
"put", f"{self.url(self.dashboard.id)}?environment=mock_env", data=data
)
assert response.status_code == 200, response.data
assert response.data["permissions"]["isEditableByEveryone"] is False
    def test_update_dashboard_permissions_when_already_created(self) -> None:
        """Updating permissions mutates the existing DashboardPermissions row in place."""
        mock_project = self.create_project()
        permission = DashboardPermissions.objects.create(
            is_editable_by_everyone=True, dashboard=self.dashboard
        )
        self.create_environment(project=mock_project, name="mock_env")
        data = {
            "title": "Dashboard",
            "permissions": {"isEditableByEveryone": "false"},
        }
        assert permission.is_editable_by_everyone is True
        user = User(id=self.dashboard.created_by_id)  # type: ignore[misc]
        self.login_as(user=user)
        response = self.do_request(
            "put", f"{self.url(self.dashboard.id)}?environment=mock_env", data=data
        )
        assert response.status_code == 200, response.data
        assert response.data["permissions"]["isEditableByEveryone"] is False
        # The pre-existing row is updated rather than a new one being created.
        permission.refresh_from_db()
        assert permission.is_editable_by_everyone is False
def test_update_dashboard_permissions_with_invalid_value(self) -> None:
mock_project = self.create_project()
self.create_environment(project=mock_project, name="mock_env")
data = {
"title": "Dashboard",
"permissions": {"isEditableByEveryone": "something-invalid"},
}
response = self.do_request(
"put", f"{self.url(self.dashboard.id)}?environment=mock_env", data=data
)
assert response.status_code == 400, response.data
assert "isEditableByEveryone" in response.data["permissions"]
def test_edit_dashboard_with_edit_permissions_not_granted(self) -> None:
dashboard = Dashboard.objects.create(
title="Dashboard With Dataset Source",
created_by_id=12333,
organization=self.organization,
)
DashboardPermissions.objects.create(is_editable_by_everyone=False, dashboard=dashboard)
user = self.create_user(id=3456)
self.create_member(user=user, organization=self.organization)
self.login_as(user)
response = self.do_request("put", self.url(dashboard.id), data={"title": "New Dashboard 9"})
assert response.status_code == 403
    def test_all_users_can_edit_dashboard_with_edit_permissions_disabled(self) -> None:
        """Any org member can edit when is_editable_by_everyone is True."""
        self.create_user(id=12333)
        dashboard = Dashboard.objects.create(
            id=67,
            title="Dashboard With Dataset Source",
            created_by_id=12333,
            organization=self.organization,
        )
        DashboardPermissions.objects.create(is_editable_by_everyone=True, dashboard=dashboard)
        # A different member (not the creator) performs the edit.
        user = self.create_user(id=3456)
        self.create_member(user=user, organization=self.organization)
        self.login_as(user)
        response = self.do_request("put", self.url(dashboard.id), data={"title": "New Dashboard 9"})
        assert response.status_code == 200, response.content
        assert response.data["title"] == "New Dashboard 9"
def test_creator_can_edit_dashboard(self) -> None:
user = self.create_user(id=12333)
self.create_member(user=user, organization=self.organization)
self.login_as(user)
dashboard = Dashboard.objects.create(
title="Dashboard With Dataset Source",
created_by_id=12333,
organization=self.organization,
)
DashboardPermissions.objects.create(is_editable_by_everyone=False, dashboard=dashboard)
response = self.do_request("put", self.url(dashboard.id), data={"title": "New Dashboard 9"})
assert response.status_code == 200, response.content
assert response.data["title"] == "New Dashboard 9"
    def test_user_in_team_with_access_can_edit_dashboard(self) -> None:
        """A member of a team granted edit access can edit a restricted dashboard."""
        self.create_user(id=11452)
        dashboard = Dashboard.objects.create(
            title="Dashboard With Dataset Source",
            created_by_id=11452,
            organization=self.organization,
        )
        permissions = DashboardPermissions.objects.create(
            is_editable_by_everyone=False, dashboard=dashboard
        )
        # Create team and add to dashboard permissions
        team = self.create_team(organization=self.organization)
        permissions.teams_with_edit_access.set([team])
        # Create user and add to team
        user = self.create_user(id=12345)
        self.create_member(user=user, organization=self.organization, teams=[team])
        self.login_as(user)
        response = self.do_request("put", self.url(dashboard.id), data={"title": "New Dashboard 9"})
        assert response.status_code == 200, response.content
    def test_user_in_team_without_access_cannot_edit_dashboard(self) -> None:
        """A user outside the team granted edit access is denied (403)."""
        self.create_user(id=11452)
        dashboard = Dashboard.objects.create(
            title="Dashboard With Dataset Source",
            created_by_id=11452,
            organization=self.organization,
        )
        permissions = DashboardPermissions.objects.create(
            is_editable_by_everyone=False, dashboard=dashboard
        )
        # Create team and add to dashboard permissions
        team = self.create_team(organization=self.organization)
        permissions.teams_with_edit_access.set([team])
        # Create user not in team
        user = self.create_user(id=12345)
        self.login_as(user)
        response = self.do_request("put", self.url(dashboard.id), data={"title": "New Dashboard 9"})
        assert response.status_code == 403
    def test_user_tries_to_update_dashboard_edit_perms(self) -> None:
        """A regular member cannot modify edit-access permissions (400 with explanation)."""
        DashboardPermissions.objects.create(is_editable_by_everyone=True, dashboard=self.dashboard)
        user = self.create_user(id=28193)
        self.create_member(user=user, organization=self.organization)
        self.login_as(user)
        response = self.do_request(
            "put",
            self.url(self.dashboard.id),
            data={"permissions": {"is_editable_by_everyone": False}},
        )
        assert response.status_code == 400
        assert (
            "Only the Dashboard Creator may modify Dashboard Edit Access"
            in response.content.decode()
        )
    def test_only_owner_can_update_dashboard_edit_perms(self) -> None:
        """A manager is denied (403) but an org owner may change edit permissions."""
        DashboardPermissions.objects.create(is_editable_by_everyone=False, dashboard=self.dashboard)
        # Manager role: not allowed to change edit access.
        user = self.create_user(id=28193)
        self.create_member(user=user, organization=self.organization, role="manager")
        self.login_as(user)
        response = self.do_request(
            "put",
            self.url(self.dashboard.id),
            data={"permissions": {"is_editable_by_everyone": False}},
        )
        assert response.status_code == 403
        # Owner role: allowed.
        user = self.create_user(id=28194)
        self.create_member(user=user, organization=self.organization, role="owner")
        self.login_as(user)
        response = self.do_request(
            "put",
            self.url(self.dashboard.id),
            data={"permissions": {"is_editable_by_everyone": False}},
        )
        assert response.status_code == 200
    def test_update_dashboard_permissions_with_new_teams(self) -> None:
        """Teams listed in teamsWithEditAccess are persisted on the permissions row."""
        mock_project = self.create_project()
        permission = DashboardPermissions.objects.create(
            is_editable_by_everyone=True, dashboard=self.dashboard
        )
        self.create_environment(project=mock_project, name="mock_env")
        assert permission.is_editable_by_everyone is True
        team1 = self.create_team(organization=self.organization)
        team2 = self.create_team(organization=self.organization)
        data = {
            "title": "Dashboard",
            "permissions": {
                "isEditableByEveryone": "false",
                "teamsWithEditAccess": [str(team1.id), str(team2.id)],
            },
        }
        user = User(id=self.dashboard.created_by_id)  # type: ignore[misc]
        self.login_as(user=user)
        response = self.do_request(
            "put", f"{self.url(self.dashboard.id)}?environment=mock_env", data=data
        )
        assert response.status_code == 200, response.data
        assert response.data["permissions"]["isEditableByEveryone"] is False
        assert response.data["permissions"]["teamsWithEditAccess"] == [team1.id, team2.id]
        updated_perms = DashboardPermissions.objects.get(dashboard=self.dashboard)
        assert set(updated_perms.teams_with_edit_access.all()) == {team1, team2}
    def test_update_teams_in_dashboard_permissions(self) -> None:
        """Updating teamsWithEditAccess replaces the previous team set with the new one."""
        mock_project = self.create_project()
        team1 = self.create_team(organization=self.organization)
        team2 = self.create_team(organization=self.organization)
        perms = DashboardPermissions.objects.create(
            is_editable_by_everyone=True, dashboard=self.dashboard
        )
        perms.teams_with_edit_access.add(team1)
        perms.teams_with_edit_access.add(team2)
        assert set(perms.teams_with_edit_access.all()) == {team1, team2}
        self.create_environment(project=mock_project, name="mock_env")
        assert perms.is_editable_by_everyone is True
        new_team1 = self.create_team(organization=self.organization)
        new_team2 = self.create_team(organization=self.organization)
        # team2 is dropped; team1 is kept; two new teams are added.
        data = {
            "title": "Dashboard",
            "permissions": {
                "isEditableByEveryone": "false",
                "teamsWithEditAccess": [str(team1.id), str(new_team1.id), str(new_team2.id)],
            },
        }
        user = User(id=self.dashboard.created_by_id)  # type: ignore[misc]
        self.login_as(user=user)
        response = self.do_request(
            "put", f"{self.url(self.dashboard.id)}?environment=mock_env", data=data
        )
        assert response.status_code == 200, response.data
        assert response.data["permissions"]["teamsWithEditAccess"] == [
            team1.id,
            new_team1.id,
            new_team2.id,
        ]
        updated_perms = DashboardPermissions.objects.get(dashboard=self.dashboard)
        assert set(updated_perms.teams_with_edit_access.all()) == {team1, new_team1, new_team2}
    def test_update_dashboard_permissions_with_invalid_teams(self) -> None:
        """Team ids that do not exist are rejected with a 400 naming the bad ids."""
        mock_project = self.create_project()
        permission = DashboardPermissions.objects.create(
            is_editable_by_everyone=True, dashboard=self.dashboard
        )
        self.create_environment(project=mock_project, name="mock_env")
        assert permission.is_editable_by_everyone is True
        data = {
            "title": "Dashboard",
            "permissions": {
                "isEditableByEveryone": "false",
                "teamsWithEditAccess": ["6", "23134", "0", "1"],
            },
        }
        user = User(id=self.dashboard.created_by_id)  # type: ignore[misc]
        self.login_as(user=user)
        response = self.do_request(
            "put", f"{self.url(self.dashboard.id)}?environment=mock_env", data=data
        )
        assert response.status_code == 400
        assert (
            "Cannot update dashboard edit permissions. Teams with IDs 0, 23134, 6, and 1 do not exist."
            in response.content.decode()
        )
    def test_update_dashboard_permissions_with_teams_from_different_org(self) -> None:
        """A team from another organization is treated as nonexistent and rejected (400)."""
        mock_project = self.create_project()
        test_org = self.create_organization(name="TOrg", owner=self.user)
        team_1 = self.create_team(organization=self.organization)
        team_test_org = self.create_team(organization=test_org)
        data = {
            "title": "Dashboard",
            "permissions": {
                "isEditableByEveryone": "false",
                "teamsWithEditAccess": [str(team_1.id), str(team_test_org.id)],
            },
        }
        self.create_environment(project=mock_project, name="mock_env")
        user = User(id=self.dashboard.created_by_id)  # type: ignore[misc]
        self.login_as(user=user)
        response = self.do_request(
            "put", f"{self.url(self.dashboard.id)}?environment=mock_env", data=data
        )
        assert response.status_code == 400
        assert (
            f"Cannot update dashboard edit permissions. Teams with IDs {team_test_org.id} do not exist."
            in response.content.decode()
        )
def test_update_dashboard_permissions_with_none_does_not_create_permissions_object(
self,
) -> None:
data = {
"title": "Dashboard",
"permissions": None,
}
response = self.do_request("put", self.url(self.dashboard.id), data=data)
assert response.status_code == 200, response.data
assert response.data["permissions"] is None
assert not DashboardPermissions.objects.filter(dashboard=self.dashboard).exists()
    def test_select_everyone_in_dashboard_permissions_clears_all_teams(self) -> None:
        """Setting isEditableByEveryone to true clears any team-level edit grants."""
        mock_project = self.create_project()
        team1 = self.create_team(organization=self.organization)
        team2 = self.create_team(organization=self.organization)
        perms = DashboardPermissions.objects.create(
            is_editable_by_everyone=False, dashboard=self.dashboard
        )
        perms.teams_with_edit_access.add(team1)
        perms.teams_with_edit_access.add(team2)
        assert set(perms.teams_with_edit_access.all()) == {team1, team2}
        self.create_environment(project=mock_project, name="mock_env")
        assert perms.is_editable_by_everyone is False
        # Teams are submitted alongside "true" — they should be ignored/cleared.
        data = {
            "title": "Dashboard",
            "permissions": {
                "isEditableByEveryone": "true",
                "teamsWithEditAccess": [str(team1.id), str(team2.id)],
            },
        }
        user = User(id=self.dashboard.created_by_id)  # type: ignore[misc]
        self.login_as(user=user)
        response = self.do_request(
            "put", f"{self.url(self.dashboard.id)}?environment=mock_env", data=data
        )
        assert response.status_code == 200, response.data
        assert response.data["permissions"]["teamsWithEditAccess"] == []
        updated_perms = DashboardPermissions.objects.get(dashboard=self.dashboard)
        assert set(updated_perms.teams_with_edit_access.all()) == set()
def test_update_dashboard_without_projects_does_not_clear_projects(self) -> None:
project1 = self.create_project(name="foo", organization=self.organization)
project2 = self.create_project(name="bar", organization=self.organization)
dashboard = self.create_dashboard(title="First dashboard", organization=self.organization)
dashboard.projects.add(project1)
dashboard.projects.add(project2)
data = {
"title": "Modified Title",
}
response = self.do_request("put", self.url(dashboard.id), data=data)
assert response.status_code == 200, response.data
assert sorted(response.data["projects"]) == [project1.id, project2.id]
    def test_save_widget_with_custom_measurement_in_equation_tables(self) -> None:
        """A table widget whose equation references a custom measurement saves and round-trips."""
        # Store one data point so the custom measurement exists for the org/project.
        BaseMetricsTestCase.store_metric(
            self.organization.id,
            self.project.id,
            "d:transactions/measurements.custom_duration@millisecond",
            {},
            int(before_now(days=1).timestamp()),
            1,
        )
        data: dict[str, Any] = {
            "title": "First dashboard",
            "widgets": [
                {
                    "title": "EPM table",
                    "widgetType": "transaction-like",
                    "displayType": "table",
                    "queries": [
                        {
                            "name": "",
                            "fields": [
                                "transaction.duration",
                                "measurements.custom_duration",
                                "equation|measurements.custom_duration / transaction.duration",
                            ],
                            "columns": [
                                "transaction.duration",
                                "measurements.custom_duration",
                            ],
                            "aggregates": [
                                "equation|measurements.custom_duration / transaction.duration"
                            ],
                            "conditions": "",
                            "orderby": "",
                            "selectedAggregate": 1,
                        }
                    ],
                },
            ],
        }
        with self.feature({"organizations:performance-use-metrics": True}):
            response = self.do_request("put", self.url(self.dashboard.id), data=data)
            assert response.status_code == 200, response.data
        widgets = self.get_widgets(self.dashboard.id)
        assert len(widgets) == 1
        self.assert_serialized_widget(data["widgets"][0], widgets[0])
        queries = widgets[0].dashboardwidgetquery_set.all()
        assert len(queries) == 1
        self.assert_serialized_widget_query(data["widgets"][0]["queries"][0], queries[0])
    def test_save_widget_with_custom_measurement_in_equation_line_chart(self) -> None:
        """A line-chart widget with an aggregate equation over a custom measurement saves."""
        # Store one data point so the custom measurement exists for the org/project.
        BaseMetricsTestCase.store_metric(
            self.organization.id,
            self.project.id,
            "d:transactions/measurements.custom_duration@millisecond",
            {},
            int(before_now(days=1).timestamp()),
            1,
        )
        data: dict[str, Any] = {
            "title": "First dashboard",
            "widgets": [
                {
                    "title": "EPM line",
                    "displayType": "line",
                    "limit": 3,
                    "queries": [
                        {
                            "name": "",
                            "fields": [
                                "transaction.duration",
                                "measurements.custom_duration",
                                "equation|avg(measurements.custom_duration) / avg(transaction.duration)",
                            ],
                            "columns": [
                                "transaction.duration",
                                "measurements.custom_duration",
                            ],
                            "aggregates": [
                                "equation|avg(measurements.custom_duration) / avg(transaction.duration)"
                            ],
                            "conditions": "",
                            "orderby": "",
                            "selectedAggregate": 1,
                        }
                    ],
                },
            ],
        }
        with self.feature({"organizations:performance-use-metrics": True}):
            response = self.do_request("put", self.url(self.dashboard.id), data=data)
            assert response.status_code == 200, response.data
        widgets = self.get_widgets(self.dashboard.id)
        assert len(widgets) == 1
        self.assert_serialized_widget(data["widgets"][0], widgets[0])
        queries = widgets[0].dashboardwidgetquery_set.all()
        assert len(queries) == 1
        self.assert_serialized_widget_query(data["widgets"][0]["queries"][0], queries[0])
    def test_dashboard_release_widget_resets_to_errors(self) -> None:
        """Changing a widget's type to "metrics" clears its discover split and dataset source."""
        dashboard = self.create_dashboard(
            title="dataset reset issue", organization=self.organization
        )
        # Start from an error-events widget with a user-assigned dataset source.
        widget = DashboardWidget.objects.create(
            dashboard=dashboard,
            title="Custom Widget",
            display_type=DashboardWidgetDisplayTypes.TABLE,
            widget_type=DashboardWidgetTypes.ERROR_EVENTS,
            discover_widget_split=DashboardWidgetTypes.ERROR_EVENTS,
            dataset_source=DatasetSourcesTypes.USER.value,
        )
        DashboardWidgetQuery.objects.create(
            widget=widget,
            name="",
            fields=["count()"],
            columns=[],
            aggregates=["count()"],
            conditions="",
            orderby="-count()",
            order=0,
        )
        data = {
            "title": "dataset reset issue",
            "widgets": [
                {
                    "id": str(widget.id),
                    "title": "Custom Widget",
                    "displayType": "table",
                    "queries": [
                        {
                            "name": "",
                            "fields": ["crash_free_rate(session)"],
                            "fieldAliases": [],
                            "columns": [],
                            "aggregates": ["crash_free_rate(session)"],
                            "conditions": "",
                            "orderby": "-crash_free_rate(session)",
                        }
                    ],
                    "widgetType": "metrics",
                    "thresholds": None,
                    "description": None,
                },
            ],
        }
        response = self.do_request("put", self.url(dashboard.id), data=data)
        assert response.status_code == 200, response.data
        assert response.data["widgets"][0]["widgetType"] == "metrics"
        # The split/source fields are reset rather than carried over from the old type.
        widget = DashboardWidget.objects.get(id=widget.id)
        assert widget.discover_widget_split is None
        assert widget.dataset_source == DatasetSourcesTypes.UNKNOWN.value
    def test_dashboard_widget_missing_columns_can_successfully_save(self) -> None:
        """A query payload without a "columns" key saves and defaults columns to []."""
        data = {
            "title": "First dashboard",
            "widgets": [
                {
                    "title": "Issue Widget",
                    "displayType": "table",
                    "interval": "5m",
                    "widgetType": DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ISSUE),
                    "queries": [
                        {
                            "name": "Errors",
                            "fields": ["issue", "title"],
                            "aggregates": [],
                            "conditions": "",
                            "orderby": "",
                        }
                    ],
                }
            ],
        }
        response = self.do_request("put", self.url(self.dashboard.id), data=data)
        assert response.status_code == 200, response.data
        assert response.data["widgets"][0]["queries"][0]["columns"] == []
        assert response.data["widgets"][0]["queries"][0]["fields"] == ["issue", "title"]
        assert response.data["widgets"][0]["widgetType"] == "issue"
    def test_dashboard_transaction_widget_deprecation_with_flag(self) -> None:
        """With the deprecation flag on, adding a new transaction-like widget is rejected."""
        with self.feature("organizations:discover-saved-queries-deprecation"):
            data = {
                "title": "Test Dashboard",
                "widgets": [
                    {
                        "title": "Transaction Widget",
                        "displayType": "table",
                        "widgetType": DashboardWidgetTypes.get_type_name(
                            DashboardWidgetTypes.TRANSACTION_LIKE
                        ),
                        "queries": [
                            {
                                "name": "Transaction Widget",
                                "fields": ["count()"],
                                "aggregates": ["count()"],
                                "conditions": "",
                                "orderby": "-count()",
                                "columns": [],
                                "fieldAliases": [],
                            }
                        ],
                    }
                ],
            }
            response = self.do_request("put", self.url(self.dashboard.id), data=data)
            assert response.status_code == 400
            assert (
                response.data["widgets"][0]["widgetType"][0]
                == "The transactions dataset is being deprecated. Please use the spans dataset with the `is_transaction:true` filter instead."
            )
    # NOTE(review): "exisiting" is a typo in the test name; kept as-is since the name
    # is the test's public identifier.
    def test_dashboard_exisiting_transaction_widget_deprecation_with_flag(self) -> None:
        """Pre-existing transaction widgets (with an id) are still accepted under the flag."""
        with self.feature("organizations:discover-saved-queries-deprecation"):
            data = {
                "title": "Test Dashboard",
                "widgets": [
                    {
                        "id": self.widget_1.id,
                        "title": "Transaction Widget",
                        "displayType": "table",
                        "widgetType": DashboardWidgetTypes.get_type_name(
                            DashboardWidgetTypes.TRANSACTION_LIKE
                        ),
                        "queries": [
                            {
                                "name": "Transaction Widget",
                                "fields": ["count()"],
                                "aggregates": ["count()"],
                                "conditions": "",
                                "orderby": "-count()",
                                "columns": [],
                                "fieldAliases": [],
                            }
                        ],
                    },
                    {
                        "title": "Error Widget",
                        "displayType": "table",
                        "widgetType": DashboardWidgetTypes.get_type_name(
                            DashboardWidgetTypes.ERROR_EVENTS
                        ),
                        "queries": [
                            {
                                "name": "Error Widget",
                                "fields": ["count()"],
                                "aggregates": ["count()"],
                                "conditions": "",
                                "orderby": "-count()",
                                "columns": [],
                                "fieldAliases": [],
                            }
                        ],
                    },
                ],
            }
            # should be able to add widget to dashboard with existing transaction widgets
            response = self.do_request("put", self.url(self.dashboard.id), data=data)
            assert response.status_code == 200
    def test_create_widget_with_field_links(self) -> None:
        """A widget query with linkedDashboards creates DashboardFieldLink rows."""
        # Create a second dashboard to link to
        linked_dashboard = Dashboard.objects.create(
            title="Linked Dashboard",
            created_by_id=self.user.id,
            organization=self.organization,
        )
        data: dict[str, Any] = {
            "title": "Dashboard with Field Links",
            "widgets": [
                {
                    "title": "Widget with Links",
                    "displayType": "table",
                    "interval": "5m",
                    "queries": [
                        {
                            "name": "Query with Links",
                            "fields": ["count()", "project"],
                            "columns": ["project"],
                            "aggregates": ["count()"],
                            "conditions": "event.type:error",
                            "linkedDashboards": [
                                {"field": "project", "dashboardId": linked_dashboard.id}
                            ],
                        }
                    ],
                    "datasetSource": "user",
                }
            ],
        }
        with self.feature("organizations:dashboards-drilldown-flow"):
            response = self.do_request("put", self.url(self.dashboard.id), data=data)
            assert response.status_code == 200, response.data
            assert response.data.get("widgets")[0].get("queries")[0].get("linkedDashboards") == [
                {
                    "field": "project",
                    "dashboardId": linked_dashboard.id,
                }
            ]
            widgets = self.get_widgets(self.dashboard.id)
            assert len(widgets) == 1
            widget = widgets[0]
            queries = widget.dashboardwidgetquery_set.all()
            assert len(queries) == 1
            # Verify field links were created
            field_links = DashboardFieldLink.objects.filter(dashboard_widget_query=queries[0])
            assert len(field_links) == 1
            field_link = field_links[0]
            assert field_link.field == "project"
            assert field_link.dashboard_id == linked_dashboard.id
            assert field_link.dashboard_widget_query_id == queries[0].id
def test_update_widget_with_field_links(self) -> None:
dashboard = self.create_dashboard(
title="Dashboard with Links", organization=self.organization
)
linked_dashboard = Dashboard.objects.create(
title="Linked Dashboard",
created_by_id=self.user.id,
organization=self.organization,
)
widget = DashboardWidget.objects.create(
dashboard=dashboard,
title="Widget with Links",
display_type=DashboardWidgetDisplayTypes.TABLE,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
discover_widget_split=DashboardWidgetTypes.ERROR_EVENTS,
dataset_source=DatasetSourcesTypes.USER.value,
)
widget_query = DashboardWidgetQuery.objects.create(
widget=widget,
name="",
fields=["count()"],
columns=[],
aggregates=["count()"],
conditions="",
orderby="-count()",
order=0,
)
DashboardFieldLink.objects.create(
dashboard_widget_query=widget_query,
field="project",
dashboard_id=linked_dashboard.id,
)
data: dict[str, Any] = {
"title": "Dashboard with Links",
"widgets": [
{
"id": str(widget.id),
"title": "Widget with Links",
"displayType": "table",
"interval": "5m",
"queries": [
{
"id": str(widget_query.id),
"name": "Query with Links",
"fields": ["count()", "project", "environment"],
"columns": ["project"],
"aggregates": ["count()"],
"conditions": "event.type:error",
"linkedDashboards": [
{"field": "project", "dashboardId": linked_dashboard.id},
{"field": "environment", "dashboardId": linked_dashboard.id},
],
}
],
}
],
}
with self.feature("organizations:dashboards-drilldown-flow"):
response = self.do_request("put", self.url(dashboard.id), data=data)
assert response.status_code == 200, response.data
widgets = self.get_widgets(dashboard.id)
assert len(widgets) == 1
widget = widgets[0]
queries = widget.dashboardwidgetquery_set.all()
assert len(queries) == 1
field_links = DashboardFieldLink.objects.filter(dashboard_widget_query=queries[0]).order_by(
"field"
)
assert len(field_links) == 2
# Verify the field links were updated correctly
assert field_links[0].field == "environment"
assert field_links[0].dashboard_id == linked_dashboard.id
assert field_links[0].dashboard_widget_query_id == queries[0].id
assert field_links[1].field == "project"
assert field_links[1].dashboard_id == linked_dashboard.id
assert field_links[1].dashboard_widget_query_id == queries[0].id
def test_deletes_widget_with_field_links(self) -> None:
dashboard = self.create_dashboard(
title="Dashboard with Links", organization=self.organization
)
linked_dashboard = Dashboard.objects.create(
title="Linked Dashboard",
created_by_id=self.user.id,
organization=self.organization,
)
widget = DashboardWidget.objects.create(
dashboard=dashboard,
title="Widget with Links",
display_type=DashboardWidgetDisplayTypes.TABLE,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
discover_widget_split=DashboardWidgetTypes.ERROR_EVENTS,
dataset_source=DatasetSourcesTypes.USER.value,
)
widget_query = DashboardWidgetQuery.objects.create(
widget=widget,
name="",
fields=["count()"],
columns=[],
aggregates=["count()"],
conditions="",
orderby="-count()",
order=0,
)
DashboardFieldLink.objects.create(
dashboard_widget_query=widget_query,
field="project",
dashboard_id=linked_dashboard.id,
)
data: dict[str, Any] = {
"title": "Dashboard with Links",
"widgets": [
{
"id": str(widget.id),
"title": "Widget with Links",
"displayType": "table",
"interval": "5m",
"queries": [
{
"id": str(widget_query.id),
"name": "Query with Links",
"fields": ["count()", "project", "environment"],
"columns": ["project"],
"aggregates": ["count()"],
"conditions": "event.type:error",
"linkedDashboards": [],
}
],
}
],
}
with self.feature("organizations:dashboards-drilldown-flow"):
response = self.do_request("put", self.url(dashboard.id), data=data)
assert response.status_code == 200, response.data
widgets = self.get_widgets(dashboard.id)
assert len(widgets) == 1
widget = widgets[0]
queries = widget.dashboardwidgetquery_set.all()
assert len(queries) == 1
field_links = DashboardFieldLink.objects.filter(dashboard_widget_query=queries[0])
assert len(field_links) == 0
def test_does_not_update_non_table_dashboard_links(self) -> None:
dashboard = self.create_dashboard(
title="Dashboard with Links", organization=self.organization
)
linked_dashboard = Dashboard.objects.create(
title="Linked Dashboard",
created_by_id=self.user.id,
organization=self.organization,
)
widget = DashboardWidget.objects.create(
dashboard=dashboard,
title="Widget with Links",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
discover_widget_split=DashboardWidgetTypes.ERROR_EVENTS,
dataset_source=DatasetSourcesTypes.USER.value,
)
widget_query = DashboardWidgetQuery.objects.create(
widget=widget,
name="",
fields=["count()"],
columns=[],
aggregates=["count()"],
conditions="",
orderby="-count()",
order=0,
)
DashboardFieldLink.objects.create(
dashboard_widget_query=widget_query,
field="project",
dashboard_id=self.dashboard.id,
)
data: dict[str, Any] = {
"title": "Dashboard with Links",
"widgets": [
{
"id": str(widget.id),
"title": "Widget with Links",
"displayType": "line",
"interval": "5m",
"queries": [
{
"id": str(widget_query.id),
"name": "Query with Links",
"fields": ["count()", "project", "environment"],
"linkedDashboards": [
{"field": "project", "dashboardId": linked_dashboard.id},
],
}
],
}
],
}
with self.feature("organizations:dashboards-drilldown-flow"):
response = self.do_request("put", self.url(dashboard.id), data=data)
assert response.status_code == 400, response.data
assert b"Field links are only supported for table widgets" in response.content
def test_does_not_update_if_linked_dashboard_does_not_appear_in_fields(self) -> None:
dashboard = self.create_dashboard(
title="Dashboard with Links", organization=self.organization
)
linked_dashboard = Dashboard.objects.create(
title="Linked Dashboard",
created_by_id=self.user.id,
organization=self.organization,
)
widget = DashboardWidget.objects.create(
dashboard=dashboard,
title="Widget with Links",
display_type=DashboardWidgetDisplayTypes.TABLE,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
discover_widget_split=DashboardWidgetTypes.ERROR_EVENTS,
dataset_source=DatasetSourcesTypes.USER.value,
)
widget_query = DashboardWidgetQuery.objects.create(
widget=widget,
name="",
fields=["count()"],
columns=[],
aggregates=["count()"],
conditions="",
orderby="-count()",
order=0,
)
DashboardFieldLink.objects.create(
dashboard_widget_query=widget_query,
field="project",
dashboard_id=linked_dashboard.id,
)
data: dict[str, Any] = {
"title": "Dashboard with Links",
"widgets": [
{
"id": str(widget.id),
"title": "Widget with Links",
"displayType": "table",
"interval": "5m",
"queries": [
{
"id": str(widget_query.id),
"name": "Query with Links",
"fields": ["count()", "user.email"],
"columns": ["user.email"],
"aggregates": ["count()"],
"conditions": "event.type:error",
"linkedDashboards": [
{"field": "project", "dashboardId": linked_dashboard.id},
],
}
],
}
],
}
with self.feature("organizations:dashboards-drilldown-flow"):
response = self.do_request("put", self.url(dashboard.id), data=data)
assert response.status_code == 400, response.data
assert b"Linked dashboard does not appear in the fields of the query" in response.content
def test_cannot_delete_prebuilt_insights_dashboard(self) -> None:
dashboard = Dashboard.objects.create(
title="Frontend Session Health",
organization=self.organization,
prebuilt_id=PrebuiltDashboardId.FRONTEND_SESSION_HEALTH,
)
response = self.do_request("delete", self.url(dashboard.id))
assert response.status_code == 409
assert "Cannot delete prebuilt Dashboards." in response.content.decode()
def test_cannot_edit_prebuilt_insights_dashboard(self) -> None:
dashboard = Dashboard.objects.create(
title="Frontend Session Health",
organization=self.organization,
prebuilt_id=PrebuiltDashboardId.FRONTEND_SESSION_HEALTH,
)
response = self.do_request(
"put", self.url(dashboard.id), data={"title": "Frontend Session Health Edited"}
)
assert response.status_code == 409
assert "Cannot edit prebuilt Dashboards." in response.content.decode()
| OrganizationDashboardDetailsPutTest |
python | doocs__leetcode | lcof2/剑指 Offer II 105. 岛屿的最大面积/Solution.py | {
"start": 0,
"end": 546
} | class ____:
def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
def dfs(i: int, j: int) -> int:
if grid[i][j] == 0:
return 0
ans = 1
grid[i][j] = 0
dirs = (-1, 0, 1, 0, -1)
for a, b in pairwise(dirs):
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n:
ans += dfs(x, y)
return ans
m, n = len(grid), len(grid[0])
return max(dfs(i, j) for i in range(m) for j in range(n))
| Solution |
python | tensorflow__tensorflow | tensorflow/python/distribute/mirrored_strategy_test.py | {
"start": 12923,
"end": 15750
} | class ____(strategy_test_lib.DistributionTestBase,
strategy_test_lib.TwoDeviceDistributionTestBase,
parameterized.TestCase):
def tearDown(self):
super(MirroredCollectiveOpTest, self).tearDown()
context._reset_context()
def testAllCpu(self):
@def_function.function
def fn():
strategy = mirrored_strategy.MirroredStrategy(["CPU:0", "CPU:1"])
if ops.executing_eagerly_outside_functions():
self.assertIsInstance(
strategy.extended._collective_ops,
cross_device_ops_lib.CollectiveAllReduce)
self.assertEqual(
strategy.extended._collective_ops._options.implementation,
collective_util.CommunicationImplementation.RING)
else:
self.assertIsInstance(strategy.extended._collective_ops,
cross_device_ops_lib.ReductionToOneDevice)
fn()
def testMixedDevices(self):
@def_function.function
def fn():
strategy = mirrored_strategy.MirroredStrategy(["CPU:0", "GPU:0"])
self.assertIsInstance(
strategy.extended._collective_ops,
cross_device_ops_lib.ReductionToOneDevice)
fn()
def testAllPhysicalGpu(self):
@def_function.function
def fn():
strategy = mirrored_strategy.MirroredStrategy(["GPU:0", "GPU:1"])
self.assertIsInstance(
strategy.extended._collective_ops,
cross_device_ops_lib.CollectiveAllReduce)
self.assertEqual(
strategy.extended._collective_ops._options.implementation,
collective_util.CommunicationImplementation.NCCL)
fn()
def testVirtualGpu(self):
# Logical devices cannot be changed after context initialization.
context._reset_context()
physical_gpus = context.context().list_physical_devices(device_type="GPU")
context.context().set_logical_device_configuration(physical_gpus[1], [
context.LogicalDeviceConfiguration(memory_limit=1024),
context.LogicalDeviceConfiguration(memory_limit=1024)
])
@def_function.function
def fn():
strategy = mirrored_strategy.MirroredStrategy(["GPU:0", "GPU:1", "GPU:2"])
if ops.executing_eagerly_outside_functions():
self.assertIsInstance(
strategy.extended._collective_ops,
cross_device_ops_lib.CollectiveAllReduce)
self.assertEqual(
strategy.extended._collective_ops._options.implementation,
collective_util.CommunicationImplementation.RING)
else:
self.assertEqual(strategy.extended._collective_ops,
cross_device_ops_lib.ReductionToOneDevice)
fn()
@combinations.generate(
combinations.combine(
mode=["graph", "eager"], required_gpus=[2], use_default=[True, False]))
| MirroredCollectiveOpTest |
python | mlflow__mlflow | mlflow/gateway/providers/mistral.py | {
"start": 4928,
"end": 7108
} | class ____(BaseProvider):
NAME = "Mistral"
CONFIG_TYPE = MistralConfig
def __init__(self, config: EndpointConfig) -> None:
super().__init__(config)
if config.model.config is None or not isinstance(config.model.config, MistralConfig):
raise TypeError(f"Unexpected config type {config.model.config}")
self.mistral_config: MistralConfig = config.model.config
@property
def headers(self) -> dict[str, str]:
return {"Authorization": f"Bearer {self.mistral_config.mistral_api_key}"}
@property
def base_url(self) -> str:
return "https://api.mistral.ai/v1"
@property
def adapter_class(self) -> type[ProviderAdapter]:
return MistralAdapter
def get_endpoint_url(self, route_type: str) -> str:
if route_type == "llm/v1/chat":
return f"{self.base_url}/chat/completions"
else:
raise ValueError(f"Invalid route type {route_type}")
async def _request(self, path: str, payload: dict[str, Any]) -> dict[str, Any]:
return await send_request(
headers=self.headers,
base_url=self.base_url,
path=path,
payload=payload,
)
async def completions(self, payload: completions.RequestPayload) -> completions.ResponsePayload:
from fastapi.encoders import jsonable_encoder
payload = jsonable_encoder(payload, exclude_none=True)
self.check_for_model_field(payload)
resp = await self._request(
"chat/completions",
MistralAdapter.completions_to_model(payload, self.config),
)
return MistralAdapter.model_to_completions(resp, self.config)
async def embeddings(self, payload: embeddings.RequestPayload) -> embeddings.ResponsePayload:
from fastapi.encoders import jsonable_encoder
payload = jsonable_encoder(payload, exclude_none=True)
self.check_for_model_field(payload)
resp = await self._request(
"embeddings",
MistralAdapter.embeddings_to_model(payload, self.config),
)
return MistralAdapter.model_to_embeddings(resp, self.config)
| MistralProvider |
python | PrefectHQ__prefect | src/prefect/settings/models/experiments.py | {
"start": 370,
"end": 1747
} | class ____(PrefectBaseSettings):
"""
Settings for configuring the experimental plugin system
"""
model_config: ClassVar[SettingsConfigDict] = build_settings_config(
("experiments", "plugins")
)
enabled: bool = Field(
default=False,
description="Enable the experimental plugin system.",
)
allow: Annotated[
Union[set[str], None],
BeforeValidator(partial(validate_set_T_from_delim_string, type_=str)),
] = Field(
default=None,
description="Comma-separated list of plugin names to allow. If set, only these plugins will be loaded.",
)
deny: Annotated[
Union[set[str], None],
BeforeValidator(partial(validate_set_T_from_delim_string, type_=str)),
] = Field(
default=None,
description="Comma-separated list of plugin names to deny. These plugins will not be loaded.",
)
setup_timeout_seconds: float = Field(
default=20.0,
description="Maximum time in seconds for all plugins to complete their setup hooks.",
)
strict: bool = Field(
default=False,
description="If True, exit if a required plugin fails during setup.",
)
safe_mode: bool = Field(
default=False,
description="If True, load plugins but do not execute their hooks. Useful for testing.",
)
| PluginsSettings |
python | sympy__sympy | sympy/functions/special/polynomials.py | {
"start": 24850,
"end": 28821
} | class ____(OrthogonalPolynomial):
r"""
``legendre(n, x)`` gives the $n$th Legendre polynomial of $x$, $P_n(x)$
Explanation
===========
The Legendre polynomials are orthogonal on $[-1, 1]$ with respect to
the constant weight 1. They satisfy $P_n(1) = 1$ for all $n$; further,
$P_n$ is odd for odd $n$ and even for even $n$.
Examples
========
>>> from sympy import legendre, diff
>>> from sympy.abc import x, n
>>> legendre(0, x)
1
>>> legendre(1, x)
x
>>> legendre(2, x)
3*x**2/2 - 1/2
>>> legendre(n, x)
legendre(n, x)
>>> diff(legendre(n,x), x)
n*(x*legendre(n, x) - legendre(n - 1, x))/(x**2 - 1)
See Also
========
jacobi, gegenbauer,
chebyshevt, chebyshevt_root, chebyshevu, chebyshevu_root,
assoc_legendre,
hermite, hermite_prob,
laguerre, assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.hermite_prob_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
References
==========
.. [1] https://en.wikipedia.org/wiki/Legendre_polynomial
.. [2] https://mathworld.wolfram.com/LegendrePolynomial.html
.. [3] https://functions.wolfram.com/Polynomials/LegendreP/
.. [4] https://functions.wolfram.com/Polynomials/LegendreP2/
"""
_ortho_poly = staticmethod(legendre_poly)
@classmethod
def eval(cls, n, x):
if not n.is_Number:
# Symbolic result L_n(x)
# L_n(-x) ---> (-1)**n * L_n(x)
if x.could_extract_minus_sign():
return S.NegativeOne**n * legendre(n, -x)
# L_{-n}(x) ---> L_{n-1}(x)
if n.could_extract_minus_sign() and not(-n - 1).could_extract_minus_sign():
return legendre(-n - S.One, x)
# We can evaluate for some special values of x
if x.is_zero:
return sqrt(S.Pi)/(gamma(S.Half - n/2)*gamma(S.One + n/2))
elif x == S.One:
return S.One
elif x is S.Infinity:
return S.Infinity
else:
# n is a given fixed integer, evaluate into polynomial;
# L_{-n}(x) ---> L_{n-1}(x)
if n.is_negative:
n = -n - S.One
return cls._eval_at_order(n, x)
def fdiff(self, argindex=2):
if argindex == 1:
# Diff wrt n
raise ArgumentIndexError(self, argindex)
elif argindex == 2:
# Diff wrt x
# Find better formula, this is unsuitable for x = +/-1
# https://www.autodiff.org/ad16/Oral/Buecker_Legendre.pdf says
# at x = 1:
# n*(n + 1)/2 , m = 0
# oo , m = 1
# -(n-1)*n*(n+1)*(n+2)/4 , m = 2
# 0 , m = 3, 4, ..., n
#
# at x = -1
# (-1)**(n+1)*n*(n + 1)/2 , m = 0
# (-1)**n*oo , m = 1
# (-1)**n*(n-1)*n*(n+1)*(n+2)/4 , m = 2
# 0 , m = 3, 4, ..., n
n, x = self.args
return n/(x**2 - 1)*(x*legendre(n, x) - legendre(n - 1, x))
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_Sum(self, n, x, **kwargs):
from sympy.concrete.summations import Sum
k = Dummy("k")
kern = S.NegativeOne**k*binomial(n, k)**2*((1 + x)/2)**(n - k)*((1 - x)/2)**k
return Sum(kern, (k, 0, n))
def _eval_rewrite_as_polynomial(self, n, x, **kwargs):
# This function is just kept for backwards compatibility
# but should not be used
return self._eval_rewrite_as_Sum(n, x, **kwargs)
| legendre |
python | apache__airflow | providers/google/tests/unit/google/cloud/triggers/test_vertex_ai.py | {
"start": 4729,
"end": 9017
} | class ____:
def setup_method(self, method):
self.trigger = BaseVertexAIJobTrigger(
conn_id=TEST_CONN_ID,
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
job_id=TEST_HPT_JOB_ID,
poll_interval=TEST_POLL_INTERVAL,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
def test_serialize(self):
classpath, kwargs = self.trigger.serialize()
assert classpath == VERTEX_AI_TRIGGER_PATH.format("BaseVertexAIJobTrigger")
assert kwargs == dict(
conn_id=TEST_CONN_ID,
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
job_id=TEST_HPT_JOB_ID,
poll_interval=TEST_POLL_INTERVAL,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
("job_state", "job_name", "status", "message"),
[
(
JobState.JOB_STATE_CANCELLED,
"test_job_name_0",
"error",
"Vertex AI Job test_job_name_0 completed with status JOB_STATE_CANCELLED",
),
(
JobState.JOB_STATE_FAILED,
"test_job_name_1",
"error",
"Vertex AI Job test_job_name_1 completed with status JOB_STATE_FAILED",
),
(
JobState.JOB_STATE_PAUSED,
"test_job_name_2",
"success",
"Vertex AI Job test_job_name_2 completed with status JOB_STATE_PAUSED",
),
(
JobState.JOB_STATE_SUCCEEDED,
"test_job_name_3",
"success",
"Vertex AI Job test_job_name_3 completed with status JOB_STATE_SUCCEEDED",
),
],
)
@mock.patch(VERTEX_AI_TRIGGER_PATH.format("BaseVertexAIJobTrigger._serialize_job"))
async def test_run(self, mock_serialize_job, job_state, job_name, status, message):
mock_job = mock.MagicMock()
mock_job.state = job_state
mock_job.name = job_name
mock_serialized_job = mock.MagicMock()
mock_serialize_job.return_value = mock_serialized_job
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_default_project_id
):
with mock.patch.object(self.trigger, "_wait_job") as mock_wait_job:
mock_wait_job.side_effect = mock.AsyncMock(return_value=mock_job)
generator = self.trigger.run()
event_actual = await generator.asend(None)
mock_wait_job.assert_awaited_once()
mock_serialize_job.assert_called_once_with(mock_job)
assert event_actual == TriggerEvent(
{
"status": status,
"message": message,
"job": mock_serialized_job,
}
)
@pytest.mark.asyncio
async def test_run_exception(self):
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_default_project_id
):
with mock.patch.object(self.trigger, "_wait_job") as mock_wait_job:
mock_wait_job.side_effect = AirflowException(TEST_ERROR_MESSAGE)
generator = self.trigger.run()
event_actual = await generator.asend(None)
assert event_actual == TriggerEvent(
{
"status": "error",
"message": TEST_ERROR_MESSAGE,
}
)
@pytest.mark.asyncio
async def test_wait_job(self):
with pytest.raises(NotImplementedError):
await self.trigger._wait_job()
@mock.patch(VERTEX_AI_TRIGGER_PATH.format("BaseVertexAIJobTrigger.job_serializer_class"))
def test_serialize_job(self, mock_job_serializer_class):
mock_job = mock.MagicMock()
mock_job_serialized = mock.MagicMock()
mock_to_dict = mock.MagicMock(return_value=mock_job_serialized)
mock_job_serializer_class.to_dict = mock_to_dict
result = self.trigger._serialize_job(mock_job)
mock_to_dict.assert_called_once_with(mock_job)
assert result == mock_job_serialized
| TestBaseVertexAIJobTrigger |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 17225,
"end": 17540
} | class ____(LazyModuleMixin, torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def initialize_parameters(self, *args, **kwargs):
with torch.no_grad():
self.layer = LazyLayerWithInputs()
def forward(self, x, y):
return self.layer(x, y=y)
| LazyModuleKwArgs |
python | getsentry__sentry | tests/sentry/sentry_apps/models/test_sentryappinstallationtoken.py | {
"start": 513,
"end": 1989
} | class ____(TestCase):
def setUp(self) -> None:
self.application = ApiApplication.objects.create(owner=self.user)
self.provider = "provider"
sentry_app = self.create_internal_integration(
webhook_url=None,
name="Vercel Internal Integration",
organization=self.organization,
)
self.api_token = ApiToken.objects.create(
user=self.user, scope_list=(), refresh_token=None, expires_at=None
)
self.install = SentryAppInstallation.objects.create(
sentry_app=sentry_app,
organization_id=self.organization.id,
)
SentryAppInstallationForProvider.objects.create(
organization_id=self.organization.id,
provider=self.provider,
sentry_app_installation=self.install,
)
def test_get_token_empty(self) -> None:
assert not SentryAppInstallationToken.objects.get_token(self.organization.id, self.provider)
def test_get_token_invalid(self) -> None:
assert not SentryAppInstallationToken.objects.get_token(self.organization.id, "")
def test_get_token(self) -> None:
SentryAppInstallationToken.objects.create(
api_token=self.api_token, sentry_app_installation=self.install
)
token = SentryAppInstallationToken.objects.get_token(self.organization.id, self.provider)
assert token == self.api_token.token
| SentryAppInstallationTokenTest |
python | huggingface__transformers | src/transformers/models/video_llama_3/image_processing_video_llama_3.py | {
"start": 3877,
"end": 25758
} | class ____(BaseImageProcessor):
r"""
Constructs a VideoLLaMA3 image processor that dynamically resizes images based on the original images.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions.
size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 1280}`):
Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use when resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
min_pixels (`int`, *optional*, defaults to `56 * 56`):
The min pixels of the image to resize the image.
max_pixels (`int`, *optional*, defaults to `28 * 28 * 1280`):
The max pixels of the image to resize the image.
patch_size (`int`, *optional*, defaults to 14):
The spatial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to 1):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to 1):
The merge size of the vision encoder to llm encoder.
"""
model_input_names = ["pixel_values", "image_grid_thw", "image_merge_sizes"]
valid_kwargs = VideoLlama3ImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: bool = True,
min_pixels: Optional[int] = None,
max_pixels: Optional[int] = None,
patch_size: int = 14,
temporal_patch_size: int = 1,
merge_size: int = 1,
**kwargs,
) -> None:
super().__init__(**kwargs)
if size is not None and ("shortest_edge" not in size or "longest_edge" not in size):
raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
else:
size = {"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 1280}
# backward compatibility: override size with min_pixels and max_pixels if they are provided
if min_pixels is not None:
size["shortest_edge"] = min_pixels
if max_pixels is not None:
size["longest_edge"] = max_pixels
self.min_pixels = size["shortest_edge"]
self.max_pixels = size["longest_edge"]
self.size = size
self.do_resize = do_resize
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.patch_size = patch_size
self.temporal_patch_size = temporal_patch_size
self.merge_size = merge_size
self.do_convert_rgb = do_convert_rgb
if self.temporal_patch_size != 1:
raise ValueError("`temporal_patch_size` must be 1 for VideoLLaMA3")
def _preprocess(
self,
images: Union[ImageInput, VideoInput],
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
patch_size: Optional[int] = None,
temporal_patch_size: Optional[int] = None,
merge_size: Optional[int] = None,
do_convert_rgb: Optional[bool] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.
Args:
images (`ImageInput`):
Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
vision_info (`list[Dict]`, *optional*):
Optional list of dictionaries containing additional information about vision inputs.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
patch_size (`int`, *optional*, defaults to `self.patch_size`):
The spatial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to `self.merge_size`):
The merge size of the vision encoder to llm encoder.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
images = make_flat_list_of_images(images)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
height, width = get_image_size(images[0], channel_dim=input_data_format)
resized_height, resized_width = height, width
processed_images = []
for image in images:
if do_resize:
resized_height, resized_width = smart_resize(
height,
width,
factor=patch_size * merge_size,
min_pixels=size["shortest_edge"],
max_pixels=size["longest_edge"],
)
image = resize(
image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
)
if do_rescale:
image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
processed_images.append(image)
patches = np.array(processed_images)
if data_format == ChannelDimension.LAST:
patches = patches.transpose(0, 3, 1, 2)
if patches.shape[0] % temporal_patch_size != 0:
repeats = np.repeat(
patches[-1][np.newaxis], temporal_patch_size - (patches.shape[0] % temporal_patch_size), axis=0
)
patches = np.concatenate([patches, repeats], axis=0)
channel = patches.shape[1]
grid_t = patches.shape[0] // temporal_patch_size
grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
patches = patches.reshape(
grid_t,
temporal_patch_size,
channel,
grid_h // merge_size,
merge_size,
patch_size,
grid_w // merge_size,
merge_size,
patch_size,
)
patches = patches.transpose(0, 3, 6, 4, 7, 2, 1, 5, 8)
flatten_patches = patches.reshape(
grid_t * grid_h * grid_w, channel * temporal_patch_size * patch_size * patch_size
)
return flatten_patches, (grid_t, grid_h, grid_w)
    def preprocess(
        self,
        images: ImageInput,
        videos: Optional[VideoInput] = None,
        do_resize: Optional[bool] = None,
        size: Optional[dict[str, int]] = None,
        min_pixels: Optional[int] = None,
        max_pixels: Optional[int] = None,
        resample: Optional[PILImageResampling] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        patch_size: Optional[int] = None,
        temporal_patch_size: Optional[int] = None,
        merge_size: Optional[int] = None,
        do_convert_rgb: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """
        Preprocess one or more images into flattened vision-encoder patches.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            videos (`VideoInput`):
                Video to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255. If
                passing in videos with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
                the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            min_pixels (`int`, *optional*, defaults to `self.min_pixels`):
                The min pixels of the image to resize the image.
            max_pixels (`int`, *optional*, defaults to `self.max_pixels`):
                The max pixels of the image to resize the image.
            patch_size (`int`, *optional*, defaults to `self.patch_size`):
                The spatial patch size of the vision encoder.
            temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
                The temporal patch size of the vision encoder.
            merge_size (`int`, *optional*, defaults to `self.merge_size`):
                The merge size of the vision encoder to llm encoder.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:

                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:

                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:

                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        Returns:
            A `BatchFeature` holding `pixel_values`, `image_grid_thw` and `image_merge_sizes`
            arrays (these keys are present only when `images` is not `None`).
        """
        # Resolve per-call overrides against the processor's configured defaults.
        min_pixels = min_pixels if min_pixels is not None else self.min_pixels
        max_pixels = max_pixels if max_pixels is not None else self.max_pixels
        if size is not None:
            if "shortest_edge" not in size or "longest_edge" not in size:
                raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
            # NOTE(review): only `min_pixels` is mirrored back from `size` here (not
            # `max_pixels`); downstream code reads `size` directly, so this assignment
            # looks vestigial — confirm against the upstream processor.
            min_pixels = size["shortest_edge"]
        elif min_pixels is not None and max_pixels is not None:
            # backward compatibility: override size with min_pixels and max_pixels if they are provided
            size = {"shortest_edge": min_pixels, "longest_edge": max_pixels}
        else:
            size = {**self.size}
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        patch_size = patch_size if patch_size is not None else self.patch_size
        temporal_patch_size = temporal_patch_size if temporal_patch_size is not None else self.temporal_patch_size
        merge_size = merge_size if merge_size is not None else self.merge_size
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        if images is not None:
            # Resolve any URL/path inputs, then normalize arbitrary nesting into a flat list.
            images = self.fetch_images(images)
            images = make_flat_list_of_images(images)
        if images is not None and not valid_images(images):
            raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
        # NOTE(review): `do_rescale` is not forwarded here, so the rescale arguments are not
        # cross-validated; `videos` is accepted but never used in this method — presumably
        # handled elsewhere. Confirm both against the upstream implementation.
        validate_preprocess_arguments(
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )
        data = {}
        if images is not None:
            pixel_values, vision_grid_thws = [], []
            for image in images:
                # `patches`: flattened per-patch pixel rows; `image_grid_thw`: this image's
                # (grid_t, grid_h, grid_w) patch-grid dimensions.
                patches, image_grid_thw = self._preprocess(
                    image,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    patch_size=patch_size,
                    temporal_patch_size=temporal_patch_size,
                    merge_size=merge_size,
                    data_format=data_format,
                    do_convert_rgb=do_convert_rgb,
                    input_data_format=input_data_format,
                )
                pixel_values.extend(patches)
                vision_grid_thws.append(image_grid_thw)
            data.update(
                {
                    "pixel_values": np.array(pixel_values),
                    "image_grid_thw": np.array(vision_grid_thws),
                    # One (identical) merge size recorded per processed image.
                    "image_merge_sizes": np.array([merge_size] * len(vision_grid_thws)),
                }
            )
        return BatchFeature(data=data, tensor_type=return_tensors)
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
"""
A utility that returns number of image patches for a given image size.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
images_kwargs (`dict`, *optional*)
Any kwargs to override defaults of the image processor.
Returns:
`int`: Number of image patches per image.
"""
min_pixels = images_kwargs["min_pixels"] if "min_pixels" in images_kwargs else self.size["shortest_edge"]
max_pixels = images_kwargs["max_pixels"] if "max_pixels" in images_kwargs else self.size["longest_edge"]
patch_size = images_kwargs.get("patch_size", self.patch_size)
merge_size = images_kwargs.get("merge_size", self.merge_size)
factor = patch_size * merge_size
resized_height, resized_width = smart_resize(
height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels
)
grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
return grid_h * grid_w
__all__ = ["VideoLlama3ImageProcessor"]
| VideoLlama3ImageProcessor |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 407317,
"end": 415431
class OpacityDatum(DatumChannelMixin, core.FieldOrDatumDefWithConditionDatumDefnumber):
    """
    OpacityDatum schema wrapper.

    Parameters
    ----------
    bandPosition : float
        Relative position on a band of a stacked, binned, time unit, or band scale. For
        example, the marks will be positioned at the beginning of the band if set to ``0``,
        and at the middle of the band if set to ``0.5``.
    condition : dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`]
        One or more value definition(s) with `a parameter or a test predicate
        <https://vega.github.io/vega-lite/docs/condition.html>`__.

        **Note:** A field definition's ``condition`` property can only contain `conditional
        value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
        since Vega-Lite only allows at most one encoded field per encoding channel.
    datum : str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`, :class:`RepeatRef`, :class:`PrimitiveValue`, None
        A constant value in data domain.
    title : str, :class:`Text`, Sequence[str], None
        A title for the field. If ``null``, the title will be removed.

        **Default value:** derived from the field's name and transformation function
        (``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
        the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
        field is binned or has a time unit applied, the applied function is shown in
        parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
        Otherwise, the title is simply the field name.

        **Notes**:

        1) You can customize the default field title format by providing the `fieldTitle
        <https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
        the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
        function via the compile function's options
        <https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.

        2) If both field definition's ``title`` and axis, header, or legend ``title`` are
        defined, axis/header/legend title will be used.
    type : :class:`Type`, Literal['quantitative', 'ordinal', 'temporal', 'nominal', 'geojson']
        The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
        ``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
        ``"geojson"`` type for encoding `'geoshape'
        <https://vega.github.io/vega-lite/docs/geoshape.html>`__.

        Vega-Lite automatically infers data types in many cases as discussed below. However,
        type is required for a field if: (1) the field is not nominal and the field encoding
        has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
        type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
        scale for a field with ``bin`` or ``timeUnit``.

        **Default value:**

        1) For a data ``field``, ``"nominal"`` is the default data type unless the field
        encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
        ``timeUnit`` that satisfies the following criteria:

        * ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
          or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
          ``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
          quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
        * ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
          or (2) the specified scale type is a time or utc scale
        * ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
          order
          <https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
          (2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
          channel is ``order``.

        2) For a constant value in data domain (``datum``):

        * ``"quantitative"`` if the datum is a number
        * ``"nominal"`` if the datum is a string
        * ``"temporal"`` if the datum is `a date time object
          <https://vega.github.io/vega-lite/docs/datetime.html>`__

        **Note:**

        * Data ``type`` describes the semantics of the data rather than the primitive data
          types (number, string, etc.). The same primitive data type can have different
          types of measurement. For example, numeric data can represent quantitative,
          ordinal, or nominal data.
        * Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
          timestamp number (e.g., ``1552199579097``).
        * When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
          ``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
          or `"ordinal" (for using an ordinal bin scale)
          <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
        * When using with `timeUnit
          <https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
          can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
          (for using an ordinal scale)
          <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
        * When using with `aggregate
          <https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
          refers to the post-aggregation data type. For example, we can calculate count
          ``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
          "field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
        * Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
          ``type`` as they must have exactly the same type as their primary channels (e.g.,
          ``x``, ``y``).

        **See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
        documentation.
    """

    # Schema wrappers are validated lazily, not at instantiation time.
    _class_is_valid_at_instantiation = False
    # Name of the Vega-Lite encoding channel this datum definition maps to.
    _encoding_name = "opacity"

    @overload
    def bandPosition(self, _: float, /) -> OpacityDatum: ...

    @overload
    def condition(
        self,
        *,
        test: Optional[str | SchemaBase | Map] = Undefined,
        value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    ) -> OpacityDatum: ...

    @overload
    def condition(
        self,
        *,
        empty: Optional[bool] = Undefined,
        param: Optional[str | SchemaBase] = Undefined,
        value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    ) -> OpacityDatum: ...

    @overload
    def condition(
        self, _: list[core.ConditionalValueDefnumberExprRef], /
    ) -> OpacityDatum: ...

    @overload
    def title(self, _: str | Sequence[str] | None, /) -> OpacityDatum: ...

    @overload
    def type(self, _: Type_T, /) -> OpacityDatum: ...

    def __init__(
        self,
        datum,
        bandPosition: Optional[float] = Undefined,
        condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
        title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
        type: Optional[SchemaBase | Type_T] = Undefined,
        **kwds,
    ):
        super().__init__(
            datum=datum,
            bandPosition=bandPosition,
            condition=condition,
            title=title,
            type=type,
            **kwds,
        )
@with_property_setters
| OpacityDatum |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.