language (string, 1 distinct value) | repo (string, 346 distinct values) | path (string, lengths 6-201) | class_span (dict) | source (string, lengths 21-2.38M) | target (string, lengths 1-96)
|---|---|---|---|---|---|
python | huggingface__transformers | tests/models/markuplm/test_modeling_markuplm.py | {
"start": 1294,
"end": 9484
} | class ____:
"""You can also import this e.g from .test_modeling_markuplm import MarkupLMModelTester"""
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
scope=None,
max_xpath_tag_unit_embeddings=20,
max_xpath_subs_unit_embeddings=30,
tag_pad_id=2,
subs_pad_id=2,
max_depth=10,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
self.tag_pad_id = tag_pad_id
self.subs_pad_id = subs_pad_id
self.max_depth = max_depth
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
xpath_tags_seq = ids_tensor(
[self.batch_size, self.seq_length, self.max_depth], self.max_xpath_tag_unit_embeddings
)
xpath_subs_seq = ids_tensor(
[self.batch_size, self.seq_length, self.max_depth], self.max_xpath_subs_unit_embeddings
)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = self.get_config()
return (
config,
input_ids,
xpath_tags_seq,
xpath_subs_seq,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
)
def get_config(self):
return MarkupLMConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
max_xpath_tag_unit_embeddings=self.max_xpath_tag_unit_embeddings,
max_xpath_subs_unit_embeddings=self.max_xpath_subs_unit_embeddings,
tag_pad_id=self.tag_pad_id,
subs_pad_id=self.subs_pad_id,
max_depth=self.max_depth,
)
def create_and_check_model(
self,
config,
input_ids,
xpath_tags_seq,
xpath_subs_seq,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
):
model = MarkupLMModel(config=config)
model.to(torch_device)
model.eval()
print("Configs:", model.config.tag_pad_id, model.config.subs_pad_id)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_sequence_classification(
self,
config,
input_ids,
xpath_tags_seq,
xpath_subs_seq,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
):
config.num_labels = self.num_labels
model = MarkupLMForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
xpath_tags_seq=xpath_tags_seq,
xpath_subs_seq=xpath_subs_seq,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=sequence_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self,
config,
input_ids,
xpath_tags_seq,
xpath_subs_seq,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
):
config.num_labels = self.num_labels
model = MarkupLMForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
xpath_tags_seq=xpath_tags_seq,
xpath_subs_seq=xpath_subs_seq,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(
self,
config,
input_ids,
xpath_tags_seq,
xpath_subs_seq,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
):
model = MarkupLMForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
xpath_tags_seq=xpath_tags_seq,
xpath_subs_seq=xpath_subs_seq,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
xpath_tags_seq,
xpath_subs_seq,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"xpath_tags_seq": xpath_tags_seq,
"xpath_subs_seq": xpath_subs_seq,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
| MarkupLMModelTester |
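A minimal usage sketch of the pattern the tester above exercises, assuming `torch` and a `transformers` build that ships MarkupLM are installed; the shapes mirror `create_and_check_model`, but the tester class itself is not imported here.

```python
import torch
from transformers import MarkupLMConfig, MarkupLMModel

# Tiny config mirroring the tester's defaults.
config = MarkupLMConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
    max_xpath_tag_unit_embeddings=20,
    max_xpath_subs_unit_embeddings=30,
    max_depth=10,
)
model = MarkupLMModel(config).eval()

batch_size, seq_length = 2, 7
input_ids = torch.randint(0, config.vocab_size, (batch_size, seq_length))
xpath_tags_seq = torch.randint(0, 20, (batch_size, seq_length, config.max_depth))
xpath_subs_seq = torch.randint(0, 30, (batch_size, seq_length, config.max_depth))

with torch.no_grad():
    out = model(input_ids, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq)

# The same shape checks the tester performs.
assert out.last_hidden_state.shape == (batch_size, seq_length, config.hidden_size)
assert out.pooler_output.shape == (batch_size, config.hidden_size)
```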
python | pypa__warehouse | tests/unit/manage/test_forms.py | {
"start": 20541,
"end": 25107
} | class ____:
@pytest.mark.parametrize("selected_project", [None, "foo"])
def test_creation(self, selected_project):
user_id = pretend.stub()
macaroon_service = pretend.stub(get_macaroon_by_description=lambda *a: None)
project_names = ["foo"]
form = forms.CreateMacaroonForm(
formdata=MultiDict(
{"description": "description", "token_scope": "token:user"}
),
user_id=user_id,
macaroon_service=macaroon_service,
project_names=project_names,
selected_project=selected_project,
)
assert form.user_id is user_id
assert form.macaroon_service is macaroon_service
assert form.project_names is project_names
assert form.validate()
def test_validate_description_missing(self):
form = forms.CreateMacaroonForm(
formdata=MultiDict({"token_scope": "scope:user"}),
user_id=pretend.stub(),
macaroon_service=pretend.stub(),
project_names=pretend.stub(),
)
assert not form.validate()
assert form.description.errors.pop() == "Specify a token name"
def test_validate_description_in_use(self):
form = forms.CreateMacaroonForm(
formdata=MultiDict({"description": "dummy", "token_scope": "scope:user"}),
user_id=pretend.stub(),
macaroon_service=pretend.stub(
get_macaroon_by_description=lambda *a: pretend.stub()
),
project_names=pretend.stub(),
)
assert not form.validate()
assert form.description.errors.pop() == "API token name already in use"
def test_validate_token_scope_missing(self):
form = forms.CreateMacaroonForm(
formdata=MultiDict({"description": "dummy"}),
user_id=pretend.stub(),
macaroon_service=pretend.stub(get_macaroon_by_description=lambda *a: None),
project_names=pretend.stub(),
)
assert not form.validate()
assert form.token_scope.errors.pop() == "Specify the token scope"
def test_validate_token_scope_unspecified(self):
form = forms.CreateMacaroonForm(
formdata=MultiDict(
{"description": "dummy", "token_scope": "scope:unspecified"}
),
user_id=pretend.stub(),
macaroon_service=pretend.stub(get_macaroon_by_description=lambda *a: None),
project_names=pretend.stub(),
)
assert not form.validate()
assert form.token_scope.errors.pop() == "Specify the token scope"
@pytest.mark.parametrize(
("scope"), ["not a real scope", "scope:project", "scope:foo:bar"]
)
def test_validate_token_scope_invalid_format(self, scope):
form = forms.CreateMacaroonForm(
formdata=MultiDict({"description": "dummy", "token_scope": scope}),
user_id=pretend.stub(),
macaroon_service=pretend.stub(get_macaroon_by_description=lambda *a: None),
project_names=pretend.stub(),
)
assert not form.validate()
assert form.token_scope.errors.pop() == f"Unknown token scope: {scope}"
def test_validate_token_scope_invalid_project(self):
form = forms.CreateMacaroonForm(
formdata=MultiDict(
{"description": "dummy", "token_scope": "scope:project:foo"}
),
user_id=pretend.stub(),
macaroon_service=pretend.stub(get_macaroon_by_description=lambda *a: None),
project_names=["bar"],
)
assert not form.validate()
assert form.token_scope.errors.pop() == "Unknown or invalid project name: foo"
def test_validate_token_scope_valid_user(self):
form = forms.CreateMacaroonForm(
formdata=MultiDict({"description": "dummy", "token_scope": "scope:user"}),
user_id=pretend.stub(),
macaroon_service=pretend.stub(get_macaroon_by_description=lambda *a: None),
project_names=pretend.stub(),
)
assert form.validate()
def test_validate_token_scope_valid_project(self):
form = forms.CreateMacaroonForm(
formdata=MultiDict(
{"description": "dummy", "token_scope": "scope:project:foo"}
),
user_id=pretend.stub(),
macaroon_service=pretend.stub(get_macaroon_by_description=lambda *a: None),
project_names=["foo"],
)
assert form.validate()
| TestCreateMacaroonForm |
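The scope rules these tests pin down can be summarised in a standalone sketch; `check_token_scope` below is a hypothetical helper written only to reproduce the accepted and rejected `token_scope` values, not Warehouse's actual WTForms validator.

```python
def check_token_scope(scope, project_names):
    """Return an error message, or None if the scope is acceptable."""
    if not scope or scope == "scope:unspecified":
        return "Specify the token scope"
    if scope == "scope:user":
        return None
    parts = scope.split(":", 2)
    if len(parts) != 3 or parts[:2] != ["scope", "project"]:
        return f"Unknown token scope: {scope}"
    project = parts[2]
    if project not in project_names:
        return f"Unknown or invalid project name: {project}"
    return None


assert check_token_scope("scope:user", []) is None
assert check_token_scope("scope:project:foo", ["foo"]) is None
assert check_token_scope("scope:project:foo", ["bar"]) == "Unknown or invalid project name: foo"
assert check_token_scope("not a real scope", []) == "Unknown token scope: not a real scope"
```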
python | plotly__plotly.py | plotly/graph_objs/layout/polar/radialaxis/_autorangeoptions.py | {
"start": 235,
"end": 5919
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.polar.radialaxis"
_path_str = "layout.polar.radialaxis.autorangeoptions"
_valid_props = {
"clipmax",
"clipmin",
"include",
"includesrc",
"maxallowed",
"minallowed",
}
@property
def clipmax(self):
"""
Clip autorange maximum if it goes beyond this value. Has no
effect when `autorangeoptions.maxallowed` is provided.
The 'clipmax' property accepts values of any type
Returns
-------
Any
"""
return self["clipmax"]
@clipmax.setter
def clipmax(self, val):
self["clipmax"] = val
@property
def clipmin(self):
"""
Clip autorange minimum if it goes beyond this value. Has no
effect when `autorangeoptions.minallowed` is provided.
The 'clipmin' property accepts values of any type
Returns
-------
Any
"""
return self["clipmin"]
@clipmin.setter
def clipmin(self, val):
self["clipmin"] = val
@property
def include(self):
"""
Ensure this value is included in autorange.
The 'include' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["include"]
@include.setter
def include(self, val):
self["include"] = val
@property
def includesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `include`.
The 'includesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["includesrc"]
@includesrc.setter
def includesrc(self, val):
self["includesrc"] = val
@property
def maxallowed(self):
"""
Use this value exactly as autorange maximum.
The 'maxallowed' property accepts values of any type
Returns
-------
Any
"""
return self["maxallowed"]
@maxallowed.setter
def maxallowed(self, val):
self["maxallowed"] = val
@property
def minallowed(self):
"""
Use this value exactly as autorange minimum.
The 'minallowed' property accepts values of any type
Returns
-------
Any
"""
return self["minallowed"]
@minallowed.setter
def minallowed(self, val):
self["minallowed"] = val
@property
def _prop_descriptions(self):
return """\
clipmax
Clip autorange maximum if it goes beyond this value.
Has no effect when `autorangeoptions.maxallowed` is
provided.
clipmin
Clip autorange minimum if it goes beyond this value.
Has no effect when `autorangeoptions.minallowed` is
provided.
include
Ensure this value is included in autorange.
includesrc
Sets the source reference on Chart Studio Cloud for
`include`.
maxallowed
Use this value exactly as autorange maximum.
minallowed
Use this value exactly as autorange minimum.
"""
def __init__(
self,
arg=None,
clipmax=None,
clipmin=None,
include=None,
includesrc=None,
maxallowed=None,
minallowed=None,
**kwargs,
):
"""
Construct a new Autorangeoptions object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.polar.r
adialaxis.Autorangeoptions`
clipmax
Clip autorange maximum if it goes beyond this value.
Has no effect when `autorangeoptions.maxallowed` is
provided.
clipmin
Clip autorange minimum if it goes beyond this value.
Has no effect when `autorangeoptions.minallowed` is
provided.
include
Ensure this value is included in autorange.
includesrc
Sets the source reference on Chart Studio Cloud for
`include`.
maxallowed
Use this value exactly as autorange maximum.
minallowed
Use this value exactly as autorange minimum.
Returns
-------
Autorangeoptions
"""
super().__init__("autorangeoptions")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.polar.radialaxis.Autorangeoptions
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.polar.radialaxis.Autorangeoptions`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("clipmax", arg, clipmax)
self._set_property("clipmin", arg, clipmin)
self._set_property("include", arg, include)
self._set_property("includesrc", arg, includesrc)
self._set_property("maxallowed", arg, maxallowed)
self._set_property("minallowed", arg, minallowed)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Autorangeoptions |
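These options are normally set through the figure API rather than by instantiating the class directly; a short sketch, assuming a recent `plotly` release that includes `autorangeoptions`:

```python
import plotly.graph_objects as go

fig = go.Figure(go.Scatterpolar(r=[1, 5, 2, 8], theta=[0, 90, 180, 270]))

# Constrain the radial axis autorange instead of fixing the range outright.
fig.update_layout(
    polar=dict(
        radialaxis=dict(
            autorangeoptions=dict(clipmin=0, clipmax=10, include=[0])
        )
    )
)
```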
python | spyder-ide__spyder | spyder/plugins/externalterminal/widgets/run_conf.py | {
"start": 932,
"end": 4035
} | class ____(RunExecutorConfigurationGroup):
"""External terminal Python run configuration options."""
def __init__(self, parent, context: Context, input_extension: str,
input_metadata: RunConfigurationMetadata):
super().__init__(parent, context, input_extension, input_metadata)
self.dir = None
# --- Interpreter ---
interpreter_group = QGroupBox(_("Python interpreter"))
interpreter_layout = QVBoxLayout(interpreter_group)
# --- System terminal ---
external_group = QWidget(self)
external_layout = QGridLayout()
external_group.setLayout(external_layout)
self.interact_cb = QCheckBox(
_("Interact with the interpreter after execution")
)
external_layout.addWidget(self.interact_cb, 1, 0, 1, -1)
self.pclo_cb = QCheckBox(_("Interpreter options:"))
external_layout.addWidget(self.pclo_cb, 3, 0)
self.pclo_edit = QLineEdit(self)
self.pclo_cb.toggled.connect(self.pclo_edit.setEnabled)
self.pclo_edit.setEnabled(False)
self.pclo_edit.setToolTip(
_("<b>-u</b> is added to the other options you set here")
)
external_layout.addWidget(self.pclo_edit, 3, 1)
interpreter_layout.addWidget(external_group)
# --- General settings ----
common_group = QGroupBox(_("Bash/Batch script settings"))
common_layout = QGridLayout(common_group)
self.clo_cb = QCheckBox(_("Command line options:"))
common_layout.addWidget(self.clo_cb, 0, 0)
self.clo_edit = QLineEdit(self)
self.clo_edit.setMinimumWidth(300)
self.clo_cb.toggled.connect(self.clo_edit.setEnabled)
self.clo_edit.setEnabled(False)
common_layout.addWidget(self.clo_edit, 0, 1)
layout = QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(interpreter_group)
layout.addWidget(common_group)
layout.addStretch(100)
@staticmethod
def get_default_configuration() -> dict:
return {
'args_enabled': False,
'args': '',
'interact': True,
'python_args_enabled': False,
'python_args': '',
}
def set_configuration(self, config: dict):
interact = config['interact']
args_enabled = config['args_enabled']
args = config['args']
py_args_enabled = config['python_args_enabled']
py_args = config['python_args']
self.interact_cb.setChecked(interact)
self.pclo_cb.setChecked(args_enabled)
self.pclo_edit.setText(args)
self.clo_cb.setChecked(py_args_enabled)
self.clo_edit.setText(py_args)
def get_configuration(self) -> dict:
return {
'args_enabled': self.pclo_cb.isChecked(),
'args': self.pclo_edit.text(),
'interact': self.interact_cb.isChecked(),
'python_args_enabled': self.clo_cb.isChecked(),
'python_args': self.clo_edit.text(),
}
| ExternalTerminalPyConfiguration |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 922470,
"end": 922858
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("ReleaseAsset", graphql_name="node")
"""The item at the end of the edge."""
| ReleaseAssetEdge |
python | ray-project__ray | python/ray/tune/schedulers/pbt.py | {
"start": 8881,
"end": 42957
} | class ____(FIFOScheduler):
"""Implements the Population Based Training (PBT) algorithm.
https://www.deepmind.com/blog/population-based-training-of-neural-networks
PBT trains a group of models (or agents) in parallel. Periodically, poorly
performing models clone the state of the top performers, and a random
mutation is applied to their hyperparameters in the hopes of
outperforming the current top models.
Unlike other hyperparameter search algorithms, PBT mutates hyperparameters
during training time. This enables very fast hyperparameter discovery and
also automatically discovers good annealing schedules.
This Tune PBT implementation considers all trials added as part of the
PBT population. If the number of trials exceeds the cluster capacity,
they will be time-multiplexed as to balance training progress across the
population. To run multiple trials, use `tune.TuneConfig(num_samples=<int>)`.
In {LOG_DIR}/{MY_EXPERIMENT_NAME}/, all mutations are logged in
`pbt_global.txt` and individual policy perturbations are recorded
in pbt_policy_{i}.txt. Tune logs: [target trial tag, clone trial tag,
target trial iteration, clone trial iteration, old config, new config]
on each perturbation step.
Args:
time_attr: The training result attr to use for comparing time.
Note that you can pass in something non-temporal such as
`training_iteration` as a measure of progress, the only requirement
is that the attribute should increase monotonically.
metric: The training result objective value attribute. Stopping
procedures will use this attribute. If None but a mode was passed,
the `ray.tune.result.DEFAULT_METRIC` will be used per default.
mode: One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
perturbation_interval: Models will be considered for
perturbation at this interval of `time_attr`. Note that
perturbation incurs checkpoint overhead, so you shouldn't set this
to be too frequent.
burn_in_period: Models will not be considered for
perturbation before this interval of `time_attr` has passed. This
guarantees that models are trained for at least a certain amount
of time or timesteps before being perturbed.
hyperparam_mutations: Hyperparams to mutate. The format is
as follows: for each key, either a list, function,
or a tune search space object (tune.loguniform, tune.uniform,
etc.) can be provided. A list specifies an allowed set of
categorical values. A function or tune search space object
specifies the distribution of a continuous parameter. You must
use tune.choice, tune.uniform, tune.loguniform, etc.. Arbitrary
tune.sample_from objects are not supported.
A key can also hold a dict for nested hyperparameters.
You must specify at least one of `hyperparam_mutations` or
`custom_explore_fn`.
Tune will sample the search space provided by
`hyperparam_mutations` for the initial hyperparameter values if the
corresponding hyperparameters are not present in a trial's initial `config`.
quantile_fraction: Parameters are transferred from the top
`quantile_fraction` fraction of trials to the bottom
`quantile_fraction` fraction. Needs to be between 0 and 0.5.
Setting it to 0 essentially implies doing no exploitation at all.
resample_probability: The probability of resampling from the
original distribution when applying `hyperparam_mutations`. If not
resampled, the value will be perturbed by a factor chosen from
`perturbation_factors` if continuous, or changed to an adjacent value
if discrete.
perturbation_factors: Scaling factors to choose between when mutating
a continuous hyperparameter.
custom_explore_fn: You can also specify a custom exploration
function. This function is invoked as `f(config)` after built-in
perturbations from `hyperparam_mutations` are applied, and should
return `config` updated as needed. You must specify at least one of
`hyperparam_mutations` or `custom_explore_fn`.
log_config: Whether to log the ray config of each model to
local_dir at each exploit. Allows config schedule to be
reconstructed.
require_attrs: Whether to require time_attr and metric to appear
in result for every iteration. If True, error will be raised
if these values are not present in trial result.
synch: If False, will use asynchronous implementation of
PBT. Trial perturbations occur every perturbation_interval for each
trial independently. If True, will use synchronous implementation
of PBT. Perturbations will occur only after all trials are
synced at the same time_attr every perturbation_interval.
Defaults to False. See Appendix A.1 here
https://arxiv.org/pdf/1711.09846.pdf.
.. code-block:: python
import random
from ray import tune
from ray.tune.schedulers import PopulationBasedTraining
pbt = PopulationBasedTraining(
time_attr="training_iteration",
metric="episode_reward_mean",
mode="max",
perturbation_interval=10, # every 10 `time_attr` units
# (training_iterations in this case)
hyperparam_mutations={
# Perturb factor1 by scaling it by 0.8 or 1.2. Resampling
# resets it to a value sampled from the lambda function.
"factor_1": lambda: random.uniform(0.0, 20.0),
# Alternatively, use tune search space primitives.
# The search space for factor_1 is equivalent to factor_2.
"factor_2": tune.uniform(0.0, 20.0),
# Perturb factor3 by changing it to an adjacent value, e.g.
# 10 -> 1 or 10 -> 100. Resampling will choose at random.
"factor_3": [1, 10, 100, 1000, 10000],
# Using tune.choice is NOT equivalent to the above.
# factor_4 is treated as a continuous hyperparameter.
"factor_4": tune.choice([1, 10, 100, 1000, 10000]),
})
tuner = tune.Tuner(
trainable,
tune_config=tune.TuneConfig(
scheduler=pbt,
num_samples=8,
),
)
tuner.fit()
"""
def __init__(
self,
time_attr: str = "time_total_s",
metric: Optional[str] = None,
mode: Optional[str] = None,
perturbation_interval: float = 60.0,
burn_in_period: float = 0.0,
hyperparam_mutations: Dict[
str, Union[dict, list, tuple, Callable, Domain]
] = None,
quantile_fraction: float = 0.25,
resample_probability: float = 0.25,
perturbation_factors: Tuple[float, float] = (1.2, 0.8),
custom_explore_fn: Optional[Callable] = None,
log_config: bool = True,
require_attrs: bool = True,
synch: bool = False,
):
hyperparam_mutations = hyperparam_mutations or {}
for value in hyperparam_mutations.values():
if not isinstance(value, (dict, list, tuple, Domain, Callable)):
raise TypeError(
"`hyperparam_mutation` values must be either "
"a List, Tuple, Dict, a tune search space object, or "
"a callable."
)
if isinstance(value, Function):
raise ValueError(
"arbitrary tune.sample_from objects are not "
"supported for `hyperparam_mutation` values."
"You must use other built in primitives like"
"tune.uniform, tune.loguniform, etc."
)
if not hyperparam_mutations and not custom_explore_fn:
raise TuneError(
"You must specify at least one of `hyperparam_mutations` "
"or `custom_explore_fn` to use PBT."
)
if quantile_fraction > 0.5 or quantile_fraction < 0:
raise ValueError(
"You must set `quantile_fraction` to a value between 0 and"
"0.5. Current value: '{}'".format(quantile_fraction)
)
if perturbation_interval <= 0:
raise ValueError(
"perturbation_interval must be a positive number greater "
"than 0. Current value: '{}'".format(perturbation_interval)
)
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
super().__init__()
self._metric = metric
self._mode = mode
self._metric_op = None
if self._mode == "max":
self._metric_op = 1.0
elif self._mode == "min":
self._metric_op = -1.0
self._time_attr = time_attr
self._perturbation_interval = perturbation_interval
self._burn_in_period = burn_in_period
self._hyperparam_mutations = hyperparam_mutations
self._quantile_fraction = quantile_fraction
self._resample_probability = resample_probability
self._perturbation_factors = perturbation_factors
self._trial_state: dict[Trial, _PBTTrialState] = {}
self._custom_explore_fn = custom_explore_fn
self._log_config = log_config
self._require_attrs = require_attrs
self._synch = synch
self._next_perturbation_sync = max(
self._perturbation_interval,
self._burn_in_period,
)
# Metrics
self._num_checkpoints = 0
self._num_perturbations = 0
def set_search_properties(
self, metric: Optional[str], mode: Optional[str], **spec
) -> bool:
if self._metric and metric:
return False
if self._mode and mode:
return False
if metric:
self._metric = metric
if mode:
self._mode = mode
if self._mode == "max":
self._metric_op = 1.0
elif self._mode == "min":
self._metric_op = -1.0
if self._metric is None and self._mode:
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
return True
def on_trial_add(self, tune_controller: "TuneController", trial: Trial):
if tune_controller.search_alg is not None and isinstance(
tune_controller.search_alg, SearchGenerator
):
raise ValueError(
"Search algorithms cannot be used with {} "
"schedulers. Please remove {}.".format(
self.__class__.__name__, tune_controller.search_alg
)
)
if not self._metric or not self._metric_op:
raise ValueError(
"{} has been instantiated without a valid `metric` ({}) or "
"`mode` ({}) parameter. Either pass these parameters when "
"instantiating the scheduler, or pass them as parameters "
"to `tune.TuneConfig()`".format(
self.__class__.__name__, self._metric, self._mode
)
)
checkpoint_config = trial.run_metadata.checkpoint_manager.checkpoint_config
if (
checkpoint_config.num_to_keep
and checkpoint_config.num_to_keep <= 2
and log_once("pbt_num_to_keep")
):
warnings.warn(
"Using `CheckpointConfig.num_to_keep <= 2` with PBT can lead to "
"restoration problems when checkpoint are deleted too early for "
"other trials to exploit them. If this happens, increase the value "
"of `num_to_keep`."
)
self._trial_state[trial] = _PBTTrialState(trial)
for attr in self._hyperparam_mutations.keys():
if attr not in trial.config:
if log_once(attr + "-missing"):
logger.debug(
"Cannot find {} in config. Using search "
"space provided by hyperparam_mutations."
)
# Add attr to trial's config by sampling search space from
# hyperparam_mutations.
_fill_config(trial.config, attr, self._hyperparam_mutations[attr])
# Make sure this attribute is added to CLI output.
trial.evaluated_params[attr] = trial.config[attr]
def on_trial_result(
self, tune_controller: "TuneController", trial: Trial, result: Dict
) -> str:
if self._time_attr not in result:
time_missing_msg = (
"Cannot find time_attr {} "
"in trial result {}. Make sure that this "
"attribute is returned in the "
"results of your Trainable.".format(self._time_attr, result)
)
if self._require_attrs:
raise RuntimeError(
time_missing_msg
+ "If this error is expected, you can change this to "
"a warning message by "
"setting PBT(require_attrs=False)"
)
else:
if log_once("pbt-time_attr-error"):
logger.warning(time_missing_msg)
if self._metric not in result:
metric_missing_msg = (
"Cannot find metric {} in trial result {}. "
"Make sure that this attribute is returned "
"in the "
"results of your Trainable.".format(self._metric, result)
)
if self._require_attrs:
raise RuntimeError(
metric_missing_msg + "If this error is expected, "
"you can change this to a warning message by "
"setting PBT(require_attrs=False)"
)
else:
if log_once("pbt-metric-error"):
logger.warning(metric_missing_msg)
if self._metric not in result or self._time_attr not in result:
return TrialScheduler.CONTINUE
time = result[self._time_attr]
state = self._trial_state[trial]
# Continue training if burn-in period has not been reached, yet.
if time < self._burn_in_period:
logger.debug(f"Still in burn-in period: {time} < {self._burn_in_period}")
return TrialScheduler.CONTINUE
# Continue training if perturbation interval has not been reached, yet.
time_since_perturb = time - state.last_perturbation_time
if time_since_perturb < self._perturbation_interval:
logger.debug(
f"Perturbation interval not reached: "
f"{time_since_perturb} < {self._perturbation_interval}"
)
return TrialScheduler.CONTINUE # avoid checkpoint overhead
logger.debug(f"Updating trial state for trial {trial} at time {time}")
self._save_trial_state(state, time, result, trial)
if not self._synch:
state.last_perturbation_time = time
lower_quantile, upper_quantile = self._quantiles()
decision = TrialScheduler.CONTINUE
for other_trial in tune_controller.get_trials():
if other_trial.status in [Trial.PENDING, Trial.PAUSED]:
decision = TrialScheduler.PAUSE
break
self._checkpoint_or_exploit(
trial, tune_controller, upper_quantile, lower_quantile
)
return TrialScheduler.NOOP if trial.status == Trial.PAUSED else decision
else:
# Synchronous mode.
if any(
self._trial_state[t].last_train_time < self._next_perturbation_sync
and t != trial
for t in tune_controller.get_live_trials()
):
logger.debug(
f"Sync: Other trials are not at perturb time, yet. "
f"Pausing trial {trial} to wait."
)
else:
# All trials are synced at the same timestep.
logger.debug("Sync: All trials are at perturb time.")
lower_quantile, upper_quantile = self._quantiles()
all_trials = tune_controller.get_trials()
not_in_quantile = []
for t in all_trials:
if t not in lower_quantile and t not in upper_quantile:
not_in_quantile.append(t)
logger.debug(
"Trial statistics\n"
f"Upper quantile: {upper_quantile}\n"
f"Lower quantile: {lower_quantile}\n"
f"Not in quantile: {not_in_quantile}"
)
# Move upper quantile trials to beginning and lower quantile
# to end. This ensures that checkpointing of strong trials
# occurs before exploiting of weaker ones.
all_trials = upper_quantile + not_in_quantile + lower_quantile
for t in all_trials:
logger.debug(f"Perturbing trial {t}")
self._trial_state[t].last_perturbation_time = time
self._checkpoint_or_exploit(
t, tune_controller, upper_quantile, lower_quantile
)
all_train_times = [
self._trial_state[t].last_train_time
for t in tune_controller.get_trials()
]
max_last_train_time = max(all_train_times)
self._next_perturbation_sync = max(
self._next_perturbation_sync + self._perturbation_interval,
max_last_train_time,
)
logger.debug(f"Next perturb at time {self._next_perturbation_sync}")
# In sync mode we should pause all trials once result comes in.
# Once a perturbation step happens for all trials, they should
# still all be paused.
# choose_trial_to_run will then pick the next trial to run out of
# the paused trials.
return (
TrialScheduler.NOOP
if trial.status == Trial.PAUSED
else TrialScheduler.PAUSE
)
def _save_trial_state(
self, state: _PBTTrialState, time: int, result: Dict, trial: Trial
):
"""Saves necessary trial information when result is received.
Args:
state: The state object for the trial.
time: The current timestep of the trial.
result: The trial's result dictionary.
trial: The trial object.
"""
# This trial has reached its perturbation interval.
# Record new state in the state object.
score = self._metric_op * result[self._metric]
state.last_score = score
state.last_train_time = time
state.last_result = result
return score
def _checkpoint_or_exploit(
self,
trial: Trial,
tune_controller: "TuneController",
upper_quantile: List[Trial],
lower_quantile: List[Trial],
):
"""Checkpoint if in upper quantile, exploits if in lower."""
state = self._trial_state[trial]
if trial in upper_quantile:
# The trial last result is only updated after the scheduler
# callback. So, we override with the current result.
logger.debug(f"Trial {trial} is in upper quantile. Saving checkpoint.")
if trial.status == Trial.PAUSED:
if trial.temporary_state.saving_to and isinstance(
trial.temporary_state.saving_to, _FutureTrainingResult
):
logger.debug(f"Trial {trial} is still saving.")
state.last_checkpoint = trial.temporary_state.saving_to
else:
# Paused trial will always have an in-memory checkpoint.
logger.debug(
f"Trial {trial} is paused. Use last available "
f"checkpoint {trial.checkpoint}."
)
state.last_checkpoint = trial.checkpoint
else:
logger.debug(f"Instructing {trial} to save.")
state.last_checkpoint = tune_controller._schedule_trial_save(
trial, result=state.last_result
)
self._num_checkpoints += 1
else:
state.last_checkpoint = None # not a top trial
if trial in lower_quantile:
trial_to_clone = random.choice(upper_quantile)
assert trial is not trial_to_clone
clone_state = self._trial_state[trial_to_clone]
last_checkpoint = clone_state.last_checkpoint
logger.debug(
f"Trial {trial} is in lower quantile. "
f"Exploiting trial {trial_to_clone}."
)
if isinstance(last_checkpoint, _FutureTrainingResult):
training_result = last_checkpoint.resolve()
if training_result:
clone_state.last_result = training_result.metrics
clone_state.last_checkpoint = training_result.checkpoint
last_checkpoint = clone_state.last_checkpoint
else:
logger.debug(
"PBT-scheduled checkpoint save resolved to None. Trial "
f"{trial_to_clone} didn't save any checkpoint before "
f"and can't be exploited."
)
last_checkpoint = None
if not last_checkpoint:
logger.info(
f"[pbt]: no checkpoint for trial {trial_to_clone}."
f" Skip exploit for Trial {trial}"
)
return
self._exploit(tune_controller, trial, trial_to_clone)
def _log_config_on_step(
self,
trial_state: _PBTTrialState,
new_state: _PBTTrialState,
trial: Trial,
trial_to_clone: Trial,
new_config: Dict,
):
"""Logs transition during exploit/exploit step.
For each step, logs: [target trial tag, clone trial tag, target trial
iteration, clone trial iteration, old config, new config].
"""
trial_name, trial_to_clone_name = (trial_state.orig_tag, new_state.orig_tag)
trial_id = trial.trial_id
trial_to_clone_id = trial_to_clone.trial_id
trial_path = os.path.join(
trial.local_experiment_path, "pbt_policy_" + trial_id + ".txt"
)
trial_to_clone_path = os.path.join(
trial_to_clone.local_dir, "pbt_policy_" + trial_to_clone_id + ".txt"
)
policy = [
trial_name,
trial_to_clone_name,
trial.last_result.get(TRAINING_ITERATION, 0),
trial_to_clone.last_result.get(TRAINING_ITERATION, 0),
trial_to_clone.config,
new_config,
]
# Log to global file.
with open(
os.path.join(trial.local_experiment_path, "pbt_global.txt"), "a+"
) as f:
print(json.dumps(policy, cls=SafeFallbackEncoder), file=f)
# Overwrite state in target trial from trial_to_clone.
if os.path.exists(trial_to_clone_path):
shutil.copyfile(trial_to_clone_path, trial_path)
# Log new exploit in target trial log.
with open(trial_path, "a+") as f:
f.write(json.dumps(policy, cls=SafeFallbackEncoder) + "\n")
def _get_new_config(self, trial: Trial, trial_to_clone: Trial) -> Tuple[Dict, Dict]:
"""Gets new config for trial by exploring trial_to_clone's config.
Args:
trial: The current trial that decided to exploit trial_to_clone.
trial_to_clone: The top-performing trial with a hyperparameter config
that the current trial will explore by perturbing.
Returns:
new_config: New hyperparameter configuration (after random mutations).
operations: Map of hyperparams -> strings describing mutation operations
performed
"""
return _explore(
trial_to_clone.config,
self._hyperparam_mutations,
self._resample_probability,
self._perturbation_factors,
self._custom_explore_fn,
)
def _summarize_hyperparam_changes(
self,
old_params: Dict,
new_params: Dict,
operations: Optional[Dict] = None,
prefix: str = "",
) -> str:
"""Generates a summary of hyperparameter changes from a PBT "explore" step.
Example:
Given the following hyperparam_mutations:
hyperparam_mutations = {
"a": tune.uniform(0, 1),
"b": list(range(5)),
"c": {
"d": tune.uniform(2, 3),
"e": {"f": [-1, 0, 1]},
},
}
This is an example summary output of the operations performed on old_params
to get new_params:
a : 0.5 --- (* 0.8) --> 0.4
b : 2 --- (resample) --> 4
c :
d : 2.5 --- (* 1.2) --> 3.0
e :
f : 0 --- (shift right) --> 1
The summary shows the old and new hyperparameter values, with the operation
used to perturb labeled in between.
If the operation for a certain hyperparameter is not provided, then the summary
will just contain arrows without a label. (ex: a : 0.5 -----> 0.4)
Args:
old_params: Old values of hyperparameters that are perturbed to generate
the new config
new_params: The newly generated hyperparameter config from PBT exploration
operations: Map of hyperparams -> string descriptors the operations
performed to generate the values in `new_params`
prefix: Helper argument to format nested dict hyperparam configs
Returns:
summary_str: The hyperparameter change summary to print/log.
"""
summary_str = ""
if not old_params:
return summary_str
for param_name in old_params:
old_val = old_params[param_name]
assert param_name in new_params, (
"`old_params` and `new_params` "
f"must both contain the key: '{param_name}'\n"
f"old_params.keys() = {old_params.keys()}\n"
f"new_params.keys() = {new_params.keys()}"
)
new_val = new_params[param_name]
summary_str += f"{prefix}{param_name} : "
if isinstance(old_val, Dict):
# Handle nested hyperparameters by recursively summarizing
summary_str += "\n"
nested_operations = operations.get(param_name, {})
summary_str += self._summarize_hyperparam_changes(
old_val,
new_val,
operations=nested_operations,
prefix=prefix + " " * 4,
)
else:
op = operations.get(param_name, None)
if not op:
arrow = "----->"
else:
arrow = f"--- ({op}) -->"
summary_str += f"{old_val} {arrow} {new_val}\n"
return summary_str
def _exploit(
self,
tune_controller: "TuneController",
trial: Trial,
trial_to_clone: Trial,
):
"""Transfers perturbed state from trial_to_clone -> trial.
If specified, also logs the updated hyperparam state.
"""
trial_state = self._trial_state[trial]
new_state = self._trial_state[trial_to_clone]
class_name = self.__class__.__name__
logger.info(
f"\n\n[{class_name}] [Exploit] Cloning trial "
"{} (score = {:4f}) into trial {} (score = {:4f})\n".format(
trial_to_clone.trial_id,
new_state.last_score,
trial.trial_id,
trial_state.last_score,
)
)
new_config, operations = self._get_new_config(trial, trial_to_clone)
# Only log mutated hyperparameters and not entire config.
old_params = _filter_mutated_params_from_config(
trial_to_clone.config, self._hyperparam_mutations
)
new_params = _filter_mutated_params_from_config(
new_config, self._hyperparam_mutations
)
explore_info_str = (
f"\n\n[{class_name}] [Explore] Perturbed the hyperparameter config of trial"
f"{trial.trial_id}:\n"
)
explore_info_str += (
self._summarize_hyperparam_changes(old_params, new_params, operations)
or "No hyperparameters mutated."
)
logger.info(explore_info_str)
if self._log_config:
self._log_config_on_step(
trial_state, new_state, trial, trial_to_clone, new_config
)
new_tag = _make_experiment_tag(
trial_state.orig_tag, new_config, self._hyperparam_mutations
)
if trial.status == Trial.PAUSED:
# If trial is paused we update it with a new checkpoint.
# When the trial is started again, the new checkpoint is used.
if not self._synch:
raise TuneError(
"Trials should be paused here only if in "
"synchronous mode. If you encounter this error"
" please raise an issue on Ray Github."
)
else:
tune_controller.pause_trial(trial, should_checkpoint=False)
trial.set_experiment_tag(new_tag)
# Clone hyperparameters from the `trial_to_clone`
trial.set_config(new_config)
# Resume training from a shallow copy of `trial_to_clone`'s latest
# checkpoint
checkpoint_to_exploit: Checkpoint = copy.copy(new_state.last_checkpoint)
trial.run_metadata.checkpoint_manager._latest_checkpoint_result = (
_TrainingResult(
checkpoint=checkpoint_to_exploit, metrics=new_state.last_result
)
)
self._num_perturbations += 1
# Transfer over the last perturbation time as well
trial_state.last_perturbation_time = new_state.last_perturbation_time
trial_state.last_train_time = new_state.last_train_time
def _quantiles(self) -> Tuple[List[Trial], List[Trial]]:
"""Returns trials in the lower and upper `quantile` of the population.
If there is not enough data to compute this, returns empty lists.
"""
trials = []
for trial, state in self._trial_state.items():
logger.debug("Trial {}, state {}".format(trial, state))
if trial.is_finished():
logger.debug("Trial {} is finished".format(trial))
if state.last_score is not None and not trial.is_finished():
trials.append(trial)
# last_score is by construction never None
trials.sort(key=lambda t: self._trial_state[t].last_score) # type: ignore[arg-type,return-value]
if len(trials) <= 1:
return [], []
else:
num_trials_in_quantile = int(
math.ceil(len(trials) * self._quantile_fraction)
)
if num_trials_in_quantile > len(trials) / 2:
num_trials_in_quantile = int(math.floor(len(trials) / 2))
return (trials[:num_trials_in_quantile], trials[-num_trials_in_quantile:])
def choose_trial_to_run(self, tune_controller: "TuneController") -> Optional[Trial]:
"""Ensures all trials get fair share of time (as defined by time_attr).
This enables the PBT scheduler to support a greater number of
concurrent trials than can fit in the cluster at any given time.
"""
candidates = []
for trial in tune_controller.get_trials():
if trial.status in [
Trial.PENDING,
Trial.PAUSED,
]:
if not self._synch:
candidates.append(trial)
elif (
self._trial_state[trial].last_train_time
< self._next_perturbation_sync
):
candidates.append(trial)
candidates.sort(key=lambda trial: self._trial_state[trial].last_train_time)
return candidates[0] if candidates else None
# Unit test only. TODO(xwjiang): Remove test-specific APIs.
def reset_stats(self):
self._num_perturbations = 0
self._num_checkpoints = 0
# Unit test only. TODO(xwjiang): Remove test-specific APIs.
def last_scores(self, trials: List[Trial]) -> List[float]:
scores = []
for trial in trials:
state = self._trial_state[trial]
if state.last_score is not None and not trial.is_finished():
scores.append(state.last_score)
return scores
def debug_string(self) -> str:
return "PopulationBasedTraining: {} checkpoints, {} perturbs".format(
self._num_checkpoints, self._num_perturbations
)
@PublicAPI
| PopulationBasedTraining |
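A self-contained sketch (plain Python, no Ray required) of how `_quantiles` splits the population when `quantile_fraction=0.25`; the trial names and scores are illustrative only.

```python
import math

quantile_fraction = 0.25
# Scores after applying self._metric_op, so higher is always better here.
scores = {"t1": 0.9, "t2": 0.1, "t3": 0.5, "t4": 0.7,
          "t5": 0.3, "t6": 0.8, "t7": 0.2, "t8": 0.6}

trials = sorted(scores, key=scores.get)              # ascending: worst first
num = int(math.ceil(len(trials) * quantile_fraction))
num = min(num, len(trials) // 2)                     # never more than half

lower, upper = trials[:num], trials[-num:]
print(lower)  # ['t2', 't7'] -> these exploit (clone config/checkpoint from the upper set)
print(upper)  # ['t6', 't1'] -> these are checkpointed as exploitation targets
```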
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/cli/parser.py | {
"start": 5201,
"end": 10811
} | class ____(CustomOptionParser):
"""Custom option parser which updates its defaults by checking the
configuration files and environmental variables"""
def __init__(
self,
*args: Any,
name: str,
isolated: bool = False,
**kwargs: Any,
) -> None:
self.name = name
self.config = Configuration(isolated)
assert self.name
super().__init__(*args, **kwargs)
def check_default(self, option: optparse.Option, key: str, val: Any) -> Any:
try:
return option.check_value(key, val)
except optparse.OptionValueError as exc:
print(f"An error occurred during configuration: {exc}")
sys.exit(3)
def _get_ordered_configuration_items(
self,
) -> Generator[Tuple[str, Any], None, None]:
# Configuration gives keys in an unordered manner. Order them.
override_order = ["global", self.name, ":env:"]
# Pool the options into different groups
section_items: Dict[str, List[Tuple[str, Any]]] = {
name: [] for name in override_order
}
for section_key, val in self.config.items():
# ignore empty values
if not val:
logger.debug(
"Ignoring configuration key '%s' as it's value is empty.",
section_key,
)
continue
section, key = section_key.split(".", 1)
if section in override_order:
section_items[section].append((key, val))
# Yield each group in their override order
for section in override_order:
for key, val in section_items[section]:
yield key, val
def _update_defaults(self, defaults: Dict[str, Any]) -> Dict[str, Any]:
"""Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists)."""
# Accumulate complex default state.
self.values = optparse.Values(self.defaults)
late_eval = set()
# Then set the options with those values
for key, val in self._get_ordered_configuration_items():
# '--' because configuration supports only long names
option = self.get_option("--" + key)
# Ignore options not present in this parser. E.g. non-globals put
# in [global] by users that want them to apply to all applicable
# commands.
if option is None:
continue
assert option.dest is not None
if option.action in ("store_true", "store_false"):
try:
val = strtobool(val)
except ValueError:
self.error(
f"{val} is not a valid value for {key} option, "
"please specify a boolean value like yes/no, "
"true/false or 1/0 instead."
)
elif option.action == "count":
with suppress(ValueError):
val = strtobool(val)
with suppress(ValueError):
val = int(val)
if not isinstance(val, int) or val < 0:
self.error(
f"{val} is not a valid value for {key} option, "
"please instead specify either a non-negative integer "
"or a boolean value like yes/no or false/true "
"which is equivalent to 1/0."
)
elif option.action == "append":
val = val.split()
val = [self.check_default(option, key, v) for v in val]
elif option.action == "callback":
assert option.callback is not None
late_eval.add(option.dest)
opt_str = option.get_opt_string()
val = option.convert_value(opt_str, val)
# From take_action
args = option.callback_args or ()
kwargs = option.callback_kwargs or {}
option.callback(option, opt_str, val, self, *args, **kwargs)
else:
val = self.check_default(option, key, val)
defaults[option.dest] = val
for key in late_eval:
defaults[key] = getattr(self.values, key)
self.values = None
return defaults
def get_default_values(self) -> optparse.Values:
"""Overriding to make updating the defaults after instantiation of
the option parser possible, _update_defaults() does the dirty work."""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
# Load the configuration, or error out in case of an error
try:
self.config.load()
except ConfigurationError as err:
self.exit(UNKNOWN_ERROR, str(err))
defaults = self._update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
assert option.dest is not None
default = defaults.get(option.dest)
if isinstance(default, str):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
def error(self, msg: str) -> None:
self.print_usage(sys.stderr)
self.exit(UNKNOWN_ERROR, f"{msg}\n")
| ConfigOptionParser |
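The precedence applied by `_get_ordered_configuration_items` can be shown with a toy sketch (standalone Python, not pip's code): the `[global]` section is applied first, then the command's own section, then environment-derived keys, so later sources win.

```python
config_items = {
    "global.timeout": "15",    # [global] section
    "install.timeout": "60",   # the command's own section
    ":env:.timeout": "120",    # e.g. PIP_TIMEOUT from the environment
}
override_order = ["global", "install", ":env:"]

defaults = {}
for section in override_order:
    for dotted_key, val in config_items.items():
        sec, _, key = dotted_key.partition(".")
        if sec == section:
            defaults[key] = val

print(defaults)  # {'timeout': '120'} -- the env value, applied last, wins
```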
python | geekcomputers__Python | encrypter-decrypter-gui.py | {
"start": 1096,
"end": 8645
} | class ____:
def __init__(self, parent):
self.parent = parent
# ========== Data Key ==========
self.data_dic = {
"A": "Q",
"B": "W",
"C": "E",
"D": "R",
"E": "T",
"F": "Y",
"G": "U",
"H": "I",
"I": "O",
"J": "P",
"K": "A",
"L": "S",
"M": "D",
"N": "F",
"O": "G",
"P": "H",
"Q": "J",
"R": "K",
"S": "L",
"T": "Z",
"U": "X",
"V": "C",
"W": "V",
"X": "B",
"Y": "N",
"Z": "M",
"a": "q",
"b": "w",
"c": "e",
"d": "r",
"e": "t",
"f": "y",
"g": "u",
"h": "i",
"i": "o",
"j": "p",
"k": "a",
"l": "s",
"m": "d",
"n": "f",
"o": "g",
"p": "h",
"q": "j",
"r": "k",
"s": "l",
"t": "z",
"u": "x",
"v": "c",
"w": "v",
"x": "b",
"y": "n",
"z": "m",
"1": "_",
"2": "-",
"3": "|",
"4": "?",
"5": "*",
"6": "!",
"7": "@",
"8": "#",
"9": "$",
"0": "~",
".": "/",
",": "+",
" ": "&",
}
# ==============================
# ----- Notebook With Two Pages -----
self.nb = ttk.Notebook(self.parent)
self.page1 = ttk.Frame(self.nb)
self.page2 = ttk.Frame(self.nb)
self.nb.add(self.page1, text="Encrypt The Words")
self.nb.add(self.page2, text="Decrypt The Words")
self.nb.pack(expand=True, fill="both")
# ----- LabelFrames -----
self.page1_main_label = ttk.LabelFrame(
self.page1, text="Encrypt Any Text"
) # <----- Page1 LabelFrame1
self.page1_main_label.grid(row=0, column=0, pady=20, padx=2, ipadx=20)
self.page1_output_label = ttk.LabelFrame(self.page1, text="Decrypted Text")
self.page1_output_label.grid(row=1, column=0, pady=10, padx=2)
self.page2_main_label = ttk.LabelFrame(
self.page2, text="Decrypt Any Text"
) # <----- Page1 LabelFrame1
self.page2_main_label.grid(row=0, column=0, pady=20, padx=2, ipadx=20)
self.page2_output_label = ttk.LabelFrame(self.page2, text="Real Text")
self.page2_output_label.grid(row=1, column=0, pady=10, padx=2)
# <---Scrolled Text Global
self.decrypted_text_box = ScrolledText(
self.page1_output_label, width=30, height=5, state="normal"
)
self.decrypted_text_box.grid(row=1, column=0, padx=2, pady=10)
self.text_box = ScrolledText(
self.page2_output_label, width=30, height=5, state="normal"
)
self.text_box.grid(row=1, column=0, padx=2, pady=10)
# ----- Variables -----
self.user_text = tk.StringVar()
self.decrypted_user_text = tk.StringVar()
self.user_text2 = tk.StringVar()
self.real_text = tk.StringVar()
# ----- Getting Inside Page1 -----
self.page1_inside()
self.page2_inside()
def page1_inside(self):
style = ttk.Style()
user_text_label = ttk.Label(
self.page1_main_label, text="Enter Your Text Here : ", font=("", 14)
)
user_text_label.grid(row=0, column=0, pady=10)
user_entry_box = ttk.Entry(
self.page1_main_label, width=35, textvariable=self.user_text
)
user_entry_box.grid(row=1, column=0)
style.configure(
"TButton",
foreground="black",
background="white",
relief="groove",
font=("", 12),
)
encrypt_btn = ttk.Button(
self.page1_main_label,
text="Encrypt Text",
style="TButton",
command=self.encrypt_now,
)
encrypt_btn.grid(row=2, column=0, pady=15)
# ---------- Page1 Button Binding Function ----------
def encrypt_now(self):
user_text = self.user_text.get()
if user_text == "":
showerror(
"Nothing Found", "Please Enter Something In Entry Box To Encrypt...!"
)
return
else:
self.decrypted_user_text = self.backend_work("Encrypt", user_text)
self.decrypted_text_box.insert(tk.INSERT, self.decrypted_user_text, tk.END)
# --------------------------------------------------Binding Functions of Page1 End Here
# Page2 ------------------>
def page2_inside(self):
style = ttk.Style()
user_text_label = ttk.Label(
self.page2_main_label, text="Enter Decrypted Text Here : ", font=("", 14)
)
user_text_label.grid(row=0, column=0, pady=10)
user_entry_box = ttk.Entry(
self.page2_main_label, width=35, textvariable=self.user_text2
)
user_entry_box.grid(row=1, column=0)
style.configure(
"TButton",
foreground="black",
background="white",
relief="groove",
font=("", 12),
)
encrypt_btn = ttk.Button(
self.page2_main_label,
text="Decrypt Text",
style="TButton",
command=self.decrypt_now,
)
encrypt_btn.grid(row=2, column=0, pady=15)
# ---------- Page1 Button Binding Function ----------
def decrypt_now(self):
user_text = self.user_text2.get()
if user_text == "":
showerror(
"Nothing Found", "Please Enter Something In Entry Box To Encrypt...!"
)
return
else:
self.real_text = self.backend_work("Decrypt", user_text)
self.text_box.insert(tk.INSERT, self.real_text, tk.END)
def backend_work(self, todo, text_coming):
text_to_return = ""
if todo == "Encrypt":
try:
text_coming = str(
text_coming
)  # Ensure the input is a plain string before mapping it character by character
for word in text_coming:
for key, value in self.data_dic.items():
if word == key:
# print(word, " : ", key)
text_to_return += value
except ValueError:
showerror("Unknown", "Something Went Wrong, Please Restart Application")
return text_to_return
elif todo == "Decrypt":
try:
text_coming = str(text_coming)
for word in text_coming:
for key, value in self.data_dic.items():
if word == value:
text_to_return += key
except ValueError:
showerror("Unknown", "Something Went Wrong, Please Restart Application")
return text_to_return
else:
showerror("No Function", "Function Could not get what to do...!")
# =============================================================
# ==================== Classes End Here ... ! =================
if __name__ == "__main__":
run = Main()
Notebook(run)
run.mainloop()
| Notebook |
python | kamyu104__LeetCode-Solutions | Python/minimum-falling-path-sum.py | {
"start": 31,
"end": 326
} | class ____(object):
def minFallingPathSum(self, A):
"""
:type A: List[List[int]]
:rtype: int
"""
for i in xrange(1, len(A)):
for j in xrange(len(A[i])):
A[i][j] += min(A[i-1][max(j-1, 0):j+2])
return min(A[-1])
| Solution |
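A worked run of the in-place recurrence on a 3x3 matrix (plain Python 3, so `range` replaces `xrange`):

```python
A = [[2, 1, 3],
     [6, 5, 4],
     [7, 8, 9]]

for i in range(1, len(A)):
    for j in range(len(A[i])):
        # Each cell absorbs the cheapest reachable cell from the row above
        # (directly above, or diagonally left/right).
        A[i][j] += min(A[i - 1][max(j - 1, 0):j + 2])

# A is now [[2, 1, 3], [7, 6, 5], [13, 13, 14]]
print(min(A[-1]))  # 13, e.g. via the falling path 1 -> 5 -> 7
```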
python | EpistasisLab__tpot | tpot/search_spaces/pipelines/wrapper.py | {
"start": 5024,
"end": 6169
} | class ____(SearchSpace):
def __init__(
self,
method: type,
space: ConfigurationSpace,
estimator_search_space: SearchSpace,
hyperparameter_parser: callable = None,
wrapped_param_name: str = None
) -> None:
"""
This search space is for wrapping a sklearn estimator with a method that takes another estimator and hyperparameters as arguments.
For example, this can be used with sklearn.ensemble.BaggingClassifier or sklearn.ensemble.AdaBoostClassifier.
"""
self.estimator_search_space = estimator_search_space
self.method = method
self.space = space
self.hyperparameter_parser=hyperparameter_parser
self.wrapped_param_name = wrapped_param_name
def generate(self, rng=None):
rng = np.random.default_rng(rng)
return WrapperPipelineIndividual(method=self.method, space=self.space, estimator_search_space=self.estimator_search_space, hyperparameter_parser=self.hyperparameter_parser, wrapped_param_name=self.wrapped_param_name, rng=rng)
| WrapperPipeline
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/custom_param_types.py | {
"start": 8878,
"end": 9513
} | class ____(BetterChoice):
"""Extends choice with dynamic version number."""
def __init__(self, *args):
super().__init__(*args)
self.all_choices = [*self.choices, "<airflow_version>", "<owner/repo:branch>", "<pr_number>"]
def convert(self, value, param, ctx):
if re.match(r"^\d*\.\d*\.\d*\S*$", value):
return value
if re.match(GITHUB_REPO_BRANCH_PATTERN, value):
return value
# Check if it's a PR number (digits only)
if re.match(PR_NUMBER_PATTERN, value):
return value
return super().convert(value, param, ctx)
| UseAirflowVersionType |
python | ipython__ipython | IPython/core/alias.py | {
"start": 4859,
"end": 4907
} | class ____(AliasError):
pass
| InvalidAliasError |
python | joke2k__faker | faker/providers/address/fr_FR/__init__.py | {
"start": 71,
"end": 11925
} | class ____(AddressProvider):
city_suffixes = (
"Ville",
"Bourg",
"-les-Bains",
"-sur-Mer",
"-la-Forêt",
"boeuf",
"nec",
"dan",
)
city_prefixes = ("Saint", "Sainte")
street_prefixes = ("rue", "rue", "chemin", "avenue", "boulevard")
city_formats = (
"{{city_prefix}} {{first_name}}",
"{{city_prefix}} {{first_name}}{{city_suffix}}",
"{{last_name}}",
"{{last_name}}",
"{{last_name}}",
"{{last_name}}",
"{{last_name}}{{city_suffix}}",
"{{last_name}}{{city_suffix}}",
"{{last_name}}{{city_suffix}}",
"{{last_name}}-sur-{{last_name}}",
)
street_name_formats = (
"{{street_prefix}} {{last_name}}",
"{{street_prefix}} {{first_name}} {{last_name}}",
"{{street_prefix}} de {{last_name}}",
)
street_address_formats = (
"{{street_name}}",
"{{building_number}}, {{street_name}}",
"{{building_number}}, {{street_name}}",
"{{building_number}}, {{street_name}}",
"{{building_number}}, {{street_name}}",
"{{building_number}}, {{street_name}}",
)
address_formats = ("{{street_address}}\n{{postcode}} {{city}}",)
building_number_formats = ("%", "%#", "%#", "%#", "%##")
countries = (
"Afghanistan",
"Afrique du sud",
"Albanie",
"Algérie",
"Allemagne",
"Andorre",
"Angola",
"Anguilla",
"Antarctique",
"Antigua et Barbuda",
"Antilles néerlandaises",
"Arabie saoudite",
"Argentine",
"Arménie",
"Aruba",
"Australie",
"Autriche",
"Azerbaïdjan",
"Bahamas",
"Bahrain",
"Bangladesh",
"Belgique",
"Belize",
"Benin",
"Bermudes (Les)",
"Bhoutan",
"Biélorussie",
"Bolivie",
"Bosnie-Herzégovine",
"Botswana",
"Bouvet (Îles)",
"Brunei",
"Brésil",
"Bulgarie",
"Burkina Faso",
"Burundi",
"Cambodge",
"Cameroun",
"Canada",
"Cap Vert",
"Cayman (Îles)",
"Chili",
"Chine (Rép. pop.)",
"Christmas (Île)",
"Chypre",
"Cocos (Îles)",
"Colombie",
"Comores",
"Cook (Îles)",
"Corée du Nord",
"Corée, Sud",
"Costa Rica",
"Croatie",
"Cuba",
"Côte d'Ivoire",
"Danemark",
"Djibouti",
"Dominique",
"Égypte",
"El Salvador",
"Émirats arabes unis",
"Équateur",
"Érythrée",
"Espagne",
"Estonie",
"États-Unis",
"Ethiopie",
"Falkland (Île)",
"Fidji (République des)",
"Finlande",
"France",
"Féroé (Îles)",
"Gabon",
"Gambie",
"Ghana",
"Gibraltar",
"Grenade",
"Groenland",
"Grèce",
"Guadeloupe",
"Guam",
"Guatemala",
"Guinée",
"Guinée Equatoriale",
"Guinée-Bissau",
"Guyane",
"Guyane française",
"Géorgie",
"Géorgie du Sud et Sandwich du Sud (Îles)",
"Haïti",
"Heard et McDonald (Îles)",
"Honduras",
"Hong Kong",
"Hongrie",
"Îles Mineures Éloignées des États-Unis",
"Inde",
"Indonésie",
"Irak",
"Iran",
"Irlande",
"Islande",
"Israël",
"Italie",
"Jamaïque",
"Japon",
"Jordanie",
"Kazakhstan",
"Kenya",
"Kirghizistan",
"Kiribati",
"Koweit",
"La Barbad",
"Laos",
"Lesotho",
"Lettonie",
"Liban",
"Libye",
"Libéria",
"Liechtenstein",
"Lithuanie",
"Luxembourg",
"Macau",
"Macédoine du Nord",
"Madagascar",
"Malaisie",
"Malawi",
"Maldives (Îles)",
"Mali",
"Malte",
"Mariannes du Nord (Îles)",
"Maroc",
"Marshall (Îles)",
"Martinique",
"Maurice",
"Mauritanie",
"Mayotte",
"Mexique",
"Micronésie (États fédérés de)",
"Moldavie",
"Monaco",
"Mongolie",
"Montserrat",
"Mozambique",
"Myanmar",
"Namibie",
"Nauru",
"Nepal",
"Nicaragua",
"Niger",
"Nigeria",
"Niue",
"Norfolk (Îles)",
"Norvège",
"Nouvelle Calédonie",
"Nouvelle-Zélande",
"Oman",
"Ouganda",
"Ouzbékistan",
"Pakistan",
"Palau",
"Panama",
"Papouasie-Nouvelle-Guinée",
"Paraguay",
"Pays-Bas",
"Philippines",
"Pitcairn (Îles)",
"Pologne",
"Polynésie française",
"Porto Rico",
"Portugal",
"Pérou",
"Qatar",
"Roumanie",
"Royaume-Uni",
"Russie",
"Rwanda",
"Rép. Dém. du Congo",
"République centrafricaine",
"République Dominicaine",
"République tchèque",
"Réunion (La)",
"Sahara Occidental",
"Saint Pierre et Miquelon",
"Saint Vincent et les Grenadines",
"Saint-Kitts et Nevis",
"Saint-Marin (Rép. de)",
"Sainte Hélène",
"Sainte Lucie",
"Samoa",
"Samoa",
"Seychelles",
"Sierra Leone",
"Singapour",
"Slovaquie",
"Slovénie",
"Somalie",
"Soudan",
"Sri Lanka",
"Suisse",
"Suriname",
"Suède",
"Svalbard et Jan Mayen (Îles)",
"Swaziland",
"Syrie",
"São Tomé et Príncipe (Rép.)",
"Sénégal",
"Tadjikistan",
"Taiwan",
"Tanzanie",
"Tchad",
"Territoire britannique de l'océan Indien",
"Territoires français du sud",
"Thailande",
"Timor",
"Togo",
"Tokelau",
"Tonga",
"Trinité et Tobago",
"Tunisie",
"Turkménistan",
"Turks et Caïques (Îles)",
"Turquie",
"Tuvalu",
"Ukraine",
"Uruguay",
"Vanuatu",
"Vatican (Etat du)",
"Venezuela",
"Vierges (Îles)",
"Vierges britanniques (Îles)",
"Vietnam",
"Wallis et Futuna (Îles)",
"Yemen",
"Yougoslavie",
"Zambie",
"Zaïre",
"Zimbabwe",
)
regions = (
"Alsace",
"Aquitaine",
"Auvergne",
"Bourgogne",
"Bretagne",
"Centre",
"Champagne-Ardenne",
"Corse",
"Franche-Comté",
"Île-de-France",
"Languedoc-Roussillon",
"Limousin",
"Lorraine",
"Midi-Pyrénées",
"Nord-Pas-de-Calais",
"Basse-Normandie",
"Haute-Normandie",
"Pays-de-Loire",
"Picardie",
"Poitou-Charentes",
"Province-Alpes-Côte d'Azur",
"Rhone-Alpes",
"Guadeloupe",
"Martinique",
"Guyane",
"Réunion",
"Saint-Pierre-et-Miquelon",
"Mayotte",
"Saint-Barthélémy",
"Saint-Martin",
"Wallis-et-Futuna",
"Polynésie française",
"Nouvelle-Calédonie",
)
departments = (
("01", "Ain"),
("02", "Aisne"),
("03", "Allier"),
("04", "Alpes-de-Haute-Provence"),
("05", "Hautes-Alpes"),
("06", "Alpes-Maritimes"),
("07", "Ardèche"),
("08", "Ardennes"),
("09", "Ariège"),
("10", "Aube"),
("11", "Aude"),
("12", "Aveyron"),
("13", "Bouches-du-Rhône"),
("14", "Calvados"),
("15", "Cantal"),
("16", "Charente"),
("17", "Charente-Maritime"),
("18", "Cher"),
("19", "Corrèze"),
("2A", "Corse-du-Sud"),
("2B", "Haute-Corse"),
("21", "Côte-d'Or"),
("22", "Côtes-d'Armor"),
("23", "Creuse"),
("24", "Dordogne"),
("25", "Doubs"),
("26", "Drôme"),
("27", "Eure"),
("28", "Eure-et-Loir"),
("29", "Finistère"),
("30", "Gard"),
("31", "Haute-Garonne"),
("32", "Gers"),
("33", "Gironde"),
("34", "Hérault"),
("35", "Ille-et-Vilaine"),
("36", "Indre"),
("37", "Indre-et-Loire"),
("38", "Isère"),
("39", "Jura"),
("40", "Landes"),
("41", "Loir-et-Cher"),
("42", "Loire"),
("43", "Haute-Loire"),
("44", "Loire-Atlantique"),
("45", "Loiret"),
("46", "Lot"),
("47", "Lot-et-Garonne"),
("48", "Lozère"),
("49", "Maine-et-Loire"),
("50", "Manche"),
("51", "Marne"),
("52", "Haute-Marne"),
("53", "Mayenne"),
("54", "Meurthe-et-Moselle"),
("55", "Meuse"),
("56", "Morbihan"),
("57", "Moselle"),
("58", "Nièvre"),
("59", "Nord"),
("60", "Oise"),
("61", "Orne"),
("62", "Pas-de-Calais"),
("63", "Puy-de-Dôme"),
("64", "Pyrénées-Atlantiques"),
("65", "Hautes-Pyrénées"),
("66", "Pyrénées-Orientales"),
("67", "Bas-Rhin"),
("68", "Haut-Rhin"),
("69", "Rhône"),
("70", "Haute-Saône"),
("71", "Saône-et-Loire"),
("72", "Sarthe"),
("73", "Savoie"),
("74", "Haute-Savoie"),
("75", "Paris"),
("76", "Seine-Maritime"),
("77", "Seine-et-Marne"),
("78", "Yvelines"),
("79", "Deux-Sèvres"),
("80", "Somme"),
("81", "Tarn"),
("82", "Tarn-et-Garonne"),
("83", "Var"),
("84", "Vaucluse"),
("85", "Vendée"),
("86", "Vienne"),
("87", "Haute-Vienne"),
("88", "Vosges"),
("89", "Yonne"),
("90", "Territoire de Belfort"),
("91", "Essonne"),
("92", "Hauts-de-Seine"),
("93", "Seine-Saint-Denis"),
("94", "Val-de-Marne"),
("95", "Val-d'Oise"),
("971", "Guadeloupe"),
("972", "Martinique"),
("973", "Guyane"),
("974", "La Réunion"),
("976", "Mayotte"),
)
def street_prefix(self) -> str:
"""
:example: 'rue'
"""
return self.random_element(self.street_prefixes)
def city_prefix(self) -> str:
"""
:example: 'Saint'
"""
return self.random_element(self.city_prefixes)
def administrative_unit(self) -> str:
"""
:example: 'Guadeloupe'
"""
return self.random_element(self.regions)
region = administrative_unit
def department(self) -> Tuple[str, str]:
"""
Randomly returns a French department as a ('departmentNumber', 'departmentName') tuple.
:example: ('2B', 'Haute-Corse')
"""
return self.random_element(self.departments)
def department_name(self) -> str:
"""
Randomly returns a French department name.
:example: 'Ardèche'
"""
return self.department()[1]
def department_number(self) -> str:
"""
Randomly returns a French department number.
:example: '59'
"""
return self.department()[0]
def postcode(self) -> str:
"""
Randomly returns a postcode generated from an existing French department number.
:example: '33260'
"""
department = self.department_number()
if department in ["2A", "2B"]:
department = "20"
return f"{department}{self.random_number(digits=5 - len(department), fix_len=True)}"
| Provider |
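A standalone sketch of the postcode logic above with the department list reduced to a few entries; it mirrors the '2A'/'2B' to '20' mapping and the fixed-length digit padding, using a plain random.Random in place of the provider's random_number helper:
import random

# Reduced department sample for illustration; the provider draws from the full list above.
departments = [("2A", "Corse-du-Sud"), ("33", "Gironde"), ("974", "La Réunion")]

def fake_postcode(rng: random.Random) -> str:
    department = rng.choice(departments)[0]
    if department in ["2A", "2B"]:
        department = "20"
    remaining = 5 - len(department)
    # fix_len=True in the provider means exactly `remaining` digits with no leading zero.
    suffix = str(rng.randint(10 ** (remaining - 1), 10 ** remaining - 1))
    return f"{department}{suffix}"

print(fake_postcode(random.Random(0)))  # a 5-character code such as '33xxx', '20xxx' or '974xx'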
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 644726,
"end": 645207
} | class ____(sgqlc.types.Interface):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("team", "team_name", "team_resource_path", "team_url")
team = sgqlc.types.Field("Team", graphql_name="team")
team_name = sgqlc.types.Field(String, graphql_name="teamName")
team_resource_path = sgqlc.types.Field(URI, graphql_name="teamResourcePath")
team_url = sgqlc.types.Field(URI, graphql_name="teamUrl")
| TeamAuditEntryData |
python | huggingface__transformers | src/transformers/models/swin2sr/modeling_swin2sr.py | {
"start": 4843,
"end": 6164
} | class ____(nn.Module):
def __init__(self, config, normalize_patches=True):
super().__init__()
num_channels = config.embed_dim
image_size, patch_size = config.image_size, config.patch_size
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
patches_resolution = [image_size[0] // patch_size[0], image_size[1] // patch_size[1]]
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.projection = nn.Conv2d(num_channels, config.embed_dim, kernel_size=patch_size, stride=patch_size)
self.layernorm = nn.LayerNorm(config.embed_dim) if normalize_patches else None
def forward(self, embeddings: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]:
embeddings = self.projection(embeddings)
_, _, height, width = embeddings.shape
output_dimensions = (height, width)
embeddings = embeddings.flatten(2).transpose(1, 2)
if self.layernorm is not None:
embeddings = self.layernorm(embeddings)
return embeddings, output_dimensions
| Swin2SRPatchEmbeddings |
python | graphql-python__graphene | graphene/relay/tests/test_global_id.py | {
"start": 181,
"end": 245
} | class ____(Node):
class Meta:
name = "Node"
| CustomNode |
python | getsentry__sentry | src/sentry/tempest/endpoints/tempest_credentials_details.py | {
"start": 600,
"end": 1550
} | class ____(ProjectEndpoint):
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.GDX
permission_classes = (TempestCredentialsPermission,)
def delete(self, request: Request, project: Project, tempest_credentials_id: int) -> Response:
if not has_tempest_access(project.organization):
raise NotFound
try:
credentials = TempestCredentials.objects.get(project=project, id=tempest_credentials_id)
except TempestCredentials.DoesNotExist:
raise NotFound
data = credentials.get_audit_log_data()
credentials.delete()
self.create_audit_entry(
request,
organization=project.organization,
target_object=credentials.id,
event=audit_log.get_event_id("TEMPEST_CLIENT_ID_REMOVE"),
data=data,
)
return Response(status=204)
| TempestCredentialsDetailsEndpoint |
python | celery__celery | t/unit/backends/test_rpc.py | {
"start": 181,
"end": 546
} | class ____:
def get_backend(self):
return RPCBackend(app=self.app)
def get_consumer(self):
return self.get_backend().result_consumer
def test_drain_events_before_start(self):
consumer = self.get_consumer()
# drain_events shouldn't crash when called before start
consumer.drain_events(0.001)
| test_RPCResultConsumer |
python | ansible__ansible | test/integration/targets/protomatter/action_plugins/transform_factory.py | {
"start": 84,
"end": 314
} | class ____(ActionBase):
def run(self, tmp=None, task_vars=None):
self._display.deprecated("a deprecation warning", version="2.99")
self._display.warning("a warning")
return dict(changed=False)
| ActionModule |
python | gwtw__py-sorting | test/base_positive_integer_sort_test.py | {
"start": 0,
"end": 1104
} | class ____(object):
def test_sorts_empty_array(self):
self.assertEqual([], self.sort([]))
def test_sorts_small_sorted_array(self):
self.assertEqual([1,2,3,4,5], self.sort([1,2,3,4,5]))
def test_sorts_small_reverse_sorted_array(self):
self.assertEqual([1,2,3,4,5], self.sort([5,4,3,2,1]))
def test_sorts_small_sorted_array_with_two_values_swapped(self):
self.assertEqual([1,2,3,4,5], self.sort([1,2,5,4,3]))
def test_sorts_large_sorted_array(self):
self.assertEqual(
[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],
self.sort([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]))
def test_sorts_large_reverse_sorted_array(self):
self.assertEqual(
[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],
self.sort([20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]))
def test_sorts_large_sorted_array_with_two_values_swapped(self):
self.assertEqual(
[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],
self.sort([0,1,2,8,4,5,6,7,3,9,10,11,12,13,14,15,16,17,18,19,20]))
| BasePositiveIntegerSortTest |
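The class above is a mixin: concrete test cases combine it with unittest.TestCase and supply a sort method. A minimal sketch of such a subclass, using the built-in sorted as a stand-in for the repo's own sorting algorithms:
import unittest

class BuiltinSortTest(BasePositiveIntegerSortTest, unittest.TestCase):
    def sort(self, items):
        # Stand-in implementation; the repo's tests plug in e.g. a bubble sort here.
        return sorted(items)

if __name__ == "__main__":
    unittest.main()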
python | pytorch__pytorch | torch/distributed/checkpoint/_pg_transport.py | {
"start": 1981,
"end": 6154
} | class ____:
"""
This is the metadata for a state dict that is used to transfer checkpoints.
It contains the step, the pytree spec of the state dict and the metadata for
each tensor in the state dict.
This must be pickleable so that it can be sent over the wire.
Args:
step: the step of the checkpoint to verify consistency
treespec: the pytree spec of the state dict
paths: the path of each leaf in the state dict
non_tensor_leaves: the metadata for each tensor in the state dict and any
non-tensor leaves in the state dict
"""
treespec: TreeSpec
paths: list[KeyPath]
non_tensor_leaves: list[
Union[object, _TensorMeta, _DTensorMeta, _ShardedTensorMeta]
]
@contextmanager
def _timeit(name: str) -> Generator[None, None, None]:
start = time.perf_counter()
yield
dur = time.perf_counter() - start
logger.info("%s took %ss", name, dur)
def _prepare_tensor(tensor: torch.Tensor) -> tuple[torch.Tensor, _TensorMeta]:
return (
_cast_tensor(tensor, torch.uint8),
_TensorMeta(
shape=tensor.shape,
dtype=tensor.dtype,
storage_offset=cast(int, tensor.storage_offset()),
stride=tensor.stride(),
nbytes=tensor.untyped_storage().nbytes(),
),
)
def _prepare_state_dict(
state_dict: object,
device: torch.device,
) -> tuple[_StateDictMeta, list[torch.Tensor]]:
leaves: list[tuple[KeyPath, object]]
leaves, treespec = tree_flatten_with_path(state_dict)
paths: list[KeyPath] = []
non_tensor_leaves: list[
Union[object, _TensorMeta, _DTensorMeta, _ShardedTensorMeta]
] = []
tensors: list[torch.Tensor] = []
for key_path, v in leaves:
paths.append(key_path)
if isinstance(v, DTensor):
tensor, tensor_meta = _prepare_tensor(v._local_tensor)
tensors.append(tensor)
non_tensor_leaves.append(
_DTensorMeta(
local=tensor_meta,
spec=v._spec,
)
)
elif isinstance(v, ShardedTensor):
# Handle ShardedTensor by extracting all local shards
local_shards = v.local_shards()
# Prepare metadata for all local shards
local_shards_meta = []
local_shards_shard_metadata = []
for shard in local_shards:
tensor, tensor_meta = _prepare_tensor(shard.tensor)
tensors.append(tensor)
local_shards_meta.append(tensor_meta)
local_shards_shard_metadata.append(shard.metadata)
non_tensor_leaves.append(
_ShardedTensorMeta(
local_shards_meta=local_shards_meta,
local_shards_shard_metadata=local_shards_shard_metadata,
sharded_tensor_metadata=v.metadata(), # Complete metadata
)
)
elif isinstance(v, torch.Tensor):
tensor, tensor_meta = _prepare_tensor(v)
tensors.append(tensor)
non_tensor_leaves.append(tensor_meta)
else:
non_tensor_leaves.append(v)
return (
_StateDictMeta(
treespec=treespec,
paths=paths,
non_tensor_leaves=non_tensor_leaves,
),
tensors,
)
def _cast_tensor(tensor: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
"""
Casts the underlying storage to a tensor of the given dtype.
The returned tensor will be of size ``storage.nbytes``.
This works for all datatypes and supports strided/offset tensors with the
caveat that the cast tensor may be larger than the original tensor due to
the differences in striding.
"""
if type(tensor) is not torch.Tensor:
raise AssertionError(f"can only cast standard tensors not {type(tensor)}")
storage = tensor.untyped_storage()
ret = torch.tensor(storage, dtype=dtype, device=tensor.device)
if ret.untyped_storage() is not storage:
raise AssertionError("storage should be the same")
return ret
| _StateDictMeta |
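A quick sketch of what _cast_tensor above does in isolation: it reinterprets a tensor's entire untyped storage as uint8 bytes, so the result covers the full storage even for strided or offset views (assumes torch is installed and _cast_tensor from the module above is in scope):
import torch

t = torch.arange(6, dtype=torch.float32).reshape(2, 3)[:, 1:]  # strided, offset view of 4 elements
raw = _cast_tensor(t, torch.uint8)

# The byte view covers the whole underlying storage (6 floats = 24 bytes),
# not just the 4 visible elements of the view.
assert raw.dtype == torch.uint8
assert raw.numel() == t.untyped_storage().nbytes() == 24
# The accompanying _TensorMeta (shape, stride, storage_offset, nbytes) is what lets
# the receiver rebuild the original view from these raw bytes.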
python | joke2k__faker | tests/providers/test_ssn.py | {
"start": 9031,
"end": 10149
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("el_GR")
Faker.seed(0)
def test_vat_id(self):
for _ in range(100):
prefix = random.choice([True, False])
vat_id = self.fake.vat_id(prefix=prefix)
assert re.search(r"^(EL)?\d{9}$", vat_id)
assert vat_id[2 if prefix else 0] in ("7", "8", "9", "0")
assert str(gr_tin_checksum(vat_id[2:-1] if prefix else vat_id[:-1])) == vat_id[-1]
def test_tin(self):
for _ in range(100):
tin = self.fake.tin()
assert re.search(r"^\d{9}$", tin)
assert tin[0] in ("1", "2", "3", "4")
assert str(gr_tin_checksum(tin[:-1])) == tin[-1]
def test_ssn(self):
for _ in range(100):
ssn = self.fake.ssn()
assert re.search(r"^\d{11}$", ssn)
assert datetime.strptime(ssn[:6], "%d%m%y")
assert luhn_checksum(ssn) == 0
def test_police_id(self):
for _ in range(100):
assert re.search(r"^[ΑΒΕΖΗΙΚΜΝΟΡΤΥΧ]{1,2}\d{6}$", self.fake.police_id())
| TestElGr |
python | django-import-export__django-import-export | tests/core/models.py | {
"start": 4707,
"end": 4983
} | class ____(models.Model):
catid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=32)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "UUID categories"
| UUIDCategory |
python | allegroai__clearml | clearml/backend_api/services/v2_23/auth.py | {
"start": 19275,
"end": 20170
} | class ____(Response):
"""
Response of auth.login endpoint.
:param token: Token string
:type token: str
"""
_service = "auth"
_action = "login"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {"token": {"description": "Token string", "type": ["string", "null"]}},
"type": "object",
}
def __init__(self, token: Optional[str] = None, **kwargs: Any) -> None:
super(LoginResponse, self).__init__(**kwargs)
self.token = token
@schema_property("token")
def token(self) -> Optional[str]:
return self._property_token
@token.setter
def token(self, value: Optional[str]) -> None:
if value is None:
self._property_token = None
return
self.assert_isinstance(value, "token", six.string_types)
self._property_token = value
| LoginResponse |
python | doocs__leetcode | solution/0000-0099/0039.Combination Sum/Solution.py | {
"start": 0,
"end": 525
} | class ____:
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
def dfs(i: int, s: int):
if s == 0:
ans.append(t[:])
return
if s < candidates[i]:
return
for j in range(i, len(candidates)):
t.append(candidates[j])
dfs(j, s - candidates[j])
t.pop()
candidates.sort()
t = []
ans = []
dfs(0, target)
return ans
| Solution |
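A brief usage check for the backtracking solution above (LeetCode 39): candidates may be reused any number of times, and sorting plus the `s < candidates[i]` guard prunes branches that can no longer reach the target.
from typing import List  # required by the Solution type hints above

solver = Solution()
print(solver.combinationSum([2, 3, 6, 7], 7))  # [[2, 2, 3], [7]]
print(solver.combinationSum([2, 3, 5], 8))     # [[2, 2, 2, 2], [2, 3, 3], [3, 5]]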
python | getsentry__sentry | src/sentry/rules/processing/buffer_processing.py | {
"start": 671,
"end": 1737
} | class ____(Protocol):
def get_hash(self, model: type[models.Model], field: dict[str, Any]) -> dict[str, str]: ...
def get_hash_length(self, model: type[models.Model], field: dict[str, Any]) -> int: ...
def push_to_hash_bulk(
self, model: type[models.Model], filters: dict[str, Any], data: dict[str, str]
) -> None: ...
def push_to_hash(
self, model: type[models.Model], filters: dict[str, Any], field: str, value: str
) -> None: ...
def delete_hash(
self, model: type[models.Model], filters: dict[str, Any], fields: list[str]
) -> None: ...
def get_sorted_set(self, key: str, min: float, max: float) -> list[tuple[int, float]]: ...
def push_to_sorted_set(self, key: str, value: list[int] | int) -> None: ...
def bulk_get_sorted_set(
self, keys: list[str], min: float, max: float
) -> dict[int, list[float]]: ...
def delete_key(self, key: str, min: float, max: float) -> None: ...
def delete_keys(self, keys: list[str], min: float, max: float) -> None: ...
| BufferProtocol |
python | realpython__materials | python-class/stack.py | {
"start": 0,
"end": 358
} | class ____:
def __init__(self, items=None):
if items is None:
self._items = []
else:
self._items = list(items)
def push(self, item):
self._items.append(item)
def pop(self):
return self._items.pop()
def __repr__(self) -> str:
return f"{type(self).__name__}({self._items})"
| Stack |
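A quick usage example for the Stack class above:
s = Stack([1, 2])
s.push(3)
print(s)        # Stack([1, 2, 3])
print(s.pop())  # 3
print(s)        # Stack([1, 2])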
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/execute_step.py | {
"start": 3032,
"end": 30676
} | class ____(Output):
"""This is a marker subclass that represents an Output that was produced from an AssetResult."""
def _process_asset_results_to_events(
step_context: StepExecutionContext,
user_event_sequence: Iterator[OpOutputUnion],
) -> Iterator[OpOutputUnion]:
"""Handle converting MaterializeResult (& AssetCheckResult soon) to their appropriate events.
MaterializeResults get converted to an Output event, which is later used to derive an AssetMaterialization.
Each AssetCheckResult gets converted to two events:
- An Output, which allows downstream steps to depend on it
- An AssetCheckEvaluation, which combines the check result with information from the context
to create a full picture of the asset check's evaluation.
"""
for user_event in user_event_sequence:
yield from _process_user_event(step_context, user_event)
def _process_user_event(
step_context: StepExecutionContext, user_event: OpOutputUnion
) -> Iterator[OpOutputUnion]:
if isinstance(user_event, AssetResult):
assets_def = _get_assets_def_for_step(step_context, user_event)
asset_key = _resolve_asset_result_asset_key(user_event, assets_def)
output_name = assets_def.get_output_name_for_asset_key(asset_key)
for check_result in user_event.check_results or []:
yield from _process_user_event(step_context, check_result)
with disable_dagster_warnings():
if isinstance(user_event, MaterializeResult):
value = user_event.value
else:
value = None
yield AssetResultOutput(
value=value,
output_name=output_name,
metadata=user_event.metadata,
data_version=user_event.data_version,
tags=user_event.tags,
)
elif isinstance(user_event, AssetCheckResult):
asset_check_evaluation = user_event.to_asset_check_evaluation(step_context)
assets_def = _get_assets_def_for_step(step_context, user_event)
spec = check.not_none(
assets_def.get_spec_for_check_key(asset_check_evaluation.asset_check_key),
"If we were able to create an AssetCheckEvaluation from the AssetCheckResult, then"
" there should be a spec for the check",
)
# If the check is explicitly selected, we need to yield an Output event for it.
if spec.key in assets_def.check_keys:
output_name = check.not_none(
step_context.job_def.asset_layer.get_op_output_name(spec.key),
f"No output name found for check key {spec.key} in step {step_context.step.key}. This likely indicates that the currently executing AssetsDefinition has no check specified for the key.",
)
output = Output(value=None, output_name=output_name)
yield output
else:
step_context.log.warning(
f"AssetCheckResult for check '{spec.name}' for asset '{spec.asset_key.to_user_string()}' was yielded which is not selected. Letting it through."
)
yield asset_check_evaluation
else:
yield user_event
def _get_assets_def_for_step(
step_context: StepExecutionContext, user_event: OpOutputUnion
) -> AssetsDefinition:
assets_def = step_context.job_def.asset_layer.get_assets_def_for_node(step_context.node_handle)
if not assets_def:
raise DagsterInvariantViolationError(
f"{user_event.__class__.__name__} is only valid within asset computations, no backing"
" AssetsDefinition found."
)
return assets_def
def _resolve_asset_result_asset_key(
asset_result: AssetResult, assets_def: AssetsDefinition
) -> AssetKey:
if asset_result.asset_key:
return asset_result.asset_key
else:
if len(assets_def.keys) != 1:
raise DagsterInvariantViolationError(
f"{asset_result.__class__.__name__} did not include asset_key and it can not be inferred."
f" Specify which asset_key, options are: {assets_def.keys}."
)
return assets_def.key
def _step_output_error_checked_user_event_sequence(
step_context: StepExecutionContext, user_event_sequence: Iterator[OpOutputUnion]
) -> Iterator[OpOutputUnion]:
"""Process the event sequence to check for invariant violations in the event
sequence related to Output events emitted from the compute_fn.
This consumes and emits an event sequence.
"""
check.inst_param(step_context, "step_context", StepExecutionContext)
check.iterator_param(user_event_sequence, "user_event_sequence")
step = step_context.step
op_label = step_context.describe_op()
output_names = list([output_def.name for output_def in step.step_outputs])
selected_output_names = step_context.selected_output_names
for user_event in user_event_sequence:
if not isinstance(user_event, (Output, DynamicOutput)):
yield user_event
continue
# do additional processing on Outputs
output = user_event
if not step.has_step_output(cast("str", output.output_name)):
raise DagsterInvariantViolationError(
f'Core compute for {op_label} returned an output "{output.output_name}" that does '
f"not exist. The available outputs are {output_names}"
)
if output.output_name not in selected_output_names:
raise DagsterInvariantViolationError(
f'Core compute for {op_label} returned an output "{output.output_name}" that is '
f"not selected. The selected outputs are {selected_output_names}"
)
step_output = step.step_output_named(cast("str", output.output_name))
output_def = step_context.job_def.get_node(step_output.node_handle).output_def_named(
step_output.name
)
if isinstance(output, Output):
if step_context.has_seen_output(output.output_name):
raise DagsterInvariantViolationError(
f'Compute for {op_label} returned an output "{output.output_name}" multiple '
"times"
)
if output_def.is_dynamic:
raise DagsterInvariantViolationError(
f'Compute for {op_label} for output "{output.output_name}" defined as dynamic '
"must yield DynamicOutput, got Output."
)
# For any output associated with an asset, make sure that none of its dependent assets
# have already been yielded. If this condition (outputs yielded in topological order) is
# not satisfied, automatic data version computation can yield wrong results.
#
# We look for dependent keys that have already been yielded rather than dependency keys
# that have not yet been yielded. This is because we don't always know which
# dependencies will actually be computed within the step. If A depends on B, it is
# possible that a cached version of B will be used and B will never be yielded. In
# contrast, if both A and B are yielded, A should never precede B.
asset_layer = step_context.job_def.asset_layer
node_handle = step_context.node_handle
asset_key = asset_layer.get_asset_key_for_node_output(node_handle, output_def.name)
if asset_key is not None and asset_key in asset_layer.get_selected_entity_keys_for_node(
node_handle
):
asset_node = asset_layer.get(asset_key)
assets_def = asset_node.assets_def
all_dependent_keys = asset_node.child_keys
step_local_asset_keys = step_context.get_output_asset_keys()
step_local_dependent_keys = all_dependent_keys & step_local_asset_keys
for dependent_key in step_local_dependent_keys:
output_name = assets_def.get_output_name_for_asset_key(dependent_key)
# Need to skip self-dependent assets (possible with partitions)
self_dep = dependent_key in asset_node.parent_keys
if not self_dep and step_context.has_seen_output(output_name):
raise DagsterInvariantViolationError(
f'Asset "{dependent_key.to_user_string()}" was yielded before its'
f' dependency "{asset_key.to_user_string()}".Multiassets'
" yielding multiple asset outputs must yield them in topological"
" order."
)
step_context.observe_output(output.output_name)
metadata = step_context.get_output_metadata(output.output_name)
with disable_dagster_warnings():
output = output.with_metadata(
metadata={**output.metadata, **normalize_metadata(metadata or {})}
)
else:
if not output_def.is_dynamic:
raise DagsterInvariantViolationError(
f"Compute for {op_label} yielded a DynamicOutput, but did not use "
"DynamicOutputDefinition."
)
if step_context.has_seen_output(output.output_name, output.mapping_key):
raise DagsterInvariantViolationError(
f"Compute for {op_label} yielded a DynamicOutput with mapping_key "
f'"{output.mapping_key}" multiple times.'
)
step_context.observe_output(output.output_name, output.mapping_key)
metadata = step_context.get_output_metadata(
output.output_name, mapping_key=output.mapping_key
)
output = DynamicOutput(
value=output.value,
output_name=output.output_name,
metadata={**output.metadata, **normalize_metadata(metadata or {})},
mapping_key=output.mapping_key,
)
yield output
for step_output in step.step_outputs:
step_output_def = step_context.op_def.output_def_named(step_output.name)
if not step_context.has_seen_output(step_output_def.name) and not step_output_def.optional:
asset_layer = step_context.job_def.asset_layer
asset_key = asset_layer.get_asset_key_for_node_output(
step_context.node_handle, step_output_def.name
)
# We require explicitly returned/yielded for asset observations
is_observable_asset = asset_key is not None and asset_layer.get(asset_key).is_observable
if step_output_def.dagster_type.is_nothing and not is_observable_asset:
if step_output.name in selected_output_names:
step_context.log.info(
f'Emitting implicit Nothing for output "{step_output_def.name}" on {op_label}'
)
yield Output(output_name=step_output_def.name, value=None)
elif not step_output_def.is_dynamic:
raise DagsterStepOutputNotFoundError(
f"Core compute for {op_label} did not return an output for non-optional "
f'output "{step_output_def.name}"',
step_key=step.key,
output_name=step_output_def.name,
)
def do_type_check(context: TypeCheckContext, dagster_type: DagsterType, value: Any) -> TypeCheck:
type_check = dagster_type.type_check(context, value)
if not isinstance(type_check, TypeCheck):
return TypeCheck(
success=False,
description=(
f"Type checks must return TypeCheck. Type check for type {dagster_type.display_name} returned "
f"value of type {type(type_check)} when checking runtime value of type {type(value)}."
),
)
return type_check
def _create_step_input_event(
step_context: StepExecutionContext, input_name: str, type_check: TypeCheck, success: bool
) -> DagsterEvent:
return DagsterEvent.step_input_event(
step_context,
StepInputData(
input_name=input_name,
type_check_data=TypeCheckData(
success=success,
label=input_name,
description=type_check.description if type_check else None,
metadata=type_check.metadata if type_check else {},
),
),
)
def _type_checked_event_sequence_for_input(
step_context: StepExecutionContext,
input_name: str,
input_value: Any,
) -> Iterator[DagsterEvent]:
check.inst_param(step_context, "step_context", StepExecutionContext)
check.str_param(input_name, "input_name")
step_input = step_context.step.step_input_named(input_name)
input_def = step_context.op_def.input_def_named(step_input.name)
check.invariant(
input_def.name == input_name,
f"InputDefinition name does not match, expected {input_name} got {input_def.name}",
)
dagster_type = input_def.dagster_type
type_check_context = step_context.for_type(dagster_type)
input_type = type(input_value)
op_label = step_context.describe_op()
with user_code_error_boundary(
DagsterTypeCheckError,
lambda: (
f'Error occurred while type-checking input "{input_name}" of {op_label}, with Python'
f" type {input_type} and Dagster type {dagster_type.display_name}"
),
log_manager=type_check_context.log,
):
type_check = do_type_check(type_check_context, dagster_type, input_value)
yield _create_step_input_event(
step_context, input_name, type_check=type_check, success=type_check.success
)
if not type_check.success:
raise DagsterTypeCheckDidNotPass(
description=(
f'Type check failed for step input "{input_name}" - '
f'expected type "{dagster_type.display_name}". '
f"Description: {type_check.description}"
),
metadata=type_check.metadata,
dagster_type=dagster_type,
)
def _type_check_output(
step_context: StepExecutionContext,
step_output_handle: StepOutputHandle,
output: Any,
) -> Iterator[DagsterEvent]:
check.inst_param(step_context, "step_context", StepExecutionContext)
check.inst_param(output, "output", (Output, DynamicOutput))
step_output = step_context.step.step_output_named(output.output_name)
step_output_def = step_context.op_def.output_def_named(step_output.name)
dagster_type = step_output_def.dagster_type
type_check_context = step_context.for_type(dagster_type)
op_label = step_context.describe_op()
output_type = type(output.value)
with user_code_error_boundary(
DagsterTypeCheckError,
lambda: (
f'Error occurred while type-checking output "{output.output_name}" of {op_label}, with'
f" Python type {output_type} and Dagster type {dagster_type.display_name}"
),
log_manager=type_check_context.log,
):
type_check = do_type_check(type_check_context, dagster_type, output.value)
yield DagsterEvent.step_output_event(
step_context=step_context,
step_output_data=StepOutputData(
step_output_handle=step_output_handle,
type_check_data=TypeCheckData(
success=type_check.success,
label=step_output_handle.output_name,
description=type_check.description if type_check else None,
metadata=type_check.metadata if type_check else {},
),
metadata=output.metadata,
),
)
if not type_check.success:
raise DagsterTypeCheckDidNotPass(
description=(
f'Type check failed for step output "{output.output_name}" - '
f'expected type "{dagster_type.display_name}". '
f"Description: {type_check.description}"
),
metadata=type_check.metadata,
dagster_type=dagster_type,
)
def core_dagster_event_sequence_for_step(
step_context: StepExecutionContext,
) -> Iterator[DagsterEvent]:
"""Execute the step within the step_context argument given the in-memory
events. This function yields a sequence of DagsterEvents, but without
catching any exceptions that have bubbled up during the computation
of the step.
"""
check.inst_param(step_context, "step_context", StepExecutionContext)
if step_context.previous_attempt_count > 0:
yield DagsterEvent.step_restarted_event(step_context, step_context.previous_attempt_count)
else:
yield DagsterEvent.step_start_event(step_context)
with (
time_execution_scope() as timer_result,
enter_execution_context(step_context) as compute_context,
):
inputs = {}
if step_context.is_sda_step:
step_context.fetch_external_input_asset_version_info()
for step_input in step_context.step.step_inputs:
input_def = step_context.op_def.input_def_named(step_input.name)
dagster_type = input_def.dagster_type
if dagster_type.is_nothing:
continue
for event_or_input_value in step_input.source.load_input_object(
step_context, input_def
):
if isinstance(event_or_input_value, DagsterEvent):
yield event_or_input_value
else:
check.invariant(step_input.name not in inputs)
inputs[step_input.name] = event_or_input_value
for input_name, input_value in inputs.items():
for evt in check.generator(
_type_checked_event_sequence_for_input(step_context, input_name, input_value)
):
yield evt
# The core execution loop expects a compute generator in a specific format: a generator that
# takes a context and dictionary of inputs as input, yields output events. If an op definition
# was generated from the @op decorator, then compute_fn needs to be coerced
# into this format. If the op definition was created directly, then it is expected that the
# compute_fn is already in this format.
if isinstance(step_context.op_def.compute_fn, DecoratedOpFunction):
core_gen = create_op_compute_wrapper(step_context.op_def)
else:
core_gen = step_context.op_def.compute_fn
user_event_sequence = execute_core_compute(
step_context,
inputs,
core_gen,
compute_context,
)
failed_blocking_asset_check_evaluations = []
# It is important for this loop to be indented within the
# timer block above in order for time to be recorded accurately.
for user_event in _step_output_error_checked_user_event_sequence(
step_context,
_process_asset_results_to_events(step_context, user_event_sequence),
):
if isinstance(user_event, DagsterEvent):
yield user_event
elif isinstance(user_event, (Output, DynamicOutput)):
for evt in _type_check_and_store_output(step_context, user_event):
yield evt
# for now, I'm ignoring AssetMaterializations yielded manually, but we might want
# to do something with these in the above path eventually
elif isinstance(user_event, AssetMaterialization):
yield DagsterEvent.asset_materialization(step_context, user_event)
elif isinstance(user_event, AssetObservation):
yield DagsterEvent.asset_observation(step_context, user_event)
elif isinstance(user_event, AssetCheckEvaluation):
if (
not user_event.passed
and user_event.severity == AssetCheckSeverity.ERROR
and user_event.blocking
):
failed_blocking_asset_check_evaluations.append(user_event)
yield DagsterEvent.asset_check_evaluation(step_context, user_event)
elif isinstance(user_event, ExpectationResult):
yield DagsterEvent.step_expectation_result(step_context, user_event)
else:
check.failed(f"Unexpected event {user_event}, should have been caught earlier")
if failed_blocking_asset_check_evaluations:
grouped_by_asset_key: dict[AssetKey, list[AssetCheckEvaluation]] = defaultdict(list)
for failed_check in failed_blocking_asset_check_evaluations:
grouped_by_asset_key.setdefault(failed_check.asset_key, []).append(failed_check)
grouped_by_asset_key_str = "\n".join(
f"{asset_key.to_user_string()}: {','.join(failed_check.check_name for failed_check in checks)}"
for asset_key, checks in grouped_by_asset_key.items()
)
raise DagsterAssetCheckFailedError(
f"{len(failed_blocking_asset_check_evaluations)} blocking asset check{'s' if len(failed_blocking_asset_check_evaluations) > 1 else ''} failed with ERROR severity:\n{grouped_by_asset_key_str}"
)
yield DagsterEvent.step_success_event(
step_context, StepSuccessData(duration_ms=timer_result.millis)
)
def _type_check_and_store_output(
step_context: StepExecutionContext, output: Union[DynamicOutput, Output]
) -> Iterator[DagsterEvent]:
check.inst_param(step_context, "step_context", StepExecutionContext)
check.inst_param(output, "output", (Output, DynamicOutput))
mapping_key = output.mapping_key if isinstance(output, DynamicOutput) else None
step_output_handle = StepOutputHandle(
step_key=step_context.step.key, output_name=output.output_name, mapping_key=mapping_key
)
# If we are executing using the execute_in_process API, then we allow for the outputs of ops
# to be directly captured to a dictionary after they are computed.
if step_context.output_capture is not None:
step_context.output_capture[step_output_handle] = output.value
# capture output at the step level for threading the computed output values to hook context
if (
step_context.step_output_capture is not None
and step_context.step_output_metadata_capture is not None
):
step_context.step_output_capture[step_output_handle] = output.value
step_context.step_output_metadata_capture[step_output_handle] = output.metadata
yield from _type_check_output(step_context, step_output_handle, output)
yield from _store_output(step_context, step_output_handle, output)
def _get_output_asset_events(
asset_key: AssetKey,
asset_partitions: Iterable[str],
output: Union[Output, DynamicOutput],
output_def: OutputDefinition,
io_manager_metadata: Mapping[str, MetadataValue],
step_context: StepExecutionContext,
execution_type: AssetExecutionType,
) -> Iterator[Union[AssetMaterialization, AssetObservation]]:
# Metadata scoped to all events for this asset.
key_scoped_metadata = {**output.metadata, **io_manager_metadata}
# Clear any cached record associated with this asset, since we are about to generate a new
# materialization.
step_context.wipe_input_asset_version_info(asset_key)
tags: dict[str, str]
if (
execution_type == AssetExecutionType.MATERIALIZATION
and step_context.is_external_input_asset_version_info_loaded
and asset_key in step_context.job_def.asset_layer.executable_asset_keys
):
assert isinstance(output, Output)
code_version = _get_code_version(asset_key, step_context)
input_provenance_data = _get_input_provenance_data(asset_key, step_context)
cached_data_version = (
step_context.get_data_version(asset_key)
if step_context.has_data_version(asset_key)
else None
)
user_provided_data_version = output.data_version or cached_data_version
data_version = (
compute_logical_data_version(
code_version,
{k: meta["data_version"] for k, meta in input_provenance_data.items()},
)
if user_provided_data_version is None
else user_provided_data_version
)
tags = _build_data_version_tags(
data_version,
code_version,
input_provenance_data,
user_provided_data_version is not None,
)
if not step_context.has_data_version(asset_key):
data_version = DataVersion(tags[DATA_VERSION_TAG])
step_context.set_data_version(asset_key, data_version)
elif execution_type == AssetExecutionType.OBSERVATION:
assert isinstance(output, Output)
tags = (
_build_data_version_observation_tags(output.data_version) if output.data_version else {}
)
else:
tags = {}
all_tags = {**tags, **((output.tags if not isinstance(output, DynamicOutput) else None) or {})}
backfill_id = step_context.get_tag(BACKFILL_ID_TAG)
if backfill_id:
tags[BACKFILL_ID_TAG] = backfill_id
if execution_type == AssetExecutionType.MATERIALIZATION:
event_class = AssetMaterialization
elif execution_type == AssetExecutionType.OBSERVATION:
event_class = AssetObservation
else:
check.failed(f"Unexpected asset execution type {execution_type}")
unpartitioned_asset_metadata = step_context.get_asset_metadata(asset_key=asset_key)
all_unpartitioned_asset_metadata = {
**key_scoped_metadata,
**(unpartitioned_asset_metadata or {}),
}
if asset_partitions:
for partition in asset_partitions:
with disable_dagster_warnings():
partition_scoped_metadata = step_context.get_asset_metadata(
asset_key=asset_key, partition_key=partition
)
all_metadata_for_partitioned_event = {
**all_unpartitioned_asset_metadata,
**(partition_scoped_metadata or {}),
}
# copy the tags dictionary before setting the partition key tags. Otherwise
# all asset materialization events will point to the same dictionary with the
# partition key tags of the last partition processed.
tags_for_event = {**all_tags}
tags_for_event.update(
get_tags_from_multi_partition_key(partition)
if isinstance(partition, MultiPartitionKey)
else {}
)
yield event_class(
asset_key=asset_key,
partition=partition,
metadata=all_metadata_for_partitioned_event,
tags=tags_for_event,
)
else:
with disable_dagster_warnings():
yield event_class(
asset_key=asset_key, metadata=all_unpartitioned_asset_metadata, tags=all_tags
)
def _get_code_version(asset_key: AssetKey, step_context: StepExecutionContext) -> str:
return (
step_context.job_def.asset_layer.get(asset_key).code_version
or step_context.dagster_run.run_id
)
| AssetResultOutput |
python | allegroai__clearml | clearml/backend_api/services/v2_20/queues.py | {
"start": 56291,
"end": 57719
} | class ____(Response):
"""
Response of queues.get_default endpoint.
:param id: Queue id
:type id: str
:param name: Queue name
:type name: str
"""
_service = "queues"
_action = "get_default"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"id": {"description": "Queue id", "type": ["string", "null"]},
"name": {"description": "Queue name", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(self, id: Optional[str] = None, name: Optional[str] = None, **kwargs: Any) -> None:
super(GetDefaultResponse, self).__init__(**kwargs)
self.id = id
self.name = name
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
| GetDefaultResponse |
python | getsentry__sentry | src/sentry/backup/dependencies.py | {
"start": 3022,
"end": 3769
} | class ____(Enum):
"""Kinds of foreign fields that we care about."""
# Uses our `FlexibleForeignKey` wrapper.
FlexibleForeignKey = auto()
# Uses our `HybridCloudForeignKey` wrapper.
HybridCloudForeignKey = auto()
# Uses our `OneToOneCascadeDeletes` wrapper.
OneToOneCascadeDeletes = auto()
# A naked usage of Django's `ForeignKey`.
DefaultForeignKey = auto()
# A naked usage of Django's `OneToOneField`.
DefaultOneToOneField = auto()
# A ForeignKey-like dependency that is opaque to Django because it uses `BoundedBigIntegerField`
# instead of one of Django's default relational field types like `ForeignKey`,
# `OneToOneField`, etc.
ImplicitForeignKey = auto()
| ForeignFieldKind |
python | django-extensions__django-extensions | tests/auth/test_mixins.py | {
"start": 546,
"end": 657
} | class ____(ModelUserFieldPermissionMixin, EmptyResponseView):
model_permission_user_field = "owner"
| OwnerView |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 14781,
"end": 16597
} | class ____(RegexLexer):
"""
Generic `myghty templates`_ lexer. Code that isn't Myghty
markup is yielded as `Token.Other`.
.. versionadded:: 0.6
.. _myghty templates: http://www.myghty.org/
"""
name = 'Myghty'
aliases = ['myghty']
filenames = ['*.myt', 'autodelegate']
mimetypes = ['application/x-myghty']
tokens = {
'root': [
(r'\s+', Text),
(r'(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
bygroups(Name.Tag, Text, Name.Function, Name.Tag,
using(this), Name.Tag)),
(r'(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
bygroups(Name.Tag, Name.Function, Name.Tag,
using(PythonLexer), Name.Tag)),
(r'(<&[^|])(.*?)(,.*?)?(&>)',
bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
(r'(<&\|)(.*?)(,.*?)?(&>)(?s)',
bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
(r'</&>', Name.Tag),
(r'(<%!?)(.*?)(%>)(?s)',
bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
(r'(?<=^)#[^\n]*(\n|\Z)', Comment),
(r'(?<=^)(%)([^\n]*)(\n|\Z)',
bygroups(Name.Tag, using(PythonLexer), Other)),
(r"""(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=[%#]) | # an eval or comment line
(?=</?[%&]) | # a substitution or block or
# call start or end
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)""", bygroups(Other, Operator)),
]
}
| MyghtyLexer |
python | numba__llvmlite | llvmlite/ir/instructions.py | {
"start": 21666,
"end": 22437
} | class ____(Instruction):
def __init__(self, parent, vector, index, name=''):
if not isinstance(vector.type, types.VectorType):
raise TypeError("vector needs to be of VectorType.")
if not isinstance(index.type, types.IntType):
raise TypeError("index needs to be of IntType.")
typ = vector.type.element
super(ExtractElement, self).__init__(parent, typ, "extractelement",
[vector, index], name=name)
def descr(self, buf):
operands = ", ".join("{0} {1}".format(
op.type, op.get_reference()) for op in self.operands)
buf.append("{opname} {operands}\n".format(
opname=self.opname, operands=operands))
| ExtractElement |
python | keras-team__keras | keras/src/ops/operation_test.py | {
"start": 850,
"end": 1210
} | class ____(operation.Operation):
def __init__(self, alpha, *, beta=1.0, name=None):
super().__init__(name=name)
self.alpha = alpha
self.beta = beta
def call(self, x):
return self.alpha * x + self.beta
def compute_output_spec(self, x):
return keras_tensor.KerasTensor(x.shape, x.dtype)
| OpWithCustomConstructor |
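A small sketch of exercising the custom operation above eagerly; since call is plain Python, it can be driven with a NumPy array directly (assumes the keras test module's imports are available so the class can be constructed):
import numpy as np

op = OpWithCustomConstructor(2.0, beta=1.0)
print(op.call(np.array([1.0, 2.0, 3.0])))  # [3. 5. 7.]  -> alpha * x + beta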
python | pandas-dev__pandas | pandas/tests/reshape/merge/test_merge.py | {
"start": 79934,
"end": 111474
} | class ____:
@pytest.mark.parametrize(
"how, sort, expected",
[
("inner", False, DataFrame({"a": [20, 10], "b": [200, 100]}, index=[2, 1])),
("inner", True, DataFrame({"a": [10, 20], "b": [100, 200]}, index=[1, 2])),
(
"left",
False,
DataFrame({"a": [20, 10, 0], "b": [200, 100, np.nan]}, index=[2, 1, 0]),
),
(
"left",
True,
DataFrame({"a": [0, 10, 20], "b": [np.nan, 100, 200]}, index=[0, 1, 2]),
),
(
"right",
False,
DataFrame(
{"a": [np.nan, 10, 20], "b": [300, 100, 200]}, index=[3, 1, 2]
),
),
(
"right",
True,
DataFrame(
{"a": [10, 20, np.nan], "b": [100, 200, 300]}, index=[1, 2, 3]
),
),
(
"outer",
False,
DataFrame(
{"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]},
index=[0, 1, 2, 3],
),
),
(
"outer",
True,
DataFrame(
{"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]},
index=[0, 1, 2, 3],
),
),
],
)
def test_merge_on_indexes(self, how, sort, expected):
left_df = DataFrame({"a": [20, 10, 0]}, index=[2, 1, 0])
right_df = DataFrame({"b": [300, 100, 200]}, index=[3, 1, 2])
result = merge(
left_df, right_df, left_index=True, right_index=True, how=how, sort=sort
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"index",
[
Index([1, 2, 4], dtype=dtyp, name="index_col")
for dtyp in tm.ALL_REAL_NUMPY_DTYPES
]
+ [
CategoricalIndex(["A", "B", "C"], categories=["A", "B", "C"], name="index_col"),
RangeIndex(start=0, stop=3, name="index_col"),
DatetimeIndex(["2018-01-01", "2018-01-02", "2018-01-03"], name="index_col"),
],
ids=lambda x: f"{type(x).__name__}[{x.dtype}]",
)
def test_merge_index_types(index):
# gh-20777
# assert key access is consistent across index types
left = DataFrame({"left_data": [1, 2, 3]}, index=index)
right = DataFrame({"right_data": [1.0, 2.0, 3.0]}, index=index)
result = left.merge(right, on=["index_col"])
expected = DataFrame(
{"left_data": [1, 2, 3], "right_data": [1.0, 2.0, 3.0]}, index=index
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"on,left_on,right_on,left_index,right_index,nm",
[
(["outer", "inner"], None, None, False, False, "B"),
(None, None, None, True, True, "B"),
(None, ["outer", "inner"], None, False, True, "B"),
(None, None, ["outer", "inner"], True, False, "B"),
(["outer", "inner"], None, None, False, False, None),
(None, None, None, True, True, None),
(None, ["outer", "inner"], None, False, True, None),
(None, None, ["outer", "inner"], True, False, None),
],
)
def test_merge_series(on, left_on, right_on, left_index, right_index, nm):
# GH 21220
a = DataFrame(
{"A": [1, 2, 3, 4]},
index=MultiIndex.from_product([["a", "b"], [0, 1]], names=["outer", "inner"]),
)
b = Series(
[1, 2, 3, 4],
index=MultiIndex.from_product([["a", "b"], [1, 2]], names=["outer", "inner"]),
name=nm,
)
expected = DataFrame(
{"A": [2, 4], "B": [1, 3]},
index=MultiIndex.from_product([["a", "b"], [1]], names=["outer", "inner"]),
)
if nm is not None:
result = merge(
a,
b,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
)
tm.assert_frame_equal(result, expected)
else:
msg = "Cannot merge a Series without a name"
with pytest.raises(ValueError, match=msg):
result = merge(
a,
b,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
)
def test_merge_series_multilevel():
# GH#47946
# GH 40993: For raising, enforced in 2.0
a = DataFrame(
{"A": [1, 2, 3, 4]},
index=MultiIndex.from_product([["a", "b"], [0, 1]], names=["outer", "inner"]),
)
b = Series(
[1, 2, 3, 4],
index=MultiIndex.from_product([["a", "b"], [1, 2]], names=["outer", "inner"]),
name=("B", "C"),
)
with pytest.raises(
MergeError, match="Not allowed to merge between different levels"
):
merge(a, b, on=["outer", "inner"])
@pytest.mark.parametrize(
"col1, col2, kwargs, expected_cols",
[
(0, 0, {"suffixes": ("", "_dup")}, ["0", "0_dup"]),
(0, 0, {"suffixes": (None, "_dup")}, [0, "0_dup"]),
(0, 0, {"suffixes": ("_x", "_y")}, ["0_x", "0_y"]),
(0, 0, {"suffixes": ["_x", "_y"]}, ["0_x", "0_y"]),
("a", 0, {"suffixes": (None, "_y")}, ["a", 0]),
(0.0, 0.0, {"suffixes": ("_x", None)}, ["0.0_x", 0.0]),
("b", "b", {"suffixes": (None, "_y")}, ["b", "b_y"]),
("a", "a", {"suffixes": ("_x", None)}, ["a_x", "a"]),
("a", "b", {"suffixes": ("_x", None)}, ["a", "b"]),
("a", "a", {"suffixes": (None, "_x")}, ["a", "a_x"]),
(0, 0, {"suffixes": ("_a", None)}, ["0_a", 0]),
("a", "a", {}, ["a_x", "a_y"]),
(0, 0, {}, ["0_x", "0_y"]),
],
)
def test_merge_suffix(col1, col2, kwargs, expected_cols):
# issue: 24782
a = DataFrame({col1: [1, 2, 3]})
b = DataFrame({col2: [4, 5, 6]})
expected = DataFrame([[1, 4], [2, 5], [3, 6]], columns=expected_cols)
result = a.merge(b, left_index=True, right_index=True, **kwargs)
tm.assert_frame_equal(result, expected)
result = merge(a, b, left_index=True, right_index=True, **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"how,expected",
[
(
"right",
{"A": [100, 200, 300], "B1": [60, 70, np.nan], "B2": [600, 700, 800]},
),
(
"outer",
{
"A": [1, 100, 200, 300],
"B1": [80, 60, 70, np.nan],
"B2": [np.nan, 600, 700, 800],
},
),
],
)
def test_merge_duplicate_suffix(how, expected):
left_df = DataFrame({"A": [100, 200, 1], "B": [60, 70, 80]})
right_df = DataFrame({"A": [100, 200, 300], "B": [600, 700, 800]})
result = merge(left_df, right_df, on="A", how=how, suffixes=("_x", "_x"))
expected = DataFrame(expected)
expected.columns = ["A", "B_x", "B_x"]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"col1, col2, suffixes",
[("a", "a", (None, None)), ("a", "a", ("", None)), (0, 0, (None, ""))],
)
def test_merge_suffix_error(col1, col2, suffixes):
# issue: 24782
a = DataFrame({col1: [1, 2, 3]})
b = DataFrame({col2: [3, 4, 5]})
# TODO: might reconsider current raise behaviour, see issue 24782
msg = "columns overlap but no suffix specified"
with pytest.raises(ValueError, match=msg):
merge(a, b, left_index=True, right_index=True, suffixes=suffixes)
@pytest.mark.parametrize("suffixes", [{"left", "right"}, {"left": 0, "right": 0}])
def test_merge_suffix_raises(suffixes):
a = DataFrame({"a": [1, 2, 3]})
b = DataFrame({"b": [3, 4, 5]})
with pytest.raises(TypeError, match="Passing 'suffixes' as a"):
merge(a, b, left_index=True, right_index=True, suffixes=suffixes)
TWO_GOT_THREE = "2, got 3" if PY314 else "2"
@pytest.mark.parametrize(
"col1, col2, suffixes, msg",
[
(
"a",
"a",
("a", "b", "c"),
(rf"too many values to unpack \(expected {TWO_GOT_THREE}\)"),
),
("a", "a", tuple("a"), r"not enough values to unpack \(expected 2, got 1\)"),
],
)
def test_merge_suffix_length_error(col1, col2, suffixes, msg):
a = DataFrame({col1: [1, 2, 3]})
b = DataFrame({col2: [3, 4, 5]})
with pytest.raises(ValueError, match=msg):
merge(a, b, left_index=True, right_index=True, suffixes=suffixes)
@pytest.mark.parametrize("cat_dtype", ["one", "two"])
@pytest.mark.parametrize("reverse", [True, False])
def test_merge_equal_cat_dtypes(cat_dtype, reverse):
# see gh-22501
cat_dtypes = {
"one": CategoricalDtype(categories=["a", "b", "c"], ordered=False),
"two": CategoricalDtype(categories=["a", "b", "c"], ordered=False),
}
df1 = DataFrame(
{"foo": Series(["a", "b", "c"]).astype(cat_dtypes["one"]), "left": [1, 2, 3]}
).set_index("foo")
data_foo = ["a", "b", "c"]
data_right = [1, 2, 3]
if reverse:
data_foo.reverse()
data_right.reverse()
df2 = DataFrame(
{"foo": Series(data_foo).astype(cat_dtypes[cat_dtype]), "right": data_right}
).set_index("foo")
result = df1.merge(df2, left_index=True, right_index=True)
expected = DataFrame(
{
"left": [1, 2, 3],
"right": [1, 2, 3],
"foo": Series(["a", "b", "c"]).astype(cat_dtypes["one"]),
}
).set_index("foo")
tm.assert_frame_equal(result, expected)
def test_merge_equal_cat_dtypes2():
# see gh-22501
cat_dtype = CategoricalDtype(categories=["a", "b", "c"], ordered=False)
# Test Data
df1 = DataFrame(
{"foo": Series(["a", "b"]).astype(cat_dtype), "left": [1, 2]}
).set_index("foo")
df2 = DataFrame(
{"foo": Series(["a", "b", "c"]).astype(cat_dtype), "right": [3, 2, 1]}
).set_index("foo")
result = df1.merge(df2, left_index=True, right_index=True)
expected = DataFrame(
{"left": [1, 2], "right": [3, 2], "foo": Series(["a", "b"]).astype(cat_dtype)}
).set_index("foo")
tm.assert_frame_equal(result, expected)
def test_merge_on_cat_and_ext_array():
# GH 28668
right = DataFrame(
{"a": Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval")}
)
left = right.copy()
left["a"] = left["a"].astype("category")
result = merge(left, right, how="inner", on="a")
expected = right.copy()
tm.assert_frame_equal(result, expected)
def test_merge_multiindex_columns():
# Issue #28518
# Verify that merging two dataframes give the expected labels
# The original cause of this issue comes from a bug in lexsort_depth and is tested in
# test_lexsort_depth
letters = ["a", "b", "c", "d"]
numbers = ["1", "2", "3"]
index = MultiIndex.from_product((letters, numbers), names=["outer", "inner"])
frame_x = DataFrame(columns=index)
frame_x["id"] = ""
frame_y = DataFrame(columns=index)
frame_y["id"] = ""
l_suf = "_x"
r_suf = "_y"
result = frame_x.merge(frame_y, on="id", suffixes=((l_suf, r_suf)))
# Constructing the expected results
tuples = [(letter + l_suf, num) for letter in letters for num in numbers]
tuples += [("id", "")]
tuples += [(letter + r_suf, num) for letter in letters for num in numbers]
expected_index = MultiIndex.from_tuples(tuples, names=["outer", "inner"])
expected = DataFrame(columns=expected_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_merge_datetime_upcast_dtype():
# https://github.com/pandas-dev/pandas/issues/31208
df1 = DataFrame({"x": ["a", "b", "c"], "y": ["1", "2", "4"]})
df2 = DataFrame(
{"y": ["1", "2", "3"], "z": pd.to_datetime(["2000", "2001", "2002"])}
)
result = merge(df1, df2, how="left", on="y")
expected = DataFrame(
{
"x": ["a", "b", "c"],
"y": ["1", "2", "4"],
"z": pd.to_datetime(["2000", "2001", "NaT"]),
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("n_categories", [5, 128])
def test_categorical_non_unique_monotonic(n_categories):
# GH 28189
# With n_categories as 5, we test the int8 case is hit in libjoin,
# with n_categories as 128 we test the int16 case.
left_index = CategoricalIndex([0] + list(range(n_categories)))
df1 = DataFrame(range(n_categories + 1), columns=["value"], index=left_index)
df2 = DataFrame(
[[6]],
columns=["value"],
index=CategoricalIndex([0], categories=list(range(n_categories))),
)
result = merge(df1, df2, how="left", left_index=True, right_index=True)
expected = DataFrame(
[[i, 6.0] if i < 2 else [i, np.nan] for i in range(n_categories + 1)],
columns=["value_x", "value_y"],
index=left_index,
)
tm.assert_frame_equal(expected, result)
def test_merge_join_categorical_multiindex():
# From issue 16627
a = {
"Cat1": Categorical(["a", "b", "a", "c", "a", "b"], ["a", "b", "c"]),
"Int1": [0, 1, 0, 1, 0, 0],
}
a = DataFrame(a)
b = {
"Cat": Categorical(["a", "b", "c", "a", "b", "c"], ["a", "b", "c"]),
"Int": [0, 0, 0, 1, 1, 1],
"Factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
}
b = DataFrame(b).set_index(["Cat", "Int"])["Factor"]
expected = merge(
a,
b.reset_index(),
left_on=["Cat1", "Int1"],
right_on=["Cat", "Int"],
how="left",
)
expected = expected.drop(["Cat", "Int"], axis=1)
result = a.join(b, on=["Cat1", "Int1"])
tm.assert_frame_equal(expected, result)
# Same test, but with ordered categorical
a = {
"Cat1": Categorical(
["a", "b", "a", "c", "a", "b"], ["b", "a", "c"], ordered=True
),
"Int1": [0, 1, 0, 1, 0, 0],
}
a = DataFrame(a)
b = {
"Cat": Categorical(
["a", "b", "c", "a", "b", "c"], ["b", "a", "c"], ordered=True
),
"Int": [0, 0, 0, 1, 1, 1],
"Factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
}
b = DataFrame(b).set_index(["Cat", "Int"])["Factor"]
expected = merge(
a,
b.reset_index(),
left_on=["Cat1", "Int1"],
right_on=["Cat", "Int"],
how="left",
)
expected = expected.drop(["Cat", "Int"], axis=1)
result = a.join(b, on=["Cat1", "Int1"])
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize("func", ["merge", "merge_asof"])
@pytest.mark.parametrize(
("kwargs", "err_msg"),
[
({"left_on": "a", "left_index": True}, ["left_on", "left_index"]),
({"right_on": "a", "right_index": True}, ["right_on", "right_index"]),
],
)
def test_merge_join_cols_error_reporting_duplicates(func, kwargs, err_msg):
# GH: 16228
left = DataFrame({"a": [1, 2], "b": [3, 4]})
right = DataFrame({"a": [1, 1], "c": [5, 6]})
msg = rf'Can only pass argument "{err_msg[0]}" OR "{err_msg[1]}" not both\.'
with pytest.raises(MergeError, match=msg):
getattr(pd, func)(left, right, **kwargs)
@pytest.mark.parametrize("func", ["merge", "merge_asof"])
@pytest.mark.parametrize(
("kwargs", "err_msg"),
[
({"left_on": "a"}, ["right_on", "right_index"]),
({"right_on": "a"}, ["left_on", "left_index"]),
],
)
def test_merge_join_cols_error_reporting_missing(func, kwargs, err_msg):
# GH: 16228
left = DataFrame({"a": [1, 2], "b": [3, 4]})
right = DataFrame({"a": [1, 1], "c": [5, 6]})
msg = rf'Must pass "{err_msg[0]}" OR "{err_msg[1]}"\.'
with pytest.raises(MergeError, match=msg):
getattr(pd, func)(left, right, **kwargs)
@pytest.mark.parametrize("func", ["merge", "merge_asof"])
@pytest.mark.parametrize(
"kwargs",
[
{"right_index": True},
{"left_index": True},
],
)
def test_merge_join_cols_error_reporting_on_and_index(func, kwargs):
# GH: 16228
left = DataFrame({"a": [1, 2], "b": [3, 4]})
right = DataFrame({"a": [1, 1], "c": [5, 6]})
msg = (
r'Can only pass argument "on" OR "left_index" '
r'and "right_index", not a combination of both\.'
)
with pytest.raises(MergeError, match=msg):
getattr(pd, func)(left, right, on="a", **kwargs)
def test_merge_right_left_index():
# GH#38616
left = DataFrame({"x": [1, 1], "z": ["foo", "foo"]})
right = DataFrame({"x": [1, 1], "z": ["foo", "foo"]})
result = merge(left, right, how="right", left_index=True, right_on="x")
expected = DataFrame(
{
"x": [1, 1],
"x_x": [1, 1],
"z_x": ["foo", "foo"],
"x_y": [1, 1],
"z_y": ["foo", "foo"],
}
)
tm.assert_frame_equal(result, expected)
def test_merge_result_empty_index_and_on():
# GH#33814
df1 = DataFrame({"a": [1], "b": [2]}).set_index(["a", "b"])
df2 = DataFrame({"b": [1]}).set_index(["b"])
expected = DataFrame({"a": [], "b": []}, dtype=np.int64).set_index(["a", "b"])
result = merge(df1, df2, left_on=["b"], right_index=True)
tm.assert_frame_equal(result, expected)
result = merge(df2, df1, left_index=True, right_on=["b"])
tm.assert_frame_equal(result, expected)
def test_merge_suffixes_produce_dup_columns_raises():
# GH#22818; Enforced in 2.0
left = DataFrame({"a": [1, 2, 3], "b": 1, "b_x": 2})
right = DataFrame({"a": [1, 2, 3], "b": 2})
with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"):
merge(left, right, on="a")
with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"):
merge(right, left, on="a", suffixes=("_y", "_x"))
def test_merge_duplicate_columns_with_suffix_no_warning():
# GH#22818
    # Do not raise a warning when duplicates are caused by duplicates in the original frame
left = DataFrame([[1, 1, 1], [2, 2, 2]], columns=["a", "b", "b"])
right = DataFrame({"a": [1, 3], "b": 2})
result = merge(left, right, on="a")
expected = DataFrame([[1, 1, 1, 2]], columns=["a", "b_x", "b_x", "b_y"])
tm.assert_frame_equal(result, expected)
def test_merge_duplicate_columns_with_suffix_causing_another_duplicate_raises():
# GH#22818, Enforced in 2.0
    # This should raise because the suffixes cause another collision
left = DataFrame([[1, 1, 1, 1], [2, 2, 2, 2]], columns=["a", "b", "b", "b_x"])
right = DataFrame({"a": [1, 3], "b": 2})
with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"):
merge(left, right, on="a")
def test_merge_string_float_column_result():
# GH 13353
df1 = DataFrame([[1, 2], [3, 4]], columns=Index(["a", 114.0]))
df2 = DataFrame([[9, 10], [11, 12]], columns=["x", "y"])
result = merge(df2, df1, how="inner", left_index=True, right_index=True)
expected = DataFrame(
[[9, 10, 1, 2], [11, 12, 3, 4]], columns=Index(["x", "y", "a", 114.0])
)
tm.assert_frame_equal(result, expected)
def test_mergeerror_on_left_index_mismatched_dtypes():
# GH 22449
df_1 = DataFrame(data=["X"], columns=["C"], index=[22])
df_2 = DataFrame(data=["X"], columns=["C"], index=[999])
with pytest.raises(MergeError, match="Can only pass argument"):
merge(df_1, df_2, on=["C"], left_index=True)
def test_merge_on_left_categoricalindex():
# GH#48464 don't raise when left_on is a CategoricalIndex
ci = CategoricalIndex(range(3))
right = DataFrame({"A": ci, "B": range(3)})
left = DataFrame({"C": range(3, 6)})
res = merge(left, right, left_on=ci, right_on="A")
expected = merge(left, right, left_on=ci._data, right_on="A")
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize("dtype", [None, "Int64"])
def test_merge_outer_with_NaN(dtype):
# GH#43550
item = np.nan if dtype is None else pd.NA
left = DataFrame({"key": [1, 2], "col1": [1, 2]}, dtype=dtype)
right = DataFrame({"key": [item, item], "col2": [3, 4]}, dtype=dtype)
result = merge(left, right, on="key", how="outer")
expected = DataFrame(
{
"key": [1, 2, item, item],
"col1": [1, 2, item, item],
"col2": [item, item, 3, 4],
},
dtype=dtype,
)
tm.assert_frame_equal(result, expected)
# switch left and right
result = merge(right, left, on="key", how="outer")
expected = DataFrame(
{
"key": [1, 2, item, item],
"col2": [item, item, 3, 4],
"col1": [1, 2, item, item],
},
dtype=dtype,
)
tm.assert_frame_equal(result, expected)
def test_merge_different_index_names():
# GH#45094
left = DataFrame({"a": [1]}, index=Index([1], name="c"))
right = DataFrame({"a": [1]}, index=Index([1], name="d"))
result = merge(left, right, left_on="c", right_on="d")
expected = DataFrame({"a_x": [1], "a_y": 1})
tm.assert_frame_equal(result, expected)
def test_merge_ea(any_numeric_ea_dtype, join_type):
# GH#44240
left = DataFrame({"a": [1, 2, 3], "b": 1}, dtype=any_numeric_ea_dtype)
right = DataFrame({"a": [1, 2, 3], "c": 2}, dtype=any_numeric_ea_dtype)
result = left.merge(right, how=join_type)
expected = DataFrame({"a": [1, 2, 3], "b": 1, "c": 2}, dtype=any_numeric_ea_dtype)
tm.assert_frame_equal(result, expected)
def test_merge_ea_and_non_ea(any_numeric_ea_dtype, join_type):
# GH#44240
left = DataFrame({"a": [1, 2, 3], "b": 1}, dtype=any_numeric_ea_dtype)
right = DataFrame({"a": [1, 2, 3], "c": 2}, dtype=any_numeric_ea_dtype.lower())
result = left.merge(right, how=join_type)
expected = DataFrame(
{
"a": Series([1, 2, 3], dtype=any_numeric_ea_dtype),
"b": Series([1, 1, 1], dtype=any_numeric_ea_dtype),
"c": Series([2, 2, 2], dtype=any_numeric_ea_dtype.lower()),
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", ["int64", "int64[pyarrow]"])
def test_merge_arrow_and_numpy_dtypes(dtype):
# GH#52406
pytest.importorskip("pyarrow")
df = DataFrame({"a": [1, 2]}, dtype=dtype)
df2 = DataFrame({"a": [1, 2]}, dtype="int64[pyarrow]")
result = df.merge(df2)
expected = df.copy()
tm.assert_frame_equal(result, expected)
result = df2.merge(df)
expected = df2.copy()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "America/Chicago"])
def test_merge_datetime_different_resolution(tz, join_type):
# https://github.com/pandas-dev/pandas/issues/53200
vals = [
pd.Timestamp(2023, 5, 12, tz=tz),
pd.Timestamp(2023, 5, 13, tz=tz),
pd.Timestamp(2023, 5, 14, tz=tz),
]
df1 = DataFrame({"t": vals[:2], "a": [1.0, 2.0]})
df1["t"] = df1["t"].dt.as_unit("ns")
df2 = DataFrame({"t": vals[1:], "b": [1.0, 2.0]})
df2["t"] = df2["t"].dt.as_unit("s")
expected = DataFrame({"t": vals, "a": [1.0, 2.0, np.nan], "b": [np.nan, 1.0, 2.0]})
expected["t"] = expected["t"].dt.as_unit("ns")
if join_type == "inner":
expected = expected.iloc[[1]].reset_index(drop=True)
elif join_type == "left":
expected = expected.iloc[[0, 1]]
elif join_type == "right":
expected = expected.iloc[[1, 2]].reset_index(drop=True)
result = df1.merge(df2, on="t", how=join_type)
tm.assert_frame_equal(result, expected)
def test_merge_multiindex_single_level():
# GH52331
df = DataFrame({"col": ["A", "B"]})
df2 = DataFrame(
data={"b": [100]},
index=MultiIndex.from_tuples([("A",), ("C",)], names=["col"]),
)
expected = DataFrame({"col": ["A", "B"], "b": [100, np.nan]})
result = df.merge(df2, left_on=["col"], right_index=True, how="left")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("on_index", [True, False])
@pytest.mark.parametrize("left_unique", [True, False])
@pytest.mark.parametrize("left_monotonic", [True, False])
@pytest.mark.parametrize("right_unique", [True, False])
@pytest.mark.parametrize("right_monotonic", [True, False])
def test_merge_combinations(
join_type,
sort,
on_index,
left_unique,
left_monotonic,
right_unique,
right_monotonic,
):
how = join_type
# GH 54611
left = [2, 3]
if left_unique:
left.append(4 if left_monotonic else 1)
else:
left.append(3 if left_monotonic else 2)
right = [2, 3]
if right_unique:
right.append(4 if right_monotonic else 1)
else:
right.append(3 if right_monotonic else 2)
left = DataFrame({"key": left})
right = DataFrame({"key": right})
if on_index:
left = left.set_index("key")
right = right.set_index("key")
on_kwargs = {"left_index": True, "right_index": True}
else:
on_kwargs = {"on": "key"}
result = merge(left, right, how=how, sort=sort, **on_kwargs)
if on_index:
left = left.reset_index()
right = right.reset_index()
if how in ["left", "right", "inner"]:
if how in ["left", "inner"]:
expected, other, other_unique = left, right, right_unique
else:
expected, other, other_unique = right, left, left_unique
if how == "inner":
keep_values = set(left["key"].values).intersection(right["key"].values)
keep_mask = expected["key"].isin(keep_values)
expected = expected[keep_mask]
if sort:
expected = expected.sort_values("key")
if not other_unique:
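            # Each key joins once per occurrence in the other frame, so repeat every
            # expected row by that key's count there (fill_value=1 where the key is absent).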
other_value_counts = other["key"].value_counts()
repeats = other_value_counts.reindex(expected["key"].values, fill_value=1)
repeats = repeats.astype(np.intp)
expected = expected["key"].repeat(repeats.values)
expected = expected.to_frame()
elif how == "outer":
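        # For an outer join each key contributes (left count) * (right count) rows;
        # fill_value=1 stands in for keys present on only one side.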
left_counts = left["key"].value_counts()
right_counts = right["key"].value_counts()
expected_counts = left_counts.mul(right_counts, fill_value=1)
expected_counts = expected_counts.astype(np.intp)
expected = expected_counts.index.values.repeat(expected_counts.values)
expected = DataFrame({"key": expected})
expected = expected.sort_values("key")
if on_index:
expected = expected.set_index("key")
else:
expected = expected.reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_merge_ea_int_and_float_numpy():
# GH#46178
df1 = DataFrame([1.0, pd.NA], dtype=pd.Int64Dtype())
df2 = DataFrame([1.5])
expected = DataFrame(columns=[0], dtype="Int64")
with tm.assert_produces_warning(UserWarning, match="You are merging"):
result = df1.merge(df2)
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(UserWarning, match="You are merging"):
result = df2.merge(df1)
tm.assert_frame_equal(result, expected.astype("float64"))
df2 = DataFrame([1.0])
expected = DataFrame([1], columns=[0], dtype="Int64")
result = df1.merge(df2)
tm.assert_frame_equal(result, expected)
result = df2.merge(df1)
tm.assert_frame_equal(result, expected.astype("float64"))
def test_merge_arrow_string_index(any_string_dtype):
# GH#54894
pytest.importorskip("pyarrow")
left = DataFrame({"a": ["a", "b"]}, dtype=any_string_dtype)
right = DataFrame({"b": 1}, index=Index(["a", "c"], dtype=any_string_dtype))
result = left.merge(right, left_on="a", right_index=True, how="left")
expected = DataFrame(
{"a": Series(["a", "b"], dtype=any_string_dtype), "b": [1, np.nan]}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("left_empty", [True, False])
@pytest.mark.parametrize("right_empty", [True, False])
def test_merge_empty_frames_column_order(left_empty, right_empty):
# GH 51929
df1 = DataFrame(1, index=[0], columns=["A", "B"])
df2 = DataFrame(1, index=[0], columns=["A", "C", "D"])
if left_empty:
df1 = df1.iloc[:0]
if right_empty:
df2 = df2.iloc[:0]
result = merge(df1, df2, on=["A"], how="outer")
expected = DataFrame(1, index=range(1), columns=["A", "B", "C", "D"])
if left_empty and right_empty:
expected = expected.iloc[:0]
elif left_empty:
expected["B"] = np.nan
elif right_empty:
expected[["C", "D"]] = np.nan
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("how", ["left", "right", "inner", "outer"])
def test_merge_datetime_and_timedelta(how):
left = DataFrame({"key": Series([1, None], dtype="datetime64[ns]")})
right = DataFrame({"key": Series([1], dtype="timedelta64[ns]")})
msg = (
f"You are trying to merge on {left['key'].dtype} and {right['key'].dtype} "
"columns for key 'key'. If you wish to proceed you should use pd.concat"
)
with pytest.raises(ValueError, match=re.escape(msg)):
left.merge(right, on="key", how=how)
msg = (
f"You are trying to merge on {right['key'].dtype} and {left['key'].dtype} "
"columns for key 'key'. If you wish to proceed you should use pd.concat"
)
with pytest.raises(ValueError, match=re.escape(msg)):
right.merge(left, on="key", how=how)
def test_merge_on_all_nan_column():
# GH#59421
left = DataFrame({"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan], "z": [4, 5, 6]})
right = DataFrame({"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan], "zz": [4, 5, 6]})
result = left.merge(right, on=["x", "y"], how="outer")
    # Should not trigger an array bounds error with bounds checking or asan enabled.
expected = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan], "z": [4, 5, 6], "zz": [4, 5, 6]}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("suffixes", [("_dup", ""), ("", "_dup")])
def test_merge_for_suffix_collisions(suffixes):
# GH#61402
df1 = DataFrame({"col1": [1], "col2": [2]})
df2 = DataFrame({"col1": [1], "col2": [2], "col2_dup": [3]})
with pytest.raises(MergeError, match="duplicate columns"):
merge(df1, df2, on="col1", suffixes=suffixes)
def test_merge_categorical_key_recursion():
# GH#56376
lt = CategoricalDtype(categories=np.asarray([1, 2, 3], dtype="int64"))
rt = CategoricalDtype(categories=np.asarray([1, 2, 3], dtype="float64"))
left = DataFrame({"key": Series([1, 2], dtype=lt)})
right = DataFrame({"key": Series([1, 3], dtype=rt)})
result = left.merge(right, on="key", how="outer")
expected = left.astype("int64").merge(
right.astype("float64"), on="key", how="outer"
)
tm.assert_frame_equal(result, expected)
def test_merge_pyarrow_datetime_duplicates():
# GH#61926
pytest.importorskip("pyarrow")
t = pd.date_range("2025-07-06", periods=3, freq="h")
df1 = DataFrame({"time": t, "val1": [1, 2, 3]})
df1 = df1.convert_dtypes(dtype_backend="pyarrow")
df2 = DataFrame({"time": t.repeat(2), "val2": [10, 20, 30, 40, 50, 60]})
df2 = df2.convert_dtypes(dtype_backend="pyarrow")
result = merge(df1, df2, on="time", how="left")
expected = DataFrame(
{
"time": t.repeat(2),
"val1": [1, 1, 2, 2, 3, 3],
"val2": [10, 20, 30, 40, 50, 60],
}
)
expected = expected.convert_dtypes(dtype_backend="pyarrow")
tm.assert_frame_equal(result, expected)
| TestMergeOnIndexes |
python | MongoEngine__mongoengine | mongoengine/base/fields.py | {
"start": 876,
"end": 11844
} | class ____:
"""A base class for fields in a MongoDB document. Instances of this class
may be added to subclasses of `Document` to define a document's schema.
"""
name = None # set in TopLevelDocumentMetaclass
_geo_index = False
_auto_gen = False # Call `generate` to generate a value
_thread_local_storage = threading.local()
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that MongoEngine implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
def __init__(
self,
db_field=None,
required=False,
default=None,
unique=False,
unique_with=None,
primary_key=False,
validation=None,
choices=None,
null=False,
sparse=False,
**kwargs,
):
"""
:param db_field: The database field to store this field in
(defaults to the name of the field)
:param required: If the field is required. Whether it has to have a
value or not. Defaults to False.
:param default: (optional) The default value for this field if no value
has been set, if the value is set to None or has been unset. It can be a
callable.
:param unique: Is the field value unique or not (Creates an index). Defaults to False.
:param unique_with: (optional) The other field this field should be
unique with (Creates an index).
        :param primary_key: Mark this field as the primary key (Creates an index). Defaults to False.
:param validation: (optional) A callable to validate the value of the
field. The callable takes the value as parameter and should raise
a ValidationError if validation fails
:param choices: (optional) The valid choices
:param null: (optional) If the field value can be null when a default exists. If not set, the default value
will be used in case a field with a default value is set to None. Defaults to False.
:param sparse: (optional) `sparse=True` combined with `unique=True` and `required=False`
means that uniqueness won't be enforced for `None` values (Creates an index). Defaults to False.
:param **kwargs: (optional) Arbitrary indirection-free metadata for
this field can be supplied as additional keyword arguments and
accessed as attributes of the field. Must not conflict with any
existing attributes. Common metadata includes `verbose_name` and
`help_text`.
"""
self.db_field = db_field if not primary_key else "_id"
self.required = required or primary_key
self.default = default
self.unique = bool(unique or unique_with)
self.unique_with = unique_with
self.primary_key = primary_key
self.validation = validation
self.choices = choices
self.null = null
self.sparse = sparse
self._owner_document = None
self.__auto_dereference = True
# Make sure db_field is a string (if it's explicitly defined).
if self.db_field is not None and not isinstance(self.db_field, str):
raise TypeError("db_field should be a string.")
# Make sure db_field doesn't contain any forbidden characters.
if isinstance(self.db_field, str) and (
"." in self.db_field
or "\0" in self.db_field
or self.db_field.startswith("$")
):
raise ValueError(
'field names cannot contain dots (".") or null characters '
'("\\0"), and they must not start with a dollar sign ("$").'
)
# Detect and report conflicts between metadata and base properties.
conflicts = set(dir(self)) & set(kwargs)
if conflicts:
raise TypeError(
"%s already has attribute(s): %s"
% (self.__class__.__name__, ", ".join(conflicts))
)
# Assign metadata to the instance
# This efficient method is available because no __slots__ are defined.
self.__dict__.update(kwargs)
# Adjust the appropriate creation counter, and save our local copy.
if self.db_field == "_id":
self.creation_counter = BaseField.auto_creation_counter
BaseField.auto_creation_counter -= 1
else:
self.creation_counter = BaseField.creation_counter
BaseField.creation_counter += 1
def set_auto_dereferencing(self, value):
self.__auto_dereference = value
@property
def _no_dereference_context_local(self):
if not hasattr(self._thread_local_storage, "no_dereference_context"):
self._thread_local_storage.no_dereference_context = 0
return self._thread_local_storage.no_dereference_context
@property
def _no_dereference_context_is_set(self):
return self._no_dereference_context_local > 0
def _incr_no_dereference_context(self):
self._thread_local_storage.no_dereference_context = (
self._no_dereference_context_local + 1
)
def _decr_no_dereference_context(self):
self._thread_local_storage.no_dereference_context = (
self._no_dereference_context_local - 1
)
@property
def _auto_dereference(self):
return self.__auto_dereference and not self._no_dereference_context_is_set
def __get__(self, instance, owner):
"""Descriptor for retrieving a value from a field in a document."""
if instance is None:
# Document class being used rather than a document object
return self
# Get value from document instance if available
return instance._data.get(self.name)
def __set__(self, instance, value):
"""Descriptor for assigning a value to a field in a document."""
# If setting to None and there is a default value provided for this
# field, then set the value to the default value.
if value is None:
if self.null:
value = None
elif self.default is not None:
value = self.default
if callable(value):
value = value()
if instance._initialised:
try:
value_has_changed = (
self.name not in instance._data
or instance._data[self.name] != value
)
if value_has_changed:
instance._mark_as_changed(self.name)
except Exception:
# Some values can't be compared and throw an error when we
# attempt to do so (e.g. tz-naive and tz-aware datetimes).
# Mark the field as changed in such cases.
instance._mark_as_changed(self.name)
EmbeddedDocument = _import_class("EmbeddedDocument")
if isinstance(value, EmbeddedDocument):
value._instance = weakref.proxy(instance)
elif isinstance(value, (list, tuple)):
for v in value:
if isinstance(v, EmbeddedDocument):
v._instance = weakref.proxy(instance)
instance._data[self.name] = value
def error(self, message="", errors=None, field_name=None):
"""Raise a ValidationError."""
field_name = field_name if field_name else self.name
raise ValidationError(message, errors=errors, field_name=field_name)
def to_python(self, value):
"""Convert a MongoDB-compatible type to a Python type."""
return value
def to_mongo(self, value):
"""Convert a Python type to a MongoDB-compatible type."""
return self.to_python(value)
def _to_mongo_safe_call(self, value, use_db_field=True, fields=None):
"""Helper method to call to_mongo with proper inputs."""
f_inputs = self.to_mongo.__code__.co_varnames
ex_vars = {}
if "fields" in f_inputs:
ex_vars["fields"] = fields
if "use_db_field" in f_inputs:
ex_vars["use_db_field"] = use_db_field
return self.to_mongo(value, **ex_vars)
def prepare_query_value(self, op, value):
"""Prepare a value that is being used in a query for PyMongo."""
if op in UPDATE_OPERATORS:
self.validate(value)
return value
def validate(self, value, clean=True):
"""Perform validation on a value."""
pass
def _validate_choices(self, value):
Document = _import_class("Document")
EmbeddedDocument = _import_class("EmbeddedDocument")
choice_list = self.choices
if isinstance(next(iter(choice_list)), (list, tuple)):
# next(iter) is useful for sets
choice_list = [k for k, _ in choice_list]
# Choices which are other types of Documents
if isinstance(value, (Document, EmbeddedDocument)):
if not any(isinstance(value, c) for c in choice_list):
self.error("Value must be an instance of %s" % (choice_list))
# Choices which are types other than Documents
else:
values = value if isinstance(value, (list, tuple)) else [value]
if len(set(values) - set(choice_list)):
self.error("Value must be one of %s" % str(choice_list))
def _validate(self, value, **kwargs):
# Check the Choices Constraint
if self.choices:
self._validate_choices(value)
# check validation argument
if self.validation is not None:
if callable(self.validation):
try:
# breaking change of 0.18
# Get rid of True/False-type return for the validation method
# in favor of having validation raising a ValidationError
ret = self.validation(value)
if ret is not None:
raise DeprecatedError(
"validation argument for `%s` must not return anything, "
"it should raise a ValidationError if validation fails"
% self.name
)
except ValidationError as ex:
self.error(str(ex))
else:
raise ValueError(
'validation argument for `"%s"` must be a ' "callable." % self.name
)
self.validate(value, **kwargs)
@property
def owner_document(self):
return self._owner_document
def _set_owner_document(self, owner_document):
self._owner_document = owner_document
@owner_document.setter
def owner_document(self, owner_document):
self._set_owner_document(owner_document)
| BaseField |
python | getsentry__sentry-python | tests/integrations/beam/test_beam.py | {
"start": 3941,
"end": 6006
} | class ____(OutputHandler):
def process_outputs(
self, windowed_input_element, results, watermark_estimator=None
):
self.handle_process_outputs(
windowed_input_element, results, watermark_estimator
)
def handle_process_outputs(
self, windowed_input_element, results, watermark_estimator=None
):
print(windowed_input_element)
try:
for result in results:
assert result
except StopIteration:
print("In here")
@pytest.fixture
def init_beam(sentry_init):
def inner(fn):
sentry_init(default_integrations=False, integrations=[BeamIntegration()])
# Little hack to avoid having to run the whole pipeline.
pardo = ParDo(fn)
signature = pardo._signature
output_processor = _OutputHandler()
return DoFnInvoker.create_invoker(
signature,
output_processor,
DoFnContext("test"),
input_args=[],
input_kwargs={},
)
return inner
@pytest.mark.parametrize("fn", [test_simple, test_callable, test_place_holder])
def test_invoker_normal(init_beam, fn):
invoker = init_beam(fn)
print("Normal testing {} with {} invoker.".format(fn, invoker))
windowed_value = WindowedValue(False, 0, [None])
invoker.invoke_process(windowed_value)
@pytest.mark.parametrize("fn", [test_simple, test_callable, test_place_holder])
def test_invoker_exception(init_beam, capture_events, capture_exceptions, fn):
invoker = init_beam(fn)
events = capture_events()
print("Exception testing {} with {} invoker.".format(fn, invoker))
# Window value will always have one value for the process to run.
windowed_value = WindowedValue(True, 0, [None])
try:
invoker.invoke_process(windowed_value)
except Exception:
pass
(event,) = events
(exception,) = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
assert exception["mechanism"]["type"] == "beam"
| _OutputHandler |
python | django-haystack__django-haystack | test_haystack/elasticsearch7_tests/test_backend.py | {
"start": 5668,
"end": 6337
} | class ____(
indexes.SearchIndex, indexes.Indexable
):
text = indexes.CharField(document=True, default="")
name = indexes.CharField(faceted=True)
is_active = indexes.BooleanField(faceted=True)
post_count = indexes.IntegerField()
post_count_i = indexes.FacetIntegerField(facet_for="post_count")
average_rating = indexes.FloatField(faceted=True)
pub_date = indexes.DateField(faceted=True)
created = indexes.DateTimeField(faceted=True)
sites = indexes.MultiValueField(faceted=True)
facet_field = indexes.FacetCharField(model_attr="name")
def get_model(self):
return MockModel
| Elasticsearch7ComplexFacetsMockSearchIndex |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/ext.py | {
"start": 10323,
"end": 11170
} | class ____(functions.GenericFunction[_T]):
inherit_cache = True
def __init__(self, *args, **kwargs):
args = list(args)
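        # When more than one argument is given, the leading argument is coerced as
        # the optional PostgreSQL regconfig (text search configuration) argument.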
if len(args) > 1:
initial_arg = coercions.expect(
roles.ExpressionElementRole,
args.pop(0),
name=getattr(self, "name", None),
apply_propagate_attrs=self,
type_=types.REGCONFIG,
)
initial_arg = [initial_arg]
else:
initial_arg = []
addtl_args = [
coercions.expect(
roles.ExpressionElementRole,
c,
name=getattr(self, "name", None),
apply_propagate_attrs=self,
)
for c in args
]
super().__init__(*(initial_arg + addtl_args), **kwargs)
| _regconfig_fn |
python | etianen__django-reversion | tests/test_app/tests/test_api.py | {
"start": 1041,
"end": 2066
} | class ____(TestBase):
def testRegister(self):
reversion.register(TestModel)
self.assertTrue(reversion.is_registered(TestModel))
def testRegisterDecorator(self):
@reversion.register()
class TestModelDecorater(models.Model):
pass
self.assertTrue(reversion.is_registered(TestModelDecorater))
def testRegisterAlreadyRegistered(self):
reversion.register(TestModel)
with self.assertRaises(reversion.RegistrationError):
reversion.register(TestModel)
def testRegisterM2MSThroughLazy(self):
# When register is used as a decorator in models.py, lazy relations haven't had a chance to be resolved, so
# will still be a string.
@reversion.register()
class TestModelLazy(models.Model):
related = models.ManyToManyField(
TestModelRelated,
through="TestModelThroughLazy",
)
class TestModelThroughLazy(models.Model):
pass
| RegisterTest |
python | pandas-dev__pandas | pandas/tests/tseries/offsets/test_business_month.py | {
"start": 957,
"end": 3909
} | class ____:
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BMonthBegin()
offset2 = BMonthBegin()
assert not offset1 != offset2
offset_cases = []
offset_cases.append(
(
BMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 9, 1): datetime(2006, 10, 2),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2006, 12, 1): datetime(2007, 1, 1),
},
)
)
offset_cases.append(
(
BMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2006, 10, 2): datetime(2006, 10, 2),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 9, 15): datetime(2006, 10, 2),
},
)
)
offset_cases.append(
(
BMonthBegin(2),
{
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 1, 15): datetime(2008, 3, 3),
datetime(2006, 12, 29): datetime(2007, 2, 1),
datetime(2006, 12, 31): datetime(2007, 2, 1),
datetime(2007, 1, 1): datetime(2007, 3, 1),
datetime(2006, 11, 1): datetime(2007, 1, 1),
},
)
)
offset_cases.append(
(
BMonthBegin(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 2),
datetime(2008, 6, 1): datetime(2008, 5, 1),
datetime(2008, 3, 10): datetime(2008, 3, 3),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 12, 30): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 1),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(BMonthBegin(), datetime(2007, 12, 31), False),
(BMonthBegin(), datetime(2008, 1, 1), True),
(BMonthBegin(), datetime(2001, 4, 2), True),
(BMonthBegin(), datetime(2008, 3, 3), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
| TestBMonthBegin |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/validators/test_base_data_condition_group.py | {
"start": 4795,
"end": 7106
} | class ____(TestBaseDataConditionGroupValidator):
def test_update(self) -> None:
self.valid_data["conditions"] = [
{
"type": Condition.EQUAL,
"comparison": 1,
"conditionResult": True,
}
]
validator = BaseDataConditionGroupValidator(data=self.valid_data, context=self.context)
validator.is_valid(raise_exception=True)
dcg = validator.create(validator.validated_data)
assert dcg.conditions.count() == 1
condition = dcg.conditions.first()
assert condition
# update condition
self.valid_data["conditions"] = [
{
"id": condition.id,
"type": Condition.EQUAL,
"comparison": 2, # update to 2 from 1
"conditionResult": True,
}
]
validator = BaseDataConditionGroupValidator(data=self.valid_data, context=self.context)
validator.is_valid(raise_exception=True)
dcg = validator.update(dcg, validator.validated_data)
assert dcg.conditions.count() == 1
condition = dcg.conditions.first()
assert condition is not None
assert condition.type == Condition.EQUAL
assert condition.comparison == 2
assert condition.condition_group == dcg
# add another condition
self.valid_data["conditions"].append(
{
"conditionGroupId": dcg.id,
"type": Condition.NOT_EQUAL,
"comparison": 5,
"conditionResult": True,
}
)
validator = BaseDataConditionGroupValidator(data=self.valid_data, context=self.context)
validator.is_valid(raise_exception=True)
dcg = validator.update(dcg, validator.validated_data)
assert dcg.conditions.count() == 2
conditions = dcg.conditions.all()
condition1 = conditions[0]
condition2 = conditions[1]
assert condition1.type == Condition.EQUAL
assert condition1.comparison == 2
assert condition1.condition_group == dcg
assert condition2.type == Condition.NOT_EQUAL
assert condition2.comparison == 5
assert condition2.condition_group == dcg
| TestBaseDataConditionGroupValidatorUpdate |
python | doocs__leetcode | solution/3100-3199/3158.Find the XOR of Numbers Which Appear Twice/Solution.py | {
"start": 0,
"end": 172
} | class ____:
def duplicateNumbersXOR(self, nums: List[int]) -> int:
cnt = Counter(nums)
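        # XOR together exactly the values that occur twice; the initial value 0 covers the empty case.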
return reduce(xor, [x for x, v in cnt.items() if v == 2], 0)
| Solution |
python | apache__thrift | test/py/TestClient.py | {
"start": 15900,
"end": 16117
} | class ____(MultiplexedOptionalTest):
def get_protocol(self, transport):
return make_pedantic(TCompactProtocol.TCompactProtocolAcceleratedFactory(fallback=False).getProtocol(transport))
| AcceleratedCompactTest |
python | euske__pdfminer | pdfminer/pdfdevice.py | {
"start": 1096,
"end": 3674
} | class ____(PDFDevice):
def render_string(self, textstate, seq):
matrix = mult_matrix(textstate.matrix, self.ctm)
font = textstate.font
fontsize = textstate.fontsize
scaling = textstate.scaling * .01
charspace = textstate.charspace * scaling
wordspace = textstate.wordspace * scaling
rise = textstate.rise
if font.is_multibyte():
wordspace = 0
dxscale = .001 * fontsize * scaling
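        # Displacements in a TJ string sequence are expressed in thousandths of a
        # text-space unit, hence the .001 factor above.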
if font.is_vertical():
textstate.linematrix = self.render_string_vertical(
seq, matrix, textstate.linematrix, font, fontsize,
scaling, charspace, wordspace, rise, dxscale)
else:
textstate.linematrix = self.render_string_horizontal(
seq, matrix, textstate.linematrix, font, fontsize,
scaling, charspace, wordspace, rise, dxscale)
return
def render_string_horizontal(self, seq, matrix, pos,
font, fontsize, scaling, charspace, wordspace, rise, dxscale):
(x, y) = pos
needcharspace = False
for obj in seq:
if isnumber(obj):
x -= obj*dxscale
needcharspace = True
else:
for cid in font.decode(obj):
if needcharspace:
x += charspace
x += self.render_char(translate_matrix(matrix, (x, y)),
font, fontsize, scaling, rise, cid)
if cid == 32 and wordspace:
x += wordspace
needcharspace = True
return (x, y)
def render_string_vertical(self, seq, matrix, pos,
font, fontsize, scaling, charspace, wordspace, rise, dxscale):
(x, y) = pos
needcharspace = False
for obj in seq:
if isnumber(obj):
y -= obj*dxscale
needcharspace = True
else:
for cid in font.decode(obj):
if needcharspace:
y += charspace
y += self.render_char(translate_matrix(matrix, (x, y)),
font, fontsize, scaling, rise, cid)
if cid == 32 and wordspace:
y += wordspace
needcharspace = True
return (x, y)
def render_char(self, matrix, font, fontsize, scaling, rise, cid):
return 0
## TagExtractor
##
| PDFTextDevice |
python | spyder-ide__spyder | spyder/plugins/console/utils/interpreter.py | {
"start": 1074,
"end": 11978
} | class ____(InteractiveConsole, threading.Thread):
"""Interpreter, executed in a separate thread"""
p1 = ">>> "
p2 = "... "
def __init__(self, namespace=None, exitfunc=None,
Output=None, WidgetProxy=None, debug=False):
"""
        namespace: locals sent to the InteractiveConsole object
commands: list of commands executed at startup
"""
InteractiveConsole.__init__(self, namespace)
threading.Thread.__init__(self)
self._id = None
self.exit_flag = False
self.debug = debug
# Execution Status
self.more = False
if exitfunc is not None:
atexit.register(exitfunc)
self.namespace = self.locals
self.namespace['__name__'] = '__main__'
self.namespace['execfile'] = self.execfile
self.namespace['runfile'] = self.runfile
self.namespace['raw_input'] = self.raw_input_replacement
self.namespace['help'] = self.help_replacement
# Capture all interactive input/output
self.initial_stdout = sys.stdout
self.initial_stderr = sys.stderr
self.initial_stdin = sys.stdin
# Create communication pipes
pr, pw = os.pipe()
self.stdin_read = os.fdopen(pr, "r")
self.stdin_write = os.fdopen(pw, "wb", 0)
self.stdout_write = Output()
self.stderr_write = Output()
self.input_condition = threading.Condition()
self.widget_proxy = WidgetProxy(self.input_condition)
self.redirect_stds()
#------ Standard input/output
def redirect_stds(self):
"""Redirects stds"""
if not self.debug:
sys.stdout = self.stdout_write
sys.stderr = self.stderr_write
sys.stdin = self.stdin_read
def restore_stds(self):
"""Restore stds"""
if not self.debug:
sys.stdout = self.initial_stdout
sys.stderr = self.initial_stderr
sys.stdin = self.initial_stdin
def raw_input_replacement(self, prompt=''):
"""For raw_input builtin function emulation"""
self.widget_proxy.wait_input(prompt)
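        # Block on the shared condition variable until the console widget thread
        # publishes the user's input through the proxy.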
self.input_condition.acquire()
while not self.widget_proxy.data_available():
self.input_condition.wait()
inp = self.widget_proxy.input_data
self.input_condition.release()
return inp
def help_replacement(self, text=None, interactive=False):
"""For help builtin function emulation"""
if text is not None and not interactive:
return pydoc.help(text)
elif text is None:
pyver = "%d.%d" % (sys.version_info[0], sys.version_info[1])
self.write("""
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at https://www.python.org/about/gettingstarted/
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
""" % pyver)
else:
text = text.strip()
try:
eval("pydoc.help(%s)" % text)
except (NameError, SyntaxError):
print("no Python documentation found for '%r'" % text) # spyder: test-skip
self.write(os.linesep)
self.widget_proxy.new_prompt("help> ")
inp = self.raw_input_replacement()
if inp.strip():
self.help_replacement(inp, interactive=True)
else:
self.write("""
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
""")
def run_command(self, cmd, new_prompt=True):
"""Run command in interpreter"""
if cmd == 'exit()':
self.exit_flag = True
self.write('\n')
return
# -- Special commands type I
# (transformed into commands executed in the interpreter)
# ? command
special_pattern = r"^%s (?:r\')?(?:u\')?\"?\'?([a-zA-Z0-9_\.]+)"
run_match = re.match(special_pattern % 'run', cmd)
help_match = re.match(r'^([a-zA-Z0-9_\.]+)\?$', cmd)
cd_match = re.match(r"^\!cd \"?\'?([a-zA-Z0-9_ \.]+)", cmd)
if help_match:
cmd = 'help(%s)' % help_match.group(1)
# run command
elif run_match:
filename = guess_filename(run_match.groups()[0])
cmd = "%runfile " + repr(remove_backslashes(filename))
# !cd system command
elif cd_match:
cmd = 'import os; os.chdir(r"%s")' % cd_match.groups()[0].strip()
# -- End of Special commands type I
# -- Special commands type II
# (don't need code execution in interpreter)
xedit_match = re.match(special_pattern % 'xedit', cmd)
edit_match = re.match(special_pattern % 'edit', cmd)
clear_match = re.match(r"^clear ([a-zA-Z0-9_, ]+)", cmd)
# (external) edit command
if xedit_match:
filename = guess_filename(xedit_match.groups()[0])
self.widget_proxy.edit(filename, external_editor=True)
# local edit command
elif edit_match:
filename = guess_filename(edit_match.groups()[0])
if osp.isfile(filename):
self.widget_proxy.edit(filename)
else:
self.stderr_write.write(
"No such file or directory: %s\n" % filename)
# remove reference (equivalent to MATLAB's clear command)
elif clear_match:
varnames = clear_match.groups()[0].replace(' ', '').split(',')
for varname in varnames:
try:
self.namespace.pop(varname)
except KeyError:
pass
# Execute command
elif cmd.startswith('!'):
# System ! command
pipe = programs.run_shell_command(cmd[1:])
txt_out = encoding.transcode( pipe.stdout.read().decode() )
txt_err = encoding.transcode( pipe.stderr.read().decode().rstrip() )
if txt_err:
self.stderr_write.write(txt_err)
if txt_out:
self.stdout_write.write(txt_out)
self.stdout_write.write('\n')
self.more = False
# -- End of Special commands type II
else:
# Command executed in the interpreter
# self.widget_proxy.set_readonly(True)
self.more = self.push(cmd)
# self.widget_proxy.set_readonly(False)
if new_prompt:
self.widget_proxy.new_prompt(self.p2 if self.more else self.p1)
if not self.more:
self.resetbuffer()
def run(self):
"""Wait for input and run it"""
while not self.exit_flag:
self.run_line()
def run_line(self):
line = self.stdin_read.readline()
if self.exit_flag:
return
# Remove last character which is always '\n':
self.run_command(line[:-1])
def get_thread_id(self):
"""Return thread id"""
if self._id is None:
for thread_id, obj in list(threading._active.items()):
if obj is self:
self._id = thread_id
return self._id
def raise_keyboard_interrupt(self):
if self.isAlive():
ctypes.pythonapi.PyThreadState_SetAsyncExc(self.get_thread_id(),
ctypes.py_object(KeyboardInterrupt))
return True
else:
return False
def closing(self):
"""Actions to be done before restarting this interpreter"""
pass
def execfile(self, filename):
"""Exec filename"""
source = open(filename, 'r').read()
try:
try:
name = filename.encode('ascii')
except UnicodeEncodeError:
name = '<executed_script>'
code = compile(source, name, "exec")
except (OverflowError, SyntaxError):
InteractiveConsole.showsyntaxerror(self, filename)
else:
self.runcode(code)
def runfile(self, filename, args=None):
"""
Run filename
args: command line arguments (string)
"""
if args is not None and not isinstance(args, str):
raise TypeError("expected a character buffer object")
self.namespace['__file__'] = filename
sys.argv = [filename]
if args is not None:
for arg in args.split():
sys.argv.append(arg)
self.execfile(filename)
sys.argv = ['']
self.namespace.pop('__file__')
def eval(self, text):
"""
Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception
"""
assert isinstance(text, str)
try:
return eval(text, self.locals), True
except:
return None, False
def is_defined(self, objtxt, force_import=False):
"""Return True if object is defined"""
return isdefined(objtxt, force_import=force_import,
namespace=self.locals)
#===========================================================================
# InteractiveConsole API
#===========================================================================
def push(self, line):
"""
Push a line of source text to the interpreter
The line should not have a trailing newline; it may have internal
newlines. The line is appended to a buffer and the interpreter’s
runsource() method is called with the concatenated contents of the
buffer as source. If this indicates that the command was executed
or invalid, the buffer is reset; otherwise, the command is incomplete,
and the buffer is left as it was after the line was appended.
The return value is True if more input is required, False if the line
was dealt with in some way (this is the same as runsource()).
"""
return InteractiveConsole.push(self, "#coding=utf-8\n" + line)
def resetbuffer(self):
"""Remove any unhandled source text from the input buffer"""
InteractiveConsole.resetbuffer(self)
| Interpreter |
python | huggingface__transformers | src/transformers/models/chinese_clip/modeling_chinese_clip.py | {
"start": 20293,
"end": 21715
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = ChineseCLIPTextAttention(config)
self.intermediate = ChineseCLIPTextIntermediate(config)
self.output = ChineseCLIPTextOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
**kwargs,
) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
**kwargs,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
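        # Apply the feed-forward block in chunks along the sequence dimension to
        # bound peak activation memory when chunk_size_feed_forward > 0.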
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
| ChineseCLIPTextLayer |
python | huggingface__transformers | src/transformers/data/processors/squad.py | {
"start": 23045,
"end": 23153
} | class ____(SquadProcessor):
train_file = "train-v2.0.json"
dev_file = "dev-v2.0.json"
| SquadV2Processor |
python | Textualize__textual | src/textual/fuzzy.py | {
"start": 404,
"end": 4794
} | class ____:
"""Performs a fuzzy search.
    Unlike a regex solution, this will find all possible matches.
"""
def __init__(
self, case_sensitive: bool = False, *, cache_size: int = 1024 * 4
) -> None:
"""Initialize fuzzy search.
Args:
case_sensitive: Is the match case sensitive?
cache_size: Number of queries to cache.
"""
self.case_sensitive = case_sensitive
self.cache: LRUCache[tuple[str, str], tuple[float, Sequence[int]]] = LRUCache(
cache_size
)
def match(self, query: str, candidate: str) -> tuple[float, Sequence[int]]:
"""Match against a query.
Args:
query: The fuzzy query.
            candidate: A candidate to check.
Returns:
A pair of (score, tuple of offsets). `(0, ())` for no result.
"""
cache_key = (query, candidate)
if cache_key in self.cache:
return self.cache[cache_key]
default: tuple[float, Sequence[int]] = (0.0, [])
result = max(self._match(query, candidate), key=itemgetter(0), default=default)
self.cache[cache_key] = result
return result
@classmethod
@lru_cache(maxsize=1024)
def get_first_letters(cls, candidate: str) -> frozenset[int]:
return frozenset({match.start() for match in finditer(r"\w+", candidate)})
def score(self, candidate: str, positions: Sequence[int]) -> float:
"""Score a search.
        Args:
            candidate: The candidate string being scored.
            positions: Offsets of the matched characters within the candidate.
Returns:
Score.
"""
first_letters = self.get_first_letters(candidate)
# This is a heuristic, and can be tweaked for better results
# Boost first letter matches
offset_count = len(positions)
score: float = offset_count + len(first_letters.intersection(positions))
groups = 1
last_offset, *offsets = positions
for offset in offsets:
if offset != last_offset + 1:
groups += 1
last_offset = offset
# Boost to favor less groups
normalized_groups = (offset_count - (groups - 1)) / offset_count
score *= 1 + (normalized_groups * normalized_groups)
return score
def _match(
self, query: str, candidate: str
) -> Iterable[tuple[float, Sequence[int]]]:
letter_positions: list[list[int]] = []
position = 0
if not self.case_sensitive:
candidate = candidate.lower()
query = query.lower()
score = self.score
if query in candidate:
# Quick exit when the query exists as a substring
query_location = candidate.rfind(query)
offsets = list(range(query_location, query_location + len(query)))
yield (
score(candidate, offsets) * (2.0 if candidate == query else 1.5),
offsets,
)
return
for offset, letter in enumerate(query):
last_index = len(candidate) - offset
positions: list[int] = []
letter_positions.append(positions)
index = position
while (location := candidate.find(letter, index)) != -1:
positions.append(location)
index = location + 1
if index >= last_index:
break
if not positions:
yield (0.0, ())
return
position = positions[0] + 1
possible_offsets: list[list[int]] = []
query_length = len(query)
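        # Recursively enumerate every strictly increasing combination of the
        # collected positions; each complete combination is scored below.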
def get_offsets(offsets: list[int], positions_index: int) -> None:
"""Recursively match offsets.
Args:
offsets: A list of offsets.
positions_index: Index of query letter.
"""
for offset in letter_positions[positions_index]:
if not offsets or offset > offsets[-1]:
new_offsets = [*offsets, offset]
if len(new_offsets) == query_length:
possible_offsets.append(new_offsets)
else:
get_offsets(new_offsets, positions_index + 1)
get_offsets([], 0)
for offsets in possible_offsets:
yield score(candidate, offsets), offsets
@rich.repr.auto
| FuzzySearch |
python | pypa__twine | twine/utils.py | {
"start": 12200,
"end": 12981
} | class ____(argparse.Action):
"""Set boolean flag from environment variable."""
def __init__(self, env: str, **kwargs: Any) -> None:
default = self.bool_from_env(os.environ.get(env))
self.env = env
super().__init__(default=default, nargs=0, **kwargs)
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: Union[str, Sequence[Any], None],
option_string: Optional[str] = None,
) -> None:
setattr(namespace, self.dest, True)
@staticmethod
def bool_from_env(val: Optional[str]) -> bool:
"""Allow '0' and 'false' and 'no' to be False."""
falsey = {"0", "false", "no"}
return bool(val and val.lower() not in falsey)
| EnvironmentFlag |
python | pytorch__pytorch | torch/_inductor/runtime/hints.py | {
"start": 607,
"end": 699
} | class ____(Enum):
INNER = 0
OUTER = 1
OUTER_TINY = 2
DEFAULT = 3
| ReductionHint |
python | ansible__ansible | lib/ansible/module_utils/facts/system/apparmor.py | {
"start": 826,
"end": 1297
} | class ____(BaseFactCollector):
name = 'apparmor'
_fact_ids = set() # type: t.Set[str]
def collect(self, module=None, collected_facts=None):
facts_dict = {}
apparmor_facts = {}
if os.path.exists('/sys/kernel/security/apparmor'):
apparmor_facts['status'] = 'enabled'
else:
apparmor_facts['status'] = 'disabled'
facts_dict['apparmor'] = apparmor_facts
return facts_dict
| ApparmorFactCollector |
python | huggingface__transformers | src/transformers/models/qwen3_vl_moe/configuration_qwen3_vl_moe.py | {
"start": 9743,
"end": 10947
} | class ____(PreTrainedConfig):
model_type = "qwen3_vl_moe"
base_config_key = "vision_config"
def __init__(
self,
depth=27,
hidden_size=1152,
hidden_act="gelu_pytorch_tanh",
intermediate_size=4304,
num_heads=16,
in_channels=3,
patch_size=16,
spatial_merge_size=2,
temporal_patch_size=2,
out_hidden_size=3584,
num_position_embeddings=2304,
deepstack_visual_indexes=[8, 16, 24],
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.depth = depth
self.hidden_size = hidden_size
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.num_heads = num_heads
self.in_channels = in_channels
self.patch_size = patch_size
self.spatial_merge_size = spatial_merge_size
self.temporal_patch_size = temporal_patch_size
self.out_hidden_size = out_hidden_size
self.num_position_embeddings = num_position_embeddings
self.initializer_range = initializer_range
self.deepstack_visual_indexes = deepstack_visual_indexes
| Qwen3VLMoeVisionConfig |
python | pypa__setuptools | setuptools/tests/test_core_metadata.py | {
"start": 9352,
"end": 13438
} | class ____:
def base_example(self):
attrs = dict(
**EXAMPLE_BASE_INFO,
# Example with complex requirement definition
python_requires=">=3.8",
install_requires="""
packaging==23.2
more-itertools==8.8.0; extra == "other"
jaraco.text==3.7.0
importlib-resources==5.10.2; python_version<"3.8"
importlib-metadata==6.0.0 ; python_version<"3.8"
colorama>=0.4.4; sys_platform == "win32"
""",
extras_require={
"testing": """
pytest >= 6
pytest-checkdocs >= 2.4
tomli ; \\
# Using stdlib when possible
python_version < "3.11"
ini2toml[lite]>=0.9
""",
"other": [],
},
)
# Generate a PKG-INFO file using setuptools
return Distribution(attrs)
def test_requires_dist(self, tmp_path):
dist = self.base_example()
pkg_info = _get_pkginfo(dist)
assert _valid_metadata(pkg_info)
# Ensure Requires-Dist is present
expected = [
'Metadata-Version:',
'Requires-Python: >=3.8',
'Provides-Extra: other',
'Provides-Extra: testing',
'Requires-Dist: tomli; python_version < "3.11" and extra == "testing"',
'Requires-Dist: more-itertools==8.8.0; extra == "other"',
'Requires-Dist: ini2toml[lite]>=0.9; extra == "testing"',
]
for line in expected:
assert line in pkg_info
HERE = Path(__file__).parent
EXAMPLES_FILE = HERE / "config/setupcfg_examples.txt"
@pytest.fixture(params=[None, *urls_from_file(EXAMPLES_FILE)])
def dist(self, request, monkeypatch, tmp_path):
"""Example of distribution with arbitrary configuration"""
monkeypatch.chdir(tmp_path)
monkeypatch.setattr(expand, "read_attr", Mock(return_value="0.42"))
monkeypatch.setattr(expand, "read_files", Mock(return_value="hello world"))
monkeypatch.setattr(
Distribution, "_finalize_license_files", Mock(return_value=None)
)
if request.param is None:
yield self.base_example()
else:
# Real-world usage
config = retrieve_file(request.param)
yield setupcfg.apply_configuration(Distribution({}), config)
@pytest.mark.uses_network
def test_equivalent_output(self, tmp_path, dist):
"""Ensure output from setuptools is equivalent to the one from `pypa/wheel`"""
# Generate a METADATA file using pypa/wheel for comparison
wheel_metadata = importlib.import_module("wheel.metadata")
pkginfo_to_metadata = getattr(wheel_metadata, "pkginfo_to_metadata", None)
if pkginfo_to_metadata is None: # pragma: nocover
pytest.xfail(
"wheel.metadata.pkginfo_to_metadata is undefined, "
"(this is likely to be caused by API changes in pypa/wheel"
)
# Generate an simplified "egg-info" dir for pypa/wheel to convert
pkg_info = _get_pkginfo(dist)
egg_info_dir = tmp_path / "pkg.egg-info"
egg_info_dir.mkdir(parents=True)
(egg_info_dir / "PKG-INFO").write_text(pkg_info, encoding="utf-8")
write_requirements(egg_info(dist), egg_info_dir, egg_info_dir / "requires.txt")
# Get pypa/wheel generated METADATA but normalize requirements formatting
metadata_msg = pkginfo_to_metadata(egg_info_dir, egg_info_dir / "PKG-INFO")
metadata_str = _normalize_metadata(metadata_msg)
pkg_info_msg = message_from_string(pkg_info)
pkg_info_str = _normalize_metadata(pkg_info_msg)
# Compare setuptools PKG-INFO x pypa/wheel METADATA
assert metadata_str == pkg_info_str
# Make sure it parses/serializes well in pypa/wheel
_assert_roundtrip_message(pkg_info)
| TestParityWithMetadataFromPyPaWheel |
python | readthedocs__readthedocs.org | readthedocs/core/mixins.py | {
"start": 2883,
"end": 3396
} | class ____(DeleteView):
"""Delete view that shows a message after queuing an object for deletion."""
success_message = None
def post(self, request, *args, **kwargs):
self.object = self.get_object()
delete_object.delay(
model_name=self.object._meta.label,
pk=self.object.pk,
user_id=request.user.pk,
)
messages.success(request, self.success_message)
return HttpResponseRedirect(self.get_success_url())
| AsyncDeleteViewWithMessage |
python | getsentry__sentry | src/sentry/snuba/trace.py | {
"start": 1715,
"end": 1873
} | class ____(TypedDict):
description: str
event_id: str
event_type: str
project_id: int
project_slug: str
transaction: str
| SerializedEvent |
python | joke2k__faker | faker/providers/phone_number/fr_FR/__init__.py | {
"start": 49,
"end": 4872
} | class ____(PhoneNumberProvider):
formats = (
"+33 (0){{area_code_with_separator}} ## ## ##",
"+33 {{area_code_with_separator}} ## ## ##",
"0{{area_code_without_separator}}######",
"0{{area_code_with_separator}} ## ## ##",
)
# https://fr.wikipedia.org/wiki/Liste_des_indicatifs_t%C3%A9l%C3%A9phoniques_en_France#Liste_des_indicatifs_d%C3%A9partementaux_fran%C3%A7ais_class%C3%A9s_par_indicatif
area_codes = (
# landlines
"130",
"134",
"139",
"140",
"141",
"142",
"143",
"144",
"145",
"146",
"147",
"148",
"149",
"153",
"155",
"156",
"158",
"160",
"164",
"169",
"170",
"172",
"173",
"174",
"175",
"176",
"177",
"178",
"179",
"180",
"181",
"182",
"183",
"188",
"214",
"218",
"219",
"221",
"222",
"223",
"228",
"229",
"230",
"231",
"232",
"233",
"234",
"235",
"236",
"237",
"238",
"240",
"241",
"243",
"244",
"245",
"246",
"247",
"248",
"249",
"250",
"251",
"252",
"253",
"254",
"255",
"256",
"257",
"258",
"261",
"262",
"269",
"272",
"276",
"277",
"278",
"279",
"285",
"290",
"296",
"297",
"298",
"299",
"310",
"320",
"321",
"322",
"323",
"324",
"325",
"326",
"327",
"328",
"329",
"339",
"344",
"345",
"351",
"352",
"353",
"354",
"355",
"356",
"357",
"358",
"359",
"360",
"361",
"362",
"363",
"364",
"365",
"366",
"367",
"368",
"369",
"370",
"371",
"372",
"373",
"375",
"376",
"379",
"380",
"381",
"382",
"383",
"384",
"385",
"386",
"387",
"388",
"389",
"390",
"411",
"413",
"415",
"420",
"422",
"423",
"426",
"427",
"430",
"432",
"434",
"437",
"438",
"442",
"443",
"444",
"449",
"450",
"456",
"457",
"458",
"463",
"465",
"466",
"467",
"468",
"469",
"470",
"471",
"472",
"473",
"474",
"475",
"476",
"477",
"478",
"479",
"480",
"481",
"482",
"483",
"484",
"485",
"486",
"487",
"488",
"489",
"490",
"491",
"492",
"493",
"494",
"495",
"497",
"498",
"499",
"508",
"516",
"517",
"518",
"519",
"524",
"531",
"532",
"533",
"534",
"535",
"536",
"540",
"545",
"546",
"547",
"549",
"553",
"554",
"555",
"556",
"557",
"558",
"559",
"561",
"562",
"563",
"564",
"565",
"567",
"579",
"581",
"582",
"586",
"587",
"590",
"594",
"596",
# mobile numbers
"60#",
"61#",
"62#",
"630",
"631",
"632",
"633",
"634",
"635",
"636",
"637",
"638",
"64#",
"65#",
"66#",
"67#",
"68#",
"695",
"698",
"699",
"73#",
"74#",
"75#",
"76#",
"77#",
"78#",
"79#",
# special numbers
"80#",
)
def area_code_without_separator(self) -> str:
return self.numerify(self.random_element(self.area_codes))
def area_code_with_separator(self) -> str:
area_code: str = self.random_element(self.area_codes)
return self.numerify(f"{area_code[0]} {area_code[1:]}")
def phone_number(self) -> str:
pattern: str = self.random_element(self.formats)
return self.numerify(self.generator.parse(pattern))
| Provider |
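A minimal usage sketch for the fr_FR provider above, assuming the `faker` package is installed and exposes this locale through its usual `Faker(locale)` entry point; the printed number is only illustrative, since the output is random.
from faker import Faker

fake = Faker("fr_FR")  # backed by the fr_FR Provider shown above

# Each call picks one of the `formats` patterns, fills in an area code via
# {{area_code_with_separator}} / {{area_code_without_separator}}, then
# numerify() replaces every remaining '#' with a random digit.
print(fake.phone_number())  # e.g. "+33 (0)1 40 12 34 56"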
python | scipy__scipy | scipy/stats/_multivariate.py | {
"start": 144653,
"end": 148565
} | class ____(multi_rv_generic):
r"""An Orthogonal matrix (O(N)) random variable.
Return a random orthogonal matrix, drawn from the O(N) Haar
distribution (the only uniform distribution on O(N)).
The `dim` keyword specifies the dimension N.
Methods
-------
rvs(dim=None, size=1, random_state=None)
Draw random samples from O(N).
Parameters
----------
dim : scalar
Dimension of matrices
seed : {None, int, np.random.RandomState, np.random.Generator}, optional
Used for drawing random variates.
If `seed` is `None`, the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is `None`.
Notes
-----
This class is closely related to `special_ortho_group`.
Some care is taken to avoid numerical error, as per the paper by Mezzadri.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import ortho_group
>>> x = ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> np.fabs(scipy.linalg.det(x))
1.0
This generates one random matrix from O(3). It is orthogonal and
has a determinant of +1 or -1.
Alternatively, the object may be called (as a function) to fix the `dim`
parameter, returning a "frozen" ortho_group random variable:
>>> rv = ortho_group(5)
>>> # Frozen object with the same methods but holding the
>>> # dimension parameter fixed.
See Also
--------
special_ortho_group
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""Create a frozen O(N) distribution.
See `ortho_group_frozen` for more information.
"""
return ortho_group_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim < 0 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar nonnegative integer.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from O(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
random_state = self._get_random_state(random_state)
size = int(size)
dim = self._process_parameters(dim)
size = (size,) if size > 1 else ()
z = random_state.normal(size=size + (dim, dim))
q, r = np.linalg.qr(z)
# The last two dimensions are the rows and columns of R matrices.
# Extract the diagonals. Note that this eliminates a dimension.
d = r.diagonal(offset=0, axis1=-2, axis2=-1)
# Add back a dimension for proper broadcasting: we're dividing
# each row of each R matrix by the diagonal of the R matrix.
q *= (d/abs(d))[..., np.newaxis, :] # to broadcast properly
return q
ortho_group = ortho_group_gen()
| ortho_group_gen |
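A NumPy-only sketch of the sign-correction step that `rvs` above applies after the QR factorization (per the Mezzadri reference); the seed and dimension are arbitrary choices for illustration.
import numpy as np

rng = np.random.default_rng(0)
dim = 4
z = rng.normal(size=(dim, dim))
q, r = np.linalg.qr(z)

# A plain QR of a Gaussian matrix is not Haar-uniform until each column of Q
# is rescaled by the sign of the corresponding diagonal entry of R.
d = np.diagonal(r)
q *= d / np.abs(d)

assert np.allclose(q @ q.T, np.eye(dim))  # the rescaled Q is still orthogonal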
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver2.py | {
"start": 249,
"end": 472
} | class ____(Iterator[_T], Protocol):
pass
def decorator1(func: Callable[..., Iterator[_T]]) -> Callable[..., ProtoA[_T]]: ...
@decorator1
def func1() -> Iterator[str]:
yield ""
a = func1()
b: ProtoA[str] = a
| ProtoA |
python | faif__python-patterns | patterns/structural/bridge.py | {
"start": 216,
"end": 386
} | class ____:
def draw_circle(self, x: int, y: int, radius: float) -> None:
print(f"API1.circle at {x}:{y} radius {radius}")
# ConcreteImplementor 2/2
| DrawingAPI1 |
python | apache__airflow | providers/google/tests/unit/google/cloud/links/test_alloy_db.py | {
"start": 1735,
"end": 2048
} | class ____:
def test_class_attributes(self):
assert AlloyDBClusterLink.key == EXPECTED_ALLOY_DB_CLUSTER_LINK_KEY
assert AlloyDBClusterLink.name == EXPECTED_ALLOY_DB_CLUSTER_LINK_NAME
assert AlloyDBClusterLink.format_str == EXPECTED_ALLOY_DB_CLUSTER_LINK_FORMAT_STR
| TestAlloyDBClusterLink |
python | kubernetes-client__python | kubernetes/base/config/kube_config.py | {
"start": 7009,
"end": 23659
} | class ____(object):
def __init__(self, config_dict, active_context=None,
get_google_credentials=None,
config_base_path="",
config_persister=None,
temp_file_path=None):
if config_dict is None:
raise ConfigException(
'Invalid kube-config. '
'Expected config_dict to not be None.')
elif isinstance(config_dict, ConfigNode):
self._config = config_dict
else:
self._config = ConfigNode('kube-config', config_dict)
self._current_context = None
self._user = None
self._cluster = None
self.set_active_context(active_context)
self._config_base_path = config_base_path
self._config_persister = config_persister
self._temp_file_path = temp_file_path
def _refresh_credentials_with_cmd_path():
config = self._user['auth-provider']['config']
cmd = config['cmd-path']
if len(cmd) == 0:
raise ConfigException(
'missing access token cmd '
'(cmd-path is an empty string in your kubeconfig file)')
if 'scopes' in config and config['scopes'] != "":
raise ConfigException(
'scopes can only be used '
'when kubectl is using a gcp service account key')
args = []
if 'cmd-args' in config:
args = config['cmd-args'].split()
else:
fields = config['cmd-path'].split()
cmd = fields[0]
args = fields[1:]
commandTokenSource = CommandTokenSource(
cmd, args,
config.safe_get('token-key'),
config.safe_get('expiry-key'))
return commandTokenSource.token()
def _refresh_credentials():
# Refresh credentials using cmd-path
if ('auth-provider' in self._user and
'config' in self._user['auth-provider'] and
'cmd-path' in self._user['auth-provider']['config']):
return _refresh_credentials_with_cmd_path()
# Make the Google auth block optional.
if google_auth_available:
credentials, project_id = google.auth.default(scopes=[
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/userinfo.email'
])
request = google.auth.transport.requests.Request()
credentials.refresh(request)
return credentials
else:
return None
if get_google_credentials:
self._get_google_credentials = get_google_credentials
else:
self._get_google_credentials = _refresh_credentials
def set_active_context(self, context_name=None):
if context_name is None:
context_name = self._config['current-context']
self._current_context = self._config['contexts'].get_with_name(
context_name)
if (self._current_context['context'].safe_get('user') and
self._config.safe_get('users')):
user = self._config['users'].get_with_name(
self._current_context['context']['user'], safe=True)
if user:
self._user = user['user']
else:
self._user = None
else:
self._user = None
self._cluster = self._config['clusters'].get_with_name(
self._current_context['context']['cluster'])['cluster']
def _load_authentication(self):
"""Read authentication from kube-config user section if exists.
This function goes through various authentication methods in user
section of kube-config and stops if it finds a valid authentication
method. The order of authentication methods is:
1. auth-provider (gcp, azure, oidc)
2. token field (point to a token file)
3. exec provided plugin
4. username/password
"""
if not self._user:
return
if self._load_auth_provider_token():
return
if self._load_user_token():
return
if self._load_from_exec_plugin():
return
self._load_user_pass_token()
def _load_auth_provider_token(self):
if 'auth-provider' not in self._user:
return
provider = self._user['auth-provider']
if 'name' not in provider:
return
if provider['name'] == 'gcp':
return self._load_gcp_token(provider)
if provider['name'] == 'azure':
return self._load_azure_token(provider)
if provider['name'] == 'oidc':
return self._load_oid_token(provider)
def _azure_is_expired(self, provider):
expires_on = provider['config']['expires-on']
if expires_on.isdigit():
return int(expires_on) < time.time()
else:
exp_time = time.strptime(expires_on, '%Y-%m-%d %H:%M:%S.%f')
return exp_time < time.gmtime()
def _load_azure_token(self, provider):
if 'config' not in provider:
return
if 'access-token' not in provider['config']:
return
if 'expires-on' in provider['config']:
if self._azure_is_expired(provider):
self._refresh_azure_token(provider['config'])
self.token = 'Bearer %s' % provider['config']['access-token']
return self.token
def _refresh_azure_token(self, config):
if 'adal' not in globals():
raise ImportError('refresh token error, adal library not imported')
tenant = config['tenant-id']
authority = 'https://login.microsoftonline.com/{}'.format(tenant)
context = adal.AuthenticationContext(
authority, validate_authority=True, api_version='1.0'
)
refresh_token = config['refresh-token']
client_id = config['client-id']
apiserver_id = '00000002-0000-0000-c000-000000000000'
try:
apiserver_id = config['apiserver-id']
except ConfigException:
# We've already set a default above
pass
token_response = context.acquire_token_with_refresh_token(
refresh_token, client_id, apiserver_id)
provider = self._user['auth-provider']['config']
provider.value['access-token'] = token_response['accessToken']
provider.value['expires-on'] = token_response['expiresOn']
if self._config_persister:
self._config_persister()
def _load_gcp_token(self, provider):
if (('config' not in provider) or
('access-token' not in provider['config']) or
('expiry' in provider['config'] and
_is_expired(provider['config']['expiry']))):
# token is not available or expired, refresh it
self._refresh_gcp_token()
self.token = "Bearer %s" % provider['config']['access-token']
if 'expiry' in provider['config']:
self.expiry = parse_rfc3339(provider['config']['expiry'])
return self.token
def _refresh_gcp_token(self):
if 'config' not in self._user['auth-provider']:
self._user['auth-provider'].value['config'] = {}
provider = self._user['auth-provider']['config']
credentials = self._get_google_credentials()
provider.value['access-token'] = credentials.token
provider.value['expiry'] = format_rfc3339(credentials.expiry)
if self._config_persister:
self._config_persister()
def _load_oid_token(self, provider):
if 'config' not in provider:
return
reserved_characters = frozenset(["=", "+", "/"])
token = provider['config']['id-token']
if any(char in token for char in reserved_characters):
# Invalid jwt, as it contains url-unsafe chars
return
parts = token.split('.')
if len(parts) != 3: # Not a valid JWT
return
padding = (4 - len(parts[1]) % 4) * '='
if len(padding) == 3:
# According to spec, 3 padding characters cannot occur
# in a valid jwt
# https://tools.ietf.org/html/rfc7515#appendix-C
return
if PY3:
jwt_attributes = json.loads(
base64.urlsafe_b64decode(parts[1] + padding).decode('utf-8')
)
else:
jwt_attributes = json.loads(
base64.b64decode(parts[1] + padding)
)
expire = jwt_attributes.get('exp')
if ((expire is not None) and
(_is_expired(datetime.datetime.fromtimestamp(expire,
tz=UTC)))):
self._refresh_oidc(provider)
if self._config_persister:
self._config_persister()
self.token = "Bearer %s" % provider['config']['id-token']
return self.token
def _refresh_oidc(self, provider):
config = Configuration()
if 'idp-certificate-authority-data' in provider['config']:
ca_cert = tempfile.NamedTemporaryFile(delete=True)
if PY3:
cert = base64.b64decode(
provider['config']['idp-certificate-authority-data']
).decode('utf-8')
else:
cert = base64.b64decode(
provider['config']['idp-certificate-authority-data'] + "=="
)
with open(ca_cert.name, 'w') as fh:
fh.write(cert)
config.ssl_ca_cert = ca_cert.name
elif 'idp-certificate-authority' in provider['config']:
config.ssl_ca_cert = provider['config']['idp-certificate-authority']
else:
config.verify_ssl = False
client = ApiClient(configuration=config)
response = client.request(
method="GET",
url="%s/.well-known/openid-configuration"
% provider['config']['idp-issuer-url']
)
if response.status != 200:
return
response = json.loads(response.data)
request = OAuth2Session(
client_id=provider['config']['client-id'],
token=provider['config']['refresh-token'],
auto_refresh_kwargs={
'client_id': provider['config']['client-id'],
'client_secret': provider['config']['client-secret']
},
auto_refresh_url=response['token_endpoint']
)
try:
refresh = request.refresh_token(
token_url=response['token_endpoint'],
refresh_token=provider['config']['refresh-token'],
auth=(provider['config']['client-id'],
provider['config']['client-secret']),
verify=config.ssl_ca_cert if config.verify_ssl else None
)
except oauthlib.oauth2.rfc6749.errors.InvalidClientIdError:
return
provider['config'].value['id-token'] = refresh['id_token']
provider['config'].value['refresh-token'] = refresh['refresh_token']
def _load_from_exec_plugin(self):
if 'exec' not in self._user:
return
try:
base_path = self._get_base_path(self._cluster.path)
status = ExecProvider(self._user['exec'], base_path, self._cluster).run()
if 'token' in status:
self.token = "Bearer %s" % status['token']
elif 'clientCertificateData' in status:
# https://kubernetes.io/docs/reference/access-authn-authz/authentication/#input-and-output-formats
# Plugin has provided certificates instead of a token.
if 'clientKeyData' not in status:
logging.error('exec: missing clientKeyData field in '
'plugin output')
return None
self.cert_file = FileOrData(
status, None,
data_key_name='clientCertificateData',
file_base_path=base_path,
base64_file_content=False,
temp_file_path=self._temp_file_path).as_file()
self.key_file = FileOrData(
status, None,
data_key_name='clientKeyData',
file_base_path=base_path,
base64_file_content=False,
temp_file_path=self._temp_file_path).as_file()
else:
logging.error('exec: missing token or clientCertificateData '
'field in plugin output')
return None
if 'expirationTimestamp' in status:
self.expiry = parse_rfc3339(status['expirationTimestamp'])
return True
except Exception as e:
logging.error(str(e))
def _load_user_token(self):
base_path = self._get_base_path(self._user.path)
token = FileOrData(
self._user, 'tokenFile', 'token',
file_base_path=base_path,
base64_file_content=False,
temp_file_path=self._temp_file_path).as_data()
if token:
self.token = "Bearer %s" % token
return True
def _load_user_pass_token(self):
if 'username' in self._user and 'password' in self._user:
self.token = urllib3.util.make_headers(
basic_auth=(self._user['username'] + ':' +
self._user['password'])).get('authorization')
return True
def _get_base_path(self, config_path):
if self._config_base_path is not None:
return self._config_base_path
if config_path is not None:
return os.path.abspath(os.path.dirname(config_path))
return ""
def _load_cluster_info(self):
if 'server' in self._cluster:
self.host = self._cluster['server'].rstrip('/')
if self.host.startswith("https"):
base_path = self._get_base_path(self._cluster.path)
self.ssl_ca_cert = FileOrData(
self._cluster, 'certificate-authority',
file_base_path=base_path,
temp_file_path=self._temp_file_path).as_file()
if 'cert_file' not in self.__dict__:
# cert_file could have been provided by
# _load_from_exec_plugin; only load from the _user
# section if we need it.
self.cert_file = FileOrData(
self._user, 'client-certificate',
file_base_path=base_path,
temp_file_path=self._temp_file_path).as_file()
self.key_file = FileOrData(
self._user, 'client-key',
file_base_path=base_path,
temp_file_path=self._temp_file_path).as_file()
if 'insecure-skip-tls-verify' in self._cluster:
self.verify_ssl = not self._cluster['insecure-skip-tls-verify']
if 'tls-server-name' in self._cluster:
self.tls_server_name = self._cluster['tls-server-name']
def _set_config(self, client_configuration):
if 'token' in self.__dict__:
client_configuration.api_key['authorization'] = self.token
def _refresh_api_key(client_configuration):
if ('expiry' in self.__dict__ and _is_expired(self.expiry)):
self._load_authentication()
self._set_config(client_configuration)
client_configuration.refresh_api_key_hook = _refresh_api_key
# copy these keys directly from self to configuration object
keys = ['host', 'ssl_ca_cert', 'cert_file', 'key_file', 'verify_ssl','tls_server_name']
for key in keys:
if key in self.__dict__:
setattr(client_configuration, key, getattr(self, key))
def load_and_set(self, client_configuration):
self._load_authentication()
self._load_cluster_info()
self._set_config(client_configuration)
def list_contexts(self):
return [context.value for context in self._config['contexts']]
@property
def current_context(self):
return self._current_context.value
| KubeConfigLoader |
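A standard-library sketch of the base64url re-padding that `_load_oid_token` above performs on the JWT payload before reading the `exp` claim; the token here is synthetic and built only for this illustration, not a real credential.
import base64
import json

claims_in = {"exp": 1700000000}
payload = base64.urlsafe_b64encode(json.dumps(claims_in).encode()).rstrip(b"=")
token = b"fake-header." + payload + b".fake-signature"

parts = token.decode().split(".")
padding = (4 - len(parts[1]) % 4) * "="  # restore the '=' padding stripped by JWT encoding
claims = json.loads(base64.urlsafe_b64decode(parts[1] + padding))
print(claims["exp"])  # -> 1700000000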
python | gevent__gevent | src/gevent/tests/test__greenlet.py | {
"start": 4610,
"end": 5635
} | class ____(greentest.TestCase):
link_method = None
def link(self, p, listener=None):
getattr(p, self.link_method)(listener)
def set_links(self, p):
event = AsyncResult()
self.link(p, event)
queue = Queue(1)
self.link(p, queue.put)
callback_flag = ['initial']
self.link(p, lambda *args: callback_flag.remove('initial'))
for _ in range(10):
self.link(p, AsyncResult())
self.link(p, Queue(1).put)
return event, queue, callback_flag
def set_links_timeout(self, link):
# stuff that won't be touched
event = AsyncResult()
link(event)
queue = Channel()
link(queue.put)
return event, queue
def check_timed_out(self, event, queue):
got = with_timeout(DELAY, event.get, timeout_value=X)
self.assertIs(got, X)
got = with_timeout(DELAY, queue.get, timeout_value=X)
self.assertIs(got, X)
def return25():
return 25
| LinksTestCase |
python | coleifer__peewee | tests/libs/mock.py | {
"start": 61447,
"end": 62047
} | class ____(object):
def __init__(self, name, parent):
self.name = name
self.parent = parent
def __call__(self, *args, **kwargs):
m = self.create_mock()
return m(*args, **kwargs)
def create_mock(self):
entry = self.name
parent = self.parent
m = parent._get_child_mock(name=entry, _new_name=entry,
_new_parent=parent)
setattr(parent, entry, m)
_set_return_value(parent, m, entry)
return m
def __get__(self, obj, _type=None):
return self.create_mock()
| MagicProxy |
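A self-contained sketch of the non-data-descriptor trick `MagicProxy` relies on: `__get__` runs at attribute-access time, so the child object can be built lazily on first lookup and cached on the instance. All names here are invented for illustration.
class LazyChild:
    def __init__(self, name):
        self.name = name

    def __get__(self, obj, objtype=None):
        child = {"name": self.name}  # stand-in for parent._get_child_mock(...)
        if obj is not None:
            setattr(obj, self.name, child)  # later lookups bypass the descriptor
        return child


class Parent:
    helper = LazyChild("helper")


p = Parent()
assert p.helper is p.helper  # built on first access, then cached on the instance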
python | tensorflow__tensorflow | tensorflow/python/keras/initializers/initializers_v2.py | {
"start": 7469,
"end": 9665
} | class ____(Initializer):
"""Initializer that generates tensors with a uniform distribution.
Also available via the shortcut function
`tf.keras.initializers.random_uniform`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
minval: A python scalar or a scalar tensor. Lower bound of the range of
random values to generate (inclusive).
maxval: A python scalar or a scalar tensor. Upper bound of the range of
random values to generate (exclusive).
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
"""
def __init__(self, minval=-0.05, maxval=0.05, seed=None):
self.minval = minval
self.maxval = maxval
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point and integer
types are supported. If not specified,
`tf.keras.backend.floatx()` is used,
which default to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
"""
_validate_kwargs(self.__class__.__name__, kwargs)
dtype = _get_dtype(dtype)
if not dtype.is_floating and not dtype.is_integer:
raise ValueError('Expected float or integer dtype, got %s.' % dtype)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.random_uniform(shape, self.minval,
self.maxval, dtype)
def get_config(self):
return {
'minval': self.minval,
'maxval': self.maxval,
'seed': self.seed
}
| RandomUniform |
python | google__flatbuffers | tests/MyGame/Example/NestedUnion/NestedUnionTest.py | {
"start": 3365,
"end": 5390
} | class ____(object):
# NestedUnionTestT
def __init__(
self,
name = None,
dataType = 0,
data = None,
id = 0,
):
self.name = name # type: Optional[str]
self.dataType = dataType # type: int
self.data = data # type: Union[None, 'MyGame.Example.NestedUnion.Vec3.Vec3T', 'MyGame.Example.NestedUnion.TestSimpleTableWithEnum.TestSimpleTableWithEnumT']
self.id = id # type: int
@classmethod
def InitFromBuf(cls, buf, pos):
nestedUnionTest = NestedUnionTest()
nestedUnionTest.Init(buf, pos)
return cls.InitFromObj(nestedUnionTest)
@classmethod
def InitFromPackedBuf(cls, buf, pos=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
return cls.InitFromBuf(buf, pos+n)
@classmethod
def InitFromObj(cls, nestedUnionTest):
x = NestedUnionTestT()
x._UnPack(nestedUnionTest)
return x
# NestedUnionTestT
def _UnPack(self, nestedUnionTest):
if nestedUnionTest is None:
return
self.name = nestedUnionTest.Name()
if self.name is not None:
self.name = self.name.decode('utf-8')
self.dataType = nestedUnionTest.DataType()
self.data = MyGame.Example.NestedUnion.Any.AnyCreator(self.dataType, nestedUnionTest.Data())
self.id = nestedUnionTest.Id()
# NestedUnionTestT
def Pack(self, builder):
if self.name is not None:
name = builder.CreateString(self.name)
if self.data is not None:
data = self.data.Pack(builder)
NestedUnionTestStart(builder)
if self.name is not None:
NestedUnionTestAddName(builder, name)
NestedUnionTestAddDataType(builder, self.dataType)
if self.data is not None:
NestedUnionTestAddData(builder, data)
NestedUnionTestAddId(builder, self.id)
nestedUnionTest = NestedUnionTestEnd(builder)
return nestedUnionTest
| NestedUnionTestT |
python | huggingface__transformers | src/transformers/models/hiera/modeling_hiera.py | {
"start": 7525,
"end": 11711
} | class ____(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config, is_mae: bool = False):
super().__init__()
# Support any number of spatial dimensions
self.spatial_dims = len(config.patch_size)
if self.spatial_dims != 2:
raise ValueError(f"The number of dimensions of the input image should be 2, but got {self.spatial_dims}.")
self.num_channels = config.num_channels
self.image_size = config.image_size[-2:]
self.tokens_spatial_shape = [i // s for i, s in zip(config.image_size, config.patch_stride)]
self.mask_spatial_shape = [i // s for i, s in zip(self.tokens_spatial_shape, config.masked_unit_size)]
self.mask_ratio = config.mask_ratio
self.is_mae = is_mae
self.projection = nn.Conv2d(
self.num_channels,
config.embed_dim,
kernel_size=config.patch_size,
stride=config.patch_stride,
padding=config.patch_padding,
)
def masked_conv(
self, pixel_values: torch.FloatTensor, bool_masked_pos: Optional[torch.BoolTensor] = None
) -> torch.Tensor:
"""Zero-out the masked regions of the input before conv.
Prevents leakage of masked regions when using overlapping kernels.
"""
if bool_masked_pos is None:
return self.projection(pixel_values)
target_size = pixel_values.shape[2:]
# Reshape bool_masked_pos to (batch_size, 1, mask_unit_height, mask_unit_width)
bool_masked_pos = bool_masked_pos.view(pixel_values.shape[0], 1, *self.mask_spatial_shape)
bool_masked_pos = nn.functional.interpolate(bool_masked_pos.float(), size=target_size)
return self.projection(pixel_values * bool_masked_pos)
def random_masking(
self, pixel_values: torch.FloatTensor, noise: Optional[torch.FloatTensor] = None
) -> tuple[torch.BoolTensor, torch.LongTensor]:
"""
Perform per-sample random masking by per-sample shuffling. Per-sample shuffling is done by argsort random
noise.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`)
noise (`torch.FloatTensor` of shape `(batch_size, num_mask_units)`, *optional*) which is
mainly used for testing purposes to control randomness and maintain the reproducibility
"""
batch_size = pixel_values.shape[0]
# Tokens selected for masking at mask unit level
num_windows = math.prod(self.mask_spatial_shape)
len_keep = int(num_windows * (1 - self.mask_ratio))
if noise is None:
noise = torch.rand(batch_size, num_windows, device=pixel_values.device)
# Sort noise for each sample
ids_shuffle = torch.argsort(noise, dim=1)
# ascend: small is keep, large is remove
ids_restore = torch.argsort(ids_shuffle, dim=1).to(pixel_values.device)
# Generate the binary bool_masked_pos: 1 is *keep*, 0 is *remove*
# Note this is opposite to original MAE
bool_masked_pos = torch.zeros([batch_size, num_windows], device=pixel_values.device)
bool_masked_pos[:, :len_keep] = 1
# Unshuffle to get the binary bool_masked_pos
bool_masked_pos = torch.gather(bool_masked_pos, dim=1, index=ids_restore).bool()
return bool_masked_pos, ids_restore
def forward(
self,
pixel_values: torch.FloatTensor,
noise: Optional[torch.FloatTensor] = None,
) -> tuple[torch.Tensor, Optional[torch.BoolTensor], Optional[torch.LongTensor]]:
(bool_masked_pos, ids_restore) = (
self.random_masking(pixel_values, noise=noise) if self.is_mae else (None, None)
)
embeddings = self.masked_conv(pixel_values, bool_masked_pos)
embeddings = embeddings.flatten(2).transpose(2, 1)
return embeddings, bool_masked_pos, ids_restore
| HieraPatchEmbeddings |
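A small sketch, assuming `torch` is available, of the argsort-based masking used by `random_masking` above: sorting random noise yields a per-sample random permutation, the first `len_keep` shuffled slots are marked as kept, and a second argsort (the inverse permutation) puts the mask back into original window order.
import torch

batch_size, num_windows, mask_ratio = 2, 8, 0.5
len_keep = int(num_windows * (1 - mask_ratio))

noise = torch.rand(batch_size, num_windows)
ids_shuffle = torch.argsort(noise, dim=1)        # random permutation per sample
ids_restore = torch.argsort(ids_shuffle, dim=1)  # inverse permutation

mask = torch.zeros(batch_size, num_windows)
mask[:, :len_keep] = 1                           # 1 = keep, matching the convention above
mask = torch.gather(mask, dim=1, index=ids_restore).bool()

assert mask.sum(dim=1).tolist() == [len_keep, len_keep]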
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/compiler.py | {
"start": 273130,
"end": 273222
} | class ____(Protocol):
def __call__(self, obj: Any, /) -> str: ...
| _SchemaForObjectCallable |
python | matplotlib__matplotlib | lib/matplotlib/backends/registry.py | {
"start": 245,
"end": 15480
} | class ____:
"""
Registry of backends available within Matplotlib.
This is the single source of truth for available backends.
All use of ``BackendRegistry`` should be via the singleton instance
``backend_registry`` which can be imported from ``matplotlib.backends``.
Each backend has a name, a module name containing the backend code, and an
optional GUI framework that must be running if the backend is interactive.
There are three sources of backends: built-in (source code is within the
Matplotlib repository), explicit ``module://some.backend`` syntax (backend is
obtained by loading the module), or via an entry point (self-registering
backend in an external package).
.. versionadded:: 3.9
"""
# Mapping of built-in backend name to GUI framework, or "headless" for no
# GUI framework. Built-in backends are those which are included in the
# Matplotlib repo. A backend with name 'name' is located in the module
# f"matplotlib.backends.backend_{name.lower()}"
_BUILTIN_BACKEND_TO_GUI_FRAMEWORK = {
"gtk3agg": "gtk3",
"gtk3cairo": "gtk3",
"gtk4agg": "gtk4",
"gtk4cairo": "gtk4",
"macosx": "macosx",
"nbagg": "nbagg",
"notebook": "nbagg",
"qtagg": "qt",
"qtcairo": "qt",
"qt5agg": "qt5",
"qt5cairo": "qt5",
"tkagg": "tk",
"tkcairo": "tk",
"webagg": "webagg",
"wx": "wx",
"wxagg": "wx",
"wxcairo": "wx",
"agg": "headless",
"cairo": "headless",
"pdf": "headless",
"pgf": "headless",
"ps": "headless",
"svg": "headless",
"template": "headless",
}
# Reverse mapping of gui framework to preferred built-in backend.
_GUI_FRAMEWORK_TO_BACKEND = {
"gtk3": "gtk3agg",
"gtk4": "gtk4agg",
"headless": "agg",
"macosx": "macosx",
"qt": "qtagg",
"qt5": "qt5agg",
"qt6": "qtagg",
"tk": "tkagg",
"wx": "wxagg",
}
def __init__(self):
# Only load entry points when first needed.
self._loaded_entry_points = False
# Mapping of non-built-in backend to GUI framework, added dynamically from
# entry points and from matplotlib.use("module://some.backend") format.
# New entries have an "unknown" GUI framework that is determined when first
# needed by calling _get_gui_framework_by_loading.
self._backend_to_gui_framework = {}
# Mapping of backend name to module name, where different from
# f"matplotlib.backends.backend_{backend_name.lower()}". These are either
# hardcoded for backward compatibility, or loaded from entry points or
# "module://some.backend" syntax.
self._name_to_module = {
"notebook": "nbagg",
}
def _backend_module_name(self, backend):
if backend.startswith("module://"):
return backend[9:]
# Return name of module containing the specified backend.
# Does not check if the backend is valid, use is_valid_backend for that.
backend = backend.lower()
# Check if have specific name to module mapping.
backend = self._name_to_module.get(backend, backend)
return (backend[9:] if backend.startswith("module://")
else f"matplotlib.backends.backend_{backend}")
def _clear(self):
# Clear all dynamically-added data, used for testing only.
self.__init__()
def _ensure_entry_points_loaded(self):
# Load entry points, if they have not already been loaded.
if not self._loaded_entry_points:
entries = self._read_entry_points()
self._validate_and_store_entry_points(entries)
self._loaded_entry_points = True
def _get_gui_framework_by_loading(self, backend):
# Determine GUI framework for a backend by loading its module and reading the
# FigureCanvas.required_interactive_framework attribute.
# Returns "headless" if there is no GUI framework.
module = self.load_backend_module(backend)
canvas_class = module.FigureCanvas
return canvas_class.required_interactive_framework or "headless"
def _read_entry_points(self):
# Read entry points of modules that self-advertise as Matplotlib backends.
# Expects entry points like this one from matplotlib-inline (in pyproject.toml
# format):
# [project.entry-points."matplotlib.backend"]
# inline = "matplotlib_inline.backend_inline"
import importlib.metadata as im
entry_points = im.entry_points(group="matplotlib.backend")
entries = [(entry.name, entry.value) for entry in entry_points]
# For backward compatibility, if matplotlib-inline and/or ipympl are installed
# but too old to include entry points, create them. Do not import ipympl
# directly as this calls matplotlib.use() whilst in this function.
def backward_compatible_entry_points(
entries, module_name, threshold_version, names, target):
from matplotlib import _parse_to_version_info
try:
module_version = im.version(module_name)
if _parse_to_version_info(module_version) < threshold_version:
for name in names:
entries.append((name, target))
except im.PackageNotFoundError:
pass
names = [entry[0] for entry in entries]
if "inline" not in names:
backward_compatible_entry_points(
entries, "matplotlib_inline", (0, 1, 7), ["inline"],
"matplotlib_inline.backend_inline")
if "ipympl" not in names:
backward_compatible_entry_points(
entries, "ipympl", (0, 9, 4), ["ipympl", "widget"],
"ipympl.backend_nbagg")
return entries
def _validate_and_store_entry_points(self, entries):
# Validate and store entry points so that they can be used via matplotlib.use()
# in the normal manner. Entry point names cannot be of module:// format, cannot
# shadow a built-in backend name, and there cannot be multiple entry points
# with the same name but different modules. Multiple entry points with the same
# name and value are permitted (it can sometimes happen outside of our control,
# see https://github.com/matplotlib/matplotlib/issues/28367).
for name, module in set(entries):
name = name.lower()
if name.startswith("module://"):
raise RuntimeError(
f"Entry point name '{name}' cannot start with 'module://'")
if name in self._BUILTIN_BACKEND_TO_GUI_FRAMEWORK:
raise RuntimeError(f"Entry point name '{name}' is a built-in backend")
if name in self._backend_to_gui_framework:
raise RuntimeError(f"Entry point name '{name}' duplicated")
self._name_to_module[name] = "module://" + module
# Do not yet know backend GUI framework, determine it only when necessary.
self._backend_to_gui_framework[name] = "unknown"
def backend_for_gui_framework(self, framework):
"""
Return the name of the backend corresponding to the specified GUI framework.
Parameters
----------
framework : str
GUI framework such as "qt".
Returns
-------
str or None
Backend name or None if GUI framework not recognised.
"""
return self._GUI_FRAMEWORK_TO_BACKEND.get(framework.lower())
def is_valid_backend(self, backend):
"""
Return True if the backend name is valid, False otherwise.
A backend name is valid if it is one of the built-in backends or has been
dynamically added via an entry point. Those beginning with ``module://`` are
always considered valid and are added to the current list of all backends
within this function.
Even if a name is valid, it may not be importable or usable. This can only be
determined by loading and using the backend module.
Parameters
----------
backend : str
Name of backend.
Returns
-------
bool
True if backend is valid, False otherwise.
"""
if not backend.startswith("module://"):
backend = backend.lower()
# For backward compatibility, convert ipympl and matplotlib-inline long
# module:// names to their shortened forms.
backwards_compat = {
"module://ipympl.backend_nbagg": "widget",
"module://matplotlib_inline.backend_inline": "inline",
}
backend = backwards_compat.get(backend, backend)
if (backend in self._BUILTIN_BACKEND_TO_GUI_FRAMEWORK or
backend in self._backend_to_gui_framework):
return True
if backend.startswith("module://"):
self._backend_to_gui_framework[backend] = "unknown"
return True
# Only load entry points if really need to and not already done so.
self._ensure_entry_points_loaded()
if backend in self._backend_to_gui_framework:
return True
return False
def list_all(self):
"""
Return list of all known backends.
These include built-in backends and those obtained at runtime either from entry
points or explicit ``module://some.backend`` syntax.
Entry points will be loaded if they haven't been already.
Returns
-------
list of str
Backend names.
"""
self._ensure_entry_points_loaded()
return [*self.list_builtin(), *self._backend_to_gui_framework]
def list_builtin(self, filter_=None):
"""
Return list of backends that are built into Matplotlib.
Parameters
----------
filter_ : `~.BackendFilter`, optional
Filter to apply to returned backends. For example, to return only
non-interactive backends use `.BackendFilter.NON_INTERACTIVE`.
Returns
-------
list of str
Backend names.
"""
if filter_ == BackendFilter.INTERACTIVE:
return [k for k, v in self._BUILTIN_BACKEND_TO_GUI_FRAMEWORK.items()
if v != "headless"]
elif filter_ == BackendFilter.NON_INTERACTIVE:
return [k for k, v in self._BUILTIN_BACKEND_TO_GUI_FRAMEWORK.items()
if v == "headless"]
return [*self._BUILTIN_BACKEND_TO_GUI_FRAMEWORK]
def list_gui_frameworks(self):
"""
Return list of GUI frameworks used by Matplotlib backends.
Returns
-------
list of str
GUI framework names.
"""
return [k for k in self._GUI_FRAMEWORK_TO_BACKEND if k != "headless"]
def load_backend_module(self, backend):
"""
Load and return the module containing the specified backend.
Parameters
----------
backend : str
Name of backend to load.
Returns
-------
Module
Module containing backend.
"""
module_name = self._backend_module_name(backend)
return importlib.import_module(module_name)
def resolve_backend(self, backend):
"""
Return the backend and GUI framework for the specified backend name.
If the GUI framework is not yet known then it will be determined by loading the
backend module and checking the ``FigureCanvas.required_interactive_framework``
attribute.
This function only loads entry points if they have not already been loaded and
the backend is not built-in and not of ``module://some.backend`` format.
Parameters
----------
backend : str or None
Name of backend, or None to use the default backend.
Returns
-------
backend : str
The backend name.
framework : str or None
The GUI framework, which will be None for a backend that is non-interactive.
"""
if isinstance(backend, str):
if not backend.startswith("module://"):
backend = backend.lower()
else: # Might be _auto_backend_sentinel or None
# Use whatever is already running...
from matplotlib import get_backend
backend = get_backend()
# Is backend already known (built-in or dynamically loaded)?
gui = (self._BUILTIN_BACKEND_TO_GUI_FRAMEWORK.get(backend) or
self._backend_to_gui_framework.get(backend))
# Is backend "module://something"?
if gui is None and isinstance(backend, str) and backend.startswith("module://"):
gui = "unknown"
# Is backend a possible entry point?
if gui is None and not self._loaded_entry_points:
self._ensure_entry_points_loaded()
gui = self._backend_to_gui_framework.get(backend)
# Backend known but not its gui framework.
if gui == "unknown":
gui = self._get_gui_framework_by_loading(backend)
self._backend_to_gui_framework[backend] = gui
if gui is None:
raise RuntimeError(f"'{backend}' is not a recognised backend name")
return backend, gui if gui != "headless" else None
def resolve_gui_or_backend(self, gui_or_backend):
"""
Return the backend and GUI framework for the specified string that may be
either a GUI framework or a backend name, tested in that order.
This is for use with the IPython %matplotlib magic command which may be a GUI
framework such as ``%matplotlib qt`` or a backend name such as
``%matplotlib qtagg``.
This function only loads entry points if they have not already been loaded and
the backend is not built-in and not of ``module://some.backend`` format.
Parameters
----------
gui_or_backend : str or None
Name of GUI framework or backend, or None to use the default backend.
Returns
-------
backend : str
The backend name.
framework : str or None
The GUI framework, which will be None for a backend that is non-interactive.
"""
if not gui_or_backend.startswith("module://"):
gui_or_backend = gui_or_backend.lower()
# First check if it is a gui loop name.
backend = self.backend_for_gui_framework(gui_or_backend)
if backend is not None:
return backend, gui_or_backend if gui_or_backend != "headless" else None
# Then check if it is a backend name.
try:
return self.resolve_backend(gui_or_backend)
except Exception: # KeyError ?
raise RuntimeError(
f"'{gui_or_backend}' is not a recognised GUI loop or backend name")
# Singleton
backend_registry = BackendRegistry()
| BackendRegistry |
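A short usage sketch, assuming Matplotlib 3.9+ is installed: per the class docstring, access goes through the `backend_registry` singleton importable from `matplotlib.backends`, and the printed values illustrate what the methods defined above return.
from matplotlib.backends import backend_registry
from matplotlib.backends.registry import BackendFilter

print(backend_registry.list_builtin(BackendFilter.NON_INTERACTIVE))  # e.g. ['agg', 'cairo', 'pdf', ...]
print(backend_registry.backend_for_gui_framework("qt"))              # -> 'qtagg'
print(backend_registry.resolve_backend("agg"))                       # -> ('agg', None)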
python | ApeWorX__ape | tests/functional/test_coverage.py | {
"start": 5358,
"end": 8720
} | class ____:
@pytest.fixture
def pytest_config(self, mocker):
return mocker.MagicMock()
@pytest.fixture
def config_wrapper(self, pytest_config):
return ConfigWrapper(pytest_config)
@pytest.fixture
def tracker(self, pytest_config, project):
return CoverageTracker(pytest_config, project=project)
def test_data(self, tracker):
assert tracker.data is not None
actual = tracker.data.project
expected = tracker.local_project
assert actual == expected
def test_cover(self, mocker, pytest_config, compilers, mock_compiler):
"""
Ensure coverage of a call works.
"""
filestem = "atest"
filename = f"{filestem}.__mock__"
fn_name = "_a_method"
# Set up the mock compiler.
mock_compiler.abi = [MethodABI(name=fn_name)]
mock_compiler.ast = {
"src": "0:112:0",
"name": filename,
"end_lineno": 7,
"lineno": 1,
"ast_type": "Module",
}
mock_compiler.pcmap = {"0": {"location": (1, 7, 1, 7)}}
mock_contract = mocker.MagicMock()
mock_contract.name = filename
mock_statement = mocker.MagicMock()
mock_statement.pcs = {20}
mock_statement.hit_count = 0
mock_function = mocker.MagicMock()
mock_function.name = fn_name
mock_function.statements = [mock_statement]
mock_contract.functions = [mock_function]
mock_contract.statements = [mock_statement]
def init_profile(source_cov, src):
source_cov.contracts = [mock_contract]
mock_compiler.init_coverage_profile.side_effect = init_profile
stmt = {"type": "dev: Cannot send ether to non-payable function", "pcs": [20]}
fn_name = "_a_method"
tb_data = {
"statements": [stmt],
"closure": {"name": fn_name, "full_name": f"{fn_name}()"},
"depth": 0,
}
with ape.Project.create_temporary_project() as tmp:
# Create a source file.
file = tmp.path / "contracts" / filename
file.parent.mkdir(exist_ok=True, parents=True)
file.write_text("testing", encoding="utf8")
# Ensure the TB refers to this source.
tb_data["source_path"] = f"{tmp.path}/contracts/{filename}"
call_tb = SourceTraceback.model_validate([tb_data])
try:
# Hack in our mock compiler.
_ = compilers.registered_compilers # Ensure cache exists.
compilers.__dict__["registered_compilers"][mock_compiler.ext] = mock_compiler
# Ensure our coverage tracker is using our new tmp project w/ the new src
# as well is set _after_ our new compiler plugin is added.
tracker = CoverageTracker(pytest_config, project=tmp)
tracker.cover(call_tb, contract=filestem, function=f"{fn_name}()")
assert mock_statement.hit_count > 0
finally:
if (
"registered_compilers" in compilers.__dict__
and mock_compiler.ext in compilers.__dict__["registered_compilers"]
):
del compilers.__dict__["registered_compilers"][mock_compiler.ext]
| TestCoverageTracker |
python | numba__numba | numba/tests/test_numpy_support.py | {
"start": 356,
"end": 5380
} | class ____(TestCase):
def test_number_types(self):
"""
Test from_dtype() and as_dtype() with the various scalar number types.
"""
f = numpy_support.from_dtype
def check(typechar, numba_type):
# Only native ordering and alignment is supported
dtype = np.dtype(typechar)
self.assertIs(f(dtype), numba_type)
self.assertIs(f(np.dtype('=' + typechar)), numba_type)
self.assertEqual(dtype, numpy_support.as_dtype(numba_type))
check('?', types.bool_)
check('f', types.float32)
check('f4', types.float32)
check('d', types.float64)
check('f8', types.float64)
check('F', types.complex64)
check('c8', types.complex64)
check('D', types.complex128)
check('c16', types.complex128)
check('O', types.pyobject)
check('b', types.int8)
check('i1', types.int8)
check('B', types.uint8)
check('u1', types.uint8)
check('h', types.int16)
check('i2', types.int16)
check('H', types.uint16)
check('u2', types.uint16)
check('i', types.int32)
check('i4', types.int32)
check('I', types.uint32)
check('u4', types.uint32)
check('q', types.int64)
check('Q', types.uint64)
for name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
'int64', 'uint64', 'intp', 'uintp'):
self.assertIs(f(np.dtype(name)), getattr(types, name))
# Non-native alignments are unsupported (except for 1-byte types)
foreign_align = '>' if sys.byteorder == 'little' else '<'
for letter in 'hHiIlLqQfdFD':
self.assertRaises(NumbaNotImplementedError, f,
np.dtype(foreign_align + letter))
def test_string_types(self):
"""
Test from_dtype() and as_dtype() with the character string types.
"""
def check(typestring, numba_type):
# Only native ordering and alignment is supported
dtype = np.dtype(typestring)
self.assertEqual(numpy_support.from_dtype(dtype), numba_type)
self.assertEqual(dtype, numpy_support.as_dtype(numba_type))
check('S10', types.CharSeq(10))
check('a11', types.CharSeq(11))
check('U12', types.UnicodeCharSeq(12))
def check_datetime_types(self, letter, nb_class):
def check(dtype, numba_type, code):
tp = numpy_support.from_dtype(dtype)
self.assertEqual(tp, numba_type)
self.assertEqual(tp.unit_code, code)
self.assertEqual(numpy_support.as_dtype(numba_type), dtype)
self.assertEqual(numpy_support.as_dtype(tp), dtype)
# Unit-less ("generic") type
check(np.dtype(letter), nb_class(''), 14)
def test_datetime_types(self):
"""
Test from_dtype() and as_dtype() with the datetime types.
"""
self.check_datetime_types('M', types.NPDatetime)
def test_timedelta_types(self):
"""
Test from_dtype() and as_dtype() with the timedelta types.
"""
self.check_datetime_types('m', types.NPTimedelta)
def test_struct_types(self):
def check(dtype, fields, size, aligned):
tp = numpy_support.from_dtype(dtype)
self.assertIsInstance(tp, types.Record)
# Only check for dtype equality, as the Numba type may be interned
self.assertEqual(tp.dtype, dtype)
self.assertEqual(tp.fields, fields)
self.assertEqual(tp.size, size)
self.assertEqual(tp.aligned, aligned)
dtype = np.dtype([('a', np.int16), ('b', np.int32)])
check(dtype,
fields={'a': (types.int16, 0, None, None),
'b': (types.int32, 2, None, None)},
size=6, aligned=False)
dtype = np.dtype([('a', np.int16), ('b', np.int32)], align=True)
check(dtype,
fields={'a': (types.int16, 0, None, None),
'b': (types.int32, 4, None, None)},
size=8, aligned=True)
dtype = np.dtype([('m', np.int32), ('n', 'S5')])
check(dtype,
fields={'m': (types.int32, 0, None, None),
'n': (types.CharSeq(5), 4, None, None)},
size=9, aligned=False)
def test_enum_type(self):
def check(base_inst, enum_def, type_class):
np_dt = np.dtype(base_inst)
nb_ty = numpy_support.from_dtype(np_dt)
inst = type_class(enum_def, nb_ty)
recovered = numpy_support.as_dtype(inst)
self.assertEqual(np_dt, recovered)
dts = [np.float64, np.int32, np.complex128, np.bool_]
enums = [Shake, RequestError]
for dt, enum in product(dts, enums):
check(dt, enum, types.EnumMember)
for dt, enum in product(dts, enums):
check(dt, enum, types.IntEnumMember)
| TestFromDtype |
python | numba__numba | numba/tests/npyufunc/test_parallel_ufunc_issues.py | {
"start": 2556,
"end": 4387
} | class ____(unittest.TestCase):
_numba_parallel_test_ = False
@skip_if_freethreading
def test_gil_reacquire_deadlock(self):
"""
Testing similar issue to #1998 due to GIL reacquiring for Gufunc
"""
# make a ctypes callback that requires the GIL
proto = ctypes.CFUNCTYPE(None, ctypes.c_int32)
characters = 'abcdefghij'
def bar(x):
print(characters[x])
cbar = proto(bar)
# our unit under test
@guvectorize(['(int32, int32[:])'], "()->()",
target='parallel', nopython=True)
def foo(x, out):
print(x % 10) # this reacquires the GIL
cbar(x % 10) # this reacquires the GIL
out[0] = x * 2
# Numpy ufunc has a heuristic to determine whether to release the GIL
# during execution. Small input size (10) seems to not release the GIL.
# Large input size (1000) seems to release the GIL.
for nelem in [1, 10, 100, 1000]:
# inputs
a = np.arange(nelem, dtype=np.int32)
acopy = a.copy()
# run and capture stdout
with captured_stdout() as buf:
got = foo(a)
stdout = buf.getvalue()
buf.close()
# process outputs from print
got_output = sorted(map(lambda x: x.strip(), stdout.splitlines()))
# build expected output
expected_output = [str(x % 10) for x in range(nelem)]
expected_output += [characters[x % 10] for x in range(nelem)]
expected_output = sorted(expected_output)
# verify
self.assertEqual(got_output, expected_output)
np.testing.assert_equal(got, 2 * acopy)
if __name__ == '__main__':
unittest.main()
| TestParGUfuncIssues |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_pretty.py | {
"start": 3696,
"end": 3752
} | class ____:
pass
NoModule.__module__ = None
| NoModule |
python | getsentry__sentry | tests/sentry/api/test_utils.py | {
"start": 1036,
"end": 4048
} | class ____(unittest.TestCase):
def test_timeframe(self) -> None:
start, end = get_date_range_from_params({"timeframe": "14h"})
assert end - datetime.timedelta(hours=14) == start
start, end = get_date_range_from_params({"timeframe": "14d"})
assert end - datetime.timedelta(days=14) == start
start, end = get_date_range_from_params({"timeframe": "60m"})
assert end - datetime.timedelta(minutes=60) == start
start, end = get_date_range_from_params({"timeframe": "3600s"})
assert end - datetime.timedelta(seconds=3600) == start
start, end = get_date_range_from_params({"timeframe": "91d"})
assert end - datetime.timedelta(days=91) == start
start, end = get_date_range_from_params({"statsPeriod": "14h"})
assert end - datetime.timedelta(hours=14) == start
start, end = get_date_range_from_params({"statsPeriod": "14d"})
assert end - datetime.timedelta(days=14) == start
start, end = get_date_range_from_params({"statsPeriod": "60m"})
assert end - datetime.timedelta(minutes=60) == start
with pytest.raises(InvalidParams):
get_date_range_from_params({"timeframe": "9000000d"})
def test_date_range(self) -> None:
start, end = get_date_range_from_params({"start": "2018-11-01", "end": "2018-11-07"})
assert start == datetime.datetime(2018, 11, 1, tzinfo=datetime.UTC)
assert end == datetime.datetime(2018, 11, 7, tzinfo=datetime.UTC)
with pytest.raises(InvalidParams):
get_date_range_from_params(
{"start": "2018-11-01T00:00:00", "end": "2018-11-01T00:00:00"}
)
@freeze_time("2018-12-11 03:21:34")
def test_no_params(self) -> None:
start, end = get_date_range_from_params({})
assert start == timezone.now() - MAX_STATS_PERIOD
assert end == timezone.now()
@freeze_time("2018-12-11 03:21:34")
def test_no_params_optional(self) -> None:
start, end = get_date_range_from_params({}, optional=True)
assert start is None
assert end is None
@freeze_time("2018-12-11 03:21:34")
def test_relative_date_range(self) -> None:
start, end = get_date_range_from_params({"timeframeStart": "14d", "timeframeEnd": "7d"})
assert start == datetime.datetime(2018, 11, 27, 3, 21, 34, tzinfo=datetime.UTC)
assert end == datetime.datetime(2018, 12, 4, 3, 21, 34, tzinfo=datetime.UTC)
start, end = get_date_range_from_params({"statsPeriodStart": "14d", "statsPeriodEnd": "7d"})
assert start == datetime.datetime(2018, 11, 27, 3, 21, 34, tzinfo=datetime.UTC)
assert end == datetime.datetime(2018, 12, 4, 3, 21, 34, tzinfo=datetime.UTC)
@freeze_time("2018-12-11 03:21:34")
def test_relative_date_range_incomplete(self) -> None:
with pytest.raises(InvalidParams):
start, end = get_date_range_from_params({"timeframeStart": "14d"})
| GetDateRangeFromParamsTest |
python | numba__numba | numba/testing/main.py | {
"start": 23362,
"end": 24338
} | class ____(object):
"""
A minimal, picklable TestResult-alike object.
"""
__slots__ = (
'failures', 'errors', 'skipped', 'expectedFailures',
'unexpectedSuccesses', 'stream', 'shouldStop', 'testsRun',
'test_id', 'resource_info')
def fixup_case(self, case):
"""
Remove any unpicklable attributes from TestCase instance *case*.
"""
# Python 3.3 doesn't reset this one.
case._outcomeForDoCleanups = None
def __init__(self, original_result, test_id=None, resource_info=None):
for attr in self.__slots__:
setattr(self, attr, getattr(original_result, attr, None))
for case, _ in self.expectedFailures:
self.fixup_case(case)
for case, _ in self.errors:
self.fixup_case(case)
for case, _ in self.failures:
self.fixup_case(case)
self.test_id = test_id
self.resource_info = resource_info
| _MinimalResult |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 13566,
"end": 13659
} | class ____(OpcodeWithArg):
_FLAGS = HAS_NAME | HAS_ARGUMENT
__slots__ = ()
| STORE_ANNOTATION |
python | google__jax | jax/_src/core.py | {
"start": 59118,
"end": 59179
} | class ____(NamedTuple):
val: int
@dataclass(frozen=True)
| DBIdx |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/connections.py | {
"start": 2222,
"end": 2399
} | class ____(BaseModel):
"""Connection Collection serializer for responses."""
connections: Iterable[ConnectionResponse]
total_entries: int
| ConnectionCollectionResponse |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/kubernetes_engine.py | {
"start": 50762,
"end": 54298
} | class ____(GKEOperatorMixin, KubernetesDeleteJobOperator):
"""
Delete a Kubernetes job in the specified Google Kubernetes Engine cluster.
This Operator assumes that the system has gcloud installed and has configured a
connection id with a service account.
The **minimum** required to use this operator are the variables
``task_id``, ``project_id``, ``location``, ``cluster_name``, ``name``,
``namespace``
.. seealso::
For more detail about Kubernetes Engine authentication have a look at the reference:
https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#internal_ip
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GKEDeleteJobOperator`
:param location: The name of the Google Kubernetes Engine zone or region in which the
cluster resides, e.g. 'us-central1-a'
:param cluster_name: The name of the Google Kubernetes Engine cluster.
:param use_internal_ip: Use the internal IP address as the endpoint.
:param use_dns_endpoint: Use the DNS address as the endpoint.
:param project_id: The Google Developers Console project id
:param gcp_conn_id: The Google cloud connection id to use. This allows for
users to specify a service account.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
set(GKEOperatorMixin.template_fields) | set(KubernetesDeleteJobOperator.template_fields)
)
def __init__(
self,
location: str,
cluster_name: str,
use_internal_ip: bool = False,
use_dns_endpoint: bool = False,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.location = location
self.cluster_name = cluster_name
self.gcp_conn_id = gcp_conn_id
self.use_internal_ip = use_internal_ip
self.use_dns_endpoint = use_dns_endpoint
self.impersonation_chain = impersonation_chain
if self.gcp_conn_id is None:
raise AirflowException(
"The gcp_conn_id parameter has become required. If you want to use Application Default "
"Credentials (ADC) strategy for authorization, create an empty connection "
"called `google_cloud_default`.",
)
# There is no need to manage the kube_config file, as it will be generated automatically.
# All Kubernetes parameters (except config_file) are also valid for the GKEDeleteJobOperator.
if self.config_file:
raise AirflowException("config_file is not an allowed parameter for the GKEDeleteJobOperator.")
| GKEDeleteJobOperator |
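A hypothetical DAG snippet (project, cluster, and job names are placeholders) wiring up the minimum fields the docstring above lists; it assumes a recent Airflow release with the Google provider installed.
import datetime

from airflow import DAG
from airflow.providers.google.cloud.operators.kubernetes_engine import GKEDeleteJobOperator

with DAG(dag_id="example_gke_delete_job", start_date=datetime.datetime(2024, 1, 1), schedule=None) as dag:
    delete_job = GKEDeleteJobOperator(
        task_id="delete_job",
        project_id="my-gcp-project",   # placeholder
        location="us-central1-a",
        cluster_name="my-cluster",     # placeholder
        name="my-k8s-job",             # Kubernetes Job to delete (placeholder)
        namespace="default",
    )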
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/lambdas.py | {
"start": 1794,
"end": 4941
} | class ____(Options):
enable_tracking = True
track_closure_variables = True
track_on: Optional[object] = None
global_track_bound_values = True
track_bound_values = True
lambda_cache: Optional[_LambdaCacheType] = None
def lambda_stmt(
lmb: _StmtLambdaType,
enable_tracking: bool = True,
track_closure_variables: bool = True,
track_on: Optional[object] = None,
global_track_bound_values: bool = True,
track_bound_values: bool = True,
lambda_cache: Optional[_LambdaCacheType] = None,
) -> StatementLambdaElement:
"""Produce a SQL statement that is cached as a lambda.
The Python code object within the lambda is scanned for both Python
literals that will become bound parameters as well as closure variables
that refer to Core or ORM constructs that may vary. The lambda itself
will be invoked only once per particular set of constructs detected.
E.g.::
from sqlalchemy import lambda_stmt
stmt = lambda_stmt(lambda: table.select())
stmt += lambda s: s.where(table.c.id == 5)
result = connection.execute(stmt)
The object returned is an instance of :class:`_sql.StatementLambdaElement`.
.. versionadded:: 1.4
:param lmb: a Python function, typically a lambda, which takes no arguments
and returns a SQL expression construct
:param enable_tracking: when False, all scanning of the given lambda for
changes in closure variables or bound parameters is disabled. Use for
a lambda that produces the identical results in all cases with no
parameterization.
:param track_closure_variables: when False, changes in closure variables
within the lambda will not be scanned. Use for a lambda where the
state of its closure variables will never change the SQL structure
returned by the lambda.
:param track_bound_values: when False, bound parameter tracking will
be disabled for the given lambda. Use for a lambda that either does
not produce any bound values, or where the initial bound values never
change.
:param global_track_bound_values: when False, bound parameter tracking
will be disabled for the entire statement including additional links
added via the :meth:`_sql.StatementLambdaElement.add_criteria` method.
:param lambda_cache: a dictionary or other mapping-like object where
information about the lambda's Python code as well as the tracked closure
variables in the lambda itself will be stored. Defaults
to a global LRU cache. This cache is independent of the "compiled_cache"
used by the :class:`_engine.Connection` object.
.. seealso::
:ref:`engine_lambda_caching`
"""
return StatementLambdaElement(
lmb,
roles.StatementRole,
LambdaOptions(
enable_tracking=enable_tracking,
track_on=track_on,
track_closure_variables=track_closure_variables,
global_track_bound_values=global_track_bound_values,
track_bound_values=track_bound_values,
lambda_cache=lambda_cache,
),
)
| LambdaOptions |
python | miyuchina__mistletoe | test/test_block_token.py | {
"start": 893,
"end": 1771
} | class ____(TestToken):
def test_match(self):
lines = ['### heading 3\n']
arg = 'heading 3'
self._test_match(block_token.Heading, lines, arg, level=3)
def test_children_with_enclosing_hashes(self):
lines = ['# heading 3 ##### \n']
arg = 'heading 3'
self._test_match(block_token.Heading, lines, arg, level=1)
def test_not_heading(self):
lines = ['####### paragraph\n']
arg = '####### paragraph'
self._test_match(block_token.Paragraph, lines, arg)
def test_heading_in_paragraph(self):
lines = ['foo\n', '# heading\n', 'bar\n']
token1, token2, token3 = block_token.tokenize(lines)
self.assertIsInstance(token1, block_token.Paragraph)
self.assertIsInstance(token2, block_token.Heading)
self.assertIsInstance(token3, block_token.Paragraph)
| TestAtxHeading |
python | bokeh__bokeh | setup.py | {
"start": 6129,
"end": 6297
} | class ____(build): # type: ignore
def run(self) -> None:
check_tags()
build_or_install_bokehjs(self.distribution.packages)
super().run()
| Build |
python | getsentry__sentry | src/sentry/api/serializers/models/organization_member/response.py | {
"start": 1490,
"end": 1622
} | class ____(TypedDict):
teamSlug: str
role: str | None
@extend_schema_serializer(exclude_fields=["role", "roleName"])
| _TeamRole |
python | PyCQA__pylint | tests/functional/ext/no_self_use/no_self_use.py | {
"start": 2227,
"end": 2302
} | class ____(A):
def get_memo(self, obj):
return super().get(obj)
| B |
python | getsentry__sentry | src/sentry/models/organization.py | {
"start": 3368,
"end": 5396
} | class ____(BaseManager["Organization"]):
def get_for_user_ids(self, user_ids: Collection[int]) -> BaseQuerySet[Organization]:
"""Returns the QuerySet of all organizations that a set of Users have access to."""
return self.filter(
status=OrganizationStatus.ACTIVE,
member_set__user_id__in=user_ids,
)
def get_for_team_ids(self, team_ids: Sequence[int]) -> BaseQuerySet[Organization]:
"""Returns the QuerySet of all organizations that a set of Teams have access to."""
from sentry.models.team import Team
return self.filter(
status=OrganizationStatus.ACTIVE,
id__in=Team.objects.filter(id__in=team_ids).values("organization"),
)
def get_for_user(self, user, scope=None, only_visible=True) -> list[Organization]:
"""
Returns a set of all organizations a user has access to.
"""
from sentry.models.organizationmember import OrganizationMember
if not user.is_authenticated:
return []
qs = OrganizationMember.objects.filter(user_id=user.id).select_related("organization")
if only_visible:
qs = qs.filter(organization__status=OrganizationStatus.ACTIVE)
results = list(qs)
if scope is not None:
return [r.organization for r in results if scope in r.get_scopes()]
return [r.organization for r in results]
def get_organizations_where_user_is_owner(self, user_id: int) -> BaseQuerySet[Organization]:
"""
Returns a QuerySet of all organizations where a user has the top priority role.
The default top priority role in Sentry is owner.
"""
# get owners from orgs
owner_role_orgs = Organization.objects.filter(
member_set__user_id=user_id,
status=OrganizationStatus.ACTIVE,
member_set__role=roles.get_top_dog().id,
)
return owner_role_orgs
@snowflake_id_model
@region_silo_model
| OrganizationManager |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fetch_artifact_manifest.py | {
"start": 257,
"end": 353
} | class ____(GQLResult):
artifact: Optional[FetchArtifactManifestArtifact]
| FetchArtifactManifest |
python | sqlalchemy__sqlalchemy | test/orm/test_joins.py | {
"start": 67103,
"end": 74618
} | class ____(fixtures.MappedTest, AssertsCompiledSQL):
__dialect__ = "default"
run_setup_mappers = "once"
@classmethod
def define_tables(cls, metadata):
Table("table1", metadata, Column("id", Integer, primary_key=True))
Table(
"table2",
metadata,
Column("id", Integer, primary_key=True),
Column("t1_id", Integer),
)
@classmethod
def setup_classes(cls):
class T1(cls.Comparable):
pass
class T2(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
table1, table2 = cls.tables.table1, cls.tables.table2
T1, T2 = cls.classes("T1", "T2")
cls.mapper_registry.map_imperatively(T1, table1)
cls.mapper_registry.map_imperatively(T2, table2)
def test_select_mapped_to_mapped_explicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = fixture_session()
subq = (
sess.query(T2.t1_id, func.count(T2.id).label("count"))
.group_by(T2.t1_id)
.subquery()
)
self.assert_compile(
sess.query(subq.c.count, T1.id)
.select_from(subq)
.join(T1, subq.c.t1_id == T1.id),
"SELECT anon_1.count AS anon_1_count, table1.id AS table1_id "
"FROM (SELECT table2.t1_id AS t1_id, "
"count(table2.id) AS count FROM table2 "
"GROUP BY table2.t1_id) AS anon_1 JOIN table1 "
"ON anon_1.t1_id = table1.id",
)
def test_select_mapped_to_mapped_implicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = fixture_session()
subq = (
sess.query(T2.t1_id, func.count(T2.id).label("count"))
.group_by(T2.t1_id)
.subquery()
)
self.assert_compile(
sess.query(subq.c.count, T1.id).join(T1, subq.c.t1_id == T1.id),
"SELECT anon_1.count AS anon_1_count, table1.id AS table1_id "
"FROM (SELECT table2.t1_id AS t1_id, "
"count(table2.id) AS count FROM table2 "
"GROUP BY table2.t1_id) AS anon_1 JOIN table1 "
"ON anon_1.t1_id = table1.id",
)
def test_select_mapped_to_select_explicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = fixture_session()
subq = (
sess.query(T2.t1_id, func.count(T2.id).label("count"))
.group_by(T2.t1_id)
.subquery()
)
self.assert_compile(
sess.query(subq.c.count, T1.id)
.select_from(T1)
.join(subq, subq.c.t1_id == T1.id),
"SELECT anon_1.count AS anon_1_count, table1.id AS table1_id "
"FROM table1 JOIN (SELECT table2.t1_id AS t1_id, "
"count(table2.id) AS count FROM table2 GROUP BY table2.t1_id) "
"AS anon_1 ON anon_1.t1_id = table1.id",
)
def test_select_mapped_to_select_implicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = fixture_session()
subq = (
sess.query(T2.t1_id, func.count(T2.id).label("count"))
.group_by(T2.t1_id)
.subquery()
)
# without select_from
self.assert_compile(
sess.query(subq.c.count, T1.id).join(subq, subq.c.t1_id == T1.id),
"SELECT anon_1.count AS anon_1_count, table1.id AS table1_id "
"FROM table1 JOIN "
"(SELECT table2.t1_id AS t1_id, count(table2.id) AS count "
"FROM table2 GROUP BY table2.t1_id) "
"AS anon_1 ON anon_1.t1_id = table1.id",
)
# with select_from, same query
self.assert_compile(
sess.query(subq.c.count, T1.id)
.select_from(T1)
.join(subq, subq.c.t1_id == T1.id),
"SELECT anon_1.count AS anon_1_count, table1.id AS table1_id "
"FROM table1 JOIN "
"(SELECT table2.t1_id AS t1_id, count(table2.id) AS count "
"FROM table2 GROUP BY table2.t1_id) "
"AS anon_1 ON anon_1.t1_id = table1.id",
)
def test_mapped_select_to_mapped_implicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = fixture_session()
subq = (
sess.query(T2.t1_id, func.count(T2.id).label("count"))
.group_by(T2.t1_id)
.subquery()
)
# without select_from
self.assert_compile(
sess.query(T1.id, subq.c.count).join(T1, subq.c.t1_id == T1.id),
"SELECT table1.id AS table1_id, anon_1.count AS anon_1_count "
"FROM (SELECT table2.t1_id AS t1_id, count(table2.id) AS count "
"FROM table2 GROUP BY table2.t1_id) AS anon_1 "
"JOIN table1 ON anon_1.t1_id = table1.id",
)
# with select_from, same query
self.assert_compile(
sess.query(T1.id, subq.c.count)
.select_from(subq)
.join(T1, subq.c.t1_id == T1.id),
"SELECT table1.id AS table1_id, anon_1.count AS anon_1_count "
"FROM (SELECT table2.t1_id AS t1_id, count(table2.id) AS count "
"FROM table2 GROUP BY table2.t1_id) AS anon_1 "
"JOIN table1 ON anon_1.t1_id = table1.id",
)
def test_mapped_select_to_mapped_explicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = fixture_session()
subq = (
sess.query(T2.t1_id, func.count(T2.id).label("count"))
.group_by(T2.t1_id)
.subquery()
)
self.assert_compile(
sess.query(T1.id, subq.c.count)
.select_from(subq)
.join(T1, subq.c.t1_id == T1.id),
"SELECT table1.id AS table1_id, anon_1.count AS anon_1_count "
"FROM (SELECT table2.t1_id AS t1_id, count(table2.id) AS count "
"FROM table2 GROUP BY table2.t1_id) AS anon_1 JOIN table1 "
"ON anon_1.t1_id = table1.id",
)
def test_mapped_select_to_select_explicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = fixture_session()
subq = (
sess.query(T2.t1_id, func.count(T2.id).label("count"))
.group_by(T2.t1_id)
.subquery()
)
self.assert_compile(
sess.query(T1.id, subq.c.count)
.select_from(T1)
.join(subq, subq.c.t1_id == T1.id),
"SELECT table1.id AS table1_id, anon_1.count AS anon_1_count "
"FROM table1 JOIN (SELECT table2.t1_id AS t1_id, "
"count(table2.id) AS count "
"FROM table2 GROUP BY table2.t1_id) AS anon_1 "
"ON anon_1.t1_id = table1.id",
)
def test_mapped_select_to_select_implicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = fixture_session()
subq = (
sess.query(T2.t1_id, func.count(T2.id).label("count"))
.group_by(T2.t1_id)
.subquery()
)
self.assert_compile(
sess.query(T1.id, subq.c.count).join(subq, subq.c.t1_id == T1.id),
"SELECT table1.id AS table1_id, anon_1.count AS anon_1_count "
"FROM table1 JOIN (SELECT table2.t1_id AS t1_id, "
"count(table2.id) AS count "
"FROM table2 GROUP BY table2.t1_id) AS anon_1 "
"ON anon_1.t1_id = table1.id",
)
| JoinFromSelectableTest |
python | walkccc__LeetCode | solutions/3078. Match Alphanumerical Pattern in Matrix I/3078.py | {
"start": 0,
"end": 857
} | class ____:
def findPattern(
self,
board: list[list[int]],
pattern: list[str],
) -> list[int]:
def isMatch(x: int, y: int) -> bool:
digitToLetter = {}
letterToDigit = {}
for i, row in enumerate(pattern):
for j, c in enumerate(row):
digit = board[i + x][j + y]
if c.isdigit():
if int(c) != digit:
return False
else:
if digitToLetter.get(digit, c) != c:
return False
if letterToDigit.get(c, digit) != digit:
return False
digitToLetter[digit] = c
letterToDigit[c] = digit
return True
for x in range(len(board) - len(pattern) + 1):
for y in range(len(board[0]) - len(pattern[0]) + 1):
if isMatch(x, y):
return [x, y]
return [-1, -1]
| Solution |
python | allegroai__clearml | clearml/utilities/io_manager.py | {
"start": 25,
"end": 1244
} | class ____(object):
def __init__(self) -> None:
self.threads_io = {}
def add_io_to_thread(self, thread_id: int, io_object: Any) -> None:
if thread_id in self.threads_io:
self.threads_io[thread_id].add(id(io_object))
else:
self.threads_io[thread_id] = {id(io_object)}
if self._io_has_canvas_figure(io_object):
self.threads_io[thread_id].add(id(io_object.canvas.figure))
def is_plot_called(self, thread_id: int, io_object: Any) -> bool:
return id(io_object) in self.threads_io.get(thread_id, set())
def remove_io_to_thread(self, thread_id: int, io_object: Any) -> None:
try:
self.threads_io[thread_id].remove(id(io_object))
if self._io_has_canvas_figure(io_object):
self.threads_io[thread_id].remove(id(io_object.canvas.figure))
except Exception:
pass
def remove_thread(self, thread_id: int) -> None:
if thread_id in self.threads_io:
del self.threads_io[thread_id]
@staticmethod
def _io_has_canvas_figure(io_object: Any) -> bool:
return hasattr(io_object, "canvas") and hasattr(io_object.canvas, "figure")
| IOCallsManager |
python | huggingface__transformers | src/transformers/models/swin/modeling_swin.py | {
"start": 21831,
"end": 22558
} | class ____(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
super().__init__()
self.self = SwinSelfAttention(config, dim, num_heads, window_size)
self.output = SwinSelfOutput(config, dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
| SwinAttention |
python | kamyu104__LeetCode-Solutions | Python/minimum-remove-to-make-valid-parentheses.py | {
"start": 29,
"end": 700
} | class ____(object):
def minRemoveToMakeValid(self, s):
"""
:type s: str
:rtype: str
"""
result = list(s)
count = 0
for i, v in enumerate(result):
if v == '(':
count += 1
elif v == ')':
if count:
count -= 1
else:
result[i] = ""
if count:
for i in reversed(xrange(len(result))):
if result[i] == '(':
result[i] = ""
count -= 1
if not count:
break
return "".join(result)
| Solution |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_training.py | {
"start": 36122,
"end": 38050
} | class ____(FSDPTest):
@property
def world_size(self) -> int:
return min(4, torch.get_device_module(device_type).device_count())
@skip_if_lt_x_gpu(2)
def test_train_parity_with_shared_params(self):
self.run_subtests(
{
"reshard_after_forward": [False, True],
"use_activation_checkpointing": [False, True],
},
self._test_train_shared_params,
)
def _test_train_shared_params(
self,
reshard_after_forward: bool,
use_activation_checkpointing: bool,
):
torch.manual_seed(42)
model_args = ModelArgs(n_layers=3, dropout_p=0.0, weight_tying=True)
model = Transformer(model_args)
ref_model = copy.deepcopy(model).to(device_type)
replicate(
ref_model,
device_ids=[self.rank],
)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
for module in model.modules():
if isinstance(module, TransformerBlock):
if use_activation_checkpointing:
checkpoint(module)
fully_shard(module, reshard_after_forward=reshard_after_forward)
fully_shard(model, reshard_after_forward=reshard_after_forward)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
torch.manual_seed(42 + self.rank + 1)
for iter_idx in range(10):
inp = torch.randint(
0, model_args.vocab_size, (2, 16), device=device_type.type
)
losses: list[torch.Tensor] = []
for _model, _optim in ((ref_model, ref_optim), (model, optim)):
_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
losses.append(_model(inp).sum())
losses[-1].backward()
_optim.step()
self.assertEqual(losses[0], losses[1])
| TestFullyShardSharedParams |
python | streamlit__streamlit | lib/streamlit/runtime/state/session_state.py | {
"start": 2074,
"end": 2216
} | class ____:
"""A widget value that's serialized to a protobuf. Immutable."""
value: WidgetStateProto
@dataclass(frozen=True)
| Serialized |
python | ray-project__ray | python/ray/tune/examples/tf_mnist_example.py | {
"start": 795,
"end": 1212
} | class ____(Model):
def __init__(self, hiddens=128):
super(MyModel, self).__init__()
self.conv1 = Conv2D(32, 3, activation="relu")
self.flatten = Flatten()
self.d1 = Dense(hiddens, activation="relu")
self.d2 = Dense(10, activation="softmax")
def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
x = self.d1(x)
return self.d2(x)
| MyModel |
python | pytorch__pytorch | test/higher_order_ops/test_invoke_subgraph.py | {
"start": 1220,
"end": 3091
} | class ____(TestCase):
def test_simple(self):
def gn(x, y):
return torch.mul(x, y)
def fn(x, y):
return nested_compile_region(gn)(x, y)
x = torch.randn(8, requires_grad=True)
y = torch.randn(8, requires_grad=True)
ref = gn(x, y)
x_clone = x.detach().clone().requires_grad_(True)
y_clone = y.detach().clone().requires_grad_(True)
res = fn(x_clone, y_clone)
# Run backward
ref.sum().backward()
res.sum().backward()
self.assertEqual(ref, res)
self.assertEqual(x.grad, x_clone.grad)
self.assertEqual(y.grad, y_clone.grad)
def test_aot_function(self):
def gn(x, y):
return torch.mul(x, y)
def fn(x, y):
return nested_compile_region(gn)(x, y)
x = torch.randn(8, requires_grad=True)
y = torch.randn(8, requires_grad=True)
ref = gn(x, y)
x_clone = x.detach().clone().requires_grad_(True)
y_clone = y.detach().clone().requires_grad_(True)
aot_fn = aot_function(fn, nop)
res = aot_fn(x_clone, y_clone)
# Run backward
ref.sum().backward()
res.sum().backward()
self.assertEqual(ref, res)
self.assertEqual(x.grad, x_clone.grad)
self.assertEqual(y.grad, y_clone.grad)
def test_multiple(self):
@nested_compile_region
def cos(x):
return torch.cos(x)
@nested_compile_region
def sin(x):
return torch.sin(x)
def fn(x):
a = cos(x)
b = sin(a)
return cos(b)
x = torch.randn(8, requires_grad=True)
ref = fn(x)
aot_fn = aot_function(fn, nop)
res = aot_fn(x)
self.assertEqual(ref, res)
@skipIfTorchDynamo("Not a torch._dynamo test")
| TestInvokeSubgraph |
python | pypa__warehouse | warehouse/organizations/models.py | {
"start": 9752,
"end": 10885
} | class ____:
@declared_attr
def __table_args__(cls): # noqa: N805
return (
CheckConstraint(
"name ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'::text",
name="%s_valid_name" % cls.__tablename__,
),
CheckConstraint(
"link_url ~* '^https?://.*'::text",
name="%s_valid_link_url" % cls.__tablename__,
),
)
name: Mapped[str] = mapped_column(comment="The account name used in URLS")
display_name: Mapped[str] = mapped_column(comment="Display name used in UI")
orgtype: Mapped[enum.Enum] = mapped_column(
Enum(OrganizationType, values_callable=lambda x: [e.value for e in x]),
comment="What type of organization such as Community or Company",
)
link_url: Mapped[str] = mapped_column(
comment="External URL associated with the organization"
)
description: Mapped[str] = mapped_column(
comment="Description of the business or project the organization represents",
)
# TODO: Determine if this should also utilize SitemapMixin
| OrganizationMixin |
python | getsentry__sentry | src/sentry/profiles/task.py | {
"start": 41450,
"end": 44766
} | class ____(TypedDict):
project_id: int
use_case: str
@metrics.wraps("process_profile.track_outcome")
def _track_duration_outcome(
profile: Profile,
project: Project,
) -> None:
duration_ms = _calculate_profile_duration_ms(profile)
if duration_ms <= 0:
return
track_outcome(
org_id=project.organization_id,
project_id=project.id,
key_id=None,
outcome=Outcome.ACCEPTED,
timestamp=datetime.now(timezone.utc),
category=_get_duration_category(profile),
quantity=duration_ms,
)
def _get_duration_category(profile: Profile) -> DataCategory:
if profile["platform"] in UI_PROFILE_PLATFORMS:
return DataCategory.PROFILE_DURATION_UI
return DataCategory.PROFILE_DURATION
def _calculate_profile_duration_ms(profile: Profile) -> int:
version = profile.get("version")
if version:
if version == "1":
return _calculate_duration_for_sample_format_v1(profile)
elif version == "2":
return _calculate_duration_for_sample_format_v2(profile)
else:
platform = profile["platform"]
if platform == "android":
return _calculate_duration_for_android_format(profile)
return 0
def _calculate_duration_for_sample_format_v1(profile: Profile) -> int:
start_ns = int(profile["transaction"].get("relative_start_ns", 0))
end_ns = int(profile["transaction"].get("relative_end_ns", 0))
duration_ns = end_ns - start_ns
# try another method to determine the duration in case it's negative or 0.
if duration_ns <= 0:
samples = sorted(
profile["profile"]["samples"],
key=itemgetter("elapsed_since_start_ns"),
)
if len(samples) < 2:
return 0
first, last = samples[0], samples[-1]
first_ns = int(first["elapsed_since_start_ns"])
last_ns = int(last["elapsed_since_start_ns"])
duration_ns = last_ns - first_ns
duration_ms = int(duration_ns * 1e-6)
return min(duration_ms, 30000)
def _calculate_duration_for_sample_format_v2(profile: Profile) -> int:
timestamp_getter = itemgetter("timestamp")
samples = profile["profile"]["samples"]
min_timestamp = min(samples, key=timestamp_getter)
max_timestamp = max(samples, key=timestamp_getter)
duration_secs = max_timestamp["timestamp"] - min_timestamp["timestamp"]
duration_ms = int(duration_secs * 1e3)
if duration_ms > MAX_DURATION_SAMPLE_V2:
sentry_sdk.set_context(
"profile duration calculation",
{
"min_timestamp": min_timestamp,
"max_timestamp": max_timestamp,
"duration_ms": duration_ms,
},
)
sentry_sdk.capture_message("Calculated duration is above the limit")
return MAX_DURATION_SAMPLE_V2
return duration_ms
def _calculate_duration_for_android_format(profile: Profile) -> int:
return int(profile["duration_ns"] * 1e-6)
def _set_frames_platform(profile: Profile) -> None:
platform = profile["platform"]
frames = (
profile["profile"]["methods"] if platform == "android" else profile["profile"]["frames"]
)
for f in frames:
if "platform" not in f:
f["platform"] = platform
| _ProjectKeyKwargs |
python | openai__gym | tests/envs/test_compatibility.py | {
"start": 812,
"end": 3885
} | class ____(gym.Env):
"""Legacy env that implicitly implements the old API as a protocol."""
observation_space = Discrete(1)
action_space = Discrete(1)
metadata = {"render.modes": ["human", "rgb_array"]}
def __init__(self):
pass
def reset(self): # type: ignore
return 0 # type: ignore
def step(self, action: Any) -> Tuple[int, float, bool, Dict]:
return 0, 0.0, False, {}
def render(self, mode: Optional[str] = "human") -> Any:
if mode == "human":
return
elif mode == "rgb_array":
return np.zeros((1, 1, 3), dtype=np.uint8)
def close(self):
pass
def seed(self, seed: Optional[int] = None):
pass
def test_explicit():
old_env = LegacyEnvExplicit()
assert isinstance(old_env, LegacyEnv)
env = EnvCompatibility(old_env, render_mode="rgb_array")
assert env.observation_space == Discrete(1)
assert env.action_space == Discrete(1)
assert env.reset() == (0, {})
assert env.reset(seed=0, options={"some": "option"}) == (0, {})
assert env.step(0) == (0, 0, False, False, {})
assert env.render().shape == (1, 1, 3)
env.close()
def test_implicit():
old_env = LegacyEnvImplicit()
if sys.version_info >= (3, 7):
# We need to give up on typing in Python 3.6
assert isinstance(old_env, LegacyEnv)
env = EnvCompatibility(old_env, render_mode="rgb_array")
assert env.observation_space == Discrete(1)
assert env.action_space == Discrete(1)
assert env.reset() == (0, {})
assert env.reset(seed=0, options={"some": "option"}) == (0, {})
assert env.step(0) == (0, 0, False, False, {})
assert env.render().shape == (1, 1, 3)
env.close()
def test_make_compatibility_in_spec():
gym.register(
id="LegacyTestEnv-v0",
entry_point=LegacyEnvExplicit,
apply_api_compatibility=True,
)
env = gym.make("LegacyTestEnv-v0", render_mode="rgb_array")
assert env.observation_space == Discrete(1)
assert env.action_space == Discrete(1)
assert env.reset() == (0, {})
assert env.reset(seed=0, options={"some": "option"}) == (0, {})
assert env.step(0) == (0, 0, False, False, {})
img = env.render()
assert isinstance(img, np.ndarray)
assert img.shape == (1, 1, 3) # type: ignore
env.close()
del gym.envs.registration.registry["LegacyTestEnv-v0"]
def test_make_compatibility_in_make():
gym.register(id="LegacyTestEnv-v0", entry_point=LegacyEnvExplicit)
env = gym.make(
"LegacyTestEnv-v0", apply_api_compatibility=True, render_mode="rgb_array"
)
assert env.observation_space == Discrete(1)
assert env.action_space == Discrete(1)
assert env.reset() == (0, {})
assert env.reset(seed=0, options={"some": "option"}) == (0, {})
assert env.step(0) == (0, 0, False, False, {})
img = env.render()
assert isinstance(img, np.ndarray)
assert img.shape == (1, 1, 3) # type: ignore
env.close()
del gym.envs.registration.registry["LegacyTestEnv-v0"]
| LegacyEnvImplicit |
python | bokeh__bokeh | src/bokeh/models/glyphs.py | {
"start": 41827,
"end": 45722
} | class ____(Marker):
''' Render scatter markers selected from a predefined list of designs.
Use ``Scatter`` to draw any of Bokeh's built-in marker types:
``asterisk``, ``circle``, ``circle_cross``, ``circle_dot``, ``circle_x``,
``circle_y``, ``cross``, ``dash``, ``diamond``, ``diamond_cross``,
``diamond_dot``, ``dot``, ``hex``, ``hex_dot``, ``inverted_triangle``,
``plus``, ``square``, ``square_cross``, ``square_dot``, ``square_pin``,
``square_x``, ``star``, ``star_dot``, ``triangle``, ``triangle_dot``,
``triangle_pin``, ``x``, or ``y``. This collection is available in
:class:`~bokeh.core.enums.MarkerType`.
Bokeh's built-in markers consist of a set of base markers, most of which can
be combined with different kinds of additional visual features:
.. bokeh-plot:: __REPO__/examples/basic/scatters/markertypes.py
:source-position: none
You can select marker types in two ways:
* To draw the **same marker for all values**, use the ``marker`` attribute
to specify the name of a specific marker. For example:
.. code-block:: python
glyph = Scatter(x="x", y="y", size="sizes", marker="square")
plot.add_glyph(source, glyph)
This will render square markers for all points.
* Alternatively, to use **marker types specified in a data source column**,
assign the column name to the ``marker`` attribute. For example:
.. code-block:: python
# source.data['markers'] = ["circle", "square", "circle", ... ]
glyph = Scatter(x="x", y="y", size="sizes", marker="markers")
plot.add_glyph(source, glyph)
It is also possible to define a custom marker. See :attr:`bokeh.models.Scatter.defs`.
.. note::
When you draw ``circle`` markers with ``Scatter``, you can only assign a
size in |screen units| (by passing a number of pixels to the ``size``
property). In case you want to define the radius of circles in
|data units|, use the :class:`~bokeh.models.glyphs.Circle` glyph instead
of the ``Scatter`` glyph.
.. note::
``Scatter`` markers with multiple marker types may be drawn in a
different order when using the WebGL output backend. This is an explicit
trade-off made in the interests of performance.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
__example__ = "examples/reference/models/Scatter.py"
_args = ('x', 'y', 'size', 'angle', 'marker')
marker = MarkerSpec(default="circle", help="""
Which marker to render. This can be the name of any built in marker,
e.g. "circle", or a reference to a data column containing such names.
""")
defs = Dict(Regex("^@.*$"), Instance(CustomJS))(default={}, help="""
A collection of custom marker definitions.
There are two ways to define a custom marker:
* construct and return an instance of ``Path2D``:
.. code:: python
CustomJS(code='''
export default (args, obj, {ctx, i, r, visuals}) => {
const path = new Path2D()
path.arc(0, 0, r, 0, 2*Math.PI, false)
return path
}
''')
* paint directly to an instance of ``Context2d``:
.. code:: python
CustomJS(code='''
export default (args, obj, {ctx, i, r, visuals}) => {
ctx.arc(0, 0, r, 0, 2*Math.PI, false)
visuals.fill.apply(ctx, i)
visuals.hatch.apply(ctx, i)
visuals.line.apply(ctx, i)
}
''')
.. note::
Custom marker's names must start with `"@"` prefix, e.g. `"@my_marker"`.
.. note::
Custom markers are only supported with ``"canvas"`` and ``"svg"`` backends.
""")
| Scatter |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_line05.py | {
"start": 315,
"end": 1367
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_line05.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line", "subtype": "stacked"})
chart.axis_ids = [108321408, 108634112]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |