language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kubernetes-client__python | kubernetes/client/models/v2_external_metric_source.py | {
"start": 383,
"end": 4435
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'metric': 'V2MetricIdentifier',
'target': 'V2MetricTarget'
}
attribute_map = {
'metric': 'metric',
'target': 'target'
}
def __init__(self, metric=None, target=None, local_vars_configuration=None): # noqa: E501
"""V2ExternalMetricSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._metric = None
self._target = None
self.discriminator = None
self.metric = metric
self.target = target
@property
def metric(self):
"""Gets the metric of this V2ExternalMetricSource. # noqa: E501
:return: The metric of this V2ExternalMetricSource. # noqa: E501
:rtype: V2MetricIdentifier
"""
return self._metric
@metric.setter
def metric(self, metric):
"""Sets the metric of this V2ExternalMetricSource.
:param metric: The metric of this V2ExternalMetricSource. # noqa: E501
:type: V2MetricIdentifier
"""
if self.local_vars_configuration.client_side_validation and metric is None: # noqa: E501
raise ValueError("Invalid value for `metric`, must not be `None`") # noqa: E501
self._metric = metric
@property
def target(self):
"""Gets the target of this V2ExternalMetricSource. # noqa: E501
:return: The target of this V2ExternalMetricSource. # noqa: E501
:rtype: V2MetricTarget
"""
return self._target
@target.setter
def target(self, target):
"""Sets the target of this V2ExternalMetricSource.
:param target: The target of this V2ExternalMetricSource. # noqa: E501
:type: V2MetricTarget
"""
if self.local_vars_configuration.client_side_validation and target is None: # noqa: E501
raise ValueError("Invalid value for `target`, must not be `None`") # noqa: E501
self._target = target
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V2ExternalMetricSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V2ExternalMetricSource):
return True
return self.to_dict() != other.to_dict()
| V2ExternalMetricSource |
python | ansible__ansible | lib/ansible/plugins/inventory/__init__.py | {
"start": 14140,
"end": 21731
} | class ____(_BaseInventoryPlugin):
def _compose(self, template, variables, disable_lookups=...):
""" helper method for plugins to compose variables for Ansible based on jinja2 expression and inventory vars"""
if disable_lookups is not ...:
self.display.deprecated("The disable_lookups arg has no effect.", version="2.23")
try:
use_extra = self.get_option('use_extra_vars')
except Exception:
use_extra = False
if use_extra:
self.templar.available_variables = combine_vars(variables, self._vars)
else:
self.templar.available_variables = variables
return self.templar.evaluate_expression(template)
def _set_composite_vars(self, compose, variables, host, strict=False):
""" loops over compose entries to create vars for hosts """
if compose and isinstance(compose, dict):
for varname in compose:
try:
composite = self._compose(compose[varname], variables)
except Exception as e:
if strict:
raise AnsibleError("Could not set %s for host %s: %s" % (varname, host, to_native(e)))
continue
self.inventory.set_variable(host, varname, composite)
def _add_host_to_composed_groups(self, groups, variables, host, strict=False, fetch_hostvars=True):
""" helper to create complex groups for plugins based on jinja2 conditionals, hosts that meet the conditional are added to group"""
# process each 'group entry'
if groups and isinstance(groups, dict):
if fetch_hostvars:
variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
self.templar.available_variables = variables
for group_name in groups:
conditional = groups[group_name]
group_name = self._sanitize_group_name(group_name)
try:
result = self.templar.evaluate_conditional(conditional)
except Exception as e:
if strict:
raise AnsibleParserError("Could not add host %s to group %s: %s" % (host, group_name, to_native(e)))
continue
if result:
# ensure group exists, use sanitized name
group_name = self.inventory.add_group(group_name)
# add host to group
self.inventory.add_child(group_name, host)
def _add_host_to_keyed_groups(self, keys, variables, host, strict=False, fetch_hostvars=True):
""" helper to create groups for plugins based on variable values and add the corresponding hosts to it"""
should_default_value = (None, '')
if keys and isinstance(keys, list):
for keyed in keys:
if keyed and isinstance(keyed, dict):
if fetch_hostvars:
variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
try:
key = self._compose(keyed.get('key'), variables)
except Exception as e:
if strict:
raise AnsibleParserError("Could not generate group for host %s from %s entry: %s" % (host, keyed.get('key'), to_native(e)))
continue
default_value_name = keyed.get('default_value', None)
trailing_separator = keyed.get('trailing_separator')
if trailing_separator is not None and default_value_name is not None:
raise AnsibleParserError("parameters are mutually exclusive for keyed groups: default_value|trailing_separator")
use_default = key in should_default_value and default_value_name is not None
if key or use_default:
prefix = keyed.get('prefix', '')
sep = keyed.get('separator', '_')
raw_parent_name = keyed.get('parent_group', None)
try:
raw_parent_name = self.templar.template(raw_parent_name)
except AnsibleValueOmittedError:
raw_parent_name = None
except Exception as ex:
if strict:
raise AnsibleParserError(f'Could not generate parent group {raw_parent_name!r} for group {key!r}: {ex}') from ex
continue
new_raw_group_names = []
if use_default:
new_raw_group_names.append(default_value_name)
elif isinstance(key, str):
new_raw_group_names.append(key)
elif isinstance(key, list):
for name in key:
# if list item is empty, 'default_value' will be used as group name
if name in should_default_value and default_value_name is not None:
new_raw_group_names.append(default_value_name)
else:
new_raw_group_names.append(name)
elif isinstance(key, Mapping):
for (gname, gval) in key.items():
bare_name = '%s%s%s' % (gname, sep, gval)
if gval in should_default_value:
# key's value is empty
if default_value_name is not None:
bare_name = '%s%s%s' % (gname, sep, default_value_name)
elif trailing_separator is False:
bare_name = gname
new_raw_group_names.append(bare_name)
else:
raise AnsibleParserError("Invalid group name format, expected a string or a list of them or dictionary, got: %s" % type(key))
for bare_name in new_raw_group_names:
if prefix == '' and self.get_option('leading_separator') is False:
sep = ''
gname = self._sanitize_group_name('%s%s%s' % (prefix, sep, bare_name))
result_gname = self.inventory.add_group(gname)
self.inventory.add_host(host, result_gname)
if raw_parent_name:
parent_name = self._sanitize_group_name(raw_parent_name)
self.inventory.add_group(parent_name)
self.inventory.add_child(parent_name, result_gname)
else:
# exclude case of empty list and dictionary, because these are valid constructions
# simply no groups need to be constructed, but are still falsy
if strict and key not in ([], {}):
raise AnsibleParserError("No key or key resulted empty for %s in host %s, invalid entry" % (keyed.get('key'), host))
else:
raise AnsibleParserError("Invalid keyed group entry, it must be a dictionary: %s " % keyed)
| Constructable |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 107996,
"end": 108157
} | class ____(_pkg_config_info):
section = 'gdk_pixbuf_2'
append_config_exe = 'gdk-pixbuf-2.0'
version_macro_name = 'GDK_PIXBUF_VERSION'
| gdk_pixbuf_2_info |
python | huggingface__transformers | src/transformers/models/musicgen/configuration_musicgen.py | {
"start": 846,
"end": 6410
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of an [`MusicgenDecoder`]. It is used to instantiate a
MusicGen decoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MusicGen
[facebook/musicgen-small](https://huggingface.co/facebook/musicgen-small) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 2048):
Vocabulary size of the MusicgenDecoder model. Defines the number of different tokens that can be
represented by the `inputs_ids` passed when calling [`MusicgenDecoder`].
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of decoder layers.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer block.
ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Typically, set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_factor (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
scale_embedding (`bool`, *optional*, defaults to `False`):
Scale embeddings by diving by sqrt(hidden_size).
use_cache (`bool`, *optional*, defaults to `True`):
Whether the model should return the last key/values attentions (not used by all models)
num_codebooks (`int`, *optional*, defaults to 4):
The number of parallel codebooks forwarded to the model.
tie_word_embeddings(`bool`, *optional*, defaults to `False`):
Whether input and output word embeddings should be tied.
audio_channels (`int`, *optional*, defaults to 1
Number of channels in the audio data. Either 1 for mono or 2 for stereo. Stereo models generate a separate
audio stream for the left/right output channels. Mono models generate a single audio stream output.
"""
model_type = "musicgen_decoder"
base_config_key = "decoder_config"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=2048,
max_position_embeddings=2048,
num_hidden_layers=24,
ffn_dim=4096,
num_attention_heads=16,
layerdrop=0.0,
use_cache=True,
activation_function="gelu",
hidden_size=1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
initializer_factor=0.02,
scale_embedding=False,
num_codebooks=4,
audio_channels=1,
pad_token_id=2048,
bos_token_id=2048,
eos_token_id=None,
tie_word_embeddings=False,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.ffn_dim = ffn_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.initializer_factor = initializer_factor
self.layerdrop = layerdrop
self.use_cache = use_cache
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.num_codebooks = num_codebooks
if audio_channels not in [1, 2]:
raise ValueError(f"Expected 1 (mono) or 2 (stereo) audio channels, got {audio_channels} channels.")
self.audio_channels = audio_channels
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
| MusicgenDecoderConfig |
python | huggingface__transformers | tests/models/splinter/test_modeling_splinter.py | {
"start": 14144,
"end": 20798
} | class ____(unittest.TestCase):
@slow
def test_splinter_question_answering(self):
model = SplinterForQuestionAnswering.from_pretrained("tau/splinter-base-qass")
# Input: "[CLS] Brad was born in [QUESTION] . He returned to the United Kingdom later . [SEP]"
# Output should be the span "the United Kingdom"
input_ids = torch.tensor(
[[101, 7796, 1108, 1255, 1107, 104, 119, 1124, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]]
)
output = model(input_ids)
expected_shape = torch.Size((1, 16))
self.assertEqual(output.start_logits.shape, expected_shape)
self.assertEqual(output.end_logits.shape, expected_shape)
self.assertEqual(torch.argmax(output.start_logits), 10)
self.assertEqual(torch.argmax(output.end_logits), 12)
@slow
def test_splinter_pretraining(self):
model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
# Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]"
# Output should be the spans "Brad" and "the United Kingdom"
input_ids = torch.tensor(
[[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]]
)
question_positions = torch.tensor([[1, 5]], dtype=torch.long)
output = model(input_ids, question_positions=question_positions)
expected_shape = torch.Size((1, 2, 16))
self.assertEqual(output.start_logits.shape, expected_shape)
self.assertEqual(output.end_logits.shape, expected_shape)
self.assertEqual(torch.argmax(output.start_logits[0, 0]), 7)
self.assertEqual(torch.argmax(output.end_logits[0, 0]), 7)
self.assertEqual(torch.argmax(output.start_logits[0, 1]), 10)
self.assertEqual(torch.argmax(output.end_logits[0, 1]), 12)
@slow
def test_splinter_pretraining_loss_requires_question_positions(self):
model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
# Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]"
# Output should be the spans "Brad" and "the United Kingdom"
input_ids = torch.tensor(
[[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]]
)
start_positions = torch.tensor([[7, 10]], dtype=torch.long)
end_positions = torch.tensor([7, 12], dtype=torch.long)
with self.assertRaises(TypeError):
model(
input_ids,
start_positions=start_positions,
end_positions=end_positions,
)
@slow
def test_splinter_pretraining_loss(self):
model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
# Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]"
# Output should be the spans "Brad" and "the United Kingdom"
input_ids = torch.tensor(
[
[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102],
[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102],
]
)
start_positions = torch.tensor([[7, 10], [7, 10]], dtype=torch.long)
end_positions = torch.tensor([[7, 12], [7, 12]], dtype=torch.long)
question_positions = torch.tensor([[1, 5], [1, 5]], dtype=torch.long)
output = model(
input_ids,
start_positions=start_positions,
end_positions=end_positions,
question_positions=question_positions,
)
self.assertAlmostEqual(output.loss.item(), 0.0024, 4)
@slow
def test_splinter_pretraining_loss_with_padding(self):
model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
# Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]"
# Output should be the spans "Brad" and "the United Kingdom"
input_ids = torch.tensor(
[
[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102],
]
)
start_positions = torch.tensor([[7, 10]], dtype=torch.long)
end_positions = torch.tensor([7, 12], dtype=torch.long)
question_positions = torch.tensor([[1, 5]], dtype=torch.long)
start_positions_with_padding = torch.tensor([[7, 10, 0]], dtype=torch.long)
end_positions_with_padding = torch.tensor([7, 12, 0], dtype=torch.long)
question_positions_with_padding = torch.tensor([[1, 5, 0]], dtype=torch.long)
output = model(
input_ids,
start_positions=start_positions,
end_positions=end_positions,
question_positions=question_positions,
)
output_with_padding = model(
input_ids,
start_positions=start_positions_with_padding,
end_positions=end_positions_with_padding,
question_positions=question_positions_with_padding,
)
self.assertAlmostEqual(output.loss.item(), output_with_padding.loss.item(), 4)
# Note that the original code uses 0 to denote padded question tokens
# and their start and end positions. As the pad_token_id of the model's
# config is used for the losse's ignore_index in SplinterForPreTraining,
# we add this test to ensure anybody making changes to the default
# value of the config, will be aware of the implication.
self.assertEqual(model.config.pad_token_id, 0)
@slow
def test_splinter_pretraining_prepare_question_positions(self):
model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
input_ids = torch.tensor(
[
[101, 104, 1, 2, 104, 3, 4, 102],
[101, 1, 104, 2, 104, 3, 104, 102],
[101, 1, 2, 104, 104, 3, 4, 102],
[101, 1, 2, 3, 4, 5, 104, 102],
]
)
question_positions = torch.tensor([[1, 4, 0], [2, 4, 6], [3, 4, 0], [6, 0, 0]], dtype=torch.long)
output_without_positions = model(input_ids)
output_with_positions = model(input_ids, question_positions=question_positions)
self.assertTrue((output_without_positions.start_logits == output_with_positions.start_logits).all())
self.assertTrue((output_without_positions.end_logits == output_with_positions.end_logits).all())
| SplinterModelIntegrationTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1313253,
"end": 1324564
} | class ____(sgqlc.types.Type, Closable, Updatable, Node):
"""New projects that manage issues, pull requests and drafts using
tables and boards.
"""
__schema__ = github_schema
__field_names__ = (
"created_at",
"creator",
"database_id",
"field",
"fields",
"items",
"number",
"owner",
"public",
"readme",
"repositories",
"resource_path",
"short_description",
"teams",
"template",
"title",
"updated_at",
"url",
"view",
"views",
"workflow",
"workflows",
)
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
creator = sgqlc.types.Field(Actor, graphql_name="creator")
"""The actor who originally created the project."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
field = sgqlc.types.Field(
"ProjectV2FieldConfiguration",
graphql_name="field",
args=sgqlc.types.ArgDict((("name", sgqlc.types.Arg(sgqlc.types.non_null(String), graphql_name="name", default=None)),)),
)
"""A field of the project
Arguments:
* `name` (`String!`): The name of the field
"""
fields = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2FieldConfigurationConnection),
graphql_name="fields",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(ProjectV2FieldOrder, graphql_name="orderBy", default={"field": "POSITION", "direction": "ASC"}),
),
)
),
)
"""List of fields and their constraints in the project
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`ProjectV2FieldOrder`): Ordering options for project
v2 fields returned from the connection (default: `{field:
POSITION, direction: ASC}`)
"""
items = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2ItemConnection),
graphql_name="items",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(ProjectV2ItemOrder, graphql_name="orderBy", default={"field": "POSITION", "direction": "ASC"}),
),
)
),
)
"""List of items in the project
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`ProjectV2ItemOrder`): Ordering options for project
v2 items returned from the connection (default: `{field:
POSITION, direction: ASC}`)
"""
number = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="number")
"""The project's number."""
owner = sgqlc.types.Field(sgqlc.types.non_null(ProjectV2Owner), graphql_name="owner")
"""The project's owner. Currently limited to organizations and users."""
public = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="public")
"""Returns true if the project is public."""
readme = sgqlc.types.Field(String, graphql_name="readme")
"""The project's readme."""
repositories = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryConnection),
graphql_name="repositories",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(RepositoryOrder, graphql_name="orderBy", default={"field": "CREATED_AT", "direction": "DESC"}),
),
)
),
)
"""The repositories the project is linked to.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`RepositoryOrder`): Ordering options for
repositories returned from the connection (default: `{field:
CREATED_AT, direction: DESC}`)
"""
resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="resourcePath")
"""The HTTP path for this project"""
short_description = sgqlc.types.Field(String, graphql_name="shortDescription")
"""The project's short description."""
teams = sgqlc.types.Field(
sgqlc.types.non_null(TeamConnection),
graphql_name="teams",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("order_by", sgqlc.types.Arg(TeamOrder, graphql_name="orderBy", default={"field": "NAME", "direction": "ASC"})),
)
),
)
"""The teams the project is linked to.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`TeamOrder`): Ordering options for teams returned
from this connection. (default: `{field: NAME, direction: ASC}`)
"""
template = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="template")
"""Returns true if this project is a template."""
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
"""The project's name."""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
"""The HTTP URL for this project"""
view = sgqlc.types.Field(
"ProjectV2View",
graphql_name="view",
args=sgqlc.types.ArgDict((("number", sgqlc.types.Arg(sgqlc.types.non_null(Int), graphql_name="number", default=None)),)),
)
"""A view of the project
Arguments:
* `number` (`Int!`): The number of a view belonging to the project
"""
views = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2ViewConnection),
graphql_name="views",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(ProjectV2ViewOrder, graphql_name="orderBy", default={"field": "POSITION", "direction": "ASC"}),
),
)
),
)
"""List of views in the project
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`ProjectV2ViewOrder`): Ordering options for project
v2 views returned from the connection (default: `{field:
POSITION, direction: ASC}`)
"""
workflow = sgqlc.types.Field(
"ProjectV2Workflow",
graphql_name="workflow",
args=sgqlc.types.ArgDict((("number", sgqlc.types.Arg(sgqlc.types.non_null(Int), graphql_name="number", default=None)),)),
)
"""A workflow of the project
Arguments:
* `number` (`Int!`): The number of a workflow belonging to the
project
"""
workflows = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2WorkflowConnection),
graphql_name="workflows",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(ProjectV2WorkflowOrder, graphql_name="orderBy", default={"field": "NAME", "direction": "ASC"}),
),
)
),
)
"""List of the workflows in the project
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`ProjectV2WorkflowOrder`): Ordering options for
project v2 workflows returned from the connection (default:
`{field: NAME, direction: ASC}`)
"""
| ProjectV2 |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/constant_op_test.py | {
"start": 11586,
"end": 15340
} | class ____(test.TestCase):
def testAsTensorForTensorInput(self):
with ops.Graph().as_default():
t = constant_op.constant(10.0)
x = ops.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
with ops.Graph().as_default():
x = ops.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, tensor.Tensor))
def testAsTensorForShapeInput(self):
with self.cached_session():
x = ops.convert_to_tensor(tensor_shape.TensorShape([]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31-1, 2, 3]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([2**31 - 1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31-1, 2, 3]),
dtype=dtypes_lib.int32)
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([2**31 - 1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]))
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([2**31, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]),
dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([2**31, 2, 3], self.evaluate(x))
with self.assertRaisesRegex(ValueError,
"a dimension is too large.*int64"):
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]),
dtype=dtypes_lib.int32)
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([1, 2, 3], self.evaluate(x))
x = array_ops.reshape(
array_ops.zeros([6]), tensor_shape.TensorShape([2, 3]))
self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], self.evaluate(x))
with self.assertRaisesRegex(ValueError, "partially known"):
ops.convert_to_tensor(tensor_shape.TensorShape(None))
with self.assertRaisesRegex(ValueError, "partially known"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64]))
with self.assertRaises(TypeError):
ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.float32)
@test_util.run_deprecated_v1
def testAsTensorForDimensionInput(self):
with self.cached_session():
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3])[1])
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual(2, self.evaluate(x))
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual(2, self.evaluate(x))
shape = tensor_shape.TensorShape(None)
if shape._v2_behavior:
with self.assertRaisesRegex(ValueError, "None values not supported"):
ops.convert_to_tensor(shape[1])
with self.assertRaisesRegex(ValueError, "None values not supported"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1])
else:
with self.assertRaisesRegex(ValueError, "unknown Dimension"):
ops.convert_to_tensor(shape[1])
with self.assertRaisesRegex(ValueError, "unknown Dimension"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1])
| AsTensorTest |
python | catalyst-team__catalyst | catalyst/contrib/losses/regression.py | {
"start": 2683,
"end": 3867
} | class ____(nn.Module):
"""QuantileRegressionLoss"""
def __init__(self, num_atoms: int = 51, clip_delta: float = 1.0):
"""Init."""
super().__init__()
self.num_atoms = num_atoms
tau_min = 1 / (2 * self.num_atoms)
tau_max = 1 - tau_min
self.tau = torch.linspace(start=tau_min, end=tau_max, steps=self.num_atoms)
self.criterion = HuberLossV0(clip_delta=clip_delta)
def forward(self, outputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
"""Compute the loss.
Args:
outputs (torch.Tensor): predicted atoms, shape: [bs; num_atoms]
targets (torch.Tensor): target atoms, shape: [bs; num_atoms]
Returns:
torch.Tensor: computed loss
"""
atoms_diff = targets[:, None, :] - outputs[:, :, None]
delta_atoms_diff = atoms_diff.lt(0).to(torch.float32).detach()
huber_weights = (
torch.abs(self.tau[None, :, None] - delta_atoms_diff) / self.num_atoms
)
loss = self.criterion(
outputs[:, :, None], targets[:, None, :], huber_weights
).mean()
return loss
| QuantileRegressionLoss |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 91089,
"end": 93533
} | class ____(system_info):
section = 'accelerate'
_lib_names = ['accelerate', 'veclib']
notfounderror = BlasNotFoundError
def calc_info(self):
# Make possible to enable/disable from config file/env var
libraries = os.environ.get('ACCELERATE')
if libraries:
libraries = [libraries]
else:
libraries = self.get_libs('libraries', self._lib_names)
libraries = [lib.strip().lower() for lib in libraries]
if (sys.platform == 'darwin' and
not os.getenv('_PYTHON_HOST_PLATFORM', None)):
# Use the system BLAS from Accelerate or vecLib under OSX
args = []
link_args = []
if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
'x86_64' in get_platform() or \
'i386' in platform.platform():
intel = 1
else:
intel = 0
if (os.path.exists('/System/Library/Frameworks'
'/Accelerate.framework/') and
'accelerate' in libraries):
if intel:
args.extend(['-msse3'])
args.extend([
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
elif (os.path.exists('/System/Library/Frameworks'
'/vecLib.framework/') and
'veclib' in libraries):
if intel:
args.extend(['-msse3'])
args.extend([
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
if args:
macros = [
('NO_ATLAS_INFO', 3),
('HAVE_CBLAS', None),
('ACCELERATE_NEW_LAPACK', None),
]
if(os.getenv('NPY_USE_BLAS_ILP64', None)):
print('Setting HAVE_BLAS_ILP64')
macros += [
('HAVE_BLAS_ILP64', None),
('ACCELERATE_LAPACK_ILP64', None),
]
self.set_info(extra_compile_args=args,
extra_link_args=link_args,
define_macros=macros)
return
| accelerate_info |
python | astropy__astropy | setup.py | {
"start": 753,
"end": 938
} | class ____(install):
"""Post-installation command for installation mode."""
def run(self):
super().run()
install_stubs(self.build_lib, self.root)
| InstallWithStubs |
python | tensorflow__tensorflow | tensorflow/lite/tools/optimize/python/modify_model_interface_lib_test.py | {
"start": 1855,
"end": 6663
} | class ____(test_util.TensorFlowTestCase):
def testInt8Interface(self):
# 1. SETUP
# Define the temporary directory and files
temp_dir = self.get_temp_dir()
initial_file = os.path.join(temp_dir, 'initial_model.tflite')
final_file = os.path.join(temp_dir, 'final_model.tflite')
# Define initial model
initial_model = build_tflite_model_with_full_integer_quantization()
with open(initial_file, 'wb') as model_file:
model_file.write(initial_model)
# 2. INVOKE
# Invoke the modify_model_interface function
modify_model_interface_lib.modify_model_interface(initial_file, final_file,
tf.int8, tf.int8)
# 3. VALIDATE
# Load TFLite model and allocate tensors.
initial_interpreter = lite.Interpreter(model_path=initial_file)
initial_interpreter.allocate_tensors()
final_interpreter = lite.Interpreter(model_path=final_file)
final_interpreter.allocate_tensors()
# Get input and output types.
initial_input_dtype = initial_interpreter.get_input_details()[0]['dtype']
initial_output_dtype = initial_interpreter.get_output_details()[0]['dtype']
final_input_dtype = final_interpreter.get_input_details()[0]['dtype']
final_output_dtype = final_interpreter.get_output_details()[0]['dtype']
# Validate the model interfaces
self.assertEqual(initial_input_dtype, np.float32)
self.assertEqual(initial_output_dtype, np.float32)
self.assertEqual(final_input_dtype, np.int8)
self.assertEqual(final_output_dtype, np.int8)
def testInt16Interface(self):
# 1. SETUP
# Define the temporary directory and files
temp_dir = self.get_temp_dir()
initial_file = os.path.join(temp_dir, 'initial_model.tflite')
final_file = os.path.join(temp_dir, 'final_model.tflite')
# Define initial model
initial_model = build_tflite_model_with_full_integer_quantization(
supported_ops=lite.OpsSet
.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8)
with open(initial_file, 'wb') as model_file:
model_file.write(initial_model)
# 2. INVOKE
# Invoke the modify_model_interface function
modify_model_interface_lib.modify_model_interface(initial_file, final_file,
tf.int16, tf.int16)
# 3. VALIDATE
# Load TFLite model and allocate tensors.
initial_interpreter = lite.Interpreter(model_path=initial_file)
initial_interpreter.allocate_tensors()
final_interpreter = lite.Interpreter(model_path=final_file)
final_interpreter.allocate_tensors()
# Get input and output types.
initial_input_dtype = initial_interpreter.get_input_details()[0]['dtype']
initial_output_dtype = initial_interpreter.get_output_details()[0]['dtype']
final_input_dtype = final_interpreter.get_input_details()[0]['dtype']
final_output_dtype = final_interpreter.get_output_details()[0]['dtype']
# Validate the model interfaces
self.assertEqual(initial_input_dtype, np.float32)
self.assertEqual(initial_output_dtype, np.float32)
self.assertEqual(final_input_dtype, np.int16)
self.assertEqual(final_output_dtype, np.int16)
def testUInt8Interface(self):
# 1. SETUP
# Define the temporary directory and files
temp_dir = self.get_temp_dir()
initial_file = os.path.join(temp_dir, 'initial_model.tflite')
final_file = os.path.join(temp_dir, 'final_model.tflite')
# Define initial model
initial_model = build_tflite_model_with_full_integer_quantization()
with open(initial_file, 'wb') as model_file:
model_file.write(initial_model)
# 2. INVOKE
# Invoke the modify_model_interface function
modify_model_interface_lib.modify_model_interface(initial_file, final_file,
tf.uint8, tf.uint8)
# 3. VALIDATE
# Load TFLite model and allocate tensors.
initial_interpreter = lite.Interpreter(model_path=initial_file)
initial_interpreter.allocate_tensors()
final_interpreter = lite.Interpreter(model_path=final_file)
final_interpreter.allocate_tensors()
# Get input and output types.
initial_input_dtype = initial_interpreter.get_input_details()[0]['dtype']
initial_output_dtype = initial_interpreter.get_output_details()[0]['dtype']
final_input_dtype = final_interpreter.get_input_details()[0]['dtype']
final_output_dtype = final_interpreter.get_output_details()[0]['dtype']
# Validate the model interfaces
self.assertEqual(initial_input_dtype, np.float32)
self.assertEqual(initial_output_dtype, np.float32)
self.assertEqual(final_input_dtype, np.uint8)
self.assertEqual(final_output_dtype, np.uint8)
if __name__ == '__main__':
test.main()
| ModifyModelInterfaceTest |
python | getsentry__sentry | src/sentry/api/serializers/models/savedsearch.py | {
"start": 131,
"end": 568
} | class ____(Serializer):
def serialize(self, obj, attrs, user, **kwargs):
return {
"id": str(obj.id),
"type": obj.type,
"name": obj.name,
"query": obj.query,
"sort": obj.sort,
"visibility": obj.visibility,
"dateCreated": obj.date_added,
"isGlobal": obj.is_global,
"isPinned": obj.is_pinned,
}
| SavedSearchSerializer |
python | huggingface__transformers | src/transformers/models/cpmant/configuration_cpmant.py | {
"start": 804,
"end": 5145
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`CpmAntModel`]. It is used to instantiate an
CPMAnt model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the CPMAnt
[openbmb/cpm-ant-10b](https://huggingface.co/openbmb/cpm-ant-10b) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30720):
Vocabulary size of the CPMAnt model. Defines the number of different tokens that can be represented by the
`input` passed when calling [`CpmAntModel`].
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the encoder layers.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads in the Transformer encoder.
dim_head (`int`, *optional*, defaults to 128):
Dimension of attention heads for each attention layer in the Transformer encoder.
dim_ff (`int`, *optional*, defaults to 10240):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 48):
Number of layers of the Transformer encoder.
dropout_p (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder.
position_bias_num_buckets (`int`, *optional*, defaults to 512):
The number of position_bias buckets.
position_bias_max_distance (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
init_std (`float`, *optional*, defaults to 1.0):
Initialize parameters with std = init_std.
prompt_types (`int`, *optional*, defaults to 32):
The type of prompt.
prompt_length (`int`, *optional*, defaults to 32):
The length of prompt.
segment_types (`int`, *optional*, defaults to 32):
The type of segment.
use_cache (`bool`, *optional*, defaults to `True`):
Whether to use cache.
Example:
```python
>>> from transformers import CpmAntModel, CpmAntConfig
>>> # Initializing a CPMAnt cpm-ant-10b style configuration
>>> configuration = CpmAntConfig()
>>> # Initializing a model from the cpm-ant-10b style configuration
>>> model = CpmAntModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "cpmant"
def __init__(
self,
vocab_size: int = 30720,
hidden_size: int = 4096,
num_attention_heads: int = 32,
dim_head: int = 128,
dim_ff: int = 10240,
num_hidden_layers: int = 48,
dropout_p: int = 0.0,
position_bias_num_buckets: int = 512,
position_bias_max_distance: int = 2048,
eps: int = 1e-6,
init_std: float = 1.0,
prompt_types: int = 32,
prompt_length: int = 32,
segment_types: int = 32,
use_cache: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.prompt_types = prompt_types
self.prompt_length = prompt_length
self.segment_types = segment_types
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.dim_head = dim_head
self.dim_ff = dim_ff
self.num_hidden_layers = num_hidden_layers
self.position_bias_num_buckets = position_bias_num_buckets
self.position_bias_max_distance = position_bias_max_distance
self.dropout_p = dropout_p
self.eps = eps
self.use_cache = use_cache
self.vocab_size = vocab_size
self.init_std = init_std
__all__ = ["CpmAntConfig"]
| CpmAntConfig |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 41637,
"end": 42468
} | class ____(Blockwise):
"""
This doesn't really do anything, but we anticipate that future
optimizations, like `len` will care about which operations preserve length
"""
_is_length_preserving = True
def _simplify_up(self, parent, dependents):
if isinstance(parent, Filter) and self._filter_passthrough_available(
parent, dependents
):
predicate = None
if self.frame.ndim == 1 and self.ndim == 2:
name = self.frame._meta.name
# Avoid Projection since we are already a Series
subs = Projection(self, name)
predicate = parent.predicate.substitute(subs, self.frame)
return self._filter_simplification(parent, predicate)
return super()._simplify_up(parent, dependents)
| Elemwise |
python | getsentry__sentry | src/sentry/analytics/events/missing_members_nudge.py | {
"start": 83,
"end": 204
} | class ____(analytics.Event):
organization_id: int
analytics.register(MissingMembersNudgeEvent)
| MissingMembersNudgeEvent |
python | huggingface__transformers | src/transformers/models/vjepa2/modeling_vjepa2.py | {
"start": 31886,
"end": 33762
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: VJEPA2Config):
super().__init__()
self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.self_attn = VJEPA2PoolerSelfAttention(config)
self.layer_norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.mlp = VJEPA2MLP(config, hidden_size=config.hidden_size)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor, ...]:
"""
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(batch, seq_len, embed_dim)`.
attention_mask (`torch.FloatTensor`):
Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
| VJEPA2PoolerSelfAttentionLayer |
python | eventlet__eventlet | eventlet/green/http/cookiejar.py | {
"start": 30458,
"end": 31542
} | class ____:
"""Defines which cookies get accepted from and returned to server.
May also modify cookies, though this is probably a bad idea.
The subclass DefaultCookiePolicy defines the standard rules for Netscape
and RFC 2965 cookies -- override that if you want a customised policy.
"""
def set_ok(self, cookie, request):
"""Return true if (and only if) cookie should be accepted from server.
Currently, pre-expired cookies never get this far -- the CookieJar
class deletes such cookies itself.
"""
raise NotImplementedError()
def return_ok(self, cookie, request):
"""Return true if (and only if) cookie should be returned to server."""
raise NotImplementedError()
def domain_return_ok(self, domain, request):
"""Return false if cookies should not be returned, given cookie domain.
"""
return True
def path_return_ok(self, path, request):
"""Return false if cookies should not be returned, given cookie path.
"""
return True
| CookiePolicy |
python | django__django | tests/queries/models.py | {
"start": 7774,
"end": 7947
} | class ____(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ["name"]
def __str__(self):
return self.name
| SingleObject |
python | davidhalter__jedi | jedi/inference/names.py | {
"start": 18943,
"end": 19546
} | class ____(_ActualTreeParamName):
def __init__(self, function_value, tree_name, arguments):
super().__init__(function_value, tree_name)
self.arguments = arguments
def infer(self):
values = super().infer()
if values:
return values
return self.get_executed_param_name().infer()
def get_executed_param_name(self):
from jedi.inference.param import get_executed_param_names
params_names = get_executed_param_names(self.function_value, self.arguments)
return params_names[self._get_param_node().position_index]
| ParamName |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 100239,
"end": 101553
} | class ____(Response):
"""
Response of models.get_frameworks endpoint.
:param frameworks: Unique list of the frameworks used in the company models
:type frameworks: Sequence[str]
"""
_service = "models"
_action = "get_frameworks"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"frameworks": {
"description": "Unique list of the frameworks used in the company models",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, frameworks: Optional[List[str]] = None, **kwargs: Any) -> None:
super(GetFrameworksResponse, self).__init__(**kwargs)
self.frameworks = frameworks
@schema_property("frameworks")
def frameworks(self) -> Optional[List[str]]:
return self._property_frameworks
@frameworks.setter
def frameworks(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_frameworks = None
return
self.assert_isinstance(value, "frameworks", (list, tuple))
self.assert_isinstance(value, "frameworks", six.string_types, is_array=True)
self._property_frameworks = value
| GetFrameworksResponse |
python | huggingface__transformers | src/transformers/models/deberta_v2/modeling_deberta_v2.py | {
"start": 16416,
"end": 17095
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm
| DebertaV2Intermediate |
python | wandb__wandb | wandb/vendor/pygments/lexers/fortran.py | {
"start": 8544,
"end": 9768
} | class ____(RegexLexer):
"""
Lexer for fixed format Fortran.
.. versionadded:: 2.1
"""
name = 'FortranFixed'
aliases = ['fortranfixed']
filenames = ['*.f', '*.F']
flags = re.IGNORECASE
def _lex_fortran(self, match, ctx=None):
"""Lex a line just as free form fortran without line break."""
lexer = FortranLexer()
text = match.group(0) + "\n"
for index, token, value in lexer.get_tokens_unprocessed(text):
value = value.replace('\n', '')
if value != '':
yield index, token, value
tokens = {
'root': [
(r'[C*].*\n', Comment),
(r'#.*\n', Comment.Preproc),
(r' {0,4}!.*\n', Comment),
(r'(.{5})', Name.Label, 'cont-char'),
(r'.*\n', using(FortranLexer)),
],
'cont-char': [
(' ', Text, 'code'),
('0', Comment, 'code'),
('.', Generic.Strong, 'code'),
],
'code': [
(r'(.{66})(.*)(\n)',
bygroups(_lex_fortran, Comment, Text), 'root'),
(r'(.*)(\n)', bygroups(_lex_fortran, Text), 'root'),
default('root'),
]
}
| FortranFixedLexer |
python | pandas-dev__pandas | pandas/tests/tseries/offsets/test_halfyear.py | {
"start": 7346,
"end": 13836
} | class ____:
def test_repr(self):
expected = "<HalfYearEnd: startingMonth=6>"
assert repr(HalfYearEnd()) == expected
expected = "<HalfYearEnd: startingMonth=3>"
assert repr(HalfYearEnd(startingMonth=3)) == expected
expected = "<HalfYearEnd: startingMonth=1>"
assert repr(HalfYearEnd(startingMonth=1)) == expected
def test_offset_corner_case(self):
# corner
offset = HalfYearEnd(n=-1, startingMonth=1)
assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 31)
offset_cases = []
offset_cases.append(
(
HalfYearEnd(startingMonth=1),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 7, 31): datetime(2009, 1, 31),
},
)
)
offset_cases.append(
(
HalfYearEnd(startingMonth=2),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 8, 31),
datetime(2008, 3, 15): datetime(2008, 8, 31),
datetime(2008, 3, 31): datetime(2008, 8, 31),
datetime(2008, 4, 15): datetime(2008, 8, 31),
datetime(2008, 8, 30): datetime(2008, 8, 31),
datetime(2008, 8, 31): datetime(2009, 2, 28),
},
)
)
offset_cases.append(
(
HalfYearEnd(startingMonth=1, n=0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 7, 31): datetime(2008, 7, 31),
},
)
)
offset_cases.append(
(
HalfYearEnd(startingMonth=1, n=-1),
{
datetime(2008, 1, 1): datetime(2007, 7, 31),
datetime(2008, 1, 31): datetime(2007, 7, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 7, 15): datetime(2008, 1, 31),
datetime(2008, 7, 30): datetime(2008, 1, 31),
datetime(2008, 7, 31): datetime(2008, 1, 31),
datetime(2008, 8, 1): datetime(2008, 7, 31),
},
)
)
offset_cases.append(
(
HalfYearEnd(startingMonth=6, n=2),
{
datetime(2008, 1, 31): datetime(2008, 12, 31),
datetime(2008, 2, 15): datetime(2008, 12, 31),
datetime(2008, 2, 29): datetime(2008, 12, 31),
datetime(2008, 3, 15): datetime(2008, 12, 31),
datetime(2008, 3, 31): datetime(2008, 12, 31),
datetime(2008, 4, 15): datetime(2008, 12, 31),
datetime(2008, 4, 30): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2009, 6, 30),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(HalfYearEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(HalfYearEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(HalfYearEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(HalfYearEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(HalfYearEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(HalfYearEnd(1, startingMonth=1), datetime(2008, 4, 30), False),
(HalfYearEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(HalfYearEnd(1, startingMonth=1), datetime(2008, 5, 31), False),
(HalfYearEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(HalfYearEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(HalfYearEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
(HalfYearEnd(1, startingMonth=3), datetime(2007, 12, 31), False),
(HalfYearEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
(HalfYearEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
(HalfYearEnd(1, startingMonth=3), datetime(2007, 3, 31), True),
(HalfYearEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
(HalfYearEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
(HalfYearEnd(1, startingMonth=3), datetime(2008, 5, 31), False),
(HalfYearEnd(1, startingMonth=3), datetime(2007, 6, 29), False),
(HalfYearEnd(1, startingMonth=3), datetime(2007, 6, 30), False),
(HalfYearEnd(1, startingMonth=6), datetime(2008, 1, 31), False),
(HalfYearEnd(1, startingMonth=6), datetime(2007, 12, 31), True),
(HalfYearEnd(1, startingMonth=6), datetime(2008, 2, 29), False),
(HalfYearEnd(1, startingMonth=6), datetime(2007, 3, 30), False),
(HalfYearEnd(1, startingMonth=6), datetime(2007, 3, 31), False),
(HalfYearEnd(1, startingMonth=6), datetime(2008, 4, 30), False),
(HalfYearEnd(1, startingMonth=6), datetime(2008, 5, 30), False),
(HalfYearEnd(1, startingMonth=6), datetime(2008, 5, 31), False),
(HalfYearEnd(1, startingMonth=6), datetime(2007, 6, 29), False),
(HalfYearEnd(1, startingMonth=6), datetime(2007, 6, 30), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
| TestHalfYearEnd |
python | jackfrued__Python-100-Days | Day31-35/code/example12.py | {
"start": 459,
"end": 690
} | class ____(Employee):
"""程序员"""
def __init__(self, name, working_hour=0):
self.working_hour = working_hour
super().__init__(name)
def get_salary(self):
return 200.0 * self.working_hour
| Programmer |
python | ansible__ansible | lib/ansible/plugins/callback/junit.py | {
"start": 13332,
"end": 14194
} | class ____:
"""
Data about an individual task.
"""
def __init__(self, uuid: str, name: str, path: str, play: str, action: str) -> None:
self.uuid = uuid
self.name = name
self.path = path
self.play = play
self.start = None
self.host_data: dict[str, HostData] = {}
self.start = time.time()
self.action = action
def add_host(self, host: HostData) -> None:
if host.uuid in self.host_data:
if host.status == 'included':
# concatenate task include output from multiple items
host.result = f'{self.host_data[host.uuid].result}\n{host.result}'
else:
raise Exception('%s: %s: %s: duplicate host callback: %s' % (self.path, self.play, self.name, host.name))
self.host_data[host.uuid] = host
| TaskData |
python | pallets__jinja | tests/test_loader.py | {
"start": 376,
"end": 4525
} | class ____:
def test_dict_loader(self, dict_loader):
env = Environment(loader=dict_loader)
tmpl = env.get_template("justdict.html")
assert tmpl.render().strip() == "FOO"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_package_loader(self, package_loader):
env = Environment(loader=package_loader)
tmpl = env.get_template("test.html")
assert tmpl.render().strip() == "BAR"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_filesystem_loader_overlapping_names(self, filesystem_loader):
t2_dir = Path(filesystem_loader.searchpath[0]) / ".." / "templates2"
# Make "foo" show up before "foo/test.html".
filesystem_loader.searchpath.insert(0, t2_dir)
e = Environment(loader=filesystem_loader)
e.get_template("foo")
# This would raise NotADirectoryError if "t2/foo" wasn't skipped.
e.get_template("foo/test.html")
def test_choice_loader(self, choice_loader):
env = Environment(loader=choice_loader)
tmpl = env.get_template("justdict.html")
assert tmpl.render().strip() == "FOO"
tmpl = env.get_template("test.html")
assert tmpl.render().strip() == "BAR"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_function_loader(self, function_loader):
env = Environment(loader=function_loader)
tmpl = env.get_template("justfunction.html")
assert tmpl.render().strip() == "FOO"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_prefix_loader(self, prefix_loader):
env = Environment(loader=prefix_loader)
tmpl = env.get_template("a/test.html")
assert tmpl.render().strip() == "BAR"
tmpl = env.get_template("b/justdict.html")
assert tmpl.render().strip() == "FOO"
pytest.raises(TemplateNotFound, env.get_template, "missing")
def test_caching(self):
changed = False
class TestLoader(loaders.BaseLoader):
def get_source(self, environment, template):
return "foo", None, lambda: not changed
env = Environment(loader=TestLoader(), cache_size=-1)
tmpl = env.get_template("template")
assert tmpl is env.get_template("template")
changed = True
assert tmpl is not env.get_template("template")
changed = False
def test_no_cache(self):
mapping = {"foo": "one"}
env = Environment(loader=loaders.DictLoader(mapping), cache_size=0)
assert env.get_template("foo") is not env.get_template("foo")
def test_limited_size_cache(self):
mapping = {"one": "foo", "two": "bar", "three": "baz"}
loader = loaders.DictLoader(mapping)
env = Environment(loader=loader, cache_size=2)
t1 = env.get_template("one")
t2 = env.get_template("two")
assert t2 is env.get_template("two")
assert t1 is env.get_template("one")
env.get_template("three")
loader_ref = weakref.ref(loader)
assert (loader_ref, "one") in env.cache
assert (loader_ref, "two") not in env.cache
assert (loader_ref, "three") in env.cache
def test_cache_loader_change(self):
loader1 = loaders.DictLoader({"foo": "one"})
loader2 = loaders.DictLoader({"foo": "two"})
env = Environment(loader=loader1, cache_size=2)
assert env.get_template("foo").render() == "one"
env.loader = loader2
assert env.get_template("foo").render() == "two"
def test_dict_loader_cache_invalidates(self):
mapping = {"foo": "one"}
env = Environment(loader=loaders.DictLoader(mapping))
assert env.get_template("foo").render() == "one"
mapping["foo"] = "two"
assert env.get_template("foo").render() == "two"
def test_split_template_path(self):
assert split_template_path("foo/bar") == ["foo", "bar"]
assert split_template_path("./foo/bar") == ["foo", "bar"]
pytest.raises(TemplateNotFound, split_template_path, "../foo")
| TestLoaders |
python | ApeWorX__ape | tests/functional/conversion/test_address.py | {
"start": 1450,
"end": 2066
} | class ____:
@pytest.fixture(scope="class")
def converter(self):
return IntAddressConverter()
def test_is_convertible(self, converter, owner):
int_address = int(owner.address, 16)
assert converter.is_convertible(int_address)
def test_is_convertible_random_int(self, converter):
assert converter.is_convertible(0)
@pytest.mark.parametrize("val", (0, 1))
def test_convert_simple_int(self, converter, val, zero_address):
actual = converter.convert(val)
expected = f"{zero_address[:-1]}{val}"
assert actual == expected
| TestIntAddressConverter |
python | getsentry__sentry | tests/sentry/tasks/test_post_process.py | {
"start": 142199,
"end": 154041
} | class ____(
TestCase,
SnubaTestCase,
OccurrenceTestMixin,
CorePostProcessGroupTestMixin,
InboxTestMixin,
RuleProcessorTestMixin,
SnoozeTestMixin,
):
def create_event(
self,
data,
project_id,
assert_no_errors=True,
feedback_type=FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE,
is_spam=False,
):
data["type"] = "generic"
event = self.store_event(
data=data, project_id=project_id, assert_no_errors=assert_no_errors
)
evidence_data = {
"Test": 123,
"source": feedback_type.value if feedback_type else None,
}
evidence_display = [
{"name": "hi", "value": "bye", "important": True},
{"name": "what", "value": "where", "important": False},
]
if is_spam:
evidence_data["is_spam"] = True
occurrence_data = self.build_occurrence_data(
event_id=event.event_id,
project_id=project_id,
**{
"id": uuid.uuid4().hex,
"fingerprint": ["c" * 32],
"issue_title": "User Feedback",
"subtitle": "it was bad",
"culprit": "api/123",
"resource_id": "1234",
"evidence_data": evidence_data,
"evidence_display": evidence_display,
"type": FeedbackGroup.type_id,
"detection_time": datetime.now().timestamp(),
"level": "info",
},
)
occurrence, group_info = save_issue_occurrence(occurrence_data, event)
assert group_info is not None
group_event = event.for_group(group_info.group)
group_event.occurrence = occurrence
return group_event
def call_post_process_group(
self, is_new, is_regression, is_new_group_environment, event, cache_key=None
):
with self.feature(FeedbackGroup.build_post_process_group_feature_name()):
post_process_group(
is_new=is_new,
is_regression=is_regression,
is_new_group_environment=is_new_group_environment,
cache_key=None,
group_id=event.group_id,
occurrence_id=event.occurrence.id,
project_id=event.group.project_id,
eventstream_type=EventStreamEventType.Error.value,
)
return cache_key
def test_not_ran_if_crash_report_option_disabled(self) -> None:
self.project.update_option("sentry:feedback_user_report_notifications", False)
event = self.create_event(
data={},
project_id=self.project.id,
feedback_type=FeedbackCreationSource.CRASH_REPORT_EMBED_FORM,
)
mock_process_func = Mock()
with patch(
"sentry.tasks.post_process.GROUP_CATEGORY_POST_PROCESS_PIPELINE",
{
GroupCategory.FEEDBACK: [
feedback_filter_decorator(mock_process_func),
]
},
):
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
event=event,
cache_key="total_rubbish",
)
assert mock_process_func.call_count == 0
def test_not_ran_if_spam(self) -> None:
event = self.create_event(
data={},
project_id=self.project.id,
feedback_type=FeedbackCreationSource.CRASH_REPORT_EMBED_FORM,
is_spam=True,
)
mock_process_func = Mock()
with patch(
"sentry.tasks.post_process.GROUP_CATEGORY_POST_PROCESS_PIPELINE",
{
GroupCategory.FEEDBACK: [
feedback_filter_decorator(mock_process_func),
]
},
):
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
event=event,
cache_key="total_rubbish",
)
assert mock_process_func.call_count == 0
def test_not_ran_if_crash_report_project_option_enabled(self) -> None:
self.project.update_option("sentry:feedback_user_report_notifications", True)
event = self.create_event(
data={},
project_id=self.project.id,
feedback_type=FeedbackCreationSource.CRASH_REPORT_EMBED_FORM,
)
mock_process_func = Mock()
with patch(
"sentry.tasks.post_process.GROUP_CATEGORY_POST_PROCESS_PIPELINE",
{
GroupCategory.FEEDBACK: [
feedback_filter_decorator(mock_process_func),
]
},
):
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
event=event,
cache_key="total_rubbish",
)
assert mock_process_func.call_count == 1
def test_not_ran_if_crash_report_setting_option_epoch_0(self) -> None:
self.project.update_option("sentry:option-epoch", 1)
event = self.create_event(
data={},
project_id=self.project.id,
feedback_type=FeedbackCreationSource.CRASH_REPORT_EMBED_FORM,
)
mock_process_func = Mock()
with patch(
"sentry.tasks.post_process.GROUP_CATEGORY_POST_PROCESS_PIPELINE",
{
GroupCategory.FEEDBACK: [
feedback_filter_decorator(mock_process_func),
]
},
):
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
event=event,
cache_key="total_rubbish",
)
assert mock_process_func.call_count == 0
def test_ran_if_default_on_new_projects(self) -> None:
event = self.create_event(
data={},
project_id=self.project.id,
feedback_type=FeedbackCreationSource.CRASH_REPORT_EMBED_FORM,
)
mock_process_func = Mock()
with patch(
"sentry.tasks.post_process.GROUP_CATEGORY_POST_PROCESS_PIPELINE",
{
GroupCategory.FEEDBACK: [
feedback_filter_decorator(mock_process_func),
]
},
):
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
event=event,
cache_key="total_rubbish",
)
assert mock_process_func.call_count == 1
def test_ran_if_crash_feedback_envelope(self) -> None:
event = self.create_event(
data={},
project_id=self.project.id,
feedback_type=FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE,
)
mock_process_func = Mock()
with patch(
"sentry.tasks.post_process.GROUP_CATEGORY_POST_PROCESS_PIPELINE",
{
GroupCategory.FEEDBACK: [
feedback_filter_decorator(mock_process_func),
]
},
):
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
event=event,
cache_key="total_rubbish",
)
assert mock_process_func.call_count == 1
def test_logs_if_source_missing(self) -> None:
event = self.create_event(
data={},
project_id=self.project.id,
feedback_type=None,
)
mock_process_func = Mock()
mock_logger = Mock()
with patch(
"sentry.tasks.post_process.GROUP_CATEGORY_POST_PROCESS_PIPELINE",
{
GroupCategory.FEEDBACK: [
feedback_filter_decorator(mock_process_func),
]
},
):
with patch("sentry.tasks.post_process.logger", mock_logger):
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
event=event,
cache_key="total_rubbish",
)
assert mock_process_func.call_count == 0
assert mock_logger.error.call_count == 1
@pytest.mark.skip(
reason="Skip this test since there's no way to have issueless events in the issue platform"
)
def test_issueless(self) -> None: ...
def test_no_cache_abort(self) -> None:
# We don't use the cache for generic issues, so skip this test
pass
@pytest.mark.skip(reason="those tests do not work with the given call_post_process_group impl")
def test_processing_cache_cleared(self) -> None:
pass
@pytest.mark.skip(reason="those tests do not work with the given call_post_process_group impl")
def test_processing_cache_cleared_with_commits(self) -> None:
pass
@pytest.mark.skip(reason="escalation detection is disabled for feedback issues")
def test_invalidates_snooze(self) -> None:
pass
@pytest.mark.skip(reason="escalation detection is disabled for feedback issues")
def test_invalidates_snooze_with_buffers(self) -> None:
pass
@pytest.mark.skip(reason="auto resolve is disabled for feedback issues")
def test_group_inbox_regression(self) -> None:
pass
@pytest.mark.skip(reason="escalation detection is disabled for feedback issues")
def test_forecast_in_activity(self) -> None:
pass
@pytest.mark.skip(reason="regression is disabled for feedback issues")
def test_group_last_seen_buffer(self) -> None:
pass
@with_feature("organizations:expanded-sentry-apps-webhooks")
@patch("sentry.sentry_apps.tasks.sentry_apps.process_resource_change_bound.delay")
def test_feedback_sends_webhook_with_feature_flag(self, mock_delay: MagicMock) -> None:
sentry_app = self.create_sentry_app(
organization=self.organization, events=["issue.created"]
)
self.create_sentry_app_installation(organization=self.organization, slug=sentry_app.slug)
event = self.create_event(
data={},
project_id=self.project.id,
feedback_type=FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE,
)
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=False,
event=event,
)
mock_delay.assert_called_once_with(
action="created", sender="Group", instance_id=event.group.id
)
@patch("sentry.sentry_apps.tasks.sentry_apps.process_resource_change_bound.delay")
def test_feedback_no_webhook_without_feature_flag(self, mock_delay: MagicMock) -> None:
sentry_app = self.create_sentry_app(
organization=self.organization, events=["issue.created"]
)
self.create_sentry_app_installation(organization=self.organization, slug=sentry_app.slug)
event = self.create_event(
data={},
project_id=self.project.id,
feedback_type=FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE,
)
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assert not mock_delay.called
| PostProcessGroupFeedbackTest |
python | pypa__pipenv | pipenv/patched/pip/_internal/resolution/resolvelib/provider.py | {
"start": 2978,
"end": 11250
} | class ____(_ProviderBase):
"""Pip's provider implementation for resolvelib.
:params constraints: A mapping of constraints specified by the user. Keys
are canonicalized project names.
:params ignore_dependencies: Whether the user specified ``--no-deps``.
:params upgrade_strategy: The user-specified upgrade strategy.
:params user_requested: A set of canonicalized package names that the user
supplied for pip to install/upgrade.
"""
def __init__(
self,
factory: Factory,
constraints: Dict[str, Constraint],
ignore_dependencies: bool,
upgrade_strategy: str,
user_requested: Dict[str, int],
) -> None:
self._factory = factory
self._constraints = constraints
self._ignore_dependencies = ignore_dependencies
self._upgrade_strategy = upgrade_strategy
self._user_requested = user_requested
def identify(self, requirement_or_candidate: Union[Requirement, Candidate]) -> str:
return requirement_or_candidate.name
def narrow_requirement_selection(
self,
identifiers: Iterable[str],
resolutions: Mapping[str, Candidate],
candidates: Mapping[str, Iterator[Candidate]],
information: Mapping[str, Iterator["PreferenceInformation"]],
backtrack_causes: Sequence["PreferenceInformation"],
) -> Iterable[str]:
"""Produce a subset of identifiers that should be considered before others.
Currently pip narrows the following selection:
* Requires-Python, if present is always returned by itself
* Backtrack causes are considered next because they can be identified
in linear time here, whereas because get_preference() is called
for each identifier, it would be quadratic to check for them there.
Further, the current backtrack causes likely need to be resolved
before other requirements as a resolution can't be found while
there is a conflict.
"""
backtrack_identifiers = set()
for info in backtrack_causes:
backtrack_identifiers.add(info.requirement.name)
if info.parent is not None:
backtrack_identifiers.add(info.parent.name)
current_backtrack_causes = []
for identifier in identifiers:
# Requires-Python has only one candidate and the check is basically
# free, so we always do it first to avoid needless work if it fails.
# This skips calling get_preference() for all other identifiers.
if identifier == REQUIRES_PYTHON_IDENTIFIER:
return [identifier]
# Check if this identifier is a backtrack cause
if identifier in backtrack_identifiers:
current_backtrack_causes.append(identifier)
continue
if current_backtrack_causes:
return current_backtrack_causes
return identifiers
def get_preference(
self,
identifier: str,
resolutions: Mapping[str, Candidate],
candidates: Mapping[str, Iterator[Candidate]],
information: Mapping[str, Iterable["PreferenceInformation"]],
backtrack_causes: Sequence["PreferenceInformation"],
) -> "Preference":
"""Produce a sort key for given requirement based on preference.
The lower the return value is, the more preferred this group of
arguments is.
Currently pip considers the following in order:
* Any requirement that is "direct", e.g., points to an explicit URL.
* Any requirement that is "pinned", i.e., contains the operator ``===``
or ``==`` without a wildcard.
* Any requirement that imposes an upper version limit, i.e., contains the
operator ``<``, ``<=``, ``~=``, or ``==`` with a wildcard. Because
pip prioritizes the latest version, preferring explicit upper bounds
can rule out infeasible candidates sooner. This does not imply that
upper bounds are good practice; they can make dependency management
and resolution harder.
* Order user-specified requirements as they are specified, placing
other requirements afterward.
* Any "non-free" requirement, i.e., one that contains at least one
operator, such as ``>=`` or ``!=``.
* Alphabetical order for consistency (aids debuggability).
"""
try:
next(iter(information[identifier]))
except StopIteration:
# There is no information for this identifier, so there's no known
# candidates.
has_information = False
else:
has_information = True
if not has_information:
direct = False
ireqs: Tuple[Optional[InstallRequirement], ...] = ()
else:
# Go through the information and for each requirement,
# check if it's explicit (e.g., a direct link) and get the
# InstallRequirement (the second element) from get_candidate_lookup()
directs, ireqs = zip(
*(
(isinstance(r, ExplicitRequirement), r.get_candidate_lookup()[1])
for r, _ in information[identifier]
)
)
direct = any(directs)
operators: list[tuple[str, str]] = [
(specifier.operator, specifier.version)
for specifier_set in (ireq.specifier for ireq in ireqs if ireq)
for specifier in specifier_set
]
pinned = any(((op[:2] == "==") and ("*" not in ver)) for op, ver in operators)
upper_bounded = any(
((op in ("<", "<=", "~=")) or (op == "==" and "*" in ver))
for op, ver in operators
)
unfree = bool(operators)
requested_order = self._user_requested.get(identifier, math.inf)
return (
not direct,
not pinned,
not upper_bounded,
requested_order,
not unfree,
identifier,
)
def find_matches(
self,
identifier: str,
requirements: Mapping[str, Iterator[Requirement]],
incompatibilities: Mapping[str, Iterator[Candidate]],
) -> Iterable[Candidate]:
def _eligible_for_upgrade(identifier: str) -> bool:
"""Are upgrades allowed for this project?
This checks the upgrade strategy, and whether the project was one
that the user specified in the command line, in order to decide
whether we should upgrade if there's a newer version available.
(Note that we don't need access to the `--upgrade` flag, because
an upgrade strategy of "to-satisfy-only" means that `--upgrade`
was not specified).
"""
if self._upgrade_strategy == "eager":
return True
elif self._upgrade_strategy == "only-if-needed":
user_order = _get_with_identifier(
self._user_requested,
identifier,
default=None,
)
return user_order is not None
return False
constraint = _get_with_identifier(
self._constraints,
identifier,
default=Constraint.empty(),
)
return self._factory.find_candidates(
identifier=identifier,
requirements=requirements,
constraint=constraint,
prefers_installed=(not _eligible_for_upgrade(identifier)),
incompatibilities=incompatibilities,
is_satisfied_by=self.is_satisfied_by,
)
@staticmethod
@lru_cache(maxsize=None)
def is_satisfied_by(requirement: Requirement, candidate: Candidate) -> bool:
return requirement.is_satisfied_by(candidate)
def get_dependencies(self, candidate: Candidate) -> Iterable[Requirement]:
with_requires = not self._ignore_dependencies
# iter_dependencies() can perform nontrivial work so delay until needed.
return (r for r in candidate.iter_dependencies(with_requires) if r is not None)
| PipProvider |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 81509,
"end": 81620
} | class ____(BaseModel, extra="forbid"):
order_by: "OrderByInterface" = Field(..., description="")
| OrderByQuery |
python | pennersr__django-allauth | allauth/headless/socialaccount/views.py | {
"start": 2604,
"end": 3278
} | class ____(AuthenticatedAPIView):
input_class = {
"DELETE": DeleteProviderAccountInput,
}
def get(self, request, *args, **kwargs):
return self.respond_provider_accounts(request)
@classmethod
def respond_provider_accounts(self, request):
accounts = SocialAccount.objects.filter(user=request.user)
return SocialAccountsResponse(request, accounts)
def delete(self, request, *args, **kwargs):
flows.connect.disconnect(request, self.input.cleaned_data["account"])
return self.respond_provider_accounts(request)
def get_input_kwargs(self):
return {"user": self.request.user}
| ManageProvidersView |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/twentythreeandme/tests.py | {
"start": 269,
"end": 1009
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = TwentyThreeAndMeProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"profiles": [
{"id": "56c46bdb0902f8e2", "genotyped": false}
],
"id": "b4b975a5a6a1b80b"
}
""",
)
def get_expected_to_str(self):
return "23andMe"
def get_login_response_json(self, with_refresh_token=True):
return """
{
"access_token":"testac",
"token_type":"bearer",
"expires_in": 86400,
"refresh_token":"33c53cd7bb",
"scope":"basic"
}"""
| TwentyThreeAndMeTests |
python | tensorflow__tensorflow | tensorflow/python/debug/wrappers/hooks.py | {
"start": 11438,
"end": 13536
} | class ____(GrpcDebugHook):
"""A tfdbg hook that can be used with TensorBoard Debugger Plugin.
This hook is the same as `GrpcDebugHook`, except that it uses a predefined
`watch_fn` that
1) uses `DebugIdentity` debug ops with the `gated_grpc` attribute set to
`True`, to allow the interactive enabling and disabling of tensor
breakpoints.
2) watches all tensors in the graph.
This saves the need for the user to define a `watch_fn`.
"""
def __init__(self,
grpc_debug_server_addresses,
thread_name_filter=None,
send_traceback_and_source_code=True):
"""Constructor of TensorBoardDebugHook.
Args:
grpc_debug_server_addresses: gRPC address(es) of debug server(s), as a
`str` or a `list` of `str`s. E.g., "localhost:2333",
"grpc://localhost:2333", ["192.168.0.7:2333", "192.168.0.8:2333"].
thread_name_filter: Optional filter for thread names.
send_traceback_and_source_code: Whether traceback of graph elements and
the source code are to be sent to the debug server(s).
"""
def _gated_grpc_watch_fn(fetches, feeds):
del fetches, feeds # Unused.
return framework.WatchOptions(
debug_ops=["DebugIdentity(gated_grpc=true)"])
super(TensorBoardDebugHook, self).__init__(
grpc_debug_server_addresses,
watch_fn=_gated_grpc_watch_fn,
thread_name_filter=thread_name_filter)
self._grpc_debug_server_addresses = grpc_debug_server_addresses
self._send_traceback_and_source_code = send_traceback_and_source_code
self._sent_graph_version = -1
grpc_wrapper.register_signal_handler()
def before_run(self, run_context):
if self._send_traceback_and_source_code:
self._sent_graph_version = grpc_wrapper.publish_traceback(
self._grpc_debug_server_addresses, run_context.session.graph,
run_context.original_args.feed_dict,
run_context.original_args.fetches, self._sent_graph_version)
return super(TensorBoardDebugHook, self).before_run(run_context)
| TensorBoardDebugHook |
python | getsentry__sentry | src/sentry/monitors/apps.py | {
"start": 36,
"end": 133
} | class ____(AppConfig):
name = "sentry.monitors"
def ready(self) -> None:
pass
| Config |
python | python-markdown__markdown | markdown/inlinepatterns.py | {
"start": 18752,
"end": 19477
} | class ____(SimpleTagInlineProcessor):
"""Return a ElementTree element nested in tag2 nested in tag1.
Useful for strong emphasis etc.
"""
def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]: # pragma: no cover
"""
Return [`Element`][xml.etree.ElementTree.Element] in following format:
`<tag1><tag2>group(2)</tag2>group(3)</tag2>` where `group(3)` is optional.
"""
tag1, tag2 = self.tag.split(",")
el1 = etree.Element(tag1)
el2 = etree.SubElement(el1, tag2)
el2.text = m.group(2)
if len(m.groups()) == 3:
el2.tail = m.group(3)
return el1, m.start(0), m.end(0)
| DoubleTagInlineProcessor |
python | huggingface__transformers | tests/pipelines/test_pipelines_text_to_audio.py | {
"start": 1037,
"end": 14027
} | class ____(unittest.TestCase):
model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING
# for now only test text_to_waveform and not text_to_spectrogram
@require_torch
def test_small_musicgen_pt(self):
music_generator = pipeline(
task="text-to-audio", model="facebook/musicgen-small", do_sample=False, max_new_tokens=5
)
num_channels = 1 # model generates mono audio
outputs = music_generator("This is a test")
self.assertEqual({"audio": ANY(np.ndarray), "sampling_rate": 32000}, outputs)
self.assertEqual(len(outputs["audio"].shape), num_channels)
# test two examples side-by-side
outputs = music_generator(["This is a test", "This is a second test"])
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
# test batching, this time with parameterization in the forward pass
music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small")
forward_params = {"do_sample": False, "max_new_tokens": 5}
outputs = music_generator(
["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2
)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
@slow
@require_torch
def test_medium_seamless_m4t_pt(self):
speech_generator = pipeline(task="text-to-audio", model="facebook/hf-seamless-m4t-medium", max_new_tokens=5)
for forward_params in [{"tgt_lang": "eng"}, {"return_intermediate_token_ids": True, "tgt_lang": "eng"}]:
outputs = speech_generator("This is a test", forward_params=forward_params)
self.assertEqual({"audio": ANY(np.ndarray), "sampling_rate": 16000}, outputs)
# test two examples side-by-side
outputs = speech_generator(["This is a test", "This is a second test"], forward_params=forward_params)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
# test batching
outputs = speech_generator(
["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2
)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
@slow
@require_torch
def test_small_bark_pt(self):
speech_generator = pipeline(task="text-to-audio", model="suno/bark-small")
num_channels = 1 # model generates mono audio
forward_params = {
# Using `do_sample=False` to force deterministic output
"do_sample": False,
"semantic_max_new_tokens": 5,
}
outputs = speech_generator("This is a test", forward_params=forward_params)
self.assertEqual(
{"audio": ANY(np.ndarray), "sampling_rate": 24000},
outputs,
)
self.assertEqual(len(outputs["audio"].shape), num_channels)
# test two examples side-by-side
outputs = speech_generator(
["This is a test", "This is a second test"],
forward_params=forward_params,
)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
# test other generation strategy
forward_params = {
"do_sample": True,
"semantic_max_new_tokens": 5,
"semantic_num_return_sequences": 2,
}
outputs = speech_generator("This is a test", forward_params=forward_params)
audio = outputs["audio"]
self.assertEqual(ANY(np.ndarray), audio)
# test using a speaker embedding
processor = AutoProcessor.from_pretrained("suno/bark-small")
temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5")
history_prompt = temp_inp["history_prompt"]
forward_params["history_prompt"] = history_prompt
outputs = speech_generator(
["This is a test", "This is a second test"],
forward_params=forward_params,
batch_size=2,
)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
@slow
@require_torch_accelerator
def test_conversion_additional_tensor(self):
speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", device=torch_device)
processor = AutoProcessor.from_pretrained("suno/bark-small")
forward_params = {
"do_sample": True,
"semantic_max_new_tokens": 5,
}
# atm, must do to stay coherent with BarkProcessor
preprocess_params = {
"max_length": 256,
"add_special_tokens": False,
"return_attention_mask": True,
"return_token_type_ids": False,
}
outputs = speech_generator(
"This is a test",
forward_params=forward_params,
preprocess_params=preprocess_params,
)
temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5")
history_prompt = temp_inp["history_prompt"]
forward_params["history_prompt"] = history_prompt
# history_prompt is a torch.Tensor passed as a forward_param
# if generation is successful, it means that it was passed to the right device
outputs = speech_generator(
"This is a test", forward_params=forward_params, preprocess_params=preprocess_params
)
self.assertEqual(
{"audio": ANY(np.ndarray), "sampling_rate": 24000},
outputs,
)
@require_torch
def test_vits_model_pt(self):
speech_generator = pipeline(task="text-to-audio", model="facebook/mms-tts-eng")
outputs = speech_generator("This is a test")
self.assertEqual(outputs["sampling_rate"], 16000)
audio = outputs["audio"]
self.assertEqual(ANY(np.ndarray), audio)
# test two examples side-by-side
outputs = speech_generator(["This is a test", "This is a second test"])
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
# test batching
outputs = speech_generator(["This is a test", "This is a second test"], batch_size=2)
self.assertEqual(ANY(np.ndarray), outputs[0]["audio"])
@require_torch
def test_forward_model_kwargs(self):
# use vits - a forward model
speech_generator = pipeline(task="text-to-audio", model="kakao-enterprise/vits-vctk")
# for reproducibility
set_seed(555)
outputs = speech_generator("This is a test", forward_params={"speaker_id": 5})
audio = outputs["audio"]
with self.assertRaises(TypeError):
# assert error if generate parameter
outputs = speech_generator("This is a test", forward_params={"speaker_id": 5, "do_sample": True})
forward_params = {"speaker_id": 5}
generate_kwargs = {"do_sample": True}
with self.assertRaises(ValueError):
# assert error if generate_kwargs with forward-only models
outputs = speech_generator(
"This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs
)
self.assertTrue(np.abs(outputs["audio"] - audio).max() < 1e-5)
@require_torch
def test_generative_model_kwargs(self):
# use musicgen - a generative model
music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small")
forward_params = {
"do_sample": True,
"max_new_tokens": 20,
}
# for reproducibility
set_seed(555)
outputs = music_generator("This is a test", forward_params=forward_params)
audio = outputs["audio"]
self.assertEqual(ANY(np.ndarray), audio)
# make sure generate kwargs get priority over forward params
forward_params = {
"do_sample": False,
"max_new_tokens": 20,
}
generate_kwargs = {"do_sample": True}
# for reproducibility
set_seed(555)
outputs = music_generator("This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs)
self.assertListEqual(outputs["audio"].tolist(), audio.tolist())
@slow
@require_torch
def test_csm_model_pt(self):
speech_generator = pipeline(task="text-to-audio", model="sesame/csm-1b", device=torch_device)
generate_kwargs = {"max_new_tokens": 10, "output_audio": True}
num_channels = 1 # model generates mono audio
outputs = speech_generator("[0]This is a test", generate_kwargs=generate_kwargs)
self.assertEqual(outputs["sampling_rate"], 24000)
audio = outputs["audio"]
self.assertEqual(ANY(np.ndarray), audio)
# ensure audio and not discrete codes
self.assertEqual(len(audio.shape), num_channels)
# test two examples side-by-side
outputs = speech_generator(["[0]This is a test", "[0]This is a second test"], generate_kwargs=generate_kwargs)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
self.assertEqual(len(audio[0].shape), num_channels)
# test batching
batch_size = 2
outputs = speech_generator(
["[0]This is a test", "[0]This is a second test"], generate_kwargs=generate_kwargs, batch_size=batch_size
)
self.assertEqual(len(outputs), batch_size)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
self.assertEqual(len(outputs[0]["audio"].shape), num_channels)
@slow
@require_torch
def test_dia_model(self):
speech_generator = pipeline(task="text-to-audio", model="nari-labs/Dia-1.6B-0626", device=torch_device)
generate_kwargs = {"max_new_tokens": 20}
num_channels = 1 # model generates mono audio
outputs = speech_generator(
"[S1] Dia is an open weights text to dialogue model.", generate_kwargs=generate_kwargs
)
self.assertEqual(outputs["sampling_rate"], 44100)
audio = outputs["audio"]
self.assertEqual(ANY(np.ndarray), audio)
# ensure audio (with one channel) and not discrete codes
self.assertEqual(len(audio.shape), num_channels)
# test two examples side-by-side
outputs = speech_generator(
["[S1] Dia is an open weights text to dialogue model.", "[S2] This is a second example."],
generate_kwargs=generate_kwargs,
)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
self.assertEqual(len(audio[0].shape), num_channels)
# test batching
batch_size = 2
outputs = speech_generator(
["[S1] Dia is an open weights text to dialogue model.", "[S2] This is a second example."],
generate_kwargs=generate_kwargs,
batch_size=2,
)
self.assertEqual(len(outputs), batch_size)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
self.assertEqual(len(outputs[0]["audio"].shape), num_channels)
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
model_test_kwargs = {}
if model.can_generate(): # not all models in this pipeline can generate and, therefore, take `generate` kwargs
model_test_kwargs["max_new_tokens"] = 5
model.config._attn_implementation = "eager"
speech_generator = TextToAudioPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
**model_test_kwargs,
)
return speech_generator, ["This is a test", "Another test"]
def run_pipeline_test(self, speech_generator, _):
outputs = speech_generator("This is a test")
self.assertEqual(ANY(np.ndarray), outputs["audio"])
forward_params = (
{"num_return_sequences": 2, "do_sample": True} if speech_generator.model.can_generate() else {}
)
outputs = speech_generator(["This is great !", "Something else"], forward_params=forward_params)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
| TextToAudioPipelineTests |
python | kamyu104__LeetCode-Solutions | Python/maximum-spending-after-buying-items.py | {
"start": 70,
"end": 581
} | class ____(object):
def maxSpending(self, values):
"""
:type values: List[List[int]]
:rtype: int
"""
m, n = len(values), len(values[0])
min_heap = [(values[i].pop(), i) for i in xrange(m)]
heapq.heapify(min_heap)
result = 0
for d in xrange(1, m*n+1):
x, i = heapq.heappop(min_heap)
result += x*d
if values[i]:
heapq.heappush(min_heap, (values[i].pop(), i))
return result
| Solution |
python | PyCQA__pylint | tests/functional/a/attribute_defined_outside_init.py | {
"start": 565,
"end": 661
} | class ____:
def setUp(self):
self.set_z()
def set_z(self):
self.z = 42
| D |
python | altair-viz__altair | tools/datasets/datapackage.py | {
"start": 635,
"end": 1168
} | class ____:
def __init__(
self, name: str, expr: pl.Expr, /, doc: str = "_description_", tp_str: str = ""
) -> None:
self._name: str = name
self._expr: pl.Expr = expr
self._doc: str = doc
self._tp_str: str = tp_str
@property
def expr(self) -> pl.Expr:
return self._expr.alias(self._name)
@property
def doc(self) -> str:
return f"{self._name}\n{INDENT * 2}{self._doc}"
def is_feature(self) -> bool:
return self._name.startswith("is_")
| Column |
python | doocs__leetcode | solution/0600-0699/0690.Employee Importance/Solution.py | {
"start": 227,
"end": 490
} | class ____:
def getImportance(self, employees: List["Employee"], id: int) -> int:
def dfs(i: int) -> int:
return d[i].importance + sum(dfs(j) for j in d[i].subordinates)
d = {e.id: e for e in employees}
return dfs(id)
| Solution |
python | huggingface__transformers | tests/models/data2vec/test_modeling_data2vec_text.py | {
"start": 26182,
"end": 27497
} | class ____(TestCasePlus):
@slow
def test_inference_masked_lm(self):
model = Data2VecTextForMaskedLM.from_pretrained("facebook/data2vec-text-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 11, 50265))
self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
expected_slice = torch.tensor([[[0.2328, 0.0000, 1.1710], [2.2525, 0.0000, 1.9937], [2.1280, 0.0000, 1.8691]]])
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_no_head(self):
model = Data2VecTextModel.from_pretrained("facebook/data2vec-text-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
with torch.no_grad():
output = model(input_ids)[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[0.1998, -0.0379, 0.0024], [-0.0971, -0.2214, -0.1798], [-0.0789, -0.2400, -0.1898]]]
)
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
| Data2VecTextModelIntegrationTest |
python | sqlalchemy__sqlalchemy | test/orm/test_versioning.py | {
"start": 24061,
"end": 26445
} | class ____(fixtures.MappedTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("version_id", Integer),
)
Table(
"b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("a_id", ForeignKey("a.id")),
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
class B(cls.Basic):
pass
def _run_test(self, auto_version_counter=True):
A, B = self.classes("A", "B")
s = fixture_session(future=True)
if auto_version_counter:
a1 = A()
else:
a1 = A(version_id=1)
s.add(a1)
s.commit()
eq_(a1.version_id, 1)
b1 = B()
b1.a = a1
s.add(b1)
s.commit()
eq_(a1.version_id, 1)
def test_plain_counter(self):
A, B = self.classes("A", "B")
a, b = self.tables("a", "b")
self.mapper_registry.map_imperatively(
A,
a,
properties={"bs": relationship(B, backref="a")},
version_id_col=a.c.version_id,
)
self.mapper_registry.map_imperatively(B, b)
self._run_test()
def test_functional_counter(self):
A, B = self.classes("A", "B")
a, b = self.tables("a", "b")
self.mapper_registry.map_imperatively(
A,
a,
properties={"bs": relationship(B, backref="a")},
version_id_col=a.c.version_id,
version_id_generator=lambda num: (num or 0) + 1,
)
self.mapper_registry.map_imperatively(B, b)
self._run_test()
def test_no_counter(self):
A, B = self.classes("A", "B")
a, b = self.tables("a", "b")
self.mapper_registry.map_imperatively(
A,
a,
properties={"bs": relationship(B, backref="a")},
version_id_col=a.c.version_id,
version_id_generator=False,
)
self.mapper_registry.map_imperatively(B, b)
self._run_test(False)
| NoBumpOnRelationshipTest |
python | sqlalchemy__sqlalchemy | test/ext/test_associationproxy.py | {
"start": 38767,
"end": 43105
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"Parent",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(128)),
)
Table(
"Children",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("parent_id", Integer, ForeignKey("Parent.id")),
Column("foo", String(128)),
Column("name", String(128)),
)
@classmethod
def setup_mappers(cls):
class Parent(cls.Basic):
children = association_proxy("_children", "name")
def __init__(self, name):
self.name = name
class Child(cls.Basic):
def __init__(self, name):
self.name = name
cls.mapper_registry.map_imperatively(Child, cls.tables.Children)
def roundtrip(self, obj):
self.session.add(obj)
self.session.flush()
id_, type_ = obj.id, type(obj)
self.session.expunge_all()
return self.session.get(type_, id_)
def test_lazy_list(self):
Parent, Child = self.classes("Parent", "Child")
self.session = fixture_session()
self.mapper_registry.map_imperatively(
Parent,
self.tables.Parent,
properties={
"_children": relationship(
Child, lazy="select", collection_class=list
)
},
)
p = Parent("p")
p.children = ["a", "b", "c"]
p = self.roundtrip(p)
# Is there a better way to ensure that the association_proxy
# didn't convert a lazy load to an eager load? This does work though.
self.assert_("_children" not in p.__dict__)
self.assert_(len(p._children) == 3)
self.assert_("_children" in p.__dict__)
def test_eager_list(self):
Parent, Child = self.classes("Parent", "Child")
self.session = fixture_session()
self.mapper_registry.map_imperatively(
Parent,
self.tables.Parent,
properties={
"_children": relationship(
Child, lazy="joined", collection_class=list
)
},
)
p = Parent("p")
p.children = ["a", "b", "c"]
p = self.roundtrip(p)
self.assert_("_children" in p.__dict__)
self.assert_(len(p._children) == 3)
def test_slicing_list(self):
Parent, Child = self.classes("Parent", "Child")
self.session = fixture_session()
self.mapper_registry.map_imperatively(
Parent,
self.tables.Parent,
properties={
"_children": relationship(
Child, lazy="select", collection_class=list
)
},
)
p = Parent("p")
p.children = ["a", "b", "c"]
p = self.roundtrip(p)
self.assert_(len(p._children) == 3)
eq_("b", p.children[1])
eq_(["b", "c"], p.children[-2:])
def test_lazy_scalar(self):
Parent, Child = self.classes("Parent", "Child")
self.session = fixture_session()
self.mapper_registry.map_imperatively(
Parent,
self.tables.Parent,
properties={
"_children": relationship(Child, lazy="select", uselist=False)
},
)
p = Parent("p")
p.children = "value"
p = self.roundtrip(p)
self.assert_("_children" not in p.__dict__)
self.assert_(p._children is not None)
def test_eager_scalar(self):
Parent, Child = self.classes("Parent", "Child")
self.session = fixture_session()
self.mapper_registry.map_imperatively(
Parent,
self.tables.Parent,
properties={
"_children": relationship(Child, lazy="joined", uselist=False)
},
)
p = Parent("p")
p.children = "value"
p = self.roundtrip(p)
self.assert_("_children" in p.__dict__)
self.assert_(p._children is not None)
| LazyLoadTest |
python | facelessuser__pymdown-extensions | pymdownx/tabbed.py | {
"start": 14955,
"end": 16168
} | class ____(Extension):
"""Add Tabbed extension."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {
'alternate_style': [False, "Use alternate style - Default: False"],
'slugify': [0, "Slugify function used to create tab specific IDs - Default: None"],
'combine_header_slug': [False, "Combine the tab slug with the slug of the parent header - Default: False"],
'separator': ['-', "Slug separator - Default: '-'"]
}
super().__init__(*args, **kwargs)
def extendMarkdown(self, md):
"""Add Tabbed to Markdown instance."""
md.registerExtension(self)
config = self.getConfigs()
self.tab_processor = TabbedProcessor(md.parser, config)
md.parser.blockprocessors.register(self.tab_processor, "tabbed", 105)
if config['slugify']:
slugs = TabbedTreeprocessor(md, config)
md.treeprocessors.register(slugs, 'tab_slugs', 4)
def reset(self):
"""Reset."""
self.tab_processor.tab_group_count = 0
def makeExtension(*args, **kwargs):
"""Return extension."""
return TabbedExtension(*args, **kwargs)
| TabbedExtension |
python | numba__numba | numba/cuda/stubs.py | {
"start": 7483,
"end": 7751
} | class ____(Stub):
"""
selp(a, b, c)
Select between source operands, based on the value of the predicate source
operand.
"""
#-------------------------------------------------------------------------------
# single / double precision arithmetic
| selp |
python | ansible__ansible | test/units/utils/test_encrypt.py | {
"start": 9497,
"end": 11104
} | class ____:
"""
Tests for the CryptHash class.
These tests are hitting code paths that are otherwise impossible to reach
through integration tests, but necessary for more complete code coverage.
"""
def test_invalid_instantiation(self, mocker: MockerFixture) -> None:
"""Should not be able to instantiate a CryptHash class without libxcrypt/libcrypt."""
mocker.patch('ansible.utils.encrypt.HAS_CRYPT', False)
with pytest.raises(AnsibleError, match=r"crypt cannot be used as the 'libxcrypt' library is not installed or is unusable\."):
encrypt.CryptHash("sha256_crypt")
def test_ansible_unsupported_algorithm(self) -> None:
"""Test AnsibleError is raised when Ansible does not support requested algorithm."""
with pytest.raises(AnsibleError, match=r"crypt does not support 'foo' algorithm"):
encrypt.CryptHash("foo")
def test_library_unsupported_algorithm(self, mocker: MockerFixture) -> None:
"""Test AnsibleError is raised when crypt library does not support an Ansible supported algorithm."""
# Pretend we have a crypt lib that doesn't like our algo
mocker.patch('ansible.utils.encrypt.HAS_CRYPT', True)
mocker.patch('ansible._internal._encryption._crypt.CryptFacade.crypt', side_effect=ValueError)
# instantiate with an Ansible supported algo
crypt_hash = encrypt.CryptHash("sha256_crypt")
with pytest.raises(AnsibleError, match=r"crypt does not support 'sha256_crypt' algorithm"):
crypt_hash.hash("123", salt="12345678")
| TestCryptHash |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels23.py | {
"start": 315,
"end": 1839
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels23.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [45705856, 45740416]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {
"value": 1,
"font": {
"name": "Consolas",
"baseline": 1 * -1,
"pitch_family": 49,
"charset": 0,
},
},
}
)
chart.add_series(
{
"values": "=Sheet1!$B$1:$B$5",
"data_labels": {"value": 1, "position": "inside_base"},
}
)
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | dask__distributed | distributed/http/scheduler/json.py | {
"start": 129,
"end": 1646
} | class ____(RequestHandler):
def get(self):
scheduler = self.server
erred = 0
nbytes = 0
nthreads = 0
memory = 0
processing = 0
released = 0
waiting = 0
waiting_data = 0
desired_workers = scheduler.adaptive_target()
for ts in scheduler.tasks.values():
if ts.exception_blame is not None:
erred += 1
elif ts.state == "released":
released += 1
if ts.waiting_on:
waiting += 1
if ts.waiters:
waiting_data += 1
for ws in scheduler.workers.values():
nthreads += ws.nthreads
memory += len(ws.has_what)
nbytes += ws.nbytes
processing += len(ws.processing)
response = {
"bytes": nbytes,
"clients": len(scheduler.clients),
"cores": nthreads,
"erred": erred,
"hosts": len(scheduler.host_info),
"idle": len(scheduler.idle),
"memory": memory,
"processing": processing,
"released": released,
"saturated": len(scheduler.saturated),
"tasks": len(scheduler.tasks),
"unrunnable": len(scheduler.unrunnable),
"waiting": waiting,
"waiting_data": waiting_data,
"workers": len(scheduler.workers),
"desired_workers": desired_workers,
}
self.write(response)
| CountsJSON |
python | Netflix__metaflow | metaflow/plugins/datatools/s3/s3.py | {
"start": 2868,
"end": 2952
} | class ____(MetaflowException):
headline = "Invalid address"
| MetaflowS3URLException |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 859910,
"end": 860660
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for ProjectV2SortBy."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("ProjectV2SortByEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of(ProjectV2SortBy), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| ProjectV2SortByConnection |
python | pytorch__pytorch | torch/_export/non_strict_utils.py | {
"start": 2000,
"end": 35663
} | class ____:
"""
Builds a trie of `KeyPath` prefixes mapping to `Source` leaves.
"""
def __init__(self):
self.root = {}
def add(self, kp: KeyPath, src: Source):
assert len(kp) > 0
*path, leaf = kp
node = self.root
for k in path:
if k not in node:
node[k] = {}
node = node[k]
node[leaf] = src
def get(self, kp: KeyPath) -> tuple[Source, KeyPath]:
node = self.root
while not isinstance(node, Source):
assert len(kp) > 0
k, *kp = kp # type: ignore[assignment]
node = node[k]
# pyrefly: ignore [bad-return]
return node, kp
def make_sourced_prefixes(nn_module, args, kwargs) -> _KeyPathTrie:
kp_args, kp_kwargs = tree_map_with_path(
lambda kp, _: _KeyPath(kp),
(tuple(None for _ in args), {k: None for k in kwargs}), # noqa: C420
)
kp_combined_args = _combine_args(nn_module, kp_args, kp_kwargs)
sourced_prefixes = _KeyPathTrie()
for name, struct in kp_combined_args.items():
src = LocalSource(name)
if isinstance(struct, _KeyPath):
sourced_prefixes.add(struct.kp, src)
elif isinstance(struct, tuple):
for i, prefix in enumerate(struct):
assert isinstance(prefix, _KeyPath)
sourced_prefixes.add(prefix.kp, GetItemSource(src, i))
elif isinstance(struct, dict):
for k, prefix in struct.items():
assert isinstance(prefix, _KeyPath)
sourced_prefixes.add(prefix.kp, GetItemSource(src, k))
return sourced_prefixes
def key_path_to_source(
kp: KeyPath, sourced_prefixes: Optional[_KeyPathTrie] = None
) -> Source:
"""
Given a key path, return the source for the key path.
"""
if sourced_prefixes is None:
source: Source = LocalSource("args")
else:
source, kp = sourced_prefixes.get(kp)
for k in kp:
if isinstance(k, SequenceKey):
source = GetItemSource(source, k.idx)
elif isinstance(k, MappingKey):
source = GetItemSource(source, k.key)
elif isinstance(k, GetAttrKey):
source = AttrSource(source, k.name)
else:
raise ValueError(f"Unknown KeyEntry {k}")
return source
def _is_constant_argument(t):
return t is None or isinstance(t, (float, bool, str))
def fakify(
mode: FakeTensorMode,
kp: KeyPath,
t: Any,
t_constraints: dict[int, dict[int, Constraint]],
sources: dict[tuple[int, int], list[Source]],
sourced_prefixes: Optional[_KeyPathTrie] = None,
):
source = key_path_to_source(kp, sourced_prefixes=sourced_prefixes)
if _is_constant_argument(t) or isinstance(t, (torch.ScriptObject, torch.nn.Module)):
return t
if isinstance(t, _IntWrapper):
if t.dynamism is not None and t.dynamism.type in ( # type: ignore[union-attr]
_DimHintType.DYNAMIC,
_DimHintType.AUTO,
):
symint = mode.shape_env.create_unspecified_symint_and_symbol( # type: ignore[union-attr]
t.val, source, DimDynamic.DYNAMIC
)
context = (
SymIntSymbolicContext(
constraint=RelaxedUnspecConstraint(warn_only=False)
)
if t.dynamism.type == _DimHintType.DYNAMIC # type: ignore[union-attr]
else None
)
mode.shape_env.tracked_fakes.append( # type: ignore[union-attr]
TrackedFake(symint, source, context)
)
return symint
else:
return t.val
if not isinstance(t, torch.Tensor):
raise ValueError(
f"Unsupported input type {type(t)}. "
"Export only supports pytree containers of basic types (Tensor, int, float, ...) as input. "
"To register a custom dataclass, use torch.export.register_dataclass. "
"To register a custom container type, use torch.utils._pytree.register_pytree_node. "
"To register a constant input, use torch.utils._pytree.register_constant"
)
# Create symbolic context (handles subclass recursion internally)
symbolic_context = _create_symbolic_context_for_tensor(
t, source, t_constraints, sources, mode
)
fake = mode.from_tensor(t, source=source, symbolic_context=symbolic_context)
mode.shape_env.tracked_fakes.append(TrackedFake(fake, source, symbolic_context)) # type: ignore[union-attr]
return fake
def _create_symbolic_context_for_tensor(t, source, t_constraints, sources, mode):
"""Helper function to create symbolic context for a tensor."""
from torch._dynamo.source import AttrSource
from torch.fx.experimental.symbolic_shapes import (
DimDynamic,
RelaxedUnspecConstraint,
SubclassSymbolicContext,
)
from torch.utils._python_dispatch import is_traceable_wrapper_subclass
# Common dynamic dimension logic for both regular tensors and subclasses
n_dims = len(t.shape)
dynamic_sizes = []
constraint_sizes = [None] * n_dims
for i in range(n_dims):
if i in getattr(t, "_dynamo_weak_dynamic_indices", {}):
dynamic_sizes.append(DimDynamic.DYNAMIC)
elif i in getattr(t, "_dynamo_dynamic_indices", {}):
# bit annoying, but we need to replicate process in _dynamo/variables/builder.py
# where a RelaxedUnspecConstraint is created for Dim.DYNAMIC, so constraint violations
# are raised when specializing.
dynamic_sizes.append(DimDynamic.DYNAMIC)
constraint_sizes[i] = RelaxedUnspecConstraint(warn_only=False) # type: ignore[call-overload]
else:
dynamic_sizes.append(DimDynamic.STATIC)
# Handle nested subclasses
if is_traceable_wrapper_subclass(t):
# Get inner contexts recursively
inner_contexts = {}
attrs, _ = type(t).__tensor_flatten__(t)
# Propagate outer tensor constraints to inner tensors if not already present
for attr in attrs:
inner_tensor = getattr(t, attr)
inner_source = AttrSource(source, attr)
inner_contexts[attr] = _create_symbolic_context_for_tensor(
inner_tensor, inner_source, t_constraints, sources, mode
)
symbolic_context = SubclassSymbolicContext(
dynamic_sizes=dynamic_sizes,
constraint_sizes=constraint_sizes, # type: ignore[arg-type]
view_base_context=None,
tensor_source=source,
shape_env_to_source_to_symbol_cache={},
inner_contexts=inner_contexts,
)
else:
symbolic_context: StatelessSymbolicContext = ( # type: ignore[no-redef]
StatelessSymbolicContext(
dynamic_sizes=dynamic_sizes,
constraint_sizes=constraint_sizes, # type: ignore[arg-type]
)
)
# Apply constraints (common logic)
t_id = id(t)
assert mode.shape_env is not None
if t_id in t_constraints:
for i, constraint in t_constraints[t_id].items():
src = TensorPropertySource(base=source, prop=TensorProperty.SIZE, idx=i)
sources[(t_id, i)].append(src)
if isinstance(constraint, _RelaxedConstraint):
continue
symbolic_context.constraint_sizes[i] = constraint.constraint_range
mode.shape_env.source_name_to_debug_name[src.name()] = constraint.name # type: ignore[assignment]
return symbolic_context
def _is_unbacked_symint(symbol):
if not isinstance(symbol, torch.SymInt):
return False
return symbol.node.shape_env.is_unbacked_symint(symbol.node.expr)
def _tensor_min_max(*args, real_callable, tensor_callable, **kwargs):
"""
This logic is replicated from dynamo/variables/builtin.py
"""
if len(args) == 2 and not kwargs:
arg1, arg2 = args
# Case 1: Both are tensors
if isinstance(arg1, torch.Tensor) and isinstance(arg2, torch.Tensor):
return tensor_callable(arg1, arg2)
# Case 2: One tensor, one scalar
elif isinstance(arg1, torch.Tensor) or isinstance(arg2, torch.Tensor):
if not isinstance(arg1, torch.Tensor):
arg1, arg2 = arg2, arg1
if isinstance(arg2, (int, float)):
kwarg = {"min" if tensor_callable is torch.maximum else "max": arg2}
return torch.clamp(arg1, **kwarg) # type: ignore[call-overload]
else:
return real_callable(arg1, arg2)
# Case 3: SymInts
elif isinstance(arg1, torch.SymInt) or isinstance(arg2, torch.SymInt):
return (
torch.sym_max(arg1, arg2)
if tensor_callable is torch.maximum
else torch.sym_min(arg1, arg2)
)
# Fallback
else:
return real_callable(arg1, arg2)
# Single iterable argument handling
if len(args) == 1 and not kwargs:
iterable = args[0]
if isinstance(iterable, torch.Tensor):
return tensor_callable(iterable)
try:
iterator = iter(iterable)
except TypeError:
pass
else:
items = list(iterator)
if not items:
raise ValueError(f"{real_callable.__name__}() arg is an empty sequence")
return functools.reduce(
lambda a, b: _tensor_min_max(
a, b, real_callable=real_callable, tensor_callable=tensor_callable
),
items,
)
# Fallback to original callable
return real_callable(*args, **kwargs)
@contextmanager
def _override_builtin_ops():
original_max = builtins.max
original_min = builtins.min
original_pow = math.pow
# pyrefly: ignore [bad-assignment]
builtins.max = functools.partial(
_tensor_min_max, real_callable=original_max, tensor_callable=torch.maximum
)
# pyrefly: ignore [bad-assignment]
builtins.min = functools.partial(
_tensor_min_max, real_callable=original_min, tensor_callable=torch.minimum
)
math.pow = lambda x, y: x**y # type: ignore[operator]
try:
yield
finally:
builtins.max = original_max
builtins.min = original_min
math.pow = original_pow
def make_fake_inputs(
nn_module,
args,
kwargs,
dynamic_shapes,
prefer_deferred_runtime_asserts_over_guards=False,
):
"""
Given an nn module, example inputs, and constraints, return a new fake mode,
fake inputs created in that mode whose dynamic shape dimensions are constrained
by the given ranges, and sources for pairs of dynamic shape dimensions that are
constrained to be equal.
"""
# TODO(avik): refactor Dynamo to avoid duplication of the following code
# between non-strict and strict.
# Specifically, here (non-strict) we do the following pre-tracing steps:
# - Fakify inputs.
# - Process input shape equalities.
# In strict, these steps are spread across multiple files:
# - output_graph.py fakifies inputs.
# - [post-tracing] guards.py processes input shape equalities.
import torch._functorch.config as _config
# Map ints to a wrapper structure to help us mark it as dynamic, if it is
# dynamic. We will unwrap ints in fakify later.
args, kwargs = pytree.tree_map_only(int, lambda a: _IntWrapper(a), (args, kwargs))
combined_args = _combine_args(nn_module, args, kwargs)
_check_dynamic_shapes(combined_args, dynamic_shapes)
constraints = _process_dynamic_shapes(combined_args, dynamic_shapes)
t_constraints: dict[int, dict[int, Constraint]] = defaultdict(dict)
for constraint in constraints:
t_constraints[constraint.t_id][constraint.dim] = constraint
context = torch._guards.TracingContext.try_get()
if context is not None:
# This occurs when we are exporting within dynamo. There already exists
# a toplevel TracingContext with a fake mode, so we do not want to
# create another fake mode.
fake_mode = context.fake_mode
assert fake_mode is not None
else:
if isinstance(nn_module.forward, functools.partial):
# functools handles nesting by itself, no need to recurse
code = nn_module.forward.func.__code__
elif (
sys.version_info >= (3, 14)
and (fwd := getattr(nn_module.forward, "__func__", None))
and isinstance(fwd, functools.partial)
):
# functools.partial is now a method descriptor:
# https://docs.python.org/3/whatsnew/3.14.html#changes-in-the-python-api
code = fwd.func.__code__
else:
code = nn_module.forward.__code__
co_fields = {
"co_name": code.co_name,
"co_filename": code.co_filename,
"co_firstlineno": code.co_firstlineno,
}
with _config.patch(fake_tensor_allow_unsafe_data_ptr_access=False):
fake_mode = FakeTensorMode(
shape_env=ShapeEnv(
tracked_fakes=[],
co_fields=co_fields,
prefer_deferred_runtime_asserts_over_guards=prefer_deferred_runtime_asserts_over_guards,
trace_asserts=True,
),
allow_non_fake_inputs=True,
export=True,
)
if fake_mode.shape_env is None or fake_mode.shape_env.tracked_fakes is None:
raise ValueError(
"Detected fake_mode does not have a shape_env with tracked fakes. "
"If you constructed the module under a FakeTensorMode, "
"please initialize it like: FakeTensorMode(shape_env=ShapeEnv(tracked_fakes=[]))"
)
with fake_mode:
original_signature = inspect.signature(nn_module.forward)
sources: dict[tuple[int, int], list[Source]] = defaultdict(list)
sourced_prefixes = make_sourced_prefixes(nn_module, args, kwargs)
fake_args, fake_kwargs = tree_map_with_path(
lambda kp, val: fakify(
fake_mode,
kp,
val,
t_constraints,
sources,
sourced_prefixes=sourced_prefixes,
),
(args, kwargs),
)
names: dict[str, tuple[int, int]] = {}
source_pairs: list[tuple[Source, Source]] = []
derived_equalities: list[tuple[Source, Union[Source, Symbol], Callable]] = []
phantom_symbols: dict[str, Symbol] = {}
relaxed_sources: set[Source] = set()
for constraint in constraints:
torch.export.dynamic_shapes._process_equalities(
constraint,
lambda t_id, dim: sources[(t_id, dim)],
fake_mode.shape_env,
names,
source_pairs,
derived_equalities,
phantom_symbols,
relaxed_sources,
)
equalities_inputs = EqualityConstraint(
source_pairs=source_pairs,
derived_equalities=derived_equalities,
phantom_symbols=list(phantom_symbols.values()),
relaxed_sources=relaxed_sources,
warn_only=False,
)
return (
fake_mode,
fake_args,
fake_kwargs,
equalities_inputs,
original_signature,
dynamic_shapes,
)
def _flatten_dynamic_shapes(
combined_args: dict[str, Any],
dynamic_shapes: Union[dict[str, Any], tuple[Any], list[Any]],
) -> list[Any]:
flat_shapes = []
def _tree_map_helper(path, t, shape):
nonlocal flat_shapes
flat_shapes.append(shape)
_tree_map_with_path(_tree_map_helper, combined_args, dynamic_shapes)
return flat_shapes
def _clean_dynamic_markers(tensor: torch.Tensor) -> None:
for attr in [
"_dynamo_weak_dynamic_indices",
"_dynamo_dynamic_indices",
"_dynamo_dynamic_range",
"_dynamo_static_indices",
"_dynamo_unbacked_indices",
]:
if hasattr(tensor, attr):
delattr(tensor, attr)
def produce_guards_and_solve_constraints(
fake_mode: FakeTensorMode,
gm: torch.fx.GraphModule,
dynamic_shapes: Union[dict[str, Any], tuple[Any], list[Any], None],
equalities_inputs: EqualityConstraint,
original_signature: inspect.Signature,
):
"""
Given a fake mode, sources pairs corresponding to equal dynamic shape dimensions,
and a graph module, produce guards on the fake mode's shape env (raising constraint
violations if any), solve (to suggest simplifications or fixes).
Dynamo already performs this, so this is for non-strict mode.
Additional inputs:
equalities_inputs: the equality constraints to use for guards
original_signature: the signature of the forward method
"""
shape_env = fake_mode.shape_env
assert shape_env is not None
assert shape_env.tracked_fakes is not None
placeholders = [tf.fake for tf in shape_env.tracked_fakes]
sources = [tf.source for tf in shape_env.tracked_fakes]
input_contexts = [tf.symbolic_context for tf in shape_env.tracked_fakes]
constraint_violation_error = None
try:
shape_env.produce_guards(
placeholders,
sources,
input_contexts=input_contexts,
equalities_inputs=equalities_inputs,
ignore_static=False,
)
except ConstraintViolationError as e:
constraint_violation_error = e
shape_env.frozen = True
dim_constraints = shape_env.dim_constraints
if dim_constraints is None:
# Expected when shape_env.produce_guards throws an early constraint violation error.
# There is nothing to solve for in this case.
# TODO(avik): Maybe record the constraint violation error instead and replay later?
assert constraint_violation_error
raise constraint_violation_error
dim_constraints.solve()
forced_specializations = dim_constraints.forced_specializations()
msg = dim_constraints.prettify_results(
original_signature,
dynamic_shapes, # type: ignore[arg-type]
constraint_violation_error,
forced_specializations, # type: ignore[arg-type]
)
if constraint_violation_error:
constraint_violation_error.args = (constraint_violation_error.args[0] + msg,)
elif forced_specializations:
constraint_violation_error = ConstraintViolationError(msg)
if constraint_violation_error:
raise constraint_violation_error
def is_int(x: object) -> bool:
return isinstance(x, int) or (isinstance(x, torch.SymInt) and x.node.expr.is_number)
def _constrain_user_specified_dimhint_range(
symint: torch.SymInt,
hint: int,
dim: _DimHint,
range_constraints,
shape_env,
keypath: KeyPath,
i: Optional[int] = None,
) -> Optional[str]:
trace_vr = (
range_constraints[symint.node.expr]
if not is_int(symint)
else ValueRanges(int(symint), int(symint))
)
# warn on 0/1 specialization for Dim.AUTO; not an actual error
if dim.type == _DimHintType.AUTO and trace_vr.is_singleton() and hint in (0, 1):
pathstr = f"inputs{pytree.keystr(keypath)}"
if i is not None:
pathstr += f".shape[{i}]"
msg = (
f"dimension {pathstr} 0/1 specialized; Dim.AUTO was specified along "
+ f"with a sample input with hint = {hint}."
)
log.warning(msg)
try:
user_vr = ValueRanges(
lower=0 if dim.min is None else dim.min,
upper=int_oo if dim.max is None else dim.max,
)
if is_int(symint):
out_vr = trace_vr & user_vr
else:
range_constraints[symint.node.expr] &= user_vr
shape_env.var_to_range[symint.node._expr] &= user_vr
out_vr = range_constraints[symint.node.expr]
# check for Dim.DYNAMIC specializations; special case error message on 0/1
if dim.type == _DimHintType.DYNAMIC and out_vr.is_singleton():
path = f"inputs{pytree.keystr(keypath)}"
if i is not None:
path += f".shape[{i}]"
if (
trace_vr.is_singleton()
and hint in (0, 1)
and not torch.fx.experimental._config.backed_size_oblivious
):
msg = (
f"- Received user-specified dim hint Dim.DYNAMIC(min={dim.min}, max={dim.max}), "
f"but export 0/1 specialized due to hint of {hint} for dimension {path}."
)
else:
msg = (
f"- Received user-specified dim hint Dim.DYNAMIC(min={dim.min}, max={dim.max}), "
f"but tracing inferred a static shape of {out_vr.lower} for dimension {path}."
)
return msg
except torch.utils._sympy.value_ranges.ValueRangeError:
path = f"inputs{pytree.keystr(keypath)}"
if i is not None:
path += f".shape[{i}]"
msg = (
f"- Received user-specified min/max range of [{dim.min}, {dim.max}], "
f"conflicting with the inferred min/max range of [{trace_vr.lower}, {trace_vr.upper}], "
f"for {path}."
)
return msg
return None
def make_constraints(
fake_mode: FakeTensorMode,
gm: torch.fx.GraphModule,
combined_args: dict[str, Any],
dynamic_shapes: Union[dict[str, Any], tuple[Any], list[Any], None],
num_lifted_inputs: int,
):
"""
Given a fake mode's shape env and user-specified dynamic shapes,
return the resulting range constraints and equality constraints.
Additional args:
num_lifted_inputs: the number of non-user-input placeholder nodes in the graph
(used only to enumerate the user-input nodes)
"""
shape_env = fake_mode.shape_env
assert shape_env is not None
inline_constraints = gm.meta.get("inline_constraints", [])
range_constraints = defaultdict(lambda: ValueRanges(0, int_oo)) | inline_constraints
if not dynamic_shapes:
return dict(range_constraints)
# clean up dynamic markers from tensors
flat_paths, flat_args = zip(*pytree.tree_flatten_with_path(combined_args)[0])
for arg in flat_args:
if isinstance(arg, torch.Tensor):
_clean_dynamic_markers(arg)
# get individual dynamic shapes spec for each input
if not isinstance(dynamic_shapes, dict):
assert isinstance(dynamic_shapes, (tuple, list))
combined_args = type(dynamic_shapes)(combined_args.values()) # type: ignore[assignment, misc]
flat_dynamic_shapes = _flatten_dynamic_shapes(combined_args, dynamic_shapes)
# check number of shapes vs. number of inputs
num_placeholders = [node.op == "placeholder" for node in gm.graph.nodes].count(True)
assert len(flat_dynamic_shapes) == num_placeholders - num_lifted_inputs
free_symbols = set()
range_violations = []
for input_index, node in enumerate(gm.graph.nodes):
meta_val = node.meta.get("val")
if (
input_index < num_lifted_inputs
or node.op != "placeholder"
or meta_val is None
):
continue
elif _is_constant_argument(meta_val) or isinstance(meta_val, CustomObjArgument):
continue
shape_spec = flat_dynamic_shapes[input_index - num_lifted_inputs]
keypath = flat_paths[input_index - num_lifted_inputs]
flat_arg = flat_args[input_index - num_lifted_inputs]
if isinstance(meta_val, int) or (
isinstance(meta_val, torch.SymInt) and meta_val.node.expr.is_number
):
pass
elif isinstance(meta_val, torch.SymInt):
if shape_spec is not None and isinstance(shape_spec, _DimHint):
hint = flat_arg
range_constraints[meta_val.node.expr] &= shape_env.bound_sympy(
meta_val.node._expr
)
violation = _constrain_user_specified_dimhint_range(
meta_val,
hint,
shape_spec,
range_constraints,
shape_env,
keypath,
None,
)
if violation:
range_violations.append(violation)
else:
raise RuntimeError("nyi")
free_symbols.update(meta_val.node.expr.free_symbols)
elif isinstance(meta_val, torch.Tensor):
for i, d in enumerate(node.meta["val"].shape):
dim = None
if isinstance(shape_spec, (list, tuple)):
dim = shape_spec[i]
elif isinstance(shape_spec, dict):
dim = shape_spec.get(i)
if not is_int(d):
# Compute the range constraint for the symbolic expression corresponding
# to this shape dimension and store it.
if dim is None or isinstance(dim, _DimHint):
range_constraints[d.node.expr] &= shape_env.bound_sympy(
d.node.expr
)
else:
range_constraints[d.node.expr] &= ValueRanges(
lower=dim.min, upper=dim.max
)
free_symbols.update(d.node.expr.free_symbols)
# check user-specified min/max range for DimHints;
# we might want to do this even if model tracing inferred a static dimension.
if isinstance(dim, _DimHint):
hint = flat_arg.shape[i]
violation = _constrain_user_specified_dimhint_range(
d, hint, dim, range_constraints, shape_env, keypath, i
)
if violation:
range_violations.append(violation)
else:
raise RuntimeError(f"Unfamiliar meta val: {meta_val}")
if range_violations:
prefix = "Found the following conflicts between user-specified ranges and inferred ranges from model tracing:\n"
raise ValueError(prefix + "\n".join(range_violations))
for symbol in free_symbols:
if symbol not in range_constraints:
# Placeholders can have symbolic shapes that are derived expressions.
# The above code will record direct range constraints for them
# so that we can do runtime assertions. In addition, for serde checks
# we want to record range constraints for their root symbols.
range_constraints[symbol] = shape_env.var_to_range[symbol]
return dict(range_constraints)
def _gather_constant_attrs(m: torch.nn.Module) -> ConstantAttrMap:
"""Search the module hierarchy, gathering up all tensor and ScriptObject constants.
Returns a dictionary mapping hash(value) to the name of the constant. We
have to abuse `hash` here unfortunately, see: [ScriptObject hash].
"""
constants = ConstantAttrMap()
buffers_parameters = set(m.buffers())
buffers_parameters.update(m.parameters())
def inner(m: torch.nn.Module, prefix_atoms: list[str], constants):
for k, v in m.__dict__.items():
if isinstance(
v,
(
torch.Tensor,
torch.ScriptObject,
FakeScriptObject,
),
):
if v in buffers_parameters:
# filter out buffers and parameters, leaving only constants
continue
fqn = ".".join(prefix_atoms + [k])
constants.add(v, fqn)
for k, v in m.named_children():
inner(v, prefix_atoms + [k], constants)
inner(m, [], constants)
return constants
def _get_graph_inputs_of_type_nn_module(
args: Optional[tuple[tuple[Any], dict[Any, Any]]],
) -> set[type[torch.nn.Module]]:
if args is None:
return set()
module_types = set()
for arg in pytree.tree_leaves(args):
if isinstance(arg, torch.nn.Module):
module_types.add(type(arg))
return module_types
def _enter_enable_graph_inputs_of_type_nn_module(
module_types: set[type[torch.nn.Module]],
) -> None:
for t in module_types:
torch._export.utils.register_module_as_pytree_input_node(t)
def _exit_enable_graph_inputs_of_type_nn_module(
module_types: set[type[torch.nn.Module]],
) -> None:
for t in module_types:
torch._export.utils.deregister_module_as_pytree_input_node(t)
@contextlib.contextmanager
def _enable_graph_inputs_of_type_nn_module(
args: Optional[tuple[tuple[Any], dict[Any, Any]]],
):
if args is None:
yield
return
module_types = _get_graph_inputs_of_type_nn_module(args)
_enter_enable_graph_inputs_of_type_nn_module(module_types)
try:
yield
finally:
_exit_enable_graph_inputs_of_type_nn_module(module_types)
@contextlib.contextmanager
def _fakify_module_inputs(
args: tuple[Any],
kwargs: dict[Any, Any],
fake_mode: torch._subclasses.fake_tensor.FakeTensorMode,
):
# This context manager is used to fakify module inputs.
# Inputs:
# args, kwargs: the args and kwargs containing module inputs that haven't been fakified.
# fake_mode: the fake mode to be used for fakifying script objects. It's the same mode that fakify input tensors.
ctxs = [_enable_graph_inputs_of_type_nn_module((args, kwargs))]
for arg in pytree.tree_leaves((args, kwargs)):
if isinstance(arg, torch.nn.Module):
fake_params_buffers = _fakify_params_buffers(fake_mode, arg)
ctxs.append(
torch.nn.utils.stateless._reparametrize_module(
arg,
fake_params_buffers,
tie_weights=True,
strict=True,
stack_weights=True,
)
)
with contextlib.ExitStack() as stack:
for ctx in ctxs:
stack.enter_context(ctx)
yield
@contextlib.contextmanager
def _fakify_script_objects(
mod: torch.nn.Module,
args: Sequence[Any],
kwargs: dict[Any, Any],
fake_mode: Optional[torch._subclasses.fake_tensor.FakeTensorMode],
):
# This context manager is used to fakify script objects into FakeScriptObject.
# Inputs:
# mod: the module to be exported, it (and its recursive submodules)'s script object attrs haven't been fakified.
# args, kwargs: the args and kwargs inputs for mod, script object inputs haven't been fakified.
# fake_mode: the fake mode to be used for fakifying script objects. It's the same mode that fakify input tensors.
#
# Returns:
# mod: the patched module, its (and its recursive submodules) script object attrs have been fakified.
# fake_args, fake_kwargs: new fakified args and kwargs.
# Script object inputs have been fakified. Don't touch the tensors.
# fake_constant_attrs: a new map from FakeScriptObject to the fqn of the original script object.
# fake_to_real: a mapping between FakeScriptObject and the original script object in order to un-do the patching.
constant_attrs: ConstantAttrMap = _gather_constant_attrs(mod)
assert not any(
isinstance(obj, FakeScriptObject) for obj in constant_attrs.values()
), "Mod shouldn't contain any FakeScriptObject."
assert not pytree.tree_any(
lambda obj: isinstance(obj, FakeScriptObject), (args, kwargs)
), "args and kwargs shouldn't contain any FakeScriptObject."
patched_attr = {}
fake_constant_attrs = ConstantAttrMap()
fake_to_real = {}
def _maybe_fakify_obj(obj):
fake_obj = torch._library.fake_class_registry.maybe_to_fake_obj(fake_mode, obj)
fake_to_real[fake_obj] = obj
return fake_obj
def _leaf_mod_and_attr(
mod: torch.nn.Module, attr_fqn: str
) -> tuple[torch.nn.Module, str]:
*prefix_attr, last_attr = attr_fqn.split(".")
cur_mod = mod
for attr in prefix_attr:
cur_mod = getattr(cur_mod, attr)
return cur_mod, last_attr
try:
for obj, fqns in constant_attrs.items():
if torch._library.fake_class_registry._is_script_object(
obj
) or is_opaque_type(obj):
fake_script_obj = _maybe_fakify_obj(obj)
for fqn in fqns:
cur_mod, attr = _leaf_mod_and_attr(mod, fqn)
assert obj is getattr(cur_mod, attr)
setattr(cur_mod, attr, fake_script_obj)
fake_constant_attrs.add(fake_script_obj, fqn)
patched_attr[fqn] = obj
else:
for fqn in fqns:
fake_constant_attrs.add(obj, fqn)
fake_args, fake_kwargs = pytree.tree_map_only(
torch.ScriptObject, _maybe_fakify_obj, (args, kwargs)
)
yield (mod, fake_args, fake_kwargs, fake_constant_attrs, fake_to_real)
finally:
for fqn, orig_obj in patched_attr.items():
cur_mod, attr = _leaf_mod_and_attr(mod, fqn)
setattr(cur_mod, attr, orig_obj)
| _KeyPathTrie |
python | getsentry__sentry | tests/sentry/workflow_engine/models/test_json_config_base.py | {
"start": 373,
"end": 2254
} | class ____(BaseGroupTypeTest):
def setUp(self) -> None:
super().setUp()
self.correct_config = {
"username": "user123",
"email": "user@example.com",
"fullName": "John Doe",
"age": 30,
"location": "Cityville",
"interests": ["Travel", "Technology"],
}
self.example_schema = {
"$id": "https://example.com/user-profile.schema.json",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"description": "A representation of a user profile",
"type": "object",
"required": ["username", "email"],
"properties": {
"username": {"type": "string"},
"email": {"type": "string", "format": "email"},
"fullName": {"type": "string"},
"age": {"type": "integer", "minimum": 0},
"location": {"type": "string"},
"interests": {"type": "array", "items": {"type": "string"}},
},
}
@dataclass(frozen=True)
class TestGroupType(GroupType):
type_id = 1
slug = "test"
description = "Test"
category = GroupCategory.ERROR.value
category_v2 = GroupCategory.ERROR.value
detector_settings = DetectorSettings(config_schema=self.example_schema)
@dataclass(frozen=True)
class ExampleGroupType(GroupType):
type_id = 2
slug = "example"
description = "Example"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.DB_QUERY.value
detector_settings = DetectorSettings(
config_schema={"type": "object", "additionalProperties": False},
)
# TODO - Move this to the detector model test
| JSONConfigBaseTest |
python | realpython__materials | python-oop/dogbreeds.py | {
"start": 385,
"end": 418
} | class ____(Dog):
pass
| Dachshund |
python | davidhalter__jedi | test/completion/classes.py | {
"start": 2415,
"end": 2718
} | class ____():
def __init__(self):
self.addition = 1 + 2
#? int()
A().addition
# should also work before `=`
#? 8 int()
A().addition = None
#? 8 int()
A(1).addition = None
#? 1 A
A(1).addition = None
a = A()
#? 8 int()
a.addition = None
# -----------------
# inheritance
# -----------------
| A |
python | sqlalchemy__sqlalchemy | test/orm/test_relationships.py | {
"start": 109339,
"end": 111330
} | class ____(fixtures.MappedTest):
"""'viewonly' mappings that contain the same 'remote' column twice"""
@classmethod
def define_tables(cls, metadata):
Table(
"foos",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("bid1", Integer, ForeignKey("bars.id")),
Column("bid2", Integer, ForeignKey("bars.id")),
)
Table(
"bars",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(50)),
)
def test_relationship_on_or(self):
bars, foos = self.tables.bars, self.tables.foos
class Foo(ComparableEntity):
pass
class Bar(ComparableEntity):
pass
self.mapper_registry.map_imperatively(
Foo,
foos,
properties={
"bars": relationship(
Bar,
primaryjoin=sa.or_(
bars.c.id == foos.c.bid1, bars.c.id == foos.c.bid2
),
uselist=True,
viewonly=True,
)
},
)
self.mapper_registry.map_imperatively(Bar, bars)
sess = fixture_session()
b1 = Bar(id=1, data="b1")
b2 = Bar(id=2, data="b2")
b3 = Bar(id=3, data="b3")
f1 = Foo(bid1=1, bid2=2)
f2 = Foo(bid1=3, bid2=None)
sess.add_all((b1, b2, b3))
sess.flush()
sess.add_all((f1, f2))
sess.flush()
sess.expunge_all()
eq_(
sess.query(Foo).filter_by(id=f1.id).one(),
Foo(bars=[Bar(data="b1"), Bar(data="b2")]),
)
eq_(
sess.query(Foo).filter_by(id=f2.id).one(),
Foo(bars=[Bar(data="b3")]),
)
| ViewOnlyRepeatedRemoteColumn |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/ops/data_service_ops.py | {
"start": 10478,
"end": 17953
} | class ____(dataset_ops.DatasetSource):
"""A `Dataset` that reads elements from the tf.data service."""
def __init__(self,
dataset_id,
processing_mode,
address,
element_spec,
protocol,
data_transfer_protocol,
job_name=None,
consumer_index=None,
num_consumers=None,
max_outstanding_requests=None,
task_refresh_interval_hint_ms=None,
cross_trainer_cache=None,
target_workers="AUTO"):
"""Constructs a _DataServiceDatasetV2.
Args:
dataset_id: The dataset id for the dataset to read from.
processing_mode: A `tf.data.experimental.service.ShardingPolicy`
specifying how to shard the dataset among tf.data workers. See
`tf.data.experimental.service.ShardingPolicy` for details. For backwards
compatibility, `processing_mode` may also be set to the strings
`"parallel_epochs"` or `"distributed_epoch"`, which are respectively
equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.
address: The tf.data service address, e.g. "localhost:5000".
element_spec: The dataset element spec for the dataset to read from.
protocol: The protocol to use for communicating with the tf.data service,
e.g. "grpc".
data_transfer_protocol: (Optional.) The protocol to use for transferring
data with the tf.data service. If not provided, a protocol is determined
at runtime.
job_name: (Optional.) The name of the job. If provided, it must be a
non-empty string or Tensor. This argument makes it possible for multiple
datasets to share the same job. The default behavior is that the dataset
creates anonymous, exclusively owned jobs.
consumer_index: (Optional.) The index of the consumer in the range from
`0` to `num_consumers`. Must be specified alongside `num_consumers`.
When specified, consumers will read from the job in a strict round-robin
order, instead of the default first-come-first-served order.
num_consumers: (Optional.) The number of consumers which will consume from
the job. Must be specified alongside `consumer_index`. When specified,
consumers will read from the job in a strict round-robin order, instead
of the default first-come-first-served order. When `num_consumers` is
specified, the dataset must have infinite cardinality to prevent a
producer from running out of data early and causing consumers to go out
of sync.
max_outstanding_requests: (Optional.) A limit on how many elements may be
requested at the same time. You can use this option to control the
amount of memory used, since `distribute` won't use more than
`element_size` * `max_outstanding_requests` of memory.
task_refresh_interval_hint_ms: (Optional.) A hint for how often to query
the dispatcher for task changes.
cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is
provided, dataset iteration will be shared across concurrently running
trainers. See
https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers
for details.
target_workers: (Optional.) Which workers to read from. If `"AUTO"`,
tf.data runtime decides which workers to read from. If `"ANY"`, reads
from any tf.data service workers. If `"LOCAL"`, only reads from local
in-process tf.data service workers. `"AUTO"` works well for most cases,
while users can specify other targets. For example, `"LOCAL"` helps
avoid RPCs and data copy if every TF worker colocates with a tf.data
service worker. Consumers of a shared job must use the same
`target_workers`. Defaults to `"AUTO"`.
"""
if consumer_index is None != num_consumers is None:
raise ValueError(
"Must either set both `consumer_index` and `num_consumers`, "
"or neither. ",
f"consumer_index={consumer_index}, num_consumers={num_consumers}")
if num_consumers is not None and job_name is None:
raise ValueError("`job_name` must be set when setting `num_consumers`. "
f"num_consumers was set to {num_consumers}.")
processing_mode_def = data_service_pb2.ProcessingModeDef(
sharding_policy=_get_validated_sharding_policy(
processing_mode)._to_proto())
if job_name is None:
job_name = ""
if max_outstanding_requests is None:
max_outstanding_requests = dataset_ops.AUTOTUNE
if task_refresh_interval_hint_ms is None:
task_refresh_interval_hint_ms = dataset_ops.AUTOTUNE
self._dataset_id = _to_tensor(dataset_id)
self._processing_mode = ops.convert_to_tensor(
processing_mode_def.SerializeToString(),
dtype=dtypes.string,
name="processing_mode")
self._address = ops.convert_to_tensor(
address, dtype=dtypes.string, name="address")
self._protocol = ops.convert_to_tensor(
protocol, dtype=dtypes.string, name="protocol")
self._job_name = ops.convert_to_tensor(
job_name, dtype=dtypes.string, name="job_name")
self._consumer_index = ops.convert_to_tensor(
-1 if consumer_index is None else consumer_index,
dtype=dtypes.int64,
name="consumer_index")
self._num_consumers = ops.convert_to_tensor(
-1 if num_consumers is None else num_consumers,
dtype=dtypes.int64,
name="num_consumers")
self._max_outstanding_requests = ops.convert_to_tensor(
max_outstanding_requests,
dtype=dtypes.int64,
name="max_outstanding_requests")
self._element_spec = element_spec
uncompress_func = _get_uncompress_func(HashableElementSpec(element_spec))
cross_trainer_cache_options = (
cross_trainer_cache._to_proto().SerializeToString()
if cross_trainer_cache else None)
compat_kwargs = {}
if data_transfer_protocol is not None:
compat_kwargs["data_transfer_protocol"] = data_transfer_protocol
# If `uncompress` is `True`, the dataset will query the servers to find
# out the actual compression used. It is always set to `True` the first
# time the graph is built, and set to false when serializing, so we will
# uncompress at most once.
uncompress = True
variant_tensor = gen_experimental_dataset_ops.data_service_dataset_v4(
dataset_id=self._dataset_id,
processing_mode=self._processing_mode,
address=self._address,
protocol=self._protocol,
job_name=self._job_name,
consumer_index=self._consumer_index,
num_consumers=self._num_consumers,
max_outstanding_requests=self._max_outstanding_requests,
task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,
iteration_counter=(
gen_experimental_dataset_ops.dummy_iteration_counter()),
target_workers=target_workers,
uncompress=uncompress,
uncompress_fn=uncompress_func.function,
cross_trainer_cache_options=cross_trainer_cache_options,
**compat_kwargs,
**self._flat_structure)
super(_DataServiceDatasetV2, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._element_spec
| _DataServiceDatasetV2 |
python | donnemartin__interactive-coding-challenges | arrays_strings/priority_queue/priority_queue.py | {
"start": 13,
"end": 206
} | class ____(object):
def __init__(self, obj, key):
self.obj = obj
self.key = key
def __repr__(self):
return str(self.obj) + ': ' + str(self.key)
| PriorityQueueNode |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 156757,
"end": 157312
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, start_date: str):
"""Airbyte Source for Rki Covid.
Documentation can be found at https://docs.airbyte.com/integrations/sources/rki-covid
Args:
name (str): The name of the destination.
start_date (str): UTC date in the format 2017-01-25. Any data before this date will not be replicated.
"""
self.start_date = check.str_param(start_date, "start_date")
super().__init__("Rki Covid", name)
| RkiCovidSource |
python | pikepdf__pikepdf | tests/test_pdf.py | {
"start": 2223,
"end": 3190
} | class ____:
def test_open_pdf_wrong_password(self, resources):
# The correct passwords are "owner" and "user"
with pytest.raises(PasswordError):
Pdf.open(resources / 'graph-encrypted.pdf', password='wrong')
def test_open_pdf_password_encoding(self, resources):
with pytest.raises(PasswordError):
Pdf.open(resources / 'graph-encrypted.pdf', password=b'\x01\xfe')
def test_open_pdf_no_password_but_needed(self, resources):
with pytest.raises(PasswordError):
Pdf.open(resources / 'graph-encrypted.pdf')
def test_open_pdf_user_password(self, resources):
with Pdf.open(resources / 'graph-encrypted.pdf', password='user'):
pass
def test_unneeded_password_ignored(self, resources):
with pytest.warns(UserWarning, match="no password was needed"):
with Pdf.open(resources / 'graph.pdf', password='open sesame'):
pass
| TestPasswords |
python | django__django | django/db/models/functions/window.py | {
"start": 618,
"end": 1367
} | class ____(Func):
window_compatible = True
def __init__(self, expression, offset=1, default=None, **extra):
if expression is None:
raise ValueError(
"%s requires a non-null source expression." % self.__class__.__name__
)
if offset is None or offset <= 0:
raise ValueError(
"%s requires a positive integer for the offset."
% self.__class__.__name__
)
args = (expression, offset)
if default is not None:
args += (default,)
super().__init__(*args, **extra)
def _resolve_output_field(self):
sources = self.get_source_expressions()
return sources[0].output_field
| LagLeadFunction |
python | pyparsing__pyparsing | pyparsing/core.py | {
"start": 233976,
"end": 236231
} | class ____(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the
input string; this can be disabled by specifying
``'adjacent=False'`` in the constructor.
Example:
.. doctest::
>>> real = Word(nums) + '.' + Word(nums)
>>> print(real.parse_string('3.1416'))
['3', '.', '1416']
>>> # will also erroneously match the following
>>> print(real.parse_string('3. 1416'))
['3', '.', '1416']
>>> real = Combine(Word(nums) + '.' + Word(nums))
>>> print(real.parse_string('3.1416'))
['3.1416']
>>> # no match when there are internal spaces
>>> print(real.parse_string('3. 1416'))
Traceback (most recent call last):
ParseException: Expected W:(0123...)
"""
def __init__(
self,
expr: ParserElement,
join_string: str = "",
adjacent: bool = True,
*,
joinString: typing.Optional[str] = None,
) -> None:
super().__init__(expr)
joinString = joinString if joinString is not None else join_string
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leave_whitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore(self, other) -> ParserElement:
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if self.adjacent:
ParserElement.ignore(self, other)
else:
super().ignore(other)
return self
def postParse(self, instring, loc, tokenlist):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults(
["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults
)
if self.resultsName and retToks.haskeys():
return [retToks]
else:
return retToks
| Combine |
python | pypa__setuptools | setuptools/config/pyprojecttoml.py | {
"start": 15982,
"end": 17965
} | class ____(_expand.EnsurePackagesDiscovered):
def __init__(
self, distribution: Distribution, project_cfg: dict, setuptools_cfg: dict
) -> None:
super().__init__(distribution)
self._project_cfg = project_cfg
self._setuptools_cfg = setuptools_cfg
def __enter__(self) -> Self:
"""When entering the context, the values of ``packages``, ``py_modules`` and
``package_dir`` that are missing in ``dist`` are copied from ``setuptools_cfg``.
"""
dist, cfg = self._dist, self._setuptools_cfg
package_dir: dict[str, str] = cfg.setdefault("package-dir", {})
package_dir.update(dist.package_dir or {})
dist.package_dir = package_dir # needs to be the same object
dist.set_defaults._ignore_ext_modules() # pyproject.toml-specific behaviour
# Set `name`, `py_modules` and `packages` in dist to short-circuit
# auto-discovery, but avoid overwriting empty lists purposefully set by users.
if dist.metadata.name is None:
dist.metadata.name = self._project_cfg.get("name")
if dist.py_modules is None:
dist.py_modules = cfg.get("py-modules")
if dist.packages is None:
dist.packages = cfg.get("packages")
return super().__enter__()
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
"""When exiting the context, if values of ``packages``, ``py_modules`` and
``package_dir`` are missing in ``setuptools_cfg``, copy from ``dist``.
"""
# If anything was discovered set them back, so they count in the final config.
self._setuptools_cfg.setdefault("packages", self._dist.packages)
self._setuptools_cfg.setdefault("py-modules", self._dist.py_modules)
return super().__exit__(exc_type, exc_value, traceback)
| _EnsurePackagesDiscovered |
python | great-expectations__great_expectations | tests/core/test__docs_decorators.py | {
"start": 4036,
"end": 6427
} | class ____:
@pytest.mark.unit
def test_deprecated_decorator_full_docstring(self):
assert _func_full_docstring_deprecated.__doc__ == (
"My docstring.\n"
"\n"
".. deprecated:: 1.2.3\n"
" This is deprecated!!\n"
"\n"
"\n"
"Longer description.\n"
"\n"
"Args:\n"
" some_arg: describe some_arg\n"
" other_arg: describe other_arg\n"
)
assert _func_full_docstring_deprecated.__name__ == "_func_full_docstring_deprecated"
@pytest.mark.unit
def test_deprecated_decorator_full_docstring_no_message(self):
assert _func_full_docstring_deprecated_no_message.__doc__ == (
"My docstring.\n"
"\n"
".. deprecated:: 1.2.3\n"
" \n"
"\n"
"\n"
"Longer description.\n"
"\n"
"Args:\n"
" some_arg: describe some_arg\n"
" other_arg: describe other_arg\n"
)
@pytest.mark.unit
def test_deprecated_decorator_only_summary(self):
assert _func_only_summary_deprecated.__doc__ == (
"My docstring.\n\n.. deprecated:: 1.2.3\n This is deprecated!!\n"
)
@pytest.mark.unit
def test_deprecated_decorator_no_docstring(self):
assert _func_no_docstring_deprecated.__doc__ == (
"\n\n.. deprecated:: 1.2.3\n This is deprecated!!\n"
)
# @new_method_or_class
@new_method_or_class(version="1.2.3", message="Added in version 1.2.3")
def _func_full_docstring_new_method(some_arg, other_arg):
"""My docstring.
Longer description.
Args:
some_arg: describe some_arg
other_arg: describe other_arg
"""
pass
@new_method_or_class(version="1.2.3")
def _func_full_docstring_new_method_no_message(some_arg, other_arg):
"""My docstring.
Longer description.
Args:
some_arg: describe some_arg
other_arg: describe other_arg
"""
pass
@new_method_or_class(version="1.2.3", message="Added in version 1.2.3")
def _func_only_summary_new_method(some_arg, other_arg):
"""My docstring."""
pass
@new_method_or_class(version="1.2.3", message="Added in version 1.2.3")
def _func_no_docstring_new_method(some_arg, other_arg):
pass
| TestDeprecatedMethod |
python | pypa__twine | twine/exceptions.py | {
"start": 4830,
"end": 4934
} | class ____(TwineException):
"""Raised when configuration is invalid."""
pass
| InvalidConfiguration |
python | walkccc__LeetCode | solutions/3026. Maximum Good Subarray Sum/3026.py | {
"start": 0,
"end": 515
} | class ____:
def maximumSubarraySum(self, nums: list[int], k: int) -> int:
ans = -math.inf
prefix = 0
numToMinPrefix = {}
for num in nums:
if num not in numToMinPrefix or numToMinPrefix[num] > prefix:
numToMinPrefix[num] = prefix
prefix += num
if num + k in numToMinPrefix:
ans = max(ans, prefix - numToMinPrefix[num + k])
if num - k in numToMinPrefix:
ans = max(ans, prefix - numToMinPrefix[num - k])
return 0 if ans == -math.inf else ans
| Solution |
python | google__pytype | pytype/pytd/visitors.py | {
"start": 31941,
"end": 32388
} | class ____(Visitor):
"""Replace types that satisfy a matching function."""
def __init__(
self, matcher: Callable[[pytd.Node], bool], replacement: pytd.Node
):
super().__init__()
self._matcher = matcher
self._replacement = replacement
def VisitNamedType(self, node):
return self._replacement if self._matcher(node) else node
def VisitClassType(self, node):
return self.VisitNamedType(node)
| ReplaceTypesByMatcher |
python | pyqtgraph__pyqtgraph | pyqtgraph/widgets/RemoteGraphicsView.py | {
"start": 2123,
"end": 3571
} | class ____(QtGui.QWheelEvent):
@staticmethod
def get_state(obj, picklable=False):
# {PyQt6, PySide6} have position()
# {PyQt5, PySide2} 5.15 have position()
# {PyQt5, PySide2} 5.15 have posF() (contrary to C++ docs)
# {PyQt5, PySide2} 5.12 have posF()
lpos = obj.position() if hasattr(obj, 'position') else obj.posF()
gpos = obj.globalPosition() if hasattr(obj, 'globalPosition') else obj.globalPosF()
pixdel, angdel, btns = obj.pixelDelta(), obj.angleDelta(), obj.buttons()
mods, phase, inverted = obj.modifiers(), obj.phase(), obj.inverted()
if picklable:
btns, mods, phase = serialize_mouse_enum(btns, mods, phase)
return lpos, gpos, pixdel, angdel, btns, mods, phase, inverted
def __init__(self, rhs):
items = list(self.get_state(rhs))
items[1] = items[0] # gpos = lpos
super().__init__(*items)
def __getstate__(self):
return self.get_state(self, picklable=True)
def __setstate__(self, state):
pos, gpos, pixdel, angdel, btns, mods, phase, inverted = state
if not isinstance(btns, enum.Enum):
btns = QtCore.Qt.MouseButtons(btns)
if not isinstance(mods, enum.Enum):
mods = QtCore.Qt.KeyboardModifiers(mods)
phase = QtCore.Qt.ScrollPhase(phase)
super().__init__(pos, gpos, pixdel, angdel, btns, mods, phase, inverted)
| WheelEvent |
python | matplotlib__matplotlib | lib/mpl_toolkits/axisartist/angle_helper.py | {
"start": 9446,
"end": 13075
} | class ____(ExtremeFinderSimple):
# docstring inherited
def __init__(self, nx, ny,
lon_cycle=360., lat_cycle=None,
lon_minmax=None, lat_minmax=(-90, 90)):
"""
This subclass handles the case where one or both coordinates should be
taken modulo 360, or be restricted to not exceed a specific range.
Parameters
----------
nx, ny : int
The number of samples in each direction.
lon_cycle, lat_cycle : 360 or None
If not None, values in the corresponding direction are taken modulo
*lon_cycle* or *lat_cycle*; in theory this can be any number but
the implementation actually assumes that it is 360 (if not None);
other values give nonsensical results.
This is done by "unwrapping" the transformed grid coordinates so
that jumps are less than a half-cycle; then normalizing the span to
no more than a full cycle.
For example, if values are in the union of the [0, 2] and
[358, 360] intervals (typically, angles measured modulo 360), the
values in the second interval are normalized to [-2, 0] instead so
that the values now cover [-2, 2]. If values are in a range of
[5, 1000], this gets normalized to [5, 365].
lon_minmax, lat_minmax : (float, float) or None
If not None, the computed bounding box is clipped to the given
range in the corresponding direction.
"""
self.nx, self.ny = nx, ny
self.lon_cycle, self.lat_cycle = lon_cycle, lat_cycle
self.lon_minmax = lon_minmax
self.lat_minmax = lat_minmax
def _find_transformed_bbox(self, trans, bbox):
# docstring inherited
grid = np.reshape(np.meshgrid(np.linspace(bbox.x0, bbox.x1, self.nx),
np.linspace(bbox.y0, bbox.y1, self.ny)),
(2, -1)).T
lon, lat = trans.transform(grid).T
# iron out jumps, but algorithm should be improved.
# This is just naive way of doing and my fail for some cases.
# Consider replacing this with numpy.unwrap
# We are ignoring invalid warnings. They are triggered when
# comparing arrays with NaNs using > We are already handling
# that correctly using np.nanmin and np.nanmax
with np.errstate(invalid='ignore'):
if self.lon_cycle is not None:
lon0 = np.nanmin(lon)
lon -= 360. * ((lon - lon0) > 180.)
if self.lat_cycle is not None:
lat0 = np.nanmin(lat)
lat -= 360. * ((lat - lat0) > 180.)
tbbox = Bbox.null()
tbbox.update_from_data_xy(np.column_stack([lon, lat]))
tbbox = tbbox.expanded(1 + 2 / self.nx, 1 + 2 / self.ny)
lon_min, lat_min, lon_max, lat_max = tbbox.extents
# check cycle
if self.lon_cycle:
lon_max = min(lon_max, lon_min + self.lon_cycle)
if self.lat_cycle:
lat_max = min(lat_max, lat_min + self.lat_cycle)
if self.lon_minmax is not None:
min0 = self.lon_minmax[0]
lon_min = max(min0, lon_min)
max0 = self.lon_minmax[1]
lon_max = min(max0, lon_max)
if self.lat_minmax is not None:
min0 = self.lat_minmax[0]
lat_min = max(min0, lat_min)
max0 = self.lat_minmax[1]
lat_max = min(max0, lat_max)
return Bbox.from_extents(lon_min, lat_min, lon_max, lat_max)
| ExtremeFinderCycle |
python | astropy__astropy | astropy/io/votable/tree.py | {
"start": 25290,
"end": 36677
} | class ____(Element, _IDProperty):
"""
VALUES_ element: used within FIELD_ and PARAM_ elements to define the domain of values.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(
self,
votable,
field,
ID=None,
null=None,
ref=None,
type="legal",
id=None,
config=None,
pos=None,
**extras,
):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._votable = votable
self._field = field
self.ID = resolve_id(ID, id, config, pos)
self.null = null
self._ref = ref
self.type = type
self.min = None
self.max = None
self.min_inclusive = True
self.max_inclusive = True
self._options = []
warn_unknown_attrs("VALUES", extras.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
self.to_xml(XMLWriter(buff))
return buff.getvalue().strip()
@property
def null(self):
"""
For integral datatypes, *null* is used to define the value
used for missing values.
"""
return self._null
@null.setter
def null(self, null):
if null is not None and isinstance(null, str):
try:
null_val = self._field.converter.parse_scalar(
null, self._config, self._pos
)[0]
except Exception:
warn_or_raise(W36, W36, null, self._config, self._pos)
null_val = self._field.converter.parse_scalar(
"0", self._config, self._pos
)[0]
else:
null_val = null
self._null = null_val
@null.deleter
def null(self):
self._null = None
@property
def type(self):
"""Defines the applicability of the domain defined by this VALUES_ element [*required*].
Must be one of the following strings:
- 'legal': The domain of this column applies in general to
this datatype. (default)
- 'actual': The domain of this column applies only to the
data enclosed in the parent table.
"""
return self._type
@type.setter
def type(self, type):
if type not in ("legal", "actual"):
vo_raise(E08, type, self._config, self._pos)
self._type = type
@property
def ref(self):
"""
Refer to another VALUES_ element by ID_, defined previously in
the document, for MIN/MAX/OPTION information.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
if ref is not None:
try:
other = self._votable.get_values_by_id(ref, before=self)
except KeyError:
warn_or_raise(W43, W43, ("VALUES", self.ref), self._config, self._pos)
ref = None
else:
self.null = other.null
self.type = other.type
self.min = other.min
self.min_inclusive = other.min_inclusive
self.max = other.max
self.max_inclusive = other.max_inclusive
self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def min(self):
"""
The minimum value of the domain. See :attr:`min_inclusive`.
"""
return self._min
@min.setter
def min(self, min):
self._min = self._parse_minmax(min)
@min.deleter
def min(self):
self._min = None
@property
def min_inclusive(self):
"""When `True`, the domain includes the minimum value."""
return self._min_inclusive
@min_inclusive.setter
def min_inclusive(self, inclusive):
if inclusive == "yes":
self._min_inclusive = True
elif inclusive == "no":
self._min_inclusive = False
else:
self._min_inclusive = bool(inclusive)
@min_inclusive.deleter
def min_inclusive(self):
self._min_inclusive = True
@property
def max(self):
"""
The maximum value of the domain. See :attr:`max_inclusive`.
"""
return self._max
@max.setter
def max(self, max):
self._max = self._parse_minmax(max)
@max.deleter
def max(self):
self._max = None
@property
def max_inclusive(self):
"""When `True`, the domain includes the maximum value."""
return self._max_inclusive
@max_inclusive.setter
def max_inclusive(self, inclusive):
if inclusive == "yes":
self._max_inclusive = True
elif inclusive == "no":
self._max_inclusive = False
else:
self._max_inclusive = bool(inclusive)
@max_inclusive.deleter
def max_inclusive(self):
self._max_inclusive = True
@property
def options(self):
"""
A list of string key-value tuples defining other OPTION
elements for the domain. All options are ignored -- they are
stored for round-tripping purposes only.
"""
return self._options
def parse(self, iterator, config):
if self.ref is not None:
for start, tag, data, pos in iterator:
if start:
warn_or_raise(W44, W44, tag, config, pos)
else:
if tag != "VALUES":
warn_or_raise(W44, W44, tag, config, pos)
break
else:
for start, tag, data, pos in iterator:
if start:
if tag == "MIN":
if "value" not in data:
vo_raise(E09, "MIN", config, pos)
self.min = data["value"]
self.min_inclusive = data.get("inclusive", "yes")
warn_unknown_attrs(
"MIN", data.keys(), config, pos, ["value", "inclusive"]
)
elif tag == "MAX":
if "value" not in data:
vo_raise(E09, "MAX", config, pos)
self.max = data["value"]
self.max_inclusive = data.get("inclusive", "yes")
warn_unknown_attrs(
"MAX", data.keys(), config, pos, ["value", "inclusive"]
)
elif tag == "OPTION":
if "value" not in data:
vo_raise(E09, "OPTION", config, pos)
xmlutil.check_token(data.get("name"), "name", config, pos)
self.options.append((data.get("name"), data.get("value")))
warn_unknown_attrs(
"OPTION", data.keys(), config, pos, ["value", "name"]
)
elif tag == "VALUES":
break
return self
def _parse_minmax(self, val):
retval = val
if hasattr(self._field, "converter") and val is not None:
parsed_val = None
if self._field.arraysize is None:
# Use the default parser.
parsed_val = self._field.converter.parse(val, config=self._config)[0]
else:
# Set config to ignore verification (prevent warnings and exceptions) on parse.
ignore_warning_config = self._config.copy()
ignore_warning_config["verify"] = "ignore"
# max should be a scalar except for certain xtypes so try scalar parsing first.
try:
parsed_val = self._field.converter.parse_scalar(
val, config=ignore_warning_config
)[0]
except ValueError as ex:
pass # Ignore ValueError returned for array vals by some parsers (like int)
finally:
if parsed_val is None:
# Try the array parsing to support certain xtypes and historical array values.
parsed_val = self._field.converter.parse(
val, config=ignore_warning_config
)[0]
retval = parsed_val
return retval
def is_defaults(self):
"""
Are the settings on this ``VALUE`` element all the same as the
XML defaults?.
"""
# If there's nothing meaningful or non-default to write,
# don't write anything.
return (
self.ref is None
and self.null is None
and self.ID is None
and self.max is None
and self.min is None
and self.options == []
)
def to_xml(self, w, **kwargs):
def yes_no(value):
if value:
return "yes"
return "no"
if self.is_defaults():
return
if self.ref is not None:
w.element("VALUES", attrib=w.object_attrs(self, ["ref"]))
else:
with w.tag("VALUES", attrib=w.object_attrs(self, ["ID", "null", "ref"])):
if self.min is not None:
w.element(
"MIN",
value=self._field.converter.output(self.min, False),
inclusive=yes_no(self.min_inclusive),
)
if self.max is not None:
w.element(
"MAX",
value=self._field.converter.output(self.max, False),
inclusive=yes_no(self.max_inclusive),
)
for name, value in self.options:
w.element("OPTION", name=name, value=value)
def to_table_column(self, column):
# Have the ref filled in here
meta = {}
for key in ["ID", "null"]:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if self.min is not None:
meta["min"] = {"value": self.min, "inclusive": self.min_inclusive}
if self.max is not None:
meta["max"] = {"value": self.max, "inclusive": self.max_inclusive}
if len(self.options):
meta["options"] = dict(self.options)
column.meta["values"] = meta
def from_table_column(self, column):
if column.info.meta is None or "values" not in column.info.meta:
return
meta = column.info.meta["values"]
for key in ["ID", "null"]:
val = meta.get(key, None)
if val is not None:
setattr(self, key, val)
if "min" in meta:
self.min = meta["min"]["value"]
self.min_inclusive = meta["min"]["inclusive"]
if "max" in meta:
self.max = meta["max"]["value"]
self.max_inclusive = meta["max"]["inclusive"]
if "options" in meta:
self._options = list(meta["options"].items())
| Values |
python | PyCQA__isort | isort/exceptions.py | {
"start": 1856,
"end": 2161
} | class ____(FileSkipped):
"""Raised when an entire file is skipped due to a isort skip file comment"""
def __init__(self, file_path: str, **kwargs: str):
super().__init__(
f"{file_path} contains a file skip comment and was skipped.", file_path=file_path
)
| FileSkipComment |
python | dask__distributed | distributed/pytest_resourceleaks.py | {
"start": 3937,
"end": 4407
} | class ____(ResourceChecker, name="demo"):
"""Checker that always leaks. Used to test the core LeakChecker functionality."""
i: int
def __init__(self):
self.i = 0
def measure(self) -> int:
self.i += 1
return self.i
def has_leak(self, before: int, after: int) -> bool:
return after > before
def format(self, before: int, after: int) -> str:
return f"counter increased from {before} to {after}"
| DemoChecker |
python | getsentry__sentry | src/sentry/core/endpoints/organization_member_invite/index.py | {
"start": 2097,
"end": 4021
} | class ____(StaffPermissionMixin, MemberInvitePermission):
pass
def _can_invite_member(
request: Request,
organization: Organization | RpcOrganization | RpcUserOrganizationContext,
) -> bool:
scopes = request.access.scopes
is_role_above_member = "member:admin" in scopes or "member:write" in scopes
if isinstance(organization, RpcUserOrganizationContext):
organization = organization.organization
if is_role_above_member:
return True
if "member:invite" not in scopes:
return False
return not organization.flags.disable_member_invite
def _create_invite_object(
request, organization, result, is_request: bool
) -> OrganizationMemberInvite:
with transaction.atomic(router.db_for_write(OrganizationMemberInvite)):
teams = []
for team in result.get("teams", []):
teams.append({"id": team.id, "slug": team.slug, "role": None})
om = OrganizationMember.objects.create(organization=organization)
omi = OrganizationMemberInvite(
organization=organization,
organization_member=om,
email=result["email"],
role=result["orgRole"],
inviter_id=request.user.id,
organization_member_team_data=teams,
invite_status=(
InviteStatus.REQUESTED_TO_BE_INVITED.value
if is_request
else InviteStatus.APPROVED.value
),
)
omi.save()
create_audit_entry(
request=request,
organization_id=organization.id,
target_object=omi.id,
data=omi.get_audit_log_data(),
event=(
(audit_log.get_event_id("INVITE_REQUEST_ADD"))
if is_request
else (audit_log.get_event_id("MEMBER_INVITE"))
),
)
return omi
@region_silo_endpoint
@extend_schema(tags=["Organizations"])
| MemberInviteAndStaffPermission |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/base.py | {
"start": 112849,
"end": 114976
} | class ____(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
return self._execute_scalar(
(
"select nextval('%s')"
% self.identifier_preparer.format_sequence(seq)
),
type_,
)
def get_insert_default(self, column):
if column.primary_key and column is column.table._autoincrement_column:
if column.server_default and column.server_default.has_argument:
# pre-execute passive defaults on primary key columns
return self._execute_scalar(
"select %s" % column.server_default.arg, column.type
)
elif column.default is None or (
column.default.is_sequence and column.default.optional
):
# execute the sequence associated with a SERIAL primary
# key column. for non-primary-key SERIAL, the ID just
# generates server side.
try:
seq_name = column._postgresql_seq_name
except AttributeError:
tab = column.table.name
col = column.name
tab = tab[0 : 29 + max(0, (29 - len(col)))]
col = col[0 : 29 + max(0, (29 - len(tab)))]
name = "%s_%s_seq" % (tab, col)
column._postgresql_seq_name = seq_name = name
if column.table is not None:
effective_schema = self.connection.schema_for_object(
column.table
)
else:
effective_schema = None
if effective_schema is not None:
exc = 'select nextval(\'"%s"."%s"\')' % (
effective_schema,
seq_name,
)
else:
exc = "select nextval('\"%s\"')" % (seq_name,)
return self._execute_scalar(exc, column.type)
return super().get_insert_default(column)
| PGExecutionContext |
python | Netflix__metaflow | metaflow/_vendor/importlib_metadata/__init__.py | {
"start": 25805,
"end": 30525
} | class ____(Distribution):
def __init__(self, path: SimplePath):
"""Construct a distribution.
:param path: SimplePath indicating the metadata directory.
"""
self._path = path
def read_text(self, filename):
with suppress(
FileNotFoundError,
IsADirectoryError,
KeyError,
NotADirectoryError,
PermissionError,
):
return self._path.joinpath(filename).read_text(encoding='utf-8')
read_text.__doc__ = Distribution.read_text.__doc__
def locate_file(self, path):
return self._path.parent / path
@property
def _normalized_name(self):
"""
Performance optimization: where possible, resolve the
normalized name from the file system path.
"""
stem = os.path.basename(str(self._path))
return self._name_from_stem(stem) or super()._normalized_name
def _name_from_stem(self, stem):
name, ext = os.path.splitext(stem)
if ext not in ('.dist-info', '.egg-info'):
return
name, sep, rest = stem.partition('-')
return name
def distribution(distribution_name):
"""Get the ``Distribution`` instance for the named package.
:param distribution_name: The name of the distribution package as a string.
:return: A ``Distribution`` instance (or subclass thereof).
"""
return Distribution.from_name(distribution_name)
def distributions(**kwargs):
"""Get all ``Distribution`` instances in the current environment.
:return: An iterable of ``Distribution`` instances.
"""
return Distribution.discover(**kwargs)
def metadata(distribution_name) -> _meta.PackageMetadata:
"""Get the metadata for the named package.
:param distribution_name: The name of the distribution package to query.
:return: A PackageMetadata containing the parsed metadata.
"""
return Distribution.from_name(distribution_name).metadata
def version(distribution_name):
"""Get the version string for the named package.
:param distribution_name: The name of the distribution package to query.
:return: The version string for the package as defined in the package's
"Version" metadata key.
"""
return distribution(distribution_name).version
def entry_points(**params) -> Union[EntryPoints, SelectableGroups]:
"""Return EntryPoint objects for all installed packages.
Pass selection parameters (group or name) to filter the
result to entry points matching those properties (see
EntryPoints.select()).
For compatibility, returns ``SelectableGroups`` object unless
selection parameters are supplied. In the future, this function
will return ``EntryPoints`` instead of ``SelectableGroups``
even when no selection parameters are supplied.
For maximum future compatibility, pass selection parameters
or invoke ``.select`` with parameters on the result.
:return: EntryPoints or SelectableGroups for all installed packages.
"""
norm_name = operator.attrgetter('_normalized_name')
unique = functools.partial(unique_everseen, key=norm_name)
eps = itertools.chain.from_iterable(
dist.entry_points for dist in unique(distributions())
)
return SelectableGroups.load(eps).select(**params)
def files(distribution_name):
"""Return a list of files for the named package.
:param distribution_name: The name of the distribution package to query.
:return: List of files composing the distribution.
"""
return distribution(distribution_name).files
def requires(distribution_name):
"""
Return a list of requirements for the named package.
:return: An iterator of requirements, suitable for
packaging.requirement.Requirement.
"""
return distribution(distribution_name).requires
def packages_distributions() -> Mapping[str, List[str]]:
"""
Return a mapping of top-level packages to their
distributions.
>>> import collections.abc
>>> pkgs = packages_distributions()
>>> all(isinstance(dist, collections.abc.Sequence) for dist in pkgs.values())
True
"""
pkg_to_dist = collections.defaultdict(list)
for dist in distributions():
for pkg in _top_level_declared(dist) or _top_level_inferred(dist):
pkg_to_dist[pkg].append(dist.metadata['Name'])
return dict(pkg_to_dist)
def _top_level_declared(dist):
return (dist.read_text('top_level.txt') or '').split()
def _top_level_inferred(dist):
return {
f.parts[0] if len(f.parts) > 1 else f.with_suffix('').name
for f in always_iterable(dist.files)
if f.suffix == ".py"
}
| PathDistribution |
python | getsentry__sentry | src/sentry/monitors/endpoints/base.py | {
"start": 4412,
"end": 7268
} | class ____(ProjectMonitorEndpoint):
"""
Base endpoint class for monitor environment which will look up the monitor environment and
convert it to a MonitorEnvironment object.
"""
permission_classes: tuple[type[BasePermission], ...] = (ProjectAlertRulePermission,)
def convert_args(
self,
request: Request,
monitor_id_or_slug: str,
environment: str,
*args,
**kwargs,
):
args, kwargs = super().convert_args(request, monitor_id_or_slug, *args, **kwargs)
monitor = kwargs["monitor"]
try:
environment_object = Environment.objects.get(
organization_id=monitor.organization_id, name=environment
)
kwargs["monitor_environment"] = MonitorEnvironment.objects.get(
monitor_id=monitor.id, environment_id=environment_object.id
)
except (Environment.DoesNotExist, MonitorEnvironment.DoesNotExist):
raise ResourceDoesNotExist
return args, kwargs
def get_monitor_by_org_id_or_slug(organization: Organization, monitor_id_or_slug: str) -> Monitor:
# Since we have changed our unique constraints to be on unique on (project, slug) we can
# end up with multiple monitors here. Since we have no idea which project the user wants,
# we just get the oldest monitor and use that.
# This is a temporary measure until we remove these org level endpoints
# Try lookup by slug
monitors = list(
Monitor.objects.filter(organization_id=organization.id, slug=monitor_id_or_slug)
)
if monitors:
return min(monitors, key=lambda m: m.id)
# Try lookup by GUID if the monitor_id_or_slug looks like a UUID
try:
UUID(monitor_id_or_slug, version=4)
monitors = list(
Monitor.objects.filter(organization_id=organization.id, guid=monitor_id_or_slug)
)
if monitors:
return min(monitors, key=lambda m: m.id)
except ValueError:
pass
raise Monitor.DoesNotExist
def try_checkin_lookup(monitor: Monitor, checkin_id: str):
# we support the magic keyword of "latest" to grab the most recent check-in
# which is unfinished (thus still mutable)
if checkin_id == "latest":
checkin = (
MonitorCheckIn.objects.filter(monitor=monitor, status=CheckInStatus.IN_PROGRESS)
.order_by("-date_added")
.first()
)
if not checkin:
raise ResourceDoesNotExist
return checkin
try:
UUID(checkin_id)
except ValueError:
raise ParameterValidationError("Invalid check-in UUID")
try:
return MonitorCheckIn.objects.get(monitor=monitor, guid=checkin_id)
except MonitorCheckIn.DoesNotExist:
raise ResourceDoesNotExist
| ProjectMonitorEnvironmentEndpoint |
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 156887,
"end": 157211
} | class ____(TypedDict, total=False):
"""
:class:`altair.Locale` ``TypedDict`` wrapper.
Parameters
----------
number
Locale definition for formatting numbers.
time
Locale definition for formatting dates and times.
"""
number: NumberLocaleKwds
time: TimeLocaleKwds
| LocaleKwds |
python | xlwings__xlwings | xlwings/conversion/standard.py | {
"start": 4826,
"end": 5142
} | class ____:
def __call__(self, c):
if isinstance(c.value, (list, tuple)):
if len(c.value) > 0:
if not isinstance(c.value[0], (list, tuple)):
c.value = [c.value]
else:
c.meta["scalar"] = True
c.value = [[c.value]]
| Ensure2DStage |
python | huggingface__transformers | src/transformers/models/depth_anything/modeling_depth_anything.py | {
"start": 12226,
"end": 16106
} | class ____(DepthAnythingPreTrainedModel):
_no_split_modules = ["DPTViTEmbeddings"]
def __init__(self, config):
super().__init__(config)
self.backbone = load_backbone(config)
self.neck = DepthAnythingNeck(config)
self.head = DepthAnythingDepthEstimationHead(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], DepthEstimatorOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth depth estimation maps for computing the loss.
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoModelForDepthEstimation
>>> import torch
>>> import numpy as np
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("LiheYoung/depth-anything-small-hf")
>>> model = AutoModelForDepthEstimation.from_pretrained("LiheYoung/depth-anything-small-hf")
>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # interpolate to original size
>>> post_processed_output = image_processor.post_process_depth_estimation(
... outputs,
... target_sizes=[(image.height, image.width)],
... )
>>> # visualize the prediction
>>> predicted_depth = post_processed_output[0]["predicted_depth"]
>>> depth = predicted_depth * 255 / predicted_depth.max()
>>> depth = depth.detach().cpu().numpy()
>>> depth = Image.fromarray(depth.astype("uint8"))
```"""
loss = None
if labels is not None:
raise NotImplementedError("Training is not implemented yet")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
outputs = self.backbone.forward_with_filtered_kwargs(
pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
)
hidden_states = outputs.feature_maps
_, _, height, width = pixel_values.shape
patch_size = self.config.patch_size
patch_height = height // patch_size
patch_width = width // patch_size
hidden_states = self.neck(hidden_states, patch_height, patch_width)
predicted_depth = self.head(hidden_states, patch_height, patch_width)
if not return_dict:
if output_hidden_states:
output = (predicted_depth,) + outputs[1:]
else:
output = (predicted_depth,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return DepthEstimatorOutput(
loss=loss,
predicted_depth=predicted_depth,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=outputs.attentions,
)
__all__ = ["DepthAnythingForDepthEstimation", "DepthAnythingPreTrainedModel"]
| DepthAnythingForDepthEstimation |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 770012,
"end": 770784
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"guid",
"organization_invitation",
"saml_identity",
"scim_identity",
"user",
)
guid = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="guid")
organization_invitation = sgqlc.types.Field(
"OrganizationInvitation", graphql_name="organizationInvitation"
)
saml_identity = sgqlc.types.Field(
ExternalIdentitySamlAttributes, graphql_name="samlIdentity"
)
scim_identity = sgqlc.types.Field(
ExternalIdentityScimAttributes, graphql_name="scimIdentity"
)
user = sgqlc.types.Field("User", graphql_name="user")
| ExternalIdentity |
python | falconry__falcon | tests/test_httperror.py | {
"start": 3399,
"end": 4145
} | class ____:
def on_get(self, req, resp):
raise falcon.HTTPUnauthorized(
title='Authentication Required',
description='Missing or invalid authorization.',
challenges=['Basic realm="simple"'],
)
def on_post(self, req, resp):
raise falcon.HTTPUnauthorized(
title='Authentication Required',
description='Missing or invalid authorization.',
challenges=['Newauth realm="apps"', 'Basic realm="simple"'],
)
def on_put(self, req, resp):
raise falcon.HTTPUnauthorized(
title='Authentication Required',
description='Missing or invalid authorization.',
challenges=[],
)
| UnauthorizedResource |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/deep_learning/activation_functions.py | {
"start": 694,
"end": 840
} | class ____():
def __call__(self, x):
return np.where(x >= 0, x, 0)
def gradient(self, x):
return np.where(x >= 0, 1, 0)
| ReLU |
python | doocs__leetcode | solution/1600-1699/1641.Count Sorted Vowel Strings/Solution2.py | {
"start": 0,
"end": 237
} | class ____:
def countVowelStrings(self, n: int) -> int:
f = [1] * 5
for _ in range(n - 1):
s = 0
for j in range(5):
s += f[j]
f[j] = s
return sum(f)
| Solution |
python | django__django | tests/custom_pk/models.py | {
"start": 615,
"end": 817
} | class ____(models.Model):
name = models.CharField(max_length=20, primary_key=True)
employees = models.ManyToManyField(Employee)
class Meta:
verbose_name_plural = "businesses"
| Business |
python | urllib3__urllib3 | test/with_dummyserver/test_connectionpool.py | {
"start": 7696,
"end": 44774
} | class ____(HypercornDummyServerTestCase):
def test_http2_test_error(self, http_version: str) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
if http_version == "h2":
with pytest.raises(
ValueError, match="HTTP/2 support currently only applies to HTTPS.*"
):
r = pool.request("GET", "/")
else:
r = pool.request("GET", "/")
assert r.status == 200
def test_get(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/specific_method", fields={"method": "GET"})
assert r.status == 200, r.data
def test_debug_log(self, caplog: pytest.LogCaptureFixture) -> None:
caplog.set_level(logging.DEBUG, logger="urllib3.connectionpool")
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.urlopen("GET", "/")
assert r.status == 200
logs = [record.getMessage() for record in caplog.records]
assert logs == [
f"Starting new HTTP connection (1): {self.host}:{self.port}",
f'http://{self.host}:{self.port} "GET / HTTP/1.1" 200 0',
]
def test_post_url(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("POST", "/specific_method", fields={"method": "POST"})
assert r.status == 200, r.data
def test_urlopen_put(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.urlopen("PUT", "/specific_method?method=PUT")
assert r.status == 200, r.data
def test_wrong_specific_method(self) -> None:
# To make sure the dummy server is actually returning failed responses
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/specific_method", fields={"method": "POST"})
assert r.status == 400, r.data
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("POST", "/specific_method", fields={"method": "GET"})
assert r.status == 400, r.data
def test_upload(self) -> None:
data = "I'm in ur multipart form-data, hazing a cheezburgr"
fields: dict[str, _TYPE_FIELD_VALUE_TUPLE] = {
"upload_param": "filefield",
"upload_filename": "lolcat.txt",
"filefield": ("lolcat.txt", data),
}
fields["upload_size"] = len(data) # type: ignore[assignment]
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("POST", "/upload", fields=fields)
assert r.status == 200, r.data
def test_one_name_multiple_values(self) -> None:
fields = [("foo", "a"), ("foo", "b")]
with HTTPConnectionPool(self.host, self.port) as pool:
# urlencode
r = pool.request("GET", "/echo", fields=fields)
assert r.data == b"foo=a&foo=b"
# multipart
r = pool.request("POST", "/echo", fields=fields)
assert r.data.count(b'name="foo"') == 2
def test_request_method_body(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
body = b"hi"
r = pool.request("POST", "/echo", body=body)
assert r.data == body
fields = [("hi", "hello")]
with pytest.raises(TypeError):
pool.request("POST", "/echo", body=body, fields=fields)
def test_unicode_upload(self) -> None:
fieldname = "myfile"
filename = "\xe2\x99\xa5.txt"
data = "\xe2\x99\xa5".encode()
size = len(data)
fields: dict[str, _TYPE_FIELD_VALUE_TUPLE] = {
"upload_param": fieldname,
"upload_filename": filename,
fieldname: (filename, data),
}
fields["upload_size"] = size # type: ignore[assignment]
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("POST", "/upload", fields=fields)
assert r.status == 200, r.data
def test_nagle(self) -> None:
"""Test that connections have TCP_NODELAY turned on"""
# This test needs to be here in order to be run. socket.create_connection actually tries
# to connect to the host provided so we need a dummyserver to be running.
with HTTPConnectionPool(self.host, self.port) as pool:
conn = pool._get_conn()
try:
pool._make_request(conn, "GET", "/")
tcp_nodelay_setting = conn.sock.getsockopt( # type: ignore[attr-defined]
socket.IPPROTO_TCP, socket.TCP_NODELAY
)
assert tcp_nodelay_setting
finally:
conn.close()
@pytest.mark.parametrize(
"socket_options",
[
[(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)],
((socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),),
],
)
def test_socket_options(self, socket_options: tuple[int, int, int]) -> None:
"""Test that connections accept socket options."""
# This test needs to be here in order to be run. socket.create_connection actually tries to
# connect to the host provided so we need a dummyserver to be running.
with HTTPConnectionPool(
self.host,
self.port,
socket_options=socket_options,
) as pool:
# Get the socket of a new connection.
s = pool._new_conn()._new_conn() # type: ignore[attr-defined]
try:
using_keepalive = (
s.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) > 0
)
assert using_keepalive
finally:
s.close()
@pytest.mark.parametrize("socket_options", [None, []])
def test_disable_default_socket_options(
self, socket_options: list[int] | None
) -> None:
"""Test that passing None or empty list disables all socket options."""
# This test needs to be here in order to be run. socket.create_connection actually tries
# to connect to the host provided so we need a dummyserver to be running.
with HTTPConnectionPool(
self.host, self.port, socket_options=socket_options
) as pool:
s = pool._new_conn()._new_conn() # type: ignore[attr-defined]
try:
using_nagle = s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) == 0
assert using_nagle
finally:
s.close()
def test_defaults_are_applied(self) -> None:
"""Test that modifying the default socket options works."""
# This test needs to be here in order to be run. socket.create_connection actually tries
# to connect to the host provided so we need a dummyserver to be running.
with HTTPConnectionPool(self.host, self.port) as pool:
# Get the HTTPConnection instance
conn = pool._new_conn()
try:
# Update the default socket options
assert conn.socket_options is not None
conn.socket_options += [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)]
s = conn._new_conn() # type: ignore[attr-defined]
nagle_disabled = (
s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) > 0
)
using_keepalive = (
s.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) > 0
)
assert nagle_disabled
assert using_keepalive
finally:
conn.close()
s.close()
def test_connection_error_retries(self) -> None:
"""ECONNREFUSED error should raise a connection error, with retries"""
port = find_unused_port()
with HTTPConnectionPool(self.host, port) as pool:
with pytest.raises(MaxRetryError) as e:
pool.request("GET", "/", retries=Retry(connect=3))
assert type(e.value.reason) is NewConnectionError
def test_timeout_success(self) -> None:
timeout = Timeout(connect=3, read=5, total=None)
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
pool.request("GET", "/")
# This should not raise a "Timeout already started" error
pool.request("GET", "/")
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
# This should also not raise a "Timeout already started" error
pool.request("GET", "/")
timeout = Timeout(total=None)
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
pool.request("GET", "/")
socket_timeout_reuse_testdata = pytest.mark.parametrize(
["timeout", "expect_settimeout_calls"],
[
(1, (1, 1)),
(None, (None, None)),
(Timeout(read=4), (None, 4)),
(Timeout(read=4, connect=5), (5, 4)),
(Timeout(connect=6), (6, None)),
],
)
@socket_timeout_reuse_testdata
def test_socket_timeout_updated_on_reuse_constructor(
self,
timeout: _TYPE_TIMEOUT,
expect_settimeout_calls: typing.Sequence[float | None],
) -> None:
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
# Make a request to create a new connection.
pool.urlopen("GET", "/")
# Grab the connection and mock the inner socket.
assert pool.pool is not None
conn = pool.pool.get_nowait()
conn_sock = mock.Mock(wraps=conn.sock)
conn.sock = conn_sock
pool._put_conn(conn)
# Assert that sock.settimeout() is called with the new connect timeout, then the read timeout.
pool.urlopen("GET", "/", timeout=timeout)
conn_sock.settimeout.assert_has_calls(
[mock.call(x) for x in expect_settimeout_calls]
)
@socket_timeout_reuse_testdata
def test_socket_timeout_updated_on_reuse_parameter(
self,
timeout: _TYPE_TIMEOUT,
expect_settimeout_calls: typing.Sequence[float | None],
) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
# Make a request to create a new connection.
pool.urlopen("GET", "/", timeout=LONG_TIMEOUT)
# Grab the connection and mock the inner socket.
assert pool.pool is not None
conn = pool.pool.get_nowait()
conn_sock = mock.Mock(wraps=conn.sock)
conn.sock = conn_sock
pool._put_conn(conn)
# Assert that sock.settimeout() is called with the new connect timeout, then the read timeout.
pool.urlopen("GET", "/", timeout=timeout)
conn_sock.settimeout.assert_has_calls(
[mock.call(x) for x in expect_settimeout_calls]
)
def test_tunnel(self) -> None:
timeout = Timeout(total=None)
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
conn = pool._get_conn()
try:
conn.set_tunnel(self.host, self.port)
with mock.patch.object(
conn, "_tunnel", create=True, return_value=None
) as conn_tunnel:
pool._make_request(conn, "GET", "/")
conn_tunnel.assert_called_once_with()
finally:
conn.close()
# test that it's not called when tunnel is not set
timeout = Timeout(total=None)
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
conn = pool._get_conn()
try:
with mock.patch.object(
conn, "_tunnel", create=True, return_value=None
) as conn_tunnel:
pool._make_request(conn, "GET", "/")
assert not conn_tunnel.called
finally:
conn.close()
def test_redirect_relative_url_no_deprecation(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
pool.request("GET", "/redirect", fields={"target": "/"})
def test_redirect(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/redirect", fields={"target": "/"}, redirect=False)
assert r.status == 303
r = pool.request("GET", "/redirect", fields={"target": "/"})
assert r.status == 200
assert r.data == b"Dummy server!"
def test_303_redirect_makes_request_lose_body(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
response = pool.request(
"POST",
"/redirect",
fields={"target": "/headers_and_params", "status": "303 See Other"},
)
data = response.json()
assert data["params"] == {}
assert "Content-Type" not in HTTPHeaderDict(data["headers"])
def test_bad_connect(self) -> None:
with HTTPConnectionPool("badhost.invalid", self.port) as pool:
with pytest.raises(MaxRetryError) as e:
pool.request("GET", "/", retries=5)
assert type(e.value.reason) is NameResolutionError
def test_keepalive(self) -> None:
with HTTPConnectionPool(self.host, self.port, block=True, maxsize=1) as pool:
r = pool.request("GET", "/keepalive?close=0")
r = pool.request("GET", "/keepalive?close=0")
assert r.status == 200
assert pool.num_connections == 1
assert pool.num_requests == 2
def test_keepalive_close(self) -> None:
with HTTPConnectionPool(
self.host, self.port, block=True, maxsize=1, timeout=2
) as pool:
r = pool.request(
"GET", "/keepalive?close=1", retries=0, headers={"Connection": "close"}
)
assert pool.num_connections == 1
# The dummyserver will have responded with Connection:close,
# and httplib will properly cleanup the socket.
# We grab the HTTPConnection object straight from the Queue,
# because _get_conn() is where the check & reset occurs
assert pool.pool is not None
conn = pool.pool.get()
assert conn.sock is None
pool._put_conn(conn)
# Now with keep-alive
r = pool.request(
"GET",
"/keepalive?close=0",
retries=0,
headers={"Connection": "keep-alive"},
)
# The dummyserver responded with Connection:keep-alive, the connection
# persists.
conn = pool.pool.get()
assert conn.sock is not None
pool._put_conn(conn)
# Another request asking the server to close the connection. This one
# should get cleaned up for the next request.
r = pool.request(
"GET", "/keepalive?close=1", retries=0, headers={"Connection": "close"}
)
assert r.status == 200
conn = pool.pool.get()
assert conn.sock is None
pool._put_conn(conn)
# Next request
r = pool.request("GET", "/keepalive?close=0")
def test_post_with_urlencode(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
data = {"banana": "hammock", "lol": "cat"}
r = pool.request("POST", "/echo", fields=data, encode_multipart=False)
assert r.data.decode("utf-8") == urlencode(data)
def test_post_with_multipart(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
data = {"banana": "hammock", "lol": "cat"}
r = pool.request("POST", "/echo", fields=data, encode_multipart=True)
body = r.data.split(b"\r\n")
encoded_data = encode_multipart_formdata(data)[0]
expected_body = encoded_data.split(b"\r\n")
# TODO: Get rid of extra parsing stuff when you can specify
# a custom boundary to encode_multipart_formdata
"""
We need to loop the return lines because a timestamp is attached
from within encode_multipart_formdata. When the server echos back
the data, it has the timestamp from when the data was encoded, which
is not equivalent to when we run encode_multipart_formdata on
the data again.
"""
for i, line in enumerate(body):
if line.startswith(b"--"):
continue
assert body[i] == expected_body[i]
def test_post_with_multipart__iter__(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
data = {"hello": "world"}
r = pool.request(
"POST",
"/echo",
fields=data,
preload_content=False,
multipart_boundary="boundary",
encode_multipart=True,
)
chunks = [chunk for chunk in r]
assert chunks == [
b"--boundary\r\n",
b'Content-Disposition: form-data; name="hello"\r\n',
b"\r\n",
b"world\r\n",
b"--boundary--\r\n",
]
def test_check_gzip(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request(
"GET", "/encodingrequest", headers={"accept-encoding": "gzip"}
)
assert r.headers.get("content-encoding") == "gzip"
assert r.data == b"hello, world!"
def test_check_deflate(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request(
"GET", "/encodingrequest", headers={"accept-encoding": "deflate"}
)
assert r.headers.get("content-encoding") == "deflate"
assert r.data == b"hello, world!"
def test_bad_decode(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
with pytest.raises(DecodeError):
pool.request(
"GET",
"/encodingrequest",
headers={"accept-encoding": "garbage-deflate"},
)
with pytest.raises(DecodeError):
pool.request(
"GET",
"/encodingrequest",
headers={"accept-encoding": "garbage-gzip"},
)
def test_connection_count(self) -> None:
with HTTPConnectionPool(self.host, self.port, maxsize=1) as pool:
pool.request("GET", "/")
pool.request("GET", "/")
pool.request("GET", "/")
assert pool.num_connections == 1
assert pool.num_requests == 3
def test_connection_count_bigpool(self) -> None:
with HTTPConnectionPool(self.host, self.port, maxsize=16) as http_pool:
http_pool.request("GET", "/")
http_pool.request("GET", "/")
http_pool.request("GET", "/")
assert http_pool.num_connections == 1
assert http_pool.num_requests == 3
def test_partial_response(self) -> None:
with HTTPConnectionPool(self.host, self.port, maxsize=1) as pool:
req_data = {"lol": "cat"}
resp_data = urlencode(req_data).encode("utf-8")
r = pool.request("GET", "/echo", fields=req_data, preload_content=False)
assert r.read(5) == resp_data[:5]
assert r.read() == resp_data[5:]
def test_lazy_load_twice(self) -> None:
# This test is sad and confusing. Need to figure out what's
# going on with partial reads and socket reuse.
with HTTPConnectionPool(
self.host, self.port, block=True, maxsize=1, timeout=2
) as pool:
payload_size = 1024 * 2
first_chunk = 512
boundary = "foo"
req_data = {"count": "a" * payload_size}
resp_data = encode_multipart_formdata(req_data, boundary=boundary)[0]
req2_data = {"count": "b" * payload_size}
resp2_data = encode_multipart_formdata(req2_data, boundary=boundary)[0]
r1 = pool.request(
"POST",
"/echo",
fields=req_data,
multipart_boundary=boundary,
preload_content=False,
)
assert r1.read(first_chunk) == resp_data[:first_chunk]
try:
r2 = pool.request(
"POST",
"/echo",
fields=req2_data,
multipart_boundary=boundary,
preload_content=False,
pool_timeout=0.001,
)
# This branch should generally bail here, but maybe someday it will
# work? Perhaps by some sort of magic. Consider it a TODO.
assert r2.read(first_chunk) == resp2_data[:first_chunk]
assert r1.read() == resp_data[first_chunk:]
assert r2.read() == resp2_data[first_chunk:]
assert pool.num_requests == 2
except EmptyPoolError:
assert r1.read() == resp_data[first_chunk:]
assert pool.num_requests == 1
assert pool.num_connections == 1
def test_for_double_release(self) -> None:
MAXSIZE = 5
# Check default state
with HTTPConnectionPool(self.host, self.port, maxsize=MAXSIZE) as pool:
assert pool.num_connections == 0
assert pool.pool is not None
assert pool.pool.qsize() == MAXSIZE
# Make an empty slot for testing
pool.pool.get()
assert pool.pool.qsize() == MAXSIZE - 1
# Check state after simple request
pool.urlopen("GET", "/")
assert pool.pool.qsize() == MAXSIZE - 1
# Check state without release
pool.urlopen("GET", "/", preload_content=False)
assert pool.pool.qsize() == MAXSIZE - 2
pool.urlopen("GET", "/")
assert pool.pool.qsize() == MAXSIZE - 2
# Check state after read
pool.urlopen("GET", "/").data
assert pool.pool.qsize() == MAXSIZE - 2
pool.urlopen("GET", "/")
assert pool.pool.qsize() == MAXSIZE - 2
def test_release_conn_parameter(self) -> None:
MAXSIZE = 5
with HTTPConnectionPool(self.host, self.port, maxsize=MAXSIZE) as pool:
assert pool.pool is not None
assert pool.pool.qsize() == MAXSIZE
# Make request without releasing connection
pool.request("GET", "/", release_conn=False, preload_content=False)
assert pool.pool.qsize() == MAXSIZE - 1
def test_dns_error(self) -> None:
with HTTPConnectionPool(
"thishostdoesnotexist.invalid", self.port, timeout=0.001
) as pool:
with pytest.raises(MaxRetryError):
pool.request("GET", "/test", retries=2)
@pytest.mark.parametrize("char", [" ", "\r", "\n", "\x00"])
def test_invalid_method_not_allowed(self, char: str) -> None:
with pytest.raises(ValueError):
with HTTPConnectionPool(self.host, self.port) as pool:
pool.request("GET" + char, "/")
def test_percent_encode_invalid_target_chars(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/echo_params?q=\r&k=\n \n")
assert r.data == b"[('k', '\\n \\n'), ('q', '\\r')]"
def test_source_address(self) -> None:
for addr, is_ipv6 in VALID_SOURCE_ADDRESSES:
if is_ipv6:
# TODO enable if HAS_IPV6_AND_DNS when this is fixed:
# https://github.com/pgjones/hypercorn/issues/160
warnings.warn("No IPv6 support: skipping.", NoIPv6Warning)
continue
with HTTPConnectionPool(
self.host, self.port, source_address=addr, retries=False
) as pool:
r = pool.request("GET", "/source_address")
assert r.data == addr[0].encode()
@pytest.mark.parametrize(
"invalid_source_address, is_ipv6", INVALID_SOURCE_ADDRESSES
)
def test_source_address_error(
self, invalid_source_address: tuple[str, int], is_ipv6: bool
) -> None:
with HTTPConnectionPool(
self.host, self.port, source_address=invalid_source_address, retries=False
) as pool:
if is_ipv6:
# with pytest.raises(NameResolutionError):
with pytest.raises(NewConnectionError):
pool.request("GET", f"/source_address?{invalid_source_address}")
else:
with pytest.raises(NewConnectionError):
pool.request("GET", f"/source_address?{invalid_source_address}")
def test_stream_keepalive(self) -> None:
x = 2
with HTTPConnectionPool(self.host, self.port) as pool:
for _ in range(x):
response = pool.request(
"GET",
"/chunked",
headers={"Connection": "keep-alive"},
preload_content=False,
retries=False,
)
for chunk in response.stream():
assert chunk == b"123"
assert pool.num_connections == 1
assert pool.num_requests == x
def test_read_chunked_short_circuit(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
response = pool.request("GET", "/chunked", preload_content=False)
response.read()
with pytest.raises(StopIteration):
next(response.read_chunked())
def test_read_chunked_on_closed_response(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
response = pool.request("GET", "/chunked", preload_content=False)
response.close()
with pytest.raises(StopIteration):
next(response.read_chunked())
def test_chunked_gzip(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
response = pool.request(
"GET", "/chunked_gzip", preload_content=False, decode_content=True
)
assert b"123" * 4 == response.read()
def test_cleanup_on_connection_error(self) -> None:
"""
Test that connections are recycled to the pool on
connection errors where no http response is received.
"""
poolsize = 3
with HTTPConnectionPool(
self.host, self.port, maxsize=poolsize, block=True
) as http:
assert http.pool is not None
assert http.pool.qsize() == poolsize
# force a connection error by supplying a non-existent
# url. We won't get a response for this and so the
# conn won't be implicitly returned to the pool.
with pytest.raises(MaxRetryError):
http.request(
"GET",
"/redirect",
fields={"target": "/"},
release_conn=False,
retries=0,
)
r = http.request(
"GET",
"/redirect",
fields={"target": "/"},
release_conn=False,
retries=1,
)
r.release_conn()
# the pool should still contain poolsize elements
assert http.pool.qsize() == http.pool.maxsize
def test_shutdown_on_connection_released_to_pool(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
resp = pool.urlopen("GET", "/", preload_content=False)
resp.drain_conn()
resp.release_conn()
with pytest.raises(
RuntimeError,
match="Cannot shutdown as connection has already been released to the pool",
):
resp.shutdown()
def test_mixed_case_hostname(self) -> None:
with HTTPConnectionPool("LoCaLhOsT", self.port) as pool:
response = pool.request("GET", f"http://LoCaLhOsT:{self.port}/")
assert response.status == 200
def test_preserves_path_dot_segments(self) -> None:
"""ConnectionPool preserves dot segments in the URI"""
with HTTPConnectionPool(self.host, self.port) as pool:
response = pool.request("GET", "/echo_uri/seg0/../seg2")
assert response.data == b"/echo_uri/seg0/../seg2?"
def test_default_user_agent_header(self) -> None:
"""ConnectionPool has a default user agent"""
default_ua = _get_default_user_agent()
custom_ua = "I'm not a web scraper, what are you talking about?"
custom_ua2 = "Yet Another User Agent"
with HTTPConnectionPool(self.host, self.port) as pool:
# Use default user agent if no user agent was specified.
r = pool.request("GET", "/headers")
request_headers = r.json()
assert request_headers.get("User-Agent") == _get_default_user_agent()
# Prefer the request user agent over the default.
headers = {"UsEr-AGENt": custom_ua}
r = pool.request("GET", "/headers", headers=headers)
request_headers = r.json()
assert request_headers.get("User-Agent") == custom_ua
# Do not modify pool headers when using the default user agent.
pool_headers = {"foo": "bar"}
pool.headers = pool_headers
r = pool.request("GET", "/headers")
request_headers = r.json()
assert request_headers.get("User-Agent") == default_ua
assert "User-Agent" not in pool_headers
pool.headers.update({"User-Agent": custom_ua2})
r = pool.request("GET", "/headers")
request_headers = r.json()
assert request_headers.get("User-Agent") == custom_ua2
@pytest.mark.parametrize(
"headers",
[
None,
{},
{"User-Agent": "key"},
{"user-agent": "key"},
{b"uSeR-AgEnT": b"key"},
{b"user-agent": "key"},
],
)
@pytest.mark.parametrize("chunked", [True, False])
def test_user_agent_header_not_sent_twice(
self, headers: dict[str, str] | None, chunked: bool
) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/headers", headers=headers, chunked=chunked)
request_headers = r.json()
if not headers:
assert request_headers["User-Agent"].startswith("python-urllib3/")
assert "key" not in request_headers["User-Agent"]
else:
assert request_headers["User-Agent"] == "key"
def test_no_user_agent_header(self) -> None:
"""ConnectionPool can suppress sending a user agent header"""
custom_ua = "I'm not a web scraper, what are you talking about?"
with HTTPConnectionPool(self.host, self.port) as pool:
# Suppress user agent in the request headers.
no_ua_headers = {"User-Agent": SKIP_HEADER}
r = pool.request("GET", "/headers", headers=no_ua_headers)
request_headers = r.json()
assert "User-Agent" not in request_headers
assert no_ua_headers["User-Agent"] == SKIP_HEADER
# Suppress user agent in the pool headers.
pool.headers = no_ua_headers
r = pool.request("GET", "/headers")
request_headers = r.json()
assert "User-Agent" not in request_headers
assert no_ua_headers["User-Agent"] == SKIP_HEADER
# Request headers override pool headers.
pool_headers = {"User-Agent": custom_ua}
pool.headers = pool_headers
r = pool.request("GET", "/headers", headers=no_ua_headers)
request_headers = r.json()
assert "User-Agent" not in request_headers
assert no_ua_headers["User-Agent"] == SKIP_HEADER
assert pool_headers.get("User-Agent") == custom_ua
@pytest.mark.parametrize("header", ["Content-Length", "content-length"])
@pytest.mark.parametrize("chunked", [True, False])
def test_skip_header_non_supported(self, header: str, chunked: bool) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
with pytest.raises(
ValueError,
match="urllib3.util.SKIP_HEADER only supports 'Accept-Encoding', 'Host', 'User-Agent'",
) as e:
pool.request(
"GET", "/headers", headers={header: SKIP_HEADER}, chunked=chunked
)
# Ensure that the error message stays up to date with 'SKIP_HEADER_SUPPORTED_HEADERS'
assert all(
("'" + header.title() + "'") in str(e.value)
for header in SKIPPABLE_HEADERS
)
@pytest.mark.parametrize("chunked", [True, False])
@pytest.mark.parametrize("pool_request", [True, False])
@pytest.mark.parametrize("header_type", [dict, HTTPHeaderDict])
def test_headers_not_modified_by_request(
self,
chunked: bool,
pool_request: bool,
header_type: type[dict[str, str] | HTTPHeaderDict],
) -> None:
# Test that the .request*() methods of ConnectionPool and HTTPConnection
# don't modify the given 'headers' structure, instead they should
# make their own internal copies at request time.
headers = header_type()
headers["key"] = "val"
with HTTPConnectionPool(self.host, self.port) as pool:
pool.headers = headers
if pool_request:
pool.request("GET", "/headers", chunked=chunked)
else:
conn = pool._get_conn()
conn.request("GET", "/headers", chunked=chunked)
conn.getresponse().close()
conn.close()
assert pool.headers == {"key": "val"}
assert type(pool.headers) is header_type
with HTTPConnectionPool(self.host, self.port) as pool:
if pool_request:
pool.request("GET", "/headers", headers=headers, chunked=chunked)
else:
conn = pool._get_conn()
conn.request("GET", "/headers", headers=headers, chunked=chunked)
conn.getresponse().close()
conn.close()
assert headers == {"key": "val"}
def test_request_chunked_is_deprecated(
self,
) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
conn = pool._get_conn()
with pytest.warns(DeprecationWarning) as w:
conn.request_chunked("GET", "/headers") # type: ignore[attr-defined]
assert len(w) == 1 and str(w[0].message) == (
"HTTPConnection.request_chunked() is deprecated and will be removed in urllib3 v2.1.0. "
"Instead use HTTPConnection.request(..., chunked=True)."
)
resp = conn.getresponse()
assert resp.status == 200
assert resp.json()["Transfer-Encoding"] == "chunked"
conn.close()
def test_bytes_header(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"User-Agent": "test header"}
r = pool.request("GET", "/headers", headers=headers)
request_headers = r.json()
assert "User-Agent" in request_headers
assert request_headers["User-Agent"] == "test header"
@pytest.mark.parametrize(
"user_agent", ["Schönefeld/1.18.0", "Schönefeld/1.18.0".encode("iso-8859-1")]
)
def test_user_agent_non_ascii_user_agent(self, user_agent: str) -> None:
with HTTPConnectionPool(self.host, self.port, retries=False) as pool:
r = pool.urlopen(
"GET",
"/headers",
headers={"User-Agent": user_agent},
)
request_headers = r.json()
assert "User-Agent" in request_headers
assert request_headers["User-Agent"] == "Schönefeld/1.18.0"
| TestConnectionPool |
python | Netflix__metaflow | test/unit/test_secrets_decorator.py | {
"start": 4569,
"end": 6609
} | class ____(unittest.TestCase):
def test_validate_env_vars_across_secrets(self):
# overlap
all_secrets_env_vars = [
(SecretSpec.secret_spec_from_str("t.1", None), {"A": "a", "B": "b"}),
(SecretSpec.secret_spec_from_str("t.2", None), {"B": "b", "C": "c"}),
]
with self.assertRaises(MetaflowException):
validate_env_vars_across_secrets(all_secrets_env_vars)
def test_validate_env_vars_vs_existing_env(self):
# assumes there is at least one existing env var - quite reasonable
existing_os_env_k, existing_os_env_v = next(iter(os.environ.items()))
all_secrets_env_vars = [
(
SecretSpec.secret_spec_from_str("t.1", None),
{"A": "a", existing_os_env_k: existing_os_env_v},
),
]
with self.assertRaises(MetaflowException):
validate_env_vars_vs_existing_env(all_secrets_env_vars)
def test_validate_env_vars(self):
# happy path
env_vars = {
"TYPICAL_KEY_1": "TYPICAL_VALUE_1",
"_typical_key_2": "typical_value_2",
}
validate_env_vars(env_vars)
# keys with wrong type
mistyped_keys = [1, tuple(), b"old_school"]
for k in mistyped_keys:
with self.assertRaises(MetaflowException):
validate_env_vars({k: "v"})
# values with wrong type
mistyped_values = [1, {}, b"old_school"]
for i, v in enumerate(mistyped_values):
with self.assertRaises(MetaflowException):
validate_env_vars({f"K{i}": v})
# weird keys
weird_keys = [
"1_",
"hello world",
"hey_arnold!",
"I_\u2665_NY",
"door-",
"METAFLOW_SOMETHING_OR_OTHER",
]
for k in weird_keys:
with self.assertRaises(MetaflowException):
validate_env_vars({k: "v"})
if __name__ == "__main__":
unittest.main()
| TestEnvVarValidations |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass12.py | {
"start": 258,
"end": 305
} | class ____:
x: int
y: int
@dataclass
| Base |
python | doocs__leetcode | solution/1000-1099/1067.Digit Count in Range/Solution.py | {
"start": 0,
"end": 747
} | class ____:
def digitsCount(self, d: int, low: int, high: int) -> int:
return self.f(high, d) - self.f(low - 1, d)
def f(self, n, d):
@cache
def dfs(pos, cnt, lead, limit):
if pos <= 0:
return cnt
up = a[pos] if limit else 9
ans = 0
for i in range(up + 1):
if i == 0 and lead:
ans += dfs(pos - 1, cnt, lead, limit and i == up)
else:
ans += dfs(pos - 1, cnt + (i == d), False, limit and i == up)
return ans
a = [0] * 11
l = 0
while n:
l += 1
a[l] = n % 10
n //= 10
return dfs(l, 0, True, True)
| Solution |
python | huggingface__transformers | src/transformers/pipelines/feature_extraction.py | {
"start": 534,
"end": 3406
} | class ____(Pipeline):
"""
Feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base
transformer, which can be used as features in downstream tasks.
Example:
```python
>>> from transformers import pipeline
>>> extractor = pipeline(model="google-bert/bert-base-uncased", task="feature-extraction")
>>> result = extractor("This is a simple test.", return_tensors=True)
>>> result.shape # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input string.
torch.Size([1, 8, 768])
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
`"feature-extraction"`.
All models may be used for this pipeline. See a list of all models, including community-contributed models on
[huggingface.co/models](https://huggingface.co/models).
"""
_load_processor = False
_load_image_processor = False
_load_feature_extractor = False
_load_tokenizer = True
def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
if tokenize_kwargs is None:
tokenize_kwargs = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
)
tokenize_kwargs["truncation"] = truncation
preprocess_params = tokenize_kwargs
postprocess_params = {}
if return_tensors is not None:
postprocess_params["return_tensors"] = return_tensors
return preprocess_params, {}, postprocess_params
def preprocess(self, inputs, **tokenize_kwargs) -> dict[str, GenericTensor]:
model_inputs = self.tokenizer(inputs, return_tensors="pt", **tokenize_kwargs)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, return_tensors=False):
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
return model_outputs[0].tolist()
def __call__(self, *args: str | list[str], **kwargs: Any) -> Any | list[Any]:
"""
Extract the features of the input(s) text.
Args:
args (`str` or `list[str]`): One or several texts (or one list of texts) to get the features of.
Return:
A nested list of `float`: The features computed by the model.
"""
return super().__call__(*args, **kwargs)
| FeatureExtractionPipeline |
python | django__django | tests/apps/query_performing_app/apps.py | {
"start": 70,
"end": 433
} | class ____(AppConfig):
name = "apps.query_performing_app"
database = "default"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.query_results = []
def ready(self):
self.query_results = []
self._perform_query()
def _perform_query(self):
raise NotImplementedError
| BaseAppConfig |
python | more-itertools__more-itertools | more_itertools/more.py | {
"start": 7432,
"end": 32078
} | class ____:
"""Wrap an iterator to allow lookahead and prepending elements.
Call :meth:`peek` on the result to get the value that will be returned
by :func:`next`. This won't advance the iterator:
>>> p = peekable(['a', 'b'])
>>> p.peek()
'a'
>>> next(p)
'a'
Pass :meth:`peek` a default value to return that instead of raising
``StopIteration`` when the iterator is exhausted.
>>> p = peekable([])
>>> p.peek('hi')
'hi'
peekables also offer a :meth:`prepend` method, which "inserts" items
at the head of the iterable:
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> p.peek()
11
>>> list(p)
[11, 12, 1, 2, 3]
peekables can be indexed. Index 0 is the item that will be returned by
:func:`next`, index 1 is the item after that, and so on:
The values up to the given index will be cached.
>>> p = peekable(['a', 'b', 'c', 'd'])
>>> p[0]
'a'
>>> p[1]
'b'
>>> next(p)
'a'
Negative indexes are supported, but be aware that they will cache the
remaining items in the source iterator, which may require significant
storage.
To check whether a peekable is exhausted, check its truth value:
>>> p = peekable(['a', 'b'])
>>> if p: # peekable has items
... list(p)
['a', 'b']
>>> if not p: # peekable is exhausted
... list(p)
[]
"""
def __init__(self, iterable):
self._it = iter(iterable)
self._cache = deque()
def __iter__(self):
return self
def __bool__(self):
try:
self.peek()
except StopIteration:
return False
return True
def peek(self, default=_marker):
"""Return the item that will be next returned from ``next()``.
Return ``default`` if there are no items left. If ``default`` is not
provided, raise ``StopIteration``.
"""
if not self._cache:
try:
self._cache.append(next(self._it))
except StopIteration:
if default is _marker:
raise
return default
return self._cache[0]
def prepend(self, *items):
"""Stack up items to be the next ones returned from ``next()`` or
``self.peek()``. The items will be returned in
first in, first out order::
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> list(p)
[11, 12, 1, 2, 3]
It is possible, by prepending items, to "resurrect" a peekable that
previously raised ``StopIteration``.
>>> p = peekable([])
>>> next(p)
Traceback (most recent call last):
...
StopIteration
>>> p.prepend(1)
>>> next(p)
1
>>> next(p)
Traceback (most recent call last):
...
StopIteration
"""
self._cache.extendleft(reversed(items))
def __next__(self):
if self._cache:
return self._cache.popleft()
return next(self._it)
def _get_slice(self, index):
# Normalize the slice's arguments
step = 1 if (index.step is None) else index.step
if step > 0:
start = 0 if (index.start is None) else index.start
stop = maxsize if (index.stop is None) else index.stop
elif step < 0:
start = -1 if (index.start is None) else index.start
stop = (-maxsize - 1) if (index.stop is None) else index.stop
else:
raise ValueError('slice step cannot be zero')
# If either the start or stop index is negative, we'll need to cache
# the rest of the iterable in order to slice from the right side.
if (start < 0) or (stop < 0):
self._cache.extend(self._it)
# Otherwise we'll need to find the rightmost index and cache to that
# point.
else:
n = min(max(start, stop) + 1, maxsize)
cache_len = len(self._cache)
if n >= cache_len:
self._cache.extend(islice(self._it, n - cache_len))
return list(self._cache)[index]
def __getitem__(self, index):
if isinstance(index, slice):
return self._get_slice(index)
cache_len = len(self._cache)
if index < 0:
self._cache.extend(self._it)
elif index >= cache_len:
self._cache.extend(islice(self._it, index + 1 - cache_len))
return self._cache[index]
def consumer(func):
"""Decorator that automatically advances a PEP-342-style "reverse iterator"
to its first yield point so you don't have to call ``next()`` on it
manually.
>>> @consumer
... def tally():
... i = 0
... while True:
... print('Thing number %s is %s.' % (i, (yield)))
... i += 1
...
>>> t = tally()
>>> t.send('red')
Thing number 0 is red.
>>> t.send('fish')
Thing number 1 is fish.
Without the decorator, you would have to call ``next(t)`` before
``t.send()`` could be used.
"""
@wraps(func)
def wrapper(*args, **kwargs):
gen = func(*args, **kwargs)
next(gen)
return gen
return wrapper
def ilen(iterable):
"""Return the number of items in *iterable*.
For example, there are 168 prime numbers below 1,000:
>>> ilen(sieve(1000))
168
Equivalent to, but faster than::
def ilen(iterable):
count = 0
for _ in iterable:
count += 1
return count
This fully consumes the iterable, so handle with care.
"""
# This is the "most beautiful of the fast variants" of this function.
# If you think you can improve on it, please ensure that your version
# is both 10x faster and 10x more beautiful.
return sum(compress(repeat(1), zip(iterable)))
def iterate(func, start):
"""Return ``start``, ``func(start)``, ``func(func(start))``, ...
Produces an infinite iterator. To add a stopping condition,
use :func:`take`, ``takewhile``, or :func:`takewhile_inclusive`:.
>>> take(10, iterate(lambda x: 2*x, 1))
[1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
>>> collatz = lambda x: 3*x + 1 if x%2==1 else x // 2
>>> list(takewhile_inclusive(lambda x: x!=1, iterate(collatz, 10)))
[10, 5, 16, 8, 4, 2, 1]
"""
with suppress(StopIteration):
while True:
yield start
start = func(start)
def with_iter(context_manager):
"""Wrap an iterable in a ``with`` statement, so it closes once exhausted.
For example, this will close the file when the iterator is exhausted::
upper_lines = (line.upper() for line in with_iter(open('foo')))
Any context manager which returns an iterable is a candidate for
``with_iter``.
"""
with context_manager as iterable:
yield from iterable
def one(iterable, too_short=None, too_long=None):
"""Return the first item from *iterable*, which is expected to contain only
that item. Raise an exception if *iterable* is empty or has more than one
item.
:func:`one` is useful for ensuring that an iterable contains only one item.
For example, it can be used to retrieve the result of a database query
that is expected to return a single row.
If *iterable* is empty, ``ValueError`` will be raised. You may specify a
different exception with the *too_short* keyword:
>>> it = []
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: too few items in iterable (expected 1)'
>>> too_short = IndexError('too few items')
>>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
IndexError: too few items
Similarly, if *iterable* contains more than one item, ``ValueError`` will
be raised. You may specify a different exception with the *too_long*
keyword:
>>> it = ['too', 'many']
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Expected exactly one item in iterable, but got 'too',
'many', and perhaps more.
>>> too_long = RuntimeError
>>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
RuntimeError
Note that :func:`one` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check iterable
contents less destructively.
"""
iterator = iter(iterable)
for first in iterator:
for second in iterator:
msg = (
f'Expected exactly one item in iterable, but got {first!r}, '
f'{second!r}, and perhaps more.'
)
raise too_long or ValueError(msg)
return first
raise too_short or ValueError('too few items in iterable (expected 1)')
def raise_(exception, *args):
raise exception(*args)
def strictly_n(iterable, n, too_short=None, too_long=None):
    """Validate that *iterable* has exactly *n* items, yielding them if so.

    >>> list(strictly_n(['a', 'b', 'c', 'd'], 4))
    ['a', 'b', 'c', 'd']

    Note that the returned generator must be consumed for the check to be
    made.

    If the iterable has fewer than *n* items, *too_short* is called with
    the actual item count; if it has more, *too_long* is called with
    ``n + 1``. By default both raise ``ValueError``:

    >>> list(strictly_n('ab', 3))  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ValueError: too few items in iterable (got 2)
    >>> list(strictly_n('abc', 2))  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ValueError: too many items in iterable (got at least 3)

    Supplying callables that do not raise allows the check to be relaxed:

    >>> it = strictly_n('abcdef', 4, too_long=lambda count: None)
    >>> list(it)
    ['a', 'b', 'c', 'd']
    """
    if too_short is None:
        def too_short(item_count):
            raise ValueError(f'Too few items in iterable (got {item_count})')

    if too_long is None:
        def too_long(item_count):
            raise ValueError(
                f'Too many items in iterable (got at least {item_count})'
            )

    iterator = iter(iterable)

    # Pass through at most n items, counting what we actually delivered.
    delivered = 0
    for element in islice(iterator, n):
        yield element
        delivered += 1

    if delivered < n:
        too_short(delivered)
        return

    # Probe for a single extra item; its existence means the input was too
    # long.
    try:
        next(iterator)
    except StopIteration:
        return
    too_long(n + 1)
def distinct_permutations(iterable, r=None):
    """Yield successive distinct permutations of the elements in *iterable*.

    >>> sorted(distinct_permutations([1, 0, 1]))
    [(0, 1, 1), (1, 0, 1), (1, 1, 0)]

    Equivalent to yielding from ``set(permutations(iterable))``, except
    duplicates are not generated and thrown away. For larger input sequences
    this is much more efficient.

    Duplicate permutations arise when there are duplicated elements in the
    input iterable. The number of items returned is
    `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
    items input, and each `x_i` is the count of a distinct item in the input
    sequence. The function :func:`multinomial` computes this directly.

    If *r* is given, only the *r*-length permutations are yielded.

    >>> sorted(distinct_permutations([1, 0, 1], r=2))
    [(0, 1), (1, 0), (1, 1)]
    >>> sorted(distinct_permutations(range(3), r=2))
    [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]

    *iterable* need not be sortable, but note that using equal (``x == y``)
    but non-identical (``id(x) != id(y)``) elements may produce surprising
    behavior. For example, ``1`` and ``True`` are equal but non-identical:

    >>> list(distinct_permutations([1, True, '3']))  # doctest: +SKIP
    [
        (1, True, '3'),
        (1, '3', True),
        ('3', 1, True)
    ]
    >>> list(distinct_permutations([1, 2, '3']))  # doctest: +SKIP
    [
        (1, 2, '3'),
        (1, '3', 2),
        (2, 1, '3'),
        (2, '3', 1),
        ('3', 1, 2),
        ('3', 2, 1)
    ]
    """

    # Full-length case: classic "next lexicographic permutation" algorithm
    # over a sorted list, mutated in place. Algorithm: https://w.wiki/Qai
    def _full(A):
        while True:
            # Yield the permutation we have
            yield tuple(A)

            # Find the largest index i such that A[i] < A[i + 1]
            for i in range(size - 2, -1, -1):
                if A[i] < A[i + 1]:
                    break
            # If no such index exists, this permutation is the last one
            else:
                return

            # Find the largest index j greater than i such that A[i] < A[j]
            for j in range(size - 1, i, -1):
                if A[i] < A[j]:
                    break

            # Swap the value of A[i] with that of A[j], then reverse the
            # sequence from A[i + 1] to form the new permutation
            A[i], A[j] = A[j], A[i]
            A[i + 1 :] = A[: i - size : -1]  # A[i + 1:][::-1]

    # r-length case: modified from the above; only the first r items (the
    # "head") are emitted, the remaining items form the "tail".
    def _partial(A, r):
        # Split A into the first r items and the remaining items
        head, tail = A[:r], A[r:]
        right_head_indexes = range(r - 1, -1, -1)
        left_tail_indexes = range(len(tail))

        while True:
            # Yield the permutation we have
            yield tuple(head)

            # Starting from the right, find the first index of the head with
            # value smaller than the maximum value of the tail - call it i.
            pivot = tail[-1]
            for i in right_head_indexes:
                if head[i] < pivot:
                    break
                pivot = head[i]
            else:
                return

            # Starting from the left, find the first value of the tail
            # with a value greater than head[i] and swap.
            for j in left_tail_indexes:
                if tail[j] > head[i]:
                    head[i], tail[j] = tail[j], head[i]
                    break
            # If we didn't find one, start from the right and find the first
            # index of the head with a value greater than head[i] and swap.
            else:
                for j in right_head_indexes:
                    if head[j] > head[i]:
                        head[i], head[j] = head[j], head[i]
                        break

            # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
            tail += head[: i - r : -1]  # head[i + 1:][::-1]
            i += 1
            head[i:], tail[:] = tail[: r - i], tail[r - i :]

    items = list(iterable)

    try:
        items.sort()
        sortable = True
    except TypeError:
        sortable = False

        # Unsortable items: permute first-occurrence indices instead (ints,
        # which are sortable), then map each index back to an original item.
        # Equal items share an index; cycling through the stored duplicates
        # reuses the original objects in the output.
        indices_dict = defaultdict(list)

        for item in items:
            indices_dict[items.index(item)].append(item)

        indices = [items.index(item) for item in items]
        indices.sort()

        equivalent_items = {k: cycle(v) for k, v in indices_dict.items()}

        def permuted_items(permuted_indices):
            return tuple(
                next(equivalent_items[index]) for index in permuted_indices
            )

    size = len(items)
    if r is None:
        r = size

    # Bind r so that _partial can be driven exactly like _full.
    # functools.partial(_partial, ... )
    algorithm = _full if (r == size) else partial(_partial, r=r)

    if 0 < r <= size:
        if sortable:
            return algorithm(items)
        else:
            return (
                permuted_items(permuted_indices)
                for permuted_indices in algorithm(indices)
            )

    # r == 0 yields the single empty permutation; r > size yields nothing.
    return iter(() if r else ((),))
def derangements(iterable, r=None):
    """Yield successive derangements of the elements in *iterable*.

    A derangement is a permutation with no fixed points: no element appears
    at its original index.

    >>> sorted(derangements(range(3)))
    [(1, 2, 0), (2, 0, 1)]

    If *r* is given, only the *r*-length derangements are yielded.

    >>> sorted(derangements(range(3), 2))
    [(1, 0), (1, 2), (2, 0)]
    >>> sorted(derangements([0, 2, 3], 2))
    [(2, 0), (2, 3), (3, 0)]

    Elements are treated as unique based on their position, not their value,
    so equal-valued inputs can produce output that appears to contain fixed
    points. Make the inputs distinct (e.g. by appending each element's
    index) to avoid this.

    The number of derangements of a set of size *n* is known as the
    "subfactorial of n". For n > 0, the subfactorial is:
    ``round(math.factorial(n) / math.e)``.

    References:
        * Article: https://www.numberanalytics.com/blog/ultimate-guide-to-derangements-in-combinatorics
        * Sizes: https://oeis.org/A000166
    """
    pool = tuple(iterable)
    positions = tuple(range(len(pool)))

    # A permutation of the positions is a derangement iff every permuted
    # position differs from the slot it landed in. Walk the position
    # permutations in lockstep with the element permutations and keep only
    # the deranged ones.
    deranged_flags = (
        all(map(is_not, positions, permuted))
        for permuted in permutations(positions, r=r)
    )
    return compress(permutations(pool, r=r), deranged_flags)
def intersperse(e, iterable, n=1):
    """Intersperse filler element *e* among the items in *iterable*, leaving
    *n* items between each filler element.

    >>> list(intersperse('!', [1, 2, 3, 4, 5]))
    [1, '!', 2, '!', 3, '!', 4, '!', 5]

    >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
    [1, 2, None, 3, 4, None, 5]

    :raises ValueError: if *n* is less than 1.
    """
    # Reject all non-positive n eagerly. The previous check (n == 0) let a
    # negative n through; it then failed lazily, on consumption, with an
    # unrelated error raised from islice() deep inside chunked().
    if n < 1:
        raise ValueError('n must be > 0')
    elif n == 1:
        # interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2...
        # islice(..., 1, None) -> x_0, e, x_1, e, x_2...
        return islice(interleave(repeat(e), iterable), 1, None)
    else:
        # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
        # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
        # flatten(...) -> x_0, x_1, e, x_2, x_3...
        filler = repeat([e])
        chunks = chunked(iterable, n)
        return flatten(islice(interleave(filler, chunks), 1, None))
def unique_to_each(*iterables):
    """Return, for each input iterable, the elements found in no other
    input iterable.

    For example, suppose you have a set of packages, each with a set of
    dependencies::

        {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}

    If you remove one package, which dependencies can also be removed?
    ``A`` is only needed for ``pkg_1``, ``C`` only for ``pkg_2``, and ``D``
    only for ``pkg_3``::

        >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
        [['A'], ['C'], ['D']]

    Duplicates within a single input are preserved, and input order is
    kept::

        >>> unique_to_each("mississippi", "missouri")
        [['p', 'p'], ['o', 'u', 'r']]

    The elements of each iterable must be hashable.
    """
    materialized = [list(group) for group in iterables]

    # Count how many *distinct* inputs each element appears in.
    membership = Counter()
    for group in materialized:
        membership.update(set(group))
    singletons = {
        element for element, count in membership.items() if count == 1
    }

    return [
        [element for element in group if element in singletons]
        for group in materialized
    ]
def windowed(seq, n, fillvalue=None, step=1):
    """Return a sliding window of width *n* over the given iterable.

    >>> list(windowed([1, 2, 3, 4, 5], 3))
    [(1, 2, 3), (2, 3, 4), (3, 4, 5)]

    When the window is larger than the iterable, *fillvalue* is used in
    place of missing values:

    >>> list(windowed([1, 2, 3], 4))
    [(1, 2, 3, None)]

    Each window advances in increments of *step*:

    >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
    [(1, 2, 3), (3, 4, 5), (5, 6, '!')]

    To slide into the iterable's items, prepend filler items with
    :func:`chain`:

    >>> list(windowed(chain([None, None], [1, 2, 3]), 3))
    [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
    """
    if n < 0:
        raise ValueError('n must be >= 0')
    if n == 0:
        yield ()
        return
    if step < 1:
        raise ValueError('step must be >= 1')

    iterator = iter(seq)

    # Fill the first window; the deque's maxlen makes later appends slide it.
    window = deque(islice(iterator, n), maxlen=n)
    if not window:
        return
    missing = n - len(window)
    if missing:
        # Fewer than n items total: emit a single padded window.
        yield tuple(window) + (fillvalue,) * missing
        return
    yield tuple(window)

    # Pad the source with just enough fill values that the final window can
    # always be completed.
    pad_count = n - 1 if step >= n else step - 1

    appended = 0
    for value in chain(iterator, (fillvalue,) * pad_count):
        window.append(value)
        appended += 1
        if appended == step:
            appended = 0
            yield tuple(window)
def substrings(iterable):
    """Yield every contiguous substring of *iterable*, shortest first.

    >>> [''.join(s) for s in substrings('more')]
    ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']

    Non-string iterables can also be subdivided; each substring is yielded
    as a tuple.

    >>> list(substrings([0, 1, 2]))
    [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]

    Like subslices() but returns tuples instead of lists and returns the
    shortest substrings first.
    """
    seq = tuple(iterable)
    total = len(seq)
    for length in range(1, total + 1):
        for start in range(total - length + 1):
            yield seq[start : start + length]
def substrings_indexes(seq, reverse=False):
    """Yield all substrings of *seq* together with their positions.

    Each item is a tuple ``(substr, i, j)`` such that ``substr == seq[i:j]``.
    This only works for iterables that support slicing, such as ``str``
    objects.

    >>> for item in substrings_indexes('more'):
    ...     print(item)
    ('m', 0, 1)
    ('o', 1, 2)
    ('r', 2, 3)
    ('e', 3, 4)
    ('mo', 0, 2)
    ('or', 1, 3)
    ('re', 2, 4)
    ('mor', 0, 3)
    ('ore', 1, 4)
    ('more', 0, 4)

    Set *reverse* to ``True`` to yield the same items in the opposite
    order (longest substrings first).
    """
    size = len(seq)
    # reversed(range(1, size + 1)) walks the same lengths as
    # range(size, 0, -1); only the length order flips, not the start order.
    order = range(size, 0, -1) if reverse else range(1, size + 1)
    return (
        (seq[start : start + length], start, start + length)
        for length in order
        for start in range(size - length + 1)
    )
| peekable |
python | google__jax | docs/sphinxext/jax_extensions.py | {
"start": 1742,
"end": 2190
} | class ____(Directive):
"""A noop variant of the ``legacy`` directive from SciPy."""
has_content = True
node_class = nodes.admonition
optional_arguments = 1
def run(self):
return []
def setup(app):
app.add_directive("legacy", LegacyDirective)
app.add_role('jax-issue', jax_issue_role)
app.add_role('doi', doi_role)
return {
'version': 1.0,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| LegacyDirective |
python | getsentry__sentry-python | sentry_sdk/integrations/unleash.py | {
"start": 297,
"end": 1058
} | class ____(Integration):
identifier = "unleash"
@staticmethod
def setup_once():
# type: () -> None
# Wrap and patch evaluation methods (class methods)
old_is_enabled = UnleashClient.is_enabled
@wraps(old_is_enabled)
def sentry_is_enabled(self, feature, *args, **kwargs):
# type: (UnleashClient, str, *Any, **Any) -> Any
enabled = old_is_enabled(self, feature, *args, **kwargs)
# We have no way of knowing what type of unleash feature this is, so we have to treat
# it as a boolean / toggle feature.
add_feature_flag(feature, enabled)
return enabled
UnleashClient.is_enabled = sentry_is_enabled # type: ignore
| UnleashIntegration |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/feature_flags.py | {
"start": 206,
"end": 1477
} | class ____(Enum):
SHOW_SINGLE_RUN_BACKFILL_TOGGLE = "SHOW_SINGLE_RUN_BACKFILL_TOGGLE"
def get_feature_flags_for_location(
code_location_entry: "CodeLocationEntry",
) -> Mapping[CodeLocationFeatureFlags, bool]:
return {
CodeLocationFeatureFlags.SHOW_SINGLE_RUN_BACKFILL_TOGGLE: (
get_should_show_single_run_backfill_toggle(code_location_entry)
)
}
def get_should_show_single_run_backfill_toggle(code_location_entry: "CodeLocationEntry"):
# Starting in version 1.5 we stopped showing the single-run backfill toggle in the UI -
# instead it is now set in code
if not code_location_entry.code_location:
# Error or loading status
return False
dagster_library_version = (
code_location_entry.code_location.get_dagster_library_versions() or {}
).get("dagster")
if not dagster_library_version:
# Old enough version that it wasn't being stored
return True
if dagster_library_version == "1!0+dev":
return False
try:
version = packaging.version.parse(dagster_library_version)
return version.major < 1 or (version.major == 1 and version.minor < 5)
except packaging.version.InvalidVersion:
return False
| CodeLocationFeatureFlags |
python | Textualize__rich | rich/markup.py | {
"start": 453,
"end": 8427
} | class ____(NamedTuple):
"""A tag in console markup."""
name: str
"""The tag name. e.g. 'bold'."""
parameters: Optional[str]
"""Any additional parameters after the name."""
def __str__(self) -> str:
return (
self.name if self.parameters is None else f"{self.name} {self.parameters}"
)
@property
def markup(self) -> str:
"""Get the string representation of this tag."""
return (
f"[{self.name}]"
if self.parameters is None
else f"[{self.name}={self.parameters}]"
)
_ReStringMatch = Match[str] # regex match object
_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub
_EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re
def escape(
markup: str,
_escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#/@][^[]*?])").sub,
) -> str:
"""Escapes text so that it won't be interpreted as markup.
Args:
markup (str): Content to be inserted in to markup.
Returns:
str: Markup with square brackets escaped.
"""
def escape_backslashes(match: Match[str]) -> str:
"""Called by re.sub replace matches."""
backslashes, text = match.groups()
return f"{backslashes}{backslashes}\\{text}"
markup = _escape(escape_backslashes, markup)
if markup.endswith("\\") and not markup.endswith("\\\\"):
return markup + "\\"
return markup
def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:
"""Parse markup in to an iterable of tuples of (position, text, tag).
Args:
markup (str): A string containing console markup
"""
position = 0
_divmod = divmod
_Tag = Tag
for match in RE_TAGS.finditer(markup):
full_text, escapes, tag_text = match.groups()
start, end = match.span()
if start > position:
yield start, markup[position:start], None
if escapes:
backslashes, escaped = _divmod(len(escapes), 2)
if backslashes:
# Literal backslashes
yield start, "\\" * backslashes, None
start += backslashes * 2
if escaped:
# Escape of tag
yield start, full_text[len(escapes) :], None
position = end
continue
text, equals, parameters = tag_text.partition("=")
yield start, None, _Tag(text, parameters if equals else None)
position = end
if position < len(markup):
yield position, markup[position:], None
def render(
markup: str,
style: Union[str, Style] = "",
emoji: bool = True,
emoji_variant: Optional[EmojiVariant] = None,
) -> Text:
"""Render console markup in to a Text instance.
Args:
markup (str): A string containing console markup.
style: (Union[str, Style]): The style to use.
emoji (bool, optional): Also render emoji code. Defaults to True.
emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None.
Raises:
MarkupError: If there is a syntax error in the markup.
Returns:
Text: A test instance.
"""
emoji_replace = _emoji_replace
if "[" not in markup:
return Text(
emoji_replace(markup, default_variant=emoji_variant) if emoji else markup,
style=style,
)
text = Text(style=style)
append = text.append
normalize = Style.normalize
style_stack: List[Tuple[int, Tag]] = []
pop = style_stack.pop
spans: List[Span] = []
append_span = spans.append
_Span = Span
_Tag = Tag
def pop_style(style_name: str) -> Tuple[int, Tag]:
"""Pop tag matching given style name."""
for index, (_, tag) in enumerate(reversed(style_stack), 1):
if tag.name == style_name:
return pop(-index)
raise KeyError(style_name)
for position, plain_text, tag in _parse(markup):
if plain_text is not None:
# Handle open brace escapes, where the brace is not part of a tag.
plain_text = plain_text.replace("\\[", "[")
append(emoji_replace(plain_text) if emoji else plain_text)
elif tag is not None:
if tag.name.startswith("/"): # Closing tag
style_name = tag.name[1:].strip()
if style_name: # explicit close
style_name = normalize(style_name)
try:
start, open_tag = pop_style(style_name)
except KeyError:
raise MarkupError(
f"closing tag '{tag.markup}' at position {position} doesn't match any open tag"
) from None
else: # implicit close
try:
start, open_tag = pop()
except IndexError:
raise MarkupError(
f"closing tag '[/]' at position {position} has nothing to close"
) from None
if open_tag.name.startswith("@"):
if open_tag.parameters:
handler_name = ""
parameters = open_tag.parameters.strip()
handler_match = RE_HANDLER.match(parameters)
if handler_match is not None:
handler_name, match_parameters = handler_match.groups()
parameters = (
"()" if match_parameters is None else match_parameters
)
try:
meta_params = literal_eval(parameters)
except SyntaxError as error:
raise MarkupError(
f"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}"
)
except Exception as error:
raise MarkupError(
f"error parsing {open_tag.parameters!r}; {error}"
) from None
if handler_name:
meta_params = (
handler_name,
meta_params
if isinstance(meta_params, tuple)
else (meta_params,),
)
else:
meta_params = ()
append_span(
_Span(
start, len(text), Style(meta={open_tag.name: meta_params})
)
)
else:
append_span(_Span(start, len(text), str(open_tag)))
else: # Opening tag
normalized_tag = _Tag(normalize(tag.name), tag.parameters)
style_stack.append((len(text), normalized_tag))
text_length = len(text)
while style_stack:
start, tag = style_stack.pop()
style = str(tag)
if style:
append_span(_Span(start, text_length, style))
text.spans = sorted(spans[::-1], key=attrgetter("start"))
return text
if __name__ == "__main__": # pragma: no cover
MARKUP = [
"[red]Hello World[/red]",
"[magenta]Hello [b]World[/b]",
"[bold]Bold[italic] bold and italic [/bold]italic[/italic]",
"Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog",
":warning-emoji: [bold red blink] DANGER![/]",
]
from rich import print
from rich.table import Table
grid = Table("Markup", "Result", padding=(0, 1))
for markup in MARKUP:
grid.add_row(Text(markup), markup)
print(grid)
| Tag |
python | django__django | tests/model_fields/models.py | {
"start": 16957,
"end": 17291
} | class ____(models.Model):
field = models.GeneratedField(
expression=Value("Constant", output_field=models.CharField(max_length=10)),
output_field=models.CharField(max_length=10),
db_persist=True,
)
class Meta:
required_db_features = {"supports_stored_generated_columns"}
| GeneratedModelParams |
python | mlflow__mlflow | mlflow/types/schema.py | {
"start": 17264,
"end": 20875
} | class ____(BaseType):
"""
Specification used to represent a json-convertible array.
"""
def __init__(
self,
dtype: ALLOWED_DTYPES,
) -> None:
try:
self._dtype = DataType[dtype] if isinstance(dtype, str) else dtype
except KeyError:
raise MlflowException(
f"Unsupported type '{dtype}', expected instance of DataType, Array, Object, Map or "
f"one of {[t.name for t in DataType]}"
)
if not isinstance(self.dtype, (Array, DataType, Object, Map, AnyType)):
raise MlflowException(
EXPECTED_TYPE_MESSAGE.format(arg_name="dtype", passed_type=self.dtype)
)
@property
def dtype(self) -> "Array" | DataType | Object | "Map" | "AnyType":
"""The array data type."""
return self._dtype
def __eq__(self, other) -> bool:
if isinstance(other, Array):
return self.dtype == other.dtype
return False
def to_dict(self):
items = (
{"type": self.dtype.name} if isinstance(self.dtype, DataType) else self.dtype.to_dict()
)
return {"type": ARRAY_TYPE, "items": items}
@classmethod
def from_json_dict(cls, **kwargs):
"""
Deserialize from a json loaded dictionary.
The dictionary is expected to contain `type` and
`items` keys.
Example: {"type": "array", "items": "string"}
"""
if not {"items", "type"} <= set(kwargs.keys()):
raise MlflowException(
"Missing keys in Array JSON. Expected to find keys `items` and `type`"
)
if kwargs["type"] != ARRAY_TYPE:
raise MlflowException("Type mismatch, Array expects `array` as the type")
if not isinstance(kwargs["items"], dict):
raise MlflowException("Expected items to be a dictionary of Object JSON")
if not {"type"} <= set(kwargs["items"].keys()):
raise MlflowException("Missing keys in Array's items JSON. Expected to find key `type`")
if kwargs["items"]["type"] == OBJECT_TYPE:
item_type = Object.from_json_dict(**kwargs["items"])
elif kwargs["items"]["type"] == ARRAY_TYPE:
item_type = Array.from_json_dict(**kwargs["items"])
elif kwargs["items"]["type"] == SPARKML_VECTOR_TYPE:
item_type = SparkMLVector()
elif kwargs["items"]["type"] == MAP_TYPE:
item_type = Map.from_json_dict(**kwargs["items"])
elif kwargs["items"]["type"] == ANY_TYPE:
item_type = AnyType()
else:
item_type = kwargs["items"]["type"]
return cls(dtype=item_type)
def __repr__(self) -> str:
return f"Array({self.dtype!r})"
def _merge(self, other: BaseType) -> Array:
if isinstance(other, AnyType) or self == other:
return deepcopy(self)
if not isinstance(other, Array):
raise MlflowException(f"Can't merge array with non-array type: {type(other).__name__}")
if isinstance(self.dtype, DataType):
if self.dtype == other.dtype:
return Array(dtype=self.dtype)
raise MlflowException(
f"Array types are incompatible for {self} with dtype={self.dtype} and "
f"{other} with dtype={other.dtype}"
)
if isinstance(self.dtype, (Array, Object, Map, AnyType)):
return Array(dtype=self.dtype._merge(other.dtype))
raise MlflowException(f"Array type {self!r} and {other!r} are incompatible")
| Array |
python | getsentry__sentry | src/sentry/identity/vsts/provider.py | {
"start": 6285,
"end": 8728
} | class ____(OAuth2Provider):
key = "vsts_new"
name = "Azure DevOps"
oauth_access_token_url = "https://login.microsoftonline.com/common/oauth2/v2.0/token"
oauth_authorize_url = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize"
# Using a new option
def get_oauth_client_id(self):
return options.get("vsts_new.client-id")
def get_oauth_client_secret(self):
return options.get("vsts_new.client-secret")
def get_refresh_token_url(self) -> str:
return self.oauth_access_token_url
def get_pipeline_views(self):
return [
# made a new view to override `get_authorize_params` for the new params needed for the oauth
VSTSOAuth2LoginView(
authorize_url=self.oauth_authorize_url,
client_id=self.get_oauth_client_id(),
scope=" ".join(self.get_oauth_scopes()),
),
VSTSNewOAuth2CallbackView(
access_token_url=self.oauth_access_token_url,
client_id=self.get_oauth_client_id(),
client_secret=self.get_oauth_client_secret(),
),
]
def get_refresh_token_headers(self):
return {"Content-Type": "application/x-www-form-urlencoded", "Content-Length": "1654"}
def get_refresh_token_params(
self, refresh_token: str, identity: Identity | RpcIdentity, **kwargs: Any
) -> dict[str, str | None]:
oauth_redirect_url = kwargs.get("redirect_url")
if oauth_redirect_url is None:
raise ValueError("VSTS requires oauth redirect url when refreshing identity")
return {
"grant_type": "refresh_token",
"client_id": self.get_oauth_client_id(),
"client_secret": self.get_oauth_client_secret(),
"refresh_token": refresh_token,
}
def build_identity(self, data):
data = data["data"]
access_token = data.get("access_token")
if not access_token:
raise PermissionDenied()
user = get_user_info(access_token)
return {
"type": IntegrationProviderSlug.AZURE_DEVOPS.value,
"id": user["id"],
"email": user["emailAddress"],
"email_verified": True,
"name": user["displayName"],
"scopes": sorted(self.oauth_scopes),
"data": self.get_oauth_data(data),
}
| VSTSNewIdentityProvider |
python | tiangolo__fastapi | tests/test_union_forms.py | {
"start": 250,
"end": 5319
} | class ____(BaseModel):
company_name: str
industry: str
@app.post("/form-union/")
def post_union_form(data: Annotated[Union[UserForm, CompanyForm], Form()]):
return {"received": data}
client = TestClient(app)
def test_post_user_form():
response = client.post(
"/form-union/", data={"name": "John Doe", "email": "john@example.com"}
)
assert response.status_code == 200, response.text
assert response.json() == {
"received": {"name": "John Doe", "email": "john@example.com"}
}
def test_post_company_form():
response = client.post(
"/form-union/", data={"company_name": "Tech Corp", "industry": "Technology"}
)
assert response.status_code == 200, response.text
assert response.json() == {
"received": {"company_name": "Tech Corp", "industry": "Technology"}
}
def test_invalid_form_data():
response = client.post(
"/form-union/",
data={"name": "John", "company_name": "Tech Corp"},
)
assert response.status_code == 422, response.text
def test_empty_form():
response = client.post("/form-union/")
assert response.status_code == 422, response.text
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/form-union/": {
"post": {
"summary": "Post Union Form",
"operationId": "post_union_form_form_union__post",
"requestBody": {
"content": {
"application/x-www-form-urlencoded": {
"schema": {
"anyOf": [
{"$ref": "#/components/schemas/UserForm"},
{"$ref": "#/components/schemas/CompanyForm"},
],
"title": "Data",
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
}
},
"components": {
"schemas": {
"CompanyForm": {
"properties": {
"company_name": {"type": "string", "title": "Company Name"},
"industry": {"type": "string", "title": "Industry"},
},
"type": "object",
"required": ["company_name", "industry"],
"title": "CompanyForm",
},
"HTTPValidationError": {
"properties": {
"detail": {
"items": {"$ref": "#/components/schemas/ValidationError"},
"type": "array",
"title": "Detail",
}
},
"type": "object",
"title": "HTTPValidationError",
},
"UserForm": {
"properties": {
"name": {"type": "string", "title": "Name"},
"email": {"type": "string", "title": "Email"},
},
"type": "object",
"required": ["name", "email"],
"title": "UserForm",
},
"ValidationError": {
"properties": {
"loc": {
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
"type": "array",
"title": "Location",
},
"msg": {"type": "string", "title": "Message"},
"type": {"type": "string", "title": "Error Type"},
},
"type": "object",
"required": ["loc", "msg", "type"],
"title": "ValidationError",
},
}
},
}
| CompanyForm |
python | jamielennox__requests-mock | requests_mock/adapter.py | {
"start": 1967,
"end": 7105
} | class ____(_RequestHistoryTracker):
"""Contains all the information about a provided URL to match."""
def __init__(self, method, url, responses, complete_qs, request_headers,
additional_matcher, real_http, case_sensitive):
"""
:param bool complete_qs: Match the entire query string. By default URLs
match if all the provided matcher query arguments are matched and
extra query arguments are ignored. Set complete_qs to true to
require that the entire query string needs to match.
"""
super(_Matcher, self).__init__()
self._method = method
self._url = url
self._responses = responses
self._complete_qs = complete_qs
self._request_headers = request_headers
self._real_http = real_http
self._additional_matcher = additional_matcher
# url can be a regex object or ANY so don't always run urlparse
if isinstance(url, str):
url_parts = urllib.parse.urlparse(url)
self._scheme = url_parts.scheme.lower()
self._netloc = url_parts.netloc.lower()
self._path = requote_uri(url_parts.path or '/')
self._query = url_parts.query
if not case_sensitive:
self._path = self._path.lower()
self._query = self._query.lower()
elif isinstance(url, purl_types):
self._scheme = url.scheme()
self._netloc = url.netloc()
self._path = url.path()
self._query = url.query()
if not case_sensitive:
self._path = self._path.lower()
self._query = self._query.lower()
else:
self._scheme = None
self._netloc = None
self._path = None
self._query = None
def _match_method(self, request):
if self._method is ANY:
return True
if request.method.lower() == self._method.lower():
return True
return False
def _match_url(self, request):
if self._url is ANY:
return True
# regular expression matching
if hasattr(self._url, 'search'):
return self._url.search(request.url) is not None
# scheme is always matched case insensitive
if self._scheme and request.scheme.lower() != self._scheme:
return False
# netloc is always matched case insensitive
if self._netloc and request.netloc.lower() != self._netloc:
return False
if (request.path or '/') != self._path:
return False
# construct our own qs structure as we remove items from it below
request_qs = urllib.parse.parse_qs(request.query,
keep_blank_values=True)
matcher_qs = urllib.parse.parse_qs(self._query, keep_blank_values=True)
for k, vals in matcher_qs.items():
for v in vals:
try:
request_qs.get(k, []).remove(v)
except ValueError:
return False
if self._complete_qs:
for v in request_qs.values():
if v:
return False
return True
def _match_headers(self, request):
for k, vals in self._request_headers.items():
try:
header = request.headers[k]
except KeyError:
# NOTE(jamielennox): This seems to be a requests 1.2/2
# difference, in 2 they are just whatever the user inputted in
# 1 they are bytes. Let's optionally handle both and look at
# removing this when we depend on requests 2.
if not isinstance(k, str):
return False
try:
header = request.headers[k.encode('utf-8')]
except KeyError:
return False
if header != vals:
return False
return True
def _match_additional(self, request):
if callable(self._additional_matcher):
return self._additional_matcher(request)
if self._additional_matcher is not None:
raise TypeError("Unexpected format of additional matcher.")
return True
def _match(self, request):
return (self._match_method(request) and
self._match_url(request) and
self._match_headers(request) and
self._match_additional(request))
def __call__(self, request):
if not self._match(request):
return None
# doing this before _add_to_history means real requests are not stored
# in the request history. I'm not sure what is better here.
if self._real_http:
raise _RunRealHTTP()
if len(self._responses) > 1:
response_matcher = self._responses.pop(0)
else:
response_matcher = self._responses[0]
self._add_to_history(request)
return response_matcher.get_response(request)
| _Matcher |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/asset_graph_view/entity_subset.py | {
"start": 1048,
"end": 5596
class EntitySubset(Generic[T_EntityKey]):
    """An EntitySubset represents a subset of a given EntityKey tied to a particular instance of an
    AssetGraphView.

    The internal value is either a bool (for unpartitioned entities) or a
    PartitionsSubset (for partitioned entities).
    """

    _key: T_EntityKey
    _value: EntitySubsetValue

    def __init__(
        self,
        asset_graph_view: "AssetGraphView",
        key: T_EntityKey,
        value: _ValidatedEntitySubsetValue,
    ):
        self._asset_graph_view = asset_graph_view
        self._key = key
        # Callers must wrap the value in _ValidatedEntitySubsetValue, so only
        # pre-validated values are ever stored here.
        self._value = check.inst_param(value, "value", _ValidatedEntitySubsetValue).inner

    @property
    def key(self) -> T_EntityKey:
        return self._key

    @property
    def partitions_def(self) -> Optional[PartitionsDefinition]:
        return self._asset_graph_view.asset_graph.get(self._key).partitions_def

    def convert_to_serializable_subset(self) -> SerializableEntitySubset[T_EntityKey]:
        return SerializableEntitySubset(key=self._key, value=self._value)

    def expensively_compute_partition_keys(self) -> AbstractSet[str]:
        """Materialize the set of partition keys in this subset.

        Expensive for large partitions definitions, hence the name.
        """
        return {
            check.not_none(akpk.partition_key, "No None partition keys")
            for akpk in self.expensively_compute_asset_partitions()
        }

    def expensively_compute_asset_partitions(self) -> AbstractSet[AssetKeyPartitionKey]:
        """Materialize this subset as a set of AssetKeyPartitionKey objects.

        Only supported when the key is an AssetKey.
        """
        if not isinstance(self.key, AssetKey):
            check.failed(f"Unsupported operation for type {type(self.key)}")

        internal_value = self.get_internal_value()
        if isinstance(internal_value, PartitionsSubset):
            partition_keys = internal_value.get_partition_keys()
        else:
            # unpartitioned: a truthy value is the single (key, None) pair
            partition_keys = {None} if internal_value else set()
        return {AssetKeyPartitionKey(self.key, pk) for pk in partition_keys}

    def _oper(self, other: Self, oper: Callable[..., Any]) -> Self:
        # Apply a binary operator to the raw internal values and wrap the
        # result back up as a subset of the same key.
        value = oper(self.get_internal_value(), other.get_internal_value())
        return self.__class__(
            self._asset_graph_view, key=self._key, value=_ValidatedEntitySubsetValue(value)
        )

    def compute_difference(self, other: Self) -> Self:
        if isinstance(self._value, bool):
            # bool does not support operator.sub, so spell out set difference:
            # present in self and absent from other.
            value = self.get_internal_bool_value() and not other.get_internal_bool_value()
            return self.__class__(
                self._asset_graph_view, key=self._key, value=_ValidatedEntitySubsetValue(value)
            )
        else:
            return self._oper(other, operator.sub)

    def compute_union(self, other: Self) -> Self:
        return self._oper(other, operator.or_)

    def compute_intersection(self, other: Self) -> Self:
        return self._oper(other, operator.and_)

    def compute_intersection_with_partition_keys(
        self: "EntitySubset[AssetKey]", partition_keys: AbstractSet[str]
    ) -> "EntitySubset[AssetKey]":
        key = check.inst(self.key, AssetKey)
        partition_subset = self._asset_graph_view.get_asset_subset_from_asset_partitions(
            self.key, {AssetKeyPartitionKey(key, pk) for pk in partition_keys}
        )
        return self.compute_intersection(partition_subset)

    @cached_method
    def compute_parent_subset(self, parent_key: AssetKey) -> "EntitySubset[AssetKey]":
        return self._asset_graph_view.compute_parent_subset(parent_key, self)

    @cached_method
    def compute_child_subset(self, child_key: U_EntityKey) -> "EntitySubset[U_EntityKey]":
        return self._asset_graph_view.compute_child_subset(child_key, self)

    @cached_method
    def compute_mapped_subset(
        self, to_key: U_EntityKey, direction: Literal["up", "down"]
    ) -> "EntitySubset[U_EntityKey]":
        return self._asset_graph_view.compute_mapped_subset(to_key, self, direction=direction)

    @property
    def size(self) -> int:
        # bool -> 0 or 1; PartitionsSubset -> number of partition keys
        if isinstance(self._value, bool):
            return int(self._value)
        else:
            return len(self._value)

    @property
    def is_empty(self) -> bool:
        if isinstance(self._value, bool):
            return not self._value
        else:
            return self._value.is_empty

    @property
    def is_partitioned(self) -> bool:
        return isinstance(self._value, PartitionsSubset)

    def get_internal_value(self) -> Union[bool, PartitionsSubset]:
        return self._value

    def get_internal_subset_value(self) -> PartitionsSubset:
        return check.inst(self._value, PartitionsSubset)

    def get_internal_bool_value(self) -> bool:
        return check.inst(self._value, bool)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self.key}>({self.get_internal_value()})"
| EntitySubset |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.